diff --git a/.circleci/config.yml b/.circleci/config.yml index 2c13a1a1d5..737c5cc777 100644 --- a/.circleci/config.yml +++ b/.circleci/config.yml @@ -1,7 +1,7 @@ version: 2.1 orbs: - python: circleci/python@0.3.2 + python: circleci/python@2.1.1 jobs: manylinux2014-aarch64: @@ -13,7 +13,7 @@ jobs: type: string machine: - image: ubuntu-2004:202101-01 + image: default resource_class: arm.medium @@ -49,25 +49,17 @@ jobs: # choose available python versions from pyenv pyenv_py_ver="" case << parameters.NRN_PYTHON_VERSION >> in - 38) pyenv_py_ver="3.8.7" ;; - 39) pyenv_py_ver="3.9.1" ;; - 310) pyenv_py_ver="3.10.1" ;; - 311) pyenv_py_ver="3.11.0" ;; - *) echo "Error: pyenv python version not specified!" && exit 1;; + 38) pyenv_py_ver="3.8" ;; + 39) pyenv_py_ver="3.9" ;; + 310) pyenv_py_ver="3.10" ;; + 311) pyenv_py_ver="3.11" ;; + 312) pyenv_py_ver="3.12" ;; + *) echo "Error: pyenv python version not specified or not supported." && exit 1;; esac - # install python dependencies: .10 is not available pyenv - if [ "<< parameters.NRN_PYTHON_VERSION >>" == "310" ]; then - sudo apt install software-properties-common -y - sudo add-apt-repository ppa:deadsnakes/ppa -y - sudo apt install python3.10 libpython3.10 python3.10-venv - export PYTHON_EXE=$(which python3.10) - else - cd /opt/circleci/.pyenv/plugins/python-build/../.. && git pull && cd - - env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install $pyenv_py_ver --force - pyenv global $pyenv_py_ver - export PYTHON_EXE=$(which python) - fi + env PYTHON_CONFIGURE_OPTS="--enable-shared" pyenv install $pyenv_py_ver --force + pyenv global $pyenv_py_ver + export PYTHON_EXE=$(which python) # test wheel packaging/python/test_wheels.sh $PYTHON_EXE $(ls -t wheelhouse/*.whl) @@ -76,6 +68,7 @@ jobs: name: Upload nightly wheel to pypi.org command: | if [ "<< parameters.NRN_NIGHTLY_UPLOAD >>" == "true" ]; then + python -m pip install --upgrade pip python -m pip install twine python -m twine upload --verbose --skip-existing -u $TWINE_USERNAME -p $TWINE_PASSWORD wheelhouse/*.whl else @@ -94,7 +87,7 @@ workflows: - /circleci\/.*/ matrix: parameters: - NRN_PYTHON_VERSION: ["311"] + NRN_PYTHON_VERSION: ["312"] NRN_NIGHTLY_UPLOAD: ["false"] nightly: @@ -109,5 +102,5 @@ workflows: - manylinux2014-aarch64: matrix: parameters: - NRN_PYTHON_VERSION: ["38", "39", "310", "311"] + NRN_PYTHON_VERSION: ["38", "39", "310", "311", "312"] NRN_NIGHTLY_UPLOAD: ["true"] diff --git a/.clang-format.changes b/.clang-format.changes index 0091d2af2f..11de33ffbf 100644 --- a/.clang-format.changes +++ b/.clang-format.changes @@ -1,9 +1,10 @@ SortIncludes: false Standard: c++17 -StatementMacros: [MKDLL, MKDLLdec, MKDLLif, MKDLLvp, MKDLLvpf, PyObject_HEAD, +StatementMacros: [MKDLL, MKDLLdec, MKDLLif, MKDLLvp, MKDLLvpf, MUTDEC, PyObject_HEAD, declareActionCallback, declareAdjustStepper, declareArrowGlyph, declareFieldEditorCallback, declareFieldSEditorCallback, declareFileChooserCallback, declareIOCallback, declareList, declarePtrList, declareRubberCallback, declareSelectionCallback, declareTable, declareTable2, +HocContextRestore, HocTopContextSet, implementActionCallback, implementAdjustStepper, implementArrowGlyph, implementFieldEditorCallback, implementFieldSEditorCallback, implementFileChooserCallback, implementIOCallback, implementList, implementPtrList, implementRubberCallback, implementSelectionCallback, implementTable, diff --git a/.github/ISSUE_TEMPLATE/release-patch.md b/.github/ISSUE_TEMPLATE/release-patch.md index 8a10236082..9b7bbb4d1b 100644 --- 
a/.github/ISSUE_TEMPLATE/release-patch.md +++ b/.github/ISSUE_TEMPLATE/release-patch.md @@ -13,8 +13,7 @@ Action items Pre-release --- - [ ] Create a cherrypicks branch where all commits go into new release and open a PR against `release/x.y` branch -- [ ] Create CoreNEURON tag on the `release/x.y` branch after cherrypicking required commits, update semantic version in its `CMakeLists.txt`, tag it & update submodule in cherrypicks PR -- [ ] Look out for ModelDB regressions by manually submitting and analyzing [nrn-modeldb-ci](https://github.com/neuronsimulator/nrn-modeldb-ci/actions/workflows/nrn-modeldb-ci.yaml?query=event%3Aschedule++) for the cherrypicks branch vs previous version +- [ ] Make sure to look out for ModelDB regressions by manually submitting and analyzing [nrn-modeldb-ci](https://github.com/neuronsimulator/nrn-modeldb-ci/actions/workflows/nrn-modeldb-ci.yaml?query=event%3Aschedule++) for the cherrypicks branch vs previous version - [ ] Update cherrypicks PR: - [ ] Update semantic version in `CMakeLists.txt` - [ ] Update changelog below and agree on it with everyone; then commit it to `docs/changelog` in the cherrypicks PR (copy structure as-is) @@ -23,31 +22,32 @@ Pre-release Sanity checks --- - [ ] After cherrypicks PR is merged, make sure GitHub, Azure and CircleCI builds pass for `release/x.y` branch -- [ ] Run [nrn-build-ci](https://github.com/neuronsimulator/nrn-build-ci/actions/workflows/build-neuron.yml) for the `release/x.y` branch; see [nrn-build-ci guide](https://github.com/neuronsimulator/nrn-build-ci#azure-wheels-testing---manual-workflow) +- [ ] Run [nrn-build-ci](https://github.com/neuronsimulator/nrn-build-ci/actions/workflows/build-neuron.yml) for the `release/x.y` branch; see [nrn-build-ci guide](https://github.com/neuronsimulator/nrn-build-ci#azure-wheels-testing---manual-workflow) - [ ] Activate ReadTheDocs build for `release/x.y` & make it hidden. Check docs are fine after build is done. -- [ ] Run BBP Simulation Stack, CoreNEURON CI & other relevant tests -- [ ] Build release wheels but WITHOUT upload ([see details](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure)) +- [ ] Run BBP Simulation Stack & other relevant tests Releasing --- -- [ ] Create new release+tag on GitHub via [release workflow](https://github.com/neuronsimulator/nrn/actions/workflows/release.yml?query=workflow%3A%22NEURON+Release%22). Note that the GitHub release will be marked as pre-release. -- [ ] Create, test and upload manual artifacts +- [ ] Create new release+tag on GitHub via [release workflow](https://github.com/neuronsimulator/nrn/actions/workflows/release.yml?query=workflow%3A%22NEURON+Release%22). Note that the GitHub release will be marked as pre-release and will contain the full-src-package and the Windows installer at the end of the release workflow. 
+- [ ] Build release wheels but WITHOUT upload ([see details](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure)) +- [ ] Create, test and upload manual artifacts - [ ] MacOS package installer (manual task, ask Michael) - [ ] arm64 wheels (manual task, check with Alex or Pramod) - [ ] aarch64 wheels (use existing `release/x.y-aarch64` branch for this, see [guide](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-circleci)) - [ ] Publish the `x.y.z` wheels on PyPI; see [wheel publishing instructions](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure) - [ ] Once wheels are published, activate the `x.y.z` tag on ReadTheDocs -- [ ] Upload Windows installer from the wheels publishing Azure run (to get correct tag) -- [ ] Publish release on GitHub (edit https://github.com/neuronsimulator/nrn/releases/tag/x.y.z and un-tick the pre-release checkbox) +- [ ] Rename the Windows installer in the GitHub release to match the new version and the supported python versions (i.e. `nrn-8.2.2.w64-mingw-py-37-38-39-310-311-setup.exe`) +- [ ] Publish release on GitHub (edit https://github.com/neuronsimulator/nrn/releases/tag/x.y.z and un-tick the pre-release checkbox) Post-release --- - [ ] Deactivate ReadTheDocs build for `release/x.y` -- [ ] Go to [ReadTheDocs advanced settings](https://readthedocs.org/dashboard/nrn/advanced/) and set `Default version` to `x.y.z` +- [ ] Go to [ReadTheDocs advanced settings](https://readthedocs.org/dashboard/nrn/advanced/) and set `Default version` to `x.y.z` - [ ] Let people know :rocket: - [ ] Cherrypick changelog and installer links to `master` +- [ ] Update the changelog for the release on GitHub Changelog diff --git a/.github/ISSUE_TEMPLATE/release.md b/.github/ISSUE_TEMPLATE/release.md index 72e8508151..d222995500 100644 --- a/.github/ISSUE_TEMPLATE/release.md +++ b/.github/ISSUE_TEMPLATE/release.md @@ -12,16 +12,14 @@ Action items Pre-release --- -- [ ] Look out for ModelDB regressions by analyzing [nrn-modeldb-ci last version vs nightly reports](https://github.com/neuronsimulator/nrn-modeldb-ci/actions/workflows/nrn-modeldb-ci.yaml?query=event%3Aschedule++) -- [ ] Create CoreNEURON release branch, update semantic version in `CMakeLists.txt`, tag it & update submodule in NEURON +- [ ] Make sure to look out for ModelDB regressions by launching and analyzing [nrn-modeldb-ci last version vs nightly reports](https://github.com/neuronsimulator/nrn-modeldb-ci/actions/workflows/nrn-modeldb-ci.yaml?query=event%3Aschedule++) Sanity checks --- - [ ] Create `release/x.y` branch and make sure GitHub, Azure and CircleCI builds pass -- [ ] Run [nrn-build-ci](https://github.com/neuronsimulator/nrn-build-ci/actions/workflows/build-neuron.yml) for the respective Azure build; see [Azure drop guide](https://github.com/neuronsimulator/nrn-build-ci#azure-wheels-testing---manual-workflow) +- [ ] Run [nrn-build-ci](https://github.com/neuronsimulator/nrn-build-ci/actions/workflows/build-neuron.yml) for the respective Azure build; see [Azure drop guide](https://github.com/neuronsimulator/nrn-build-ci#azure-wheels-testing---manual-workflow) - [ ] Activate ReadTheDocs build for `release/x.y` & make it hidden. Check docs are fine after build is done.
-- [ ] Run BBP Simulation Stack, CoreNEURON CI & other relevant tests -- [ ] Build release wheels but WITHOUT upload ([see details](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure)) +- [ ] Run BBP Simulation Stack & other relevant tests Releasing @@ -30,26 +28,29 @@ Releasing - [ ] Update changelog below and agree on it with everyone; then commit it to `docs/changelog` (copy structure as-is) - [ ] Update `docs/index.rst` accordingly with the new `.pkg` and `.exe` links for `PKG installer` and `Windows Installer` - [ ] Run the ReadTheDocs build again for `release-x.y`, make sure the build passes and inspect the Changelog page. -- [ ] Create new release+tag on GitHub via [release workflow](https://github.com/neuronsimulator/nrn/actions/workflows/release.yml?query=workflow%3A%22NEURON+Release%22). Note that the GitHub release will be marked as pre-release. -- [ ] Create, test and upload manual artifacts +- [ ] Create new release+tag on GitHub via [release workflow](https://github.com/neuronsimulator/nrn/actions/workflows/release.yml?query=workflow%3A%22NEURON+Release%22). Note that the GitHub release will be marked as pre-release and will contain the full-src-package and the Windows installer at the end of the release workflow. +- [ ] Build release wheels but WITHOUT upload ([see details](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure)) +- [ ] Create, test and upload manual artifacts - [ ] MacOS package installer (manual task, ask Michael) - [ ] arm64 wheels (manual task, check with Alex or Pramod) - [ ] aarch64 wheels (create a `release/x.y-aarch64` branch for this, see [guide](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-circleci)) - [ ] Publish the `x.y.z` wheels on Pypi; see [wheel publishing instructions](https://nrn.readthedocs.io/en/latest/install/python_wheels.html#publishing-the-wheels-on-pypi-via-azure) - [ ] Once wheels are published, activate the `x.y.z` tag on ReadTheDocs -- [ ] Upload Windows installer from the wheels publishing Azure run (to get correct tag) +- [ ] Rename the Windows installer in the GitHub release to match the new version and the supported python versions (i.e. 
`nrn-8.2.2.w64-mingw-py-37-38-39-310-311-setup.exe` +) - [ ] Publish release on GitHub (edit https://github.com/neuronsimulator/nrn/releases/tag/x.y.z and un-tick the pre-release checkbox) Post-release --- - [ ] To mark the start of a new development cycle, tag `master` as follows: - - minor version: `x.(y+1).dev` + - minor version: `x.(y+1).dev` - major version: `(x+1).0.dev` - [ ] Deactivate ReadTheDocs build for `release/x.y` -- [ ] Go to [ReadTheDocs advanced settings](https://readthedocs.org/dashboard/nrn/advanced/) and set `Default version` to `x.y.z` +- [ ] Go to [ReadTheDocs advanced settings](https://readthedocs.org/dashboard/nrn/advanced/) and set `Default version` to `x.y.z` - [ ] Let people know :rocket: - [ ] Cherrypick changelog and installer links to `master` +- [ ] Update the changelog for the release on GitHub Changelog ====== diff --git a/.github/problem-matchers/thread.json b/.github/problem-matchers/thread.json new file mode 100644 index 0000000000..d76f493c80 --- /dev/null +++ b/.github/problem-matchers/thread.json @@ -0,0 +1,15 @@ + +{ + "problemMatcher": [ + { + "owner": "tsan-problem-matcher", + "severity": "warning", + "pattern": [ + { + "regexp": "^.*ThreadSanitizer: (.*)$", + "message": 1 + } + ] + } + ] +} diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index e16d57931c..d7c273564c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -5,6 +5,7 @@ concurrency: cancel-in-progress: true on: + merge_group: push: branches: - master @@ -21,6 +22,7 @@ on: env: PY_MIN_VERSION: '3.8' + PY_MID_VERSION: '3.10' PY_MAX_VERSION: '3.11' jobs: @@ -29,7 +31,7 @@ jobs: name: Code Coverage - timeout-minutes: 45 + timeout-minutes: 60 env: DISPLAY: ${{ ':0' }} @@ -40,7 +42,7 @@ jobs: - name: Install apt packages run: | - sudo apt-get install xfonts-100dpi build-essential doxygen lcov libboost-all-dev libopenmpi-dev libmpich-dev libx11-dev libxcomposite-dev mpich openmpi-bin patchelf gpg + sudo apt-get install xfonts-100dpi build-essential doxygen lcov libboost-all-dev libopenmpi-dev libmpich-dev libx11-dev libxcomposite-dev mpich openmpi-bin gpg ninja-build flex bison libfl-dev shell: bash - name: Setup Caliper profiler @@ -55,7 +57,7 @@ jobs: run: | python3 -m venv music-venv source music-venv/bin/activate - python3 -m pip install mpi4py cython numpy + python3 -m pip install mpi4py "cython<3" numpy sudo mkdir -p $MUSIC_INSTALL_DIR sudo chown -R $USER $MUSIC_INSTALL_DIR curl -L -o MUSIC.zip https://github.com/INCF/MUSIC/archive/refs/tags/${MUSIC_VERSION}.zip @@ -64,19 +66,24 @@ jobs: ./configure --with-python-sys-prefix --prefix=$MUSIC_INSTALL_DIR --disable-anysource make -j install deactivate - working-directory: ${{runner.temp}} + working-directory: ${{runner.temp}} - name: Setup Xvfb run: | sudo apt-get install xvfb sudo /usr/bin/Xvfb $DISPLAY -screen 0 1600x1200x24 -noreset -nolock -shmem & # run in bg - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 2 + - name: Clone nmodl + working-directory: ${{runner.workspace}}/nrn + run: | + git submodule update --init --recursive --force --depth 1 -- external/nmodl + - name: Set up Python@${{ env.PY_MIN_VERSION }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.PY_MIN_VERSION }} @@ -84,9 +91,16 @@ jobs: working-directory: ${{runner.workspace}}/nrn run: | python -m pip install --upgrade pip -r nrn_requirements.txt + python -m pip install --upgrade -r external/nmodl/requirements.txt + python -m pip install 
--upgrade -r ci_requirements.txt + + - name: Set up Python@${{ env.PY_MID_VERSION }} + uses: actions/setup-python@v5 + with: + python-version: ${{ env.PY_MID_VERSION }} - name: Set up Python@${{ env.PY_MAX_VERSION }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.PY_MAX_VERSION }} @@ -94,6 +108,9 @@ jobs: working-directory: ${{runner.workspace}}/nrn run: | python -m pip install --upgrade pip -r nrn_requirements.txt + python -m pip install --upgrade -r external/nmodl/requirements.txt + python -m pip install --upgrade -r ci_requirements.txt + - name: Build & Test id: build-test @@ -108,23 +125,39 @@ jobs: # Python setup export PYTHON_MIN=$(which $PYTHON_MIN_NAME); + export PYTHON_MID=$(which $PYTHON_MID_NAME); export PYTHON_MAX=$(which $PYTHON_MAX_NAME); mkdir build && cd build; # CMake options & flags - export COVERAGE_FLAGS="--coverage -O0 -fno-inline -g"; - export CMAKE_OPTION="-DNRN_ENABLE_MPI=ON -DNRN_ENABLE_INTERVIEWS=ON -DNRN_ENABLE_PYTHON=ON -DNRN_ENABLE_PYTHON_DYNAMIC=ON -DNRN_PYTHON_DYNAMIC=${PYTHON_MIN};${PYTHON_MAX} -DNRN_ENABLE_CORENEURON=ON -DNRN_ENABLE_PROFILING=ON -DNRN_ENABLE_BACKTRACE=ON -DNRN_ENABLE_MUSIC=ON -DCMAKE_PREFIX_PATH=${MUSIC_INSTALL_DIR} -DMUSIC_ROOT=${MUSIC_INSTALL_DIR}"; - cmake $CMAKE_OPTION -DCMAKE_C_COMPILER=$CC -DCMAKE_CXX_COMPILER=$CXX -DNRN_ENABLE_TESTS=ON -DNRN_ENABLE_PERFORMANCE_TESTS=OFF -DCMAKE_C_FLAGS="${COVERAGE_FLAGS}" -DCMAKE_CXX_FLAGS="${COVERAGE_FLAGS}" -DCORENRN_ENABLE_UNIT_TESTS=ON ..; - + cmake_args=(-G Ninja \ + -DCMAKE_BUILD_TYPE=Debug \ + -DCMAKE_C_COMPILER="$CC" \ + -DCMAKE_CXX_COMPILER="$CXX" \ + -DNRN_ENABLE_BACKTRACE=ON \ + -DNRN_ENABLE_CORENEURON=ON \ + -DNRN_ENABLE_COVERAGE=ON \ + -DNRN_ENABLE_INTERVIEWS=ON \ + -DNRN_ENABLE_MPI=ON \ + -DNRN_ENABLE_PERFORMANCE_TESTS=OFF \ + -DNRN_ENABLE_PROFILING=ON \ + -DNRN_ENABLE_PYTHON=ON \ + -DNRN_ENABLE_PYTHON_DYNAMIC=ON \ + -DNRN_PYTHON_DYNAMIC="${PYTHON_MIN};${PYTHON_MAX}" \ + -DNRN_PYTHON_EXTRA_FOR_TESTS=${PYTHON_MID} \ + -DNRN_ENABLE_TESTS=ON \ + -DNRN_ENABLE_MUSIC=ON \ + -DCMAKE_PREFIX_PATH="${MUSIC_INSTALL_DIR}" \ + -DMUSIC_ROOT="${MUSIC_INSTALL_DIR}") + cmake .. "${cmake_args[@]}" # Coverage # The Linux runners apparently have 2 cores, but jobs were being killed when we did not specify this explicitly. # https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources # By default we get a modern version of CMake that understands --parallel. cmake --build . --parallel 2 - (cd ..; lcov --capture --initial --directory . --no-external --output-file build/coverage-base.info) - export PATH=`pwd`/bin:$PATH; - xvfb-run ctest -VV --output-on-failure; + cmake --build . --target cover_baseline + xvfb-run ctest --rerun-failed --output-on-failure; for python in "${PYTHON_MIN}" "${PYTHON_MAX}" do echo "Using ${python}" @@ -135,12 +168,12 @@ jobs: DYLD_LIBRARY_PATH="${PWD}/lib:${DYLD_LIBRARY_PATH}" \ "${python}" -c "from neuron import h; import neuron; neuron.test();neuron.test_rxd();" done - (cd ..; lcov --capture --directory . --no-external --output-file build/coverage-run.info) - lcov --add-tracefile coverage-base.info --add-tracefile coverage-run.info --output-file coverage-combined.info - + cmake --build . --target cover_collect + cmake --build . 
--target cover_combine env: MATRIX_EVAL: "CC=gcc CXX=g++" PYTHON_MIN_NAME: "python${{ env.PY_MIN_VERSION }}" + PYTHON_MID_NAME: "python${{ env.PY_MID_VERSION }}" PYTHON_MAX_NAME: "python${{ env.PY_MAX_VERSION }}" # This step will set up an SSH connection on tmate.io for live debugging. @@ -151,7 +184,7 @@ jobs: if: failure() && contains(github.event.pull_request.title, 'live-debug-coverage') uses: mxschmitt/action-tmate@v3 - - uses: codecov/codecov-action@v3 + - uses: codecov/codecov-action@v4 with: directory: ./build fail_ci_if_error: true diff --git a/.github/workflows/docs.yml b/.github/workflows/docs.yml index 0b5860298f..ffc24629e8 100644 --- a/.github/workflows/docs.yml +++ b/.github/workflows/docs.yml @@ -5,6 +5,7 @@ concurrency: cancel-in-progress: true on: + merge_group: push: branches: - master @@ -23,23 +24,23 @@ jobs: name: Documentation - timeout-minutes: 20 + timeout-minutes: 25 steps: - name: Install apt packages run: | sudo apt-get update --fix-missing - sudo apt-get install build-essential libopenmpi-dev libmpich-dev libx11-dev libxcomposite-dev mpich openmpi-bin patchelf + sudo apt-get install build-essential libopenmpi-dev libmpich-dev libx11-dev libxcomposite-dev mpich openmpi-bin sudo apt-get install ffmpeg doxygen pandoc shell: bash - name: Set up Python@${{ env.DEFAULT_PY_VERSION }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.DEFAULT_PY_VERSION }} - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Install Python dependencies working-directory: ${{runner.workspace}}/nrn diff --git a/.github/workflows/external.yml b/.github/workflows/external.yml index 9b877be67b..088168c161 100644 --- a/.github/workflows/external.yml +++ b/.github/workflows/external.yml @@ -1,6 +1,6 @@ name: External CIs -concurrency: +concurrency: group: ${{ github.workflow }}#${{ github.ref }} cancel-in-progress: true @@ -10,7 +10,7 @@ on: env: PR_URL: ${{ github.event.pull_request.html_url }} GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} - + jobs: get-last-azure-url: runs-on: ubuntu-latest @@ -19,7 +19,7 @@ jobs: azure_drop_url: ${{ steps.drop.outputs.azure_drop_url }} pr_azure_sha: ${{ steps.drop.outputs.pr_azure_sha }} steps: - - id: drop + - id: drop run: | # use jq to get the last Azure drop URL from the PR and the SHA1 from the same body export pr_json=$(gh pr view $PR_URL --json comments -q 'last(.comments[] .body | capture(".*(?[0-9a-f]{40}).*?(?https://dev.azure.com/neuronsimulator/.*=zip)"))') @@ -32,7 +32,7 @@ jobs: export pr_azure_sha=$(echo $pr_json | jq -r .pr_azure_sha) echo azure_drop_url=$azure_drop_url >> $GITHUB_OUTPUT echo pr_azure_sha=$pr_azure_sha >> $GITHUB_OUTPUT - + - id: remove-label if: always() run: | @@ -47,7 +47,7 @@ jobs: env: pr_azure_sha: ${{ steps.drop.outputs.pr_azure_sha }} azure_drop_url: ${{ steps.drop.outputs.azure_drop_url }} - + nrn-modeldb-ci: needs: get-last-azure-url uses: neuronsimulator/nrn-modeldb-ci/.github/workflows/nrn-modeldb-ci.yaml@master @@ -56,7 +56,7 @@ jobs: neuron_v2: neuron-nightly pr-update: - needs: + needs: - nrn-modeldb-ci - get-last-azure-url runs-on: ubuntu-latest @@ -68,4 +68,3 @@ jobs: env: ARTIFACTS_URL: ${{ github.server_url }}/${{ github.repository }}/actions/runs/${{ github.run_id }} pr_azure_sha: ${{ needs.get-last-azure-url.outputs.pr_azure_sha }} - diff --git a/.github/workflows/formatting.yml b/.github/workflows/formatting.yml index 1da74bbee4..8b6dbf62db 100644 --- a/.github/workflows/formatting.yml +++ b/.github/workflows/formatting.yml @@ -1,12 +1,13 @@ name: 
Check formatting -concurrency: +concurrency: group: ${{ github.workflow }}#${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true on: + merge_group: push: - branches: + branches: - release/** pull_request: branches: @@ -19,10 +20,31 @@ jobs: runs-on: ubuntu-22.04 timeout-minutes: 5 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 - name: Update submodule working-directory: ${{runner.workspace}}/nrn run: git submodule update --init external/coding-conventions - name: Check formatting working-directory: ${{runner.workspace}}/nrn run: external/coding-conventions/bin/format -v --dry-run + + # If formatting fails, apply formatting and push changes. + # This will trigger another workflow run, which will cancel the current one. + - name: Apply formatting + working-directory: ${{runner.workspace}}/nrn + if: failure() && github.event_name == 'pull_request' + run: | + # Checkout PR + gh pr checkout ${{ github.event.pull_request.number }} + + # Apply formatting + external/coding-conventions/bin/format -v + + # Commit & push changes + git config --global user.name "github-actions[bot]" + git config --global user.email "41898282+github-actions[bot]@users.noreply.github.com" + git add -u :/ + git commit -a -m "Fix formatting" + git push + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.github/workflows/neuron-ci.yml b/.github/workflows/neuron-ci.yml index 481cda2d46..0c3ecc14a5 100644 --- a/.github/workflows/neuron-ci.yml +++ b/.github/workflows/neuron-ci.yml @@ -1,12 +1,13 @@ name: NEURON CI -concurrency: +concurrency: group: ${{ github.workflow }}#${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true on: + merge_group: push: - branches: + branches: # If nothing else, this is important for the ccache logic below... 
- master - release/** @@ -27,54 +28,62 @@ jobs: name: ${{ matrix.os }} - ${{ matrix.config.build_mode }} (${{ matrix.config.cmake_option }}${{ matrix.config.config_options }}${{ matrix.config.matrix_eval }}${{ matrix.config.sanitizer }}) - timeout-minutes: 45 + timeout-minutes: 75 env: INSTALL_DIR: install SDK_ROOT: $(xcrun --sdk macosx --show-sdk-path) SKIP_WHEELHOUSE_REPAIR: true BUILD_TYPE: Release - DESIRED_CMAKE_VERSION: 3.15.0 + DESIRED_CMAKE_VERSION: 3.17 + DYNAMIC_PYTHON_CMAKE_VERSION: 3.18 PY_MIN_VERSION: ${{ matrix.config.python_min_version || '3.8' }} PY_MAX_VERSION: ${{ matrix.config.python_max_version || '3.11' }} MUSIC_INSTALL_DIR: /opt/MUSIC - MUSIC_VERSION: 1.2.0 + MUSIC_VERSION: 1.2.1 strategy: matrix: - os: [ macOS-11, ubuntu-20.04] + os: [ macOS-12, ubuntu-20.04] config: - { matrix_eval : "CC=gcc-9 CXX=g++-9", build_mode: "setuptools"} - - { matrix_eval : "CC=gcc-8 CXX=g++-8", build_mode: "cmake", music: ON} - - { matrix_eval : "CC=gcc-9 CXX=g++-9", build_mode: "cmake", python_dynamic: ON} - - { matrix_eval : "CC=gcc-7 CXX=g++-7" , build_mode: "cmake", cmake_option: "-DNRN_ENABLE_CORENEURON=ON"} - - { matrix_eval : "CC=gcc-7 CXX=g++-7", build_mode: "cmake", cmake_option: "-DNRN_ENABLE_MPI=OFF -DNRN_ENABLE_INTERVIEWS=OFF -DNRN_ENABLE_CORENEURON=ON"} + - { matrix_eval : "CC=gcc-10 CXX=g++-10", build_mode: "cmake", music: ON} + - { matrix_eval : "CC=gcc-10 CXX=g++-10", build_mode: "cmake", python_dynamic: ON} + - { matrix_eval : "CC=gcc-9 CXX=g++-9" , build_mode: "cmake", cmake_option: "-DNRN_ENABLE_CORENEURON=ON"} + - { matrix_eval : "CC=gcc-9 CXX=g++-9", build_mode: "cmake", cmake_option: "-DNRN_ENABLE_MPI=OFF -DNRN_ENABLE_INTERVIEWS=OFF -DNRN_ENABLE_CORENEURON=ON"} - { matrix_eval : "CC=gcc-10 CXX=g++-10", build_mode: "cmake", cmake_option: "-DNRN_ENABLE_PYTHON=OFF -DNRN_ENABLE_RX3D=OFF -DNRN_ENABLE_CORENEURON=ON"} include: - os: ubuntu-22.04 config: build_mode: cmake - # TODO: -DCORENRN_ENABLE_NMODL=ON -DNMODL_SANITIZERS=undefined cmake_option: -DNRN_ENABLE_CORENEURON=ON - -DNRN_ENABLE_INTERVIEWS=OFF + -DNRN_ENABLE_INTERVIEWS=OFF -DNMODL_SANITIZERS=undefined flag_warnings: ON sanitizer: undefined - os: ubuntu-22.04 config: build_mode: cmake - # TODO: -DCORENRN_ENABLE_NMODL=ON -DNMODL_SANITIZERS=address,leak # TODO: CoreNEURON is only LeakSanitizer-clean if we disable MPI cmake_option: -DNRN_ENABLE_CORENEURON=ON - -DNRN_ENABLE_INTERVIEWS=OFF + -DNRN_ENABLE_INTERVIEWS=OFF -DNMODL_SANITIZERS=address # TODO: address-leak is the dream, but there are many problems, # including external ones from the MPI implementations sanitizer: address - - os: macOS-12 + - os: ubuntu-22.04 + config: + build_mode: cmake + # Cannot use a non-instrumented OpenMP with TSan, and we don't + # have a TSan-instrumented OpenMP runtime available. 
+ # TODO: debug RX3D + TSan + cmake_option: -DNRN_ENABLE_CORENEURON=ON -DNRN_ENABLE_MPI=OFF + -DCORENRN_ENABLE_OPENMP=OFF -DNRN_ENABLE_RX3D=OFF + sanitizer: thread + - os: macOS-13 config: build_mode: cmake # TODO: investigate rxd test timeouts in this build and re-enable them cmake_option: -DNRN_ENABLE_CORENEURON=ON -DNRN_ENABLE_INTERVIEWS=OFF - -DNRN_ENABLE_RX3D=OFF + -DNRN_ENABLE_RX3D=OFF -DNMODL_SANITIZERS=address sanitizer: address fail-fast: false @@ -82,16 +91,20 @@ jobs: - name: Setup cmake uses: jwlawson/actions-setup-cmake@v1 with: - cmake-version: ${{ env.DESIRED_CMAKE_VERSION }} + cmake-version : ${{(matrix.config.python_dynamic || matrix.config.build_mode == 'setuptools') && env.DYNAMIC_PYTHON_CMAKE_VERSION || env.DESIRED_CMAKE_VERSION}} - name: Install homebrew packages if: startsWith(matrix.os, 'macOS') run: | - brew install ccache coreutils doxygen flex mpich ninja xz autoconf autoconf automake libtool + # Unlink and re-link to prevent errors when GitHub macOS runner images + # install Python outside of brew; See actions/setup-python#577 and BlueBrain/libsonata/pull/317 + brew list -1 | grep python | while read formula; do brew unlink $formula; brew link --overwrite $formula; done + brew install ccache coreutils doxygen flex bison mpich ninja xz autoconf autoconf automake libtool + # We use both for dynamic mpi in nrn brew unlink mpich brew install openmpi brew install --cask xquartz - echo /usr/local/opt/flex/bin >> $GITHUB_PATH + echo /usr/local/opt/flex/bin:/usr/local/opt/bison/bin >> $GITHUB_PATH # Core https://docs.github.com/en/actions/using-github-hosted-runners/about-github-hosted-runners#supported-runners-and-hardware-resources echo CMAKE_BUILD_PARALLEL_LEVEL=3 >> $GITHUB_ENV echo CTEST_PARALLEL_LEVEL=3 >> $GITHUB_ENV @@ -103,7 +116,7 @@ jobs: run: | sudo apt-get install build-essential ccache libopenmpi-dev \ libmpich-dev libx11-dev libxcomposite-dev mpich ninja-build \ - openmpi-bin patchelf + openmpi-bin flex libfl-dev bison # The sanitizer builds use ubuntu 22.04 if [[ "${{matrix.os}}" == "ubuntu-20.04" ]]; then sudo apt-get install g++-7 g++-8 @@ -114,13 +127,18 @@ jobs: echo CI_OS_NAME=linux >> $GITHUB_ENV shell: bash - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: fetch-depth: 2 + - name: Clone nmodl + working-directory: ${{runner.workspace}}/nrn + run: | + git submodule update --init --recursive --force --depth 1 -- external/nmodl + - name: Set up Python@${{ env.PY_MIN_VERSION }} if: ${{matrix.config.python_dynamic == 'ON'}} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.PY_MIN_VERSION }} @@ -129,9 +147,11 @@ jobs: working-directory: ${{runner.workspace}}/nrn run: | python -m pip install --upgrade pip -r nrn_requirements.txt + python -m pip install --upgrade -r external/nmodl/requirements.txt + python -m pip install --upgrade -r ci_requirements.txt - name: Set up Python@${{ env.PY_MAX_VERSION }} - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: ${{ env.PY_MAX_VERSION }} @@ -139,22 +159,25 @@ jobs: working-directory: ${{runner.workspace}}/nrn run: | python -m pip install --upgrade pip -r nrn_requirements.txt + python -m pip install --upgrade -r external/nmodl/requirements.txt + python -m pip install --upgrade -r ci_requirements.txt - name: Setup MUSIC@${{ env.MUSIC_VERSION }} if: matrix.config.music == 'ON' run: | python3 -m venv music-venv source music-venv/bin/activate - python3 -m pip install mpi4py cython numpy + python3 -m pip install mpi4py "cython<3" 
numpy sudo mkdir -p $MUSIC_INSTALL_DIR sudo chown -R $USER $MUSIC_INSTALL_DIR curl -L -o MUSIC.zip https://github.com/INCF/MUSIC/archive/refs/tags/${MUSIC_VERSION}.zip unzip MUSIC.zip && mv MUSIC-* MUSIC && cd MUSIC ./autogen.sh - ./configure --with-python-sys-prefix --prefix=$MUSIC_INSTALL_DIR --disable-anysource + # on some systems MPI library detection fails, provide exact flags/compilers + ./configure --with-python-sys-prefix --prefix=$MUSIC_INSTALL_DIR --disable-anysource MPI_CXXFLAGS="-g -O3" MPI_CFLAGS="-g -O3" MPI_LDFLAGS=" " CC=mpicc CXX=mpicxx make -j install deactivate - working-directory: ${{runner.temp}} + working-directory: ${{runner.temp}} - name: Register gcc problem matcher if: ${{matrix.config.flag_warnings == 'ON'}} @@ -175,12 +198,12 @@ jobs: # Workaround for https://github.com/actions/cache/issues/92 - name: Checkout cache action - uses: actions/checkout@v3 + uses: actions/checkout@v4 with: repository: actions/cache ref: v3 path: tmp/actions/cache - + - name: Make actions/cache@v3 run even on failure run: | sed -i'.bak' -e '/ post-if: /d' tmp/actions/cache/action.yml @@ -244,10 +267,18 @@ jobs: # Sanitizer-specific setup if [[ -n "${{matrix.config.sanitizer}}" ]]; then if [ "$RUNNER_OS" == "Linux" ]; then - CC=$(command -v clang-14) - CXX=$(command -v clang++-14) - symbolizer_path=$(realpath $(command -v llvm-symbolizer-14)) - cmake_args+=(-DLLVM_SYMBOLIZER_PATH="${symbolizer_path}") + if [[ "${{matrix.config.sanitizer}}" == "thread" ]]; then + # GitHub/ubuntu-22.04 + clang-14 seems to have problems with TSan. + # Vanilla 22.04 + clang-16 from apt.llvm.org seemed to work. + # Use gcc-12 instead, as GitHub/ubuntu-22.04 already has it. + CC=$(command -v gcc-12) + CXX=$(command -v g++-12) + else + CC=$(command -v clang-14) + CXX=$(command -v clang++-14) + symbolizer_path=$(realpath $(command -v llvm-symbolizer-14)) + cmake_args+=(-DLLVM_SYMBOLIZER_PATH="${symbolizer_path}") + fi fi cmake_args+=(-DCMAKE_BUILD_TYPE=Custom \ -DCMAKE_C_FLAGS="-O1 -g" \ @@ -305,6 +336,7 @@ jobs: ccache -vs 2>/dev/null || ccache -s if [ "$RUNNER_OS" == "macOS" ] then + mkdir -p src/nrnpython echo $'[install]\nprefix='>src/nrnpython/setup.cfg; fi if [[ "$NRN_ENABLE_PYTHON_DYNAMIC" == "ON" ]]; then @@ -388,3 +420,19 @@ jobs: - name: live debug session on failure (manual steps required, check `.github/neuron-ci.yml`) if: failure() && contains(github.event.pull_request.title, 'live-debug-ci') uses: mxschmitt/action-tmate@v3 + + # see https://github.com/orgs/community/discussions/26822 + final: + name: Final CI + needs: [ci] + if: ${{ always() }} + runs-on: ubuntu-latest + steps: + - name: Check ci matrix all done + if: >- + ${{ + contains(needs.*.result, 'failure') + || contains(needs.*.result, 'cancelled') + || contains(needs.*.result, 'skipped') + }} + run: exit 1 diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml index 44e8978313..3ba15345b3 100644 --- a/.github/workflows/release.yml +++ b/.github/workflows/release.yml @@ -23,8 +23,9 @@ jobs: name: tag-n-release ${{ github.event.inputs.rel_tag }} (${{ github.event.inputs.rel_branch }}) outputs: release_url: ${{ steps.create_release.outputs.upload_url }} + rel_tag: ${{ env.REL_TAG }} steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 name: Checkout branch ${{ env.REL_BRANCH }} with: ref: ${{ env.REL_BRANCH }} @@ -49,7 +50,7 @@ jobs: name: Release ${{ env.REL_TAG }} prerelease: true - full-src-package: + nrn-full-src-package: runs-on: ubuntu-latest needs: tag-n-release steps: @@ -71,18 +72,20 @@ jobs: cmake 
-DNRN_ENABLE_PYTHON=OFF -DNRN_ENABLE_RX3D=OFF -DNRN_ENABLE_MPI=OFF -DNRN_ENABLE_INTERVIEWS=OFF ../nrn make nrnversion_h VERBOSE=1 - - name: Create full-src-package + - name: Create nrn-full-src-package id: tar run: | - tar -czvf ${REL_TAG}.tar.gz nrn - echo "asset_file=${REL_TAG}.tar.gz" >> $GITHUB_OUTPUT + tar -czvf nrn-full-src-package-${REL_TAG}.tar.gz nrn + echo "asset_file=nrn-full-src-package-${REL_TAG}.tar.gz" >> $GITHUB_OUTPUT - - name: Upload full-src-package to release - uses: actions/upload-release-asset@v1 - with: - upload_url: ${{ needs.tag-n-release.outputs.release_url }} - asset_name: ${{ github.job }}-${{ steps.tar.outputs.asset_file }} - asset_content_type: application/gzip - asset_path: ${{ steps.tar.outputs.asset_file }} + - name: Upload nrn-full-src-package to release + run: | + gh release upload ${{ needs.tag-n-release.outputs.rel_tag }} ${{ steps.tar.outputs.asset_file }} env: GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} + + windows-installer: + needs: tag-n-release + uses: neuronsimulator/nrn/.github/workflows/windows.yml@master + with: + tag: ${{ needs.tag-n-release.outputs.rel_tag }} diff --git a/.github/workflows/windows.yml b/.github/workflows/windows.yml index eb5fe2bf11..052a0d6679 100644 --- a/.github/workflows/windows.yml +++ b/.github/workflows/windows.yml @@ -1,12 +1,21 @@ name: Windows Installer -concurrency: +concurrency: group: ${{ github.workflow }}#${{ github.ref }} - cancel-in-progress: true + cancel-in-progress: true on: + merge_group: + workflow_call: + inputs: + tag: + description: 'Release version (tag name)' + default: '' + required: true + type: string push: - branches: + branches: + - master - release/** pull_request: branches: @@ -26,9 +35,9 @@ jobs: timeout-minutes: 45 steps: - - uses: actions/checkout@v3 + - uses: actions/checkout@v4 with: - fetch-depth: 0 + ref: ${{ inputs.tag }} - name: Retrieve rxd test data run: | @@ -37,7 +46,7 @@ jobs: working-directory: ${{runner.workspace}}\nrn - name: Set up Python3 - uses: actions/setup-python@v4 + uses: actions/setup-python@v5 with: python-version: '3.11' @@ -46,12 +55,12 @@ jobs: .\nrn\ci\win_download_deps.cmd shell: powershell working-directory: ${{runner.workspace}} - + - name: Install Dependencies run: .\nrn\ci\win_install_deps.cmd shell: powershell working-directory: ${{runner.workspace}} - + - name: Build and Create Installer run: | rm.exe C:\WINDOWS\system32\bash.EXE @@ -70,7 +79,7 @@ jobs: uses: mxschmitt/action-tmate@v3 - name: Upload build artifact - uses: actions/upload-artifact@v3 + uses: actions/upload-artifact@v4 with: name: nrn-nightly-AMD64.exe path: ${{runner.workspace}}\nrn\nrn-nightly-AMD64.exe @@ -84,3 +93,11 @@ jobs: run: .\ci\win_test_installer.cmd shell: cmd working-directory: ${{runner.workspace}}\nrn + + - name: Publish Release Installer + working-directory: ${{runner.workspace}}\nrn + if: inputs.tag != '' + run: | + gh release upload ${{ inputs.tag }} nrn-nightly-AMD64.exe + env: + GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }} diff --git a/.gitlab-ci.yml b/.gitlab-ci.yml index cc646de137..fa9dc5cf0e 100644 --- a/.gitlab-ci.yml +++ b/.gitlab-ci.yml @@ -7,7 +7,6 @@ include: - project: hpc/gitlab-upload-logs file: enable-upload.yml - # see https://gitlab.com/gitlab-org/gitlab/-/issues/263401 for why we specify the flags like this now # 130 characters seems to be the point at which jobs refuse to run .matrix: @@ -37,19 +36,38 @@ mac_m1_cmake_build: tags: - macos-arm64 script: - - python3 -m virtualenv venv + # Using macOS + ASan + virtual environments requires that you remove shims 
+ # before creating the venv. CMake would tell us this if we got it wrong. + # See sanitizers.cmake for more information. If we find this gets copied to + # too many different places, put the .py script in the repository. + - | + cat > resolve_shim.py << END_SCRIPT + import ctypes + dyld = ctypes.cdll.LoadLibrary('/usr/lib/system/libdyld.dylib') + namelen = ctypes.c_ulong(1024) + name = ctypes.create_string_buffer(b'\\000', namelen.value) + dyld._NSGetExecutablePath(ctypes.byref(name), ctypes.byref(namelen)) + print(name.value.decode()) + END_SCRIPT + - real_python=$(python3 resolve_shim.py) + - echo "python3=$(command -v python3) is really ${real_python}" + - PYTHONEXECUTABLE=${real_python} ${real_python} -mvenv venv + - venv/bin/python3 -m ensurepip --upgrade --default-pip - venv/bin/pip install --upgrade pip -r nrn_requirements.txt - - venv/bin/python --version - - 'venv/bin/python -c "import os,matplotlib; f = open(os.path.join(os.path.dirname(matplotlib.__file__), \"mpl-data/matplotlibrc\"),\"a\"); f.write(\"backend: TkAgg\");f.close();"' + - git submodule update --init --recursive --force --depth 1 -- external/nmodl + - venv/bin/pip install --upgrade -r external/nmodl/requirements.txt + - source ./venv/bin/activate + - export PYTHON=${PWD}/venv/bin/python + - ${PYTHON} --version + - '${PYTHON} -c "import os,matplotlib; f = open(os.path.join(os.path.dirname(matplotlib.__file__), \"mpl-data/matplotlibrc\"),\"a\"); f.write(\"backend: TkAgg\");f.close();"' - 'export CXX=${CXX:-g++}' - 'export CC=${CC:-gcc}' - - export PYTHON=$(pwd)/venv/bin/python3 + - brew install flex bison + - export PATH="/opt/homebrew/opt/flex/bin:/opt/homebrew/opt/bison/bin:$PATH" - export INSTALL_DIR=$(pwd)/install - echo $LANG - echo $LC_ALL - - source venv/bin/activate - 'export PYTHONPATH=$(${PYTHON} -c "import site; print(\":\".join(site.getsitepackages()))")' - - 'export PYTHONPATH=$PYTHONPATH:$INSTALL_DIR/lib/python/' - ${PYTHON} -c 'import os,sys; os.set_blocking(sys.stdout.fileno(), True)' - cmake_args=(-G Ninja) - if [[ -n "${sanitizer}" ]]; then @@ -81,6 +99,7 @@ mac_m1_cmake_build: - ctest --output-on-failure - cmake --build . --target install - 'export PATH=${INSTALL_DIR}/bin:${PATH}' + - 'export PYTHONPATH=$PYTHONPATH:$INSTALL_DIR/lib/python/' - if [[ -f "${INSTALL_DIR}/bin/nrn-enable-sanitizer" ]]; then - echo --- bin/nrn-enable-sanitizer --- - cat bin/nrn-enable-sanitizer @@ -90,10 +109,7 @@ mac_m1_cmake_build: - else - echo nrn-enable-sanitizer not found, not using it - fi - - if [[ ! "${cmake_args[*]}" =~ "NRN_ENABLE_PYTHON=OFF" ]]; then - - $PYTHON --version && ${nrn_enable_sanitizer_preload} python -c 'import neuron; neuron.test()' - - fi; - - ${nrn_enable_sanitizer} neurondemo -nogui -c 'demo(4)' -c 'run()' -c 'quit()' + - $PYTHON --version && ${nrn_enable_sanitizer_preload} python -c 'import neuron; neuron.test()' - if [[ ! "${cmake_args[*]}" =~ "NRN_ENABLE_RX3D=OFF" && ! 
"${cmake_args[*]}" =~ "NRN_ENABLE_CORENEURON=ON" ]]; then - ${nrn_enable_sanitizer_preload} python ../share/lib/python/neuron/rxdtests/run_all.py @@ -155,7 +171,7 @@ simulation_stack: .spack_intel: variables: - SPACK_PACKAGE_COMPILER: intel + SPACK_PACKAGE_COMPILER: oneapi .spack_nvhpc: variables: SPACK_PACKAGE_COMPILER: nvhpc @@ -171,6 +187,7 @@ simulation_stack: bb5_constraint: volta bb5_cpus_per_task: 2 bb5_partition: prod # assume this is a good source of GPU nodes + bb5_exclusive: user # allocate gpu node exclusively for the CI user (to avoid errors from oversubscription) .test_neuron: extends: [.ctest] variables: @@ -190,53 +207,37 @@ build:nmodl: extends: [.build_neuron] needs: ["build:nmodl"] -.build_neuron_mod2c: - extends: [.build_neuron] - -build:neuron:mod2c:intel:shared: - extends: [.build_neuron_mod2c, .spack_intel] - variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper~gpu+coreneuron~legacy-unit~nmodl~openmp+shared+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy - build:neuron:nmodl:intel:legacy: extends: [.build_neuron_nmodl, .spack_intel] variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper~gpu+coreneuron~legacy-unit+nmodl~openmp~shared~sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy + SPACK_PACKAGE_SPEC: ~rx3d~caliper~gpu+coreneuron~legacy-unit~openmp~shared~sympy+tests build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy build:neuron:nmodl:intel:shared: extends: [.build_neuron_nmodl, .spack_intel] variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper~gpu+coreneuron~legacy-unit+nmodl~openmp+shared+sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy - -build:neuron:mod2c:nvhpc:acc:shared: - extends: [.build_neuron_mod2c, .spack_nvhpc] - variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper+gpu+coreneuron~legacy-unit~nmodl~openmp+shared+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy + SPACK_PACKAGE_SPEC: ~rx3d~caliper~gpu+coreneuron~legacy-unit~openmp+shared+sympy+tests build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy build:neuron:nmodl:nvhpc:acc:legacy: extends: [.build_neuron_nmodl, .spack_nvhpc] variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper+gpu+coreneuron~legacy-unit+nmodl~openmp~shared~sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy + SPACK_PACKAGE_SPEC: ~rx3d~caliper+gpu+coreneuron~legacy-unit~openmp~shared~sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy build:neuron:nmodl:nvhpc:acc:shared: extends: [.build_neuron_nmodl, .spack_nvhpc] variables: - SPACK_PACKAGE_SPEC: ~rx3d~caliper+gpu+coreneuron~legacy-unit+nmodl~openmp+shared+sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy + SPACK_PACKAGE_SPEC: ~rx3d~caliper+gpu+coreneuron~legacy-unit~openmp+shared+sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy build:neuron:nmodl:nvhpc:omp:legacy: extends: [.build_neuron_nmodl, .spack_nvhpc] variables: - SPACK_PACKAGE_SPEC: ~rx3d+caliper+gpu+coreneuron~legacy-unit+nmodl+openmp~shared~sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy ^caliper+cuda cuda_arch=70 + SPACK_PACKAGE_SPEC: ~rx3d+caliper+gpu+coreneuron~legacy-unit+openmp~shared~sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy ^caliper+cuda cuda_arch=70 
build:neuron:nmodl:nvhpc:omp: extends: [.build_neuron_nmodl, .spack_nvhpc] variables: - SPACK_PACKAGE_SPEC: ~rx3d+caliper+gpu+coreneuron~legacy-unit+nmodl+openmp~shared+sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy ^caliper+cuda cuda_arch=70 + SPACK_PACKAGE_SPEC: ~rx3d+caliper+gpu+coreneuron~legacy-unit+openmp~shared+sympy+tests~unified build_type=FastDebug model_tests=channel-benchmark,olfactory,tqperf-heavy ^caliper+cuda cuda_arch=70 # Test NEURON -test:neuron:mod2c:intel:shared: - extends: [.test_neuron] - needs: ["build:neuron:mod2c:intel:shared"] test:neuron:nmodl:intel:legacy: extends: [.test_neuron] @@ -246,10 +247,6 @@ test:neuron:nmodl:intel:shared: extends: [.test_neuron] needs: ["build:neuron:nmodl:intel:shared"] -test:neuron:mod2c:nvhpc:acc:shared: - extends: [.test_neuron, .gpu_node] - needs: ["build:neuron:mod2c:nvhpc:acc:shared"] - test:neuron:nmodl:nvhpc:acc:legacy: extends: [.test_neuron, .gpu_node] needs: ["build:neuron:nmodl:nvhpc:acc:legacy"] @@ -265,3 +262,60 @@ test:neuron:nmodl:nvhpc:omp:legacy: test:neuron:nmodl:nvhpc:omp: extends: [.test_neuron, .gpu_node] needs: ["build:neuron:nmodl:nvhpc:omp"] + + +# Container building +mac_m1_container_build: + stage: .pre + tags: + - macos-arm64 + script: + - if [ -z "${ARM64_IMAGE_TAG}" ]; then + - echo "Please set the ARM64_IMAGE_TAG variable" + - exit 1 + - fi + - cd packaging/python + - echo "Replacing symlinks with their targets to keep podman happy" + - find . -type l -exec cp $(realpath {}) ./TEMP \; -exec rm {} \; -exec mv TEMP {} \; + - ls -l + - export BUILDAH_FORMAT=docker # enables ONBUILD instructions which are not OCI compatible + - machine_status=$(podman machine inspect | awk '/State/ {print $2}' | tr -d '",') + # If you start the machine yourself, make sure BUILDAH_FORMAT and the http proxy variables are set in the shell before doing so! + - if [[ "${machine_status}" != "running" ]]; then + - echo "Machine is in ${machine_status} status - starting" + - podman machine start + - fi + - podman build -t neuronsimulator/neuron_wheel:${ARM64_IMAGE_TAG} --build-arg MANYLINUX_IMAGE=manylinux2014_aarch64 -f Dockerfile . + - podman login -u ${DOCKER_HUB_USER} -p ${DOCKER_HUB_AUTH_TOKEN} docker.io + - podman push neuronsimulator/neuron_wheel:${ARM64_IMAGE_TAG} + - podman rmi localhost/neuronsimulator/neuron_wheel:${ARM64_IMAGE_TAG} + rules: + - if: $CI_PIPELINE_SOURCE == "web" + when: manual + +x86_64_container_build: + stage: .pre + image: + name: quay.io/buildah/stable + entrypoint: [""] + variables: + KUBERNETES_CPU_LIMIT: 4 + KUBERNETES_CPU_REQUEST: 2 + KUBERNETES_MEMORY_LIMIT: 8Gi + KUBERNETES_MEMORY_REQUEST: 4Gi + tags: + - kubernetes + rules: + - if: $CI_PIPELINE_SOURCE == "web" + when: manual + script: + - if [ -z "${X86_IMAGE_TAG}" ]; then + - echo "Please set the X86_IMAGE_TAG variable" + - exit 1 + - fi + - export STORAGE_DRIVER=vfs # allows to build inside containers without additional mounts + - export BUILDAH_FORMAT=docker # enables ONBUILD instructions which are not OCI compatible + - cd packaging/python + - buildah bud --iidfile image_id -t neuronsimulator/neuron_wheel:${X86_IMAGE_TAG} -f Dockerfile . 
+ - buildah login -u ${DOCKER_HUB_USER} -p ${DOCKER_HUB_AUTH_TOKEN} docker.io + - buildah push $((double vector[2] const&) +implicit-integer-sign-change:double vector[2] Eigen::internal::pnegate(double vector[2] const&) +implicit-integer-sign-change:Eigen::internal::Packet1cd Eigen::internal::pconj(Eigen::internal::Packet1cd const&) +implicit-integer-sign-change:Eigen::internal::Packet1cd Eigen::internal::pmul(Eigen::internal::Packet1cd const&, Eigen::internal::Packet1cd const&) implicit-integer-sign-change:fgets_unlimited_nltrans(HocStr*, _IO_FILE*, int) implicit-integer-sign-change:invlfiresha.c implicit-integer-sign-change:ivRegexp::Search(char const*, int, int, int) @@ -12,6 +15,8 @@ nonnull-attribute:_ode_reinit(double*) nonnull-attribute:_rhs_variable_step(double const*, double*) null:GPolyLine::brush(ivBrush const*) null:GPolyLine::color(ivColor const*) +null:ColorPalette::color(int) +null:BrushPalette::brush(int) pointer-overflow:_rhs_variable_step(double const*, double*) pointer-overflow:_rhs_variable_step_ecs(double const*, double*) pointer-overflow:coreneuron::_net_receive_kernel(double, coreneuron::Point_process*, int, double) diff --git a/CMakeLists.txt b/CMakeLists.txt index ef52a44989..85dc3cd3d6 100644 --- a/CMakeLists.txt +++ b/CMakeLists.txt @@ -74,6 +74,8 @@ option(NRN_ENABLE_RX3D "Enable rx3d support" ${NRN_ENABLE_RX3D_DEFAULT}) option(NRN_ENABLE_CORENEURON "Enable CoreNEURON support" ${NRN_ENABLE_CORENEURON_DEFAULT}) option(NRN_ENABLE_BACKTRACE "Enable pretty-printed backtraces" ${NRN_ENABLE_BACKTRACE_DEFAULT}) option(NRN_ENABLE_TESTS "Enable unit tests" ${NRN_ENABLE_TESTS_DEFAULT}) +option(NRN_ENABLE_MATH_OPT "Enable extra math optimisations (to enable SIMD)" + ${NRN_ENABLE_MATH_OPT_DEFAULT}) set(NRN_ENABLE_MODEL_TESTS "${NRN_ENABLE_MODEL_TESTS_DEFAULT}" CACHE STRING "Comma-separated list of detailed models to enable tests of.") @@ -89,18 +91,16 @@ option(NRN_AVOID_ABSOLUTE_PATHS "Avoid embedding absolute paths in generated code (ccache optimisation)" ${NRN_AVOID_ABSOLUTE_PATHS_DEFAULT}) mark_as_advanced(NRN_AVOID_ABSOLUTE_PATHS) -option(NRN_DYNAMIC_UNITS_USE_LEGACY "Use legacy units as default for dynamic units" - ${NRN_DYNAMIC_UNITS_USE_LEGACY_DEFAULT}) # note that if CoreNEURON is enabled then it is not necessary to enable this option option(NRN_ENABLE_MOD_COMPATIBILITY "Enable CoreNEURON compatibility for MOD files" ${NRN_ENABLE_MOD_COMPATIBILITY_DEFAULT}) option(NRN_ENABLE_REL_RPATH "Use relative RPATH in binaries. for relocatable installs/Python" ${NRN_ENABLE_REL_RPATH_DEFAULT}) -option(NRN_WHEEL_BUILD ${NRN_WHEEL_BUILD_DEFAULT}) +option(NRN_BINARY_DIST_BUILD ${NRN_BINARY_DIST_BUILD_DEFAULT}) option(NRN_WHEEL_STATIC_READLINE "Use static readline libraries for the wheels." ${NRN_WHEEL_STATIC_READLINE_DEFAULT}) mark_as_advanced(NRN_ENABLE_REL_RPATH) -mark_as_advanced(NRN_WHEEL_BUILD) +mark_as_advanced(NRN_BINARY_DIST_BUILD) # ============================================================================= # Build options (string) @@ -111,15 +111,27 @@ mark_as_advanced(NRN_WHEEL_BUILD) # - ON : install in ${CMAKE_INSTALL_PREFIX} (default) # NOTE: When building the wheel, this is set to OFF. # Dynamic Python version support: -# - OFF : nrnpython interface is linked into libnrniv.so -# - ON : nrnpython interface consistent with default python3 (falling back to python) -# is built and loaded dynamically at run time (nrniv still works in the absence -# of any Python at all). 
-# - : semicolon (;) separated list of python executable used to create a separate -# interface for each. When one of those versions of Python is launched, -# "import neuron" will automatically load the appropriate module interface along -# with the rest of neuron. Also nrniv -pyexe will work with any -# in the list of python executables. +# - NRN_ENABLE_PYTHON_DYNAMIC=OFF : libnrniv is linked against a single Python version, which +# can be steered with PYTHON_EXECUTABLE +# - NRN_ENABLE_PYTHON_DYNAMIC=ON : libnrniv is not linked against Python, distinct libnrnpython +# libraries are built for each configured Python major.minor +# version but are only linked against the relevant libpython +# if NRN_LINK_AGAINST_PYTHON is true, which is by default only +# the case on windows. +# nrniv/special still works in the absence of Python, and +# dynamically loads the appropriate libnrnpythonX.Y as needed +# - NRN_PYTHON_DYNAMIC=pythonX;... : semicolon separated list of Python versions used when +# NRN_ENABLE_PYTHON_DYNAMIC=ON, if not set then it defaults to +# PYTHON_EXECUTABLE. This must include all major.minor +# versions that you want nrniv -python to be able to load. If +# this *is* set then the first entry will be used as the +# default Python version. +# Extra options for extended Python tests: +# - NRN_PYTHON_EXTRA_FOR_TESTS=... : semicolon separated list of Python executables that NEURON +# is *not* built against. These are only used for testing, and +# there must be no overlap in major.minor versions between +# this list and the set of versions that NEURON *is* built +# against. This allows tests of failure modes. # Dynamic MPI support: # - OFF : nrnmpi is linked into libnrniv.so # - ON : nrnmpi interface consistent with default mpi is built and loaded dynamically @@ -138,12 +150,18 @@ option(NRN_ENABLE_MODULE_INSTALL ${NRN_ENABLE_MODULE_INSTALL_DEFAULT}) option(NRN_ENABLE_PYTHON_DYNAMIC "Enable dynamic Python version support" ${NRN_ENABLE_PYTHON_DYNAMIC_DEFAULT}) +option(NRN_LINK_AGAINST_PYTHON + "Link libnrnpythonX.Y against libpythonX.Y when NRN_ENABLE_PYTHON_DYNAMIC=ON" + ${NRN_LINK_AGAINST_PYTHON_DEFAULT}) set(NRN_PYTHON_DYNAMIC "" CACHE STRING "semicolon (;) separated list of python executables to create interface for (default python3)" ) +set(NRN_PYTHON_EXTRA_FOR_TESTS + "" + CACHE STRING "semicolon (;) separated list of python executables to use for testing") option(NRN_ENABLE_MPI_DYNAMIC "Enable dynamic MPI library support" OFF) set(NRN_MPI_DYNAMIC @@ -183,25 +201,6 @@ set(NRN_PROFILER "${NRN_PROFILER_DEFAULT}" CACHE STRING "Set which profiler to build against ('caliper', 'likwid')") -# ============================================================================= -# Set Python additional versions earlier (especially for old CMake) -# ============================================================================= -set(Python_ADDITIONAL_VERSIONS - 3 - 3.9 - 3.8 - 3.7 - 3.6 - 3.5 - 3.4 - 3.3 - 3.2 - 3.1 - 3.0 - 2.8 - 2.7 - 2.6) - # ============================================================================= # Include cmake modules # ============================================================================= @@ -217,7 +216,7 @@ include(cmake/modules/FindPythonModule.cmake) include(cmake/Coverage.cmake) # set CMAKE_BUILD_TYPE and associated flags using allowableBuildTypes and CMAKE_BUILD_TYPE_DEFAULT -set(allowableBuildTypes Custom Debug Release RelWithDebInfo Fast) +set(allowableBuildTypes Custom Debug Release RelWithDebInfo Fast FastDebug) include(ReleaseDebugAutoFlags) # Try and 
emit an intelligent warning if the version number currently set in the CMake project(...) @@ -230,7 +229,7 @@ include(cmake/CheckGitDescribeCompatibility.cmake) set(NRN_VERSION_MAJOR ${PROJECT_VERSION_MAJOR}) set(NRN_VERSION_MINOR ${PROJECT_VERSION_MINOR}) set(NRN_VERSION_PATCH ${PROJECT_VERSION_PATCH}) -configure_file(src/nrnoc/nrnsemanticversion.h.in src/nrnoc/nrnsemanticversion.h @ONLY) +configure_file(src/nrnoc/nrnsemanticversion.h.in include/nrnsemanticversion.h @ONLY) # ============================================================================= # Add coding-conventions submodule so we can use helper functions defined there @@ -246,17 +245,35 @@ if(NOT EXISTS "${CODING_CONV_CMAKE}/3rdparty.cmake") endif() include("${CODING_CONV_CMAKE}/3rdparty.cmake") cpp_cc_git_submodule(Random123) +cpp_cc_git_submodule(eigen) -# Since PythonInterp module prefers system-wide python, if PYTHON_EXECUTABLE is not set, look it up -# in the PATH exclusively. Need to set PYTHON_EXECUTABLE before calling SanitizerHelper.cmake -if(NOT PYTHON_EXECUTABLE) - message( - STATUS "-DPYTHON_EXECUTABLE not specified. Looking for `python3` in the PATH exclusively...") - find_program( - PYTHON_EXECUTABLE python3 - PATHS ENV PATH - NO_DEFAULT_PATH) - message(STATUS "\tSetting PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}") +# ================================================================================================= +# Enable sanitizer support if the NRN_SANITIZERS variable is set. Comes before PythonHelper.cmake. +# ================================================================================================= +include(cmake/SanitizerHelper.cmake) +# Make a CMake list NRN_SANITIZERS_LIST +string(REPLACE "," ";" NRN_SANITIZERS_LIST "${NRN_SANITIZERS}") + +# ================================================================================================= +# Juggle PYTHON_EXECUTABLE and NRN_ENABLE_PYTHON_DYNAMIC to make sure that: +# +# * NRN_DEFAULT_PYTHON_EXECUTABLE is the default Python version, and NRN_DEFAULT_PYTHON_INCLUDES and +# NRN_DEFAULT_PYTHON_LIBRARIES are its include directory and library +# * NRN_PYTHON_EXECUTABLES is a list of absolute paths to all Python executables to be built against +# (length >=1 if NRN_ENABLE_PYTHON_DYNAMIC else == 1) +# * NRN_PYTHON_VERSIONS (3.8, 3.11, ...), NRN_PYTHON_INCLUDES and NRN_PYTHON_LIBRARIES are populated +# with values matching NRN_PYTHON_EXECUTABLES +# * NRN_PYTHON_COUNT is set to the number of entries in those lists (i.e.
number of Pythons), and +# NRN_PYTHON_ITERATION_LIMIT is set to ${NRN_PYTHON_COUNT} - 1 +# ================================================================================================= +set(NRN_MINIMUM_PYTHON_VERSION 3.8) +include(cmake/PythonHelper.cmake) + +# This needs NRN_DEFAULT_PYTHON_EXECUTABLE, which comes from PythonHelper.cmake +if(NRN_SANITIZERS) + configure_file(bin/nrn-enable-sanitizer.in bin/nrn-enable-sanitizer @ONLY) + install(PROGRAMS ${PROJECT_BINARY_DIR}/bin/nrn-enable-sanitizer + DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) endif() # ============================================================================= @@ -301,6 +318,9 @@ if(NRN_ENABLE_RX3D) message(SEND_ERROR "NRN_ENABLE_RX3D requires NRN_ENABLE_PYTHON feature.") else() find_package(Cython REQUIRED) + if(CYTHON_VERSION VERSION_GREATER_EQUAL 3) + message(FATAL_ERROR "Cython 3+ is not supported") + endif() endif() endif() if(MINGW) @@ -315,12 +335,9 @@ if(NRN_ENABLE_MPI) # find_package(MPI REQUIRED) has a CMAKE_OSX_ARCHITECTURES edge case nrn_mpi_find_package() set(NRNMPI 1) - set(PARANEURON 1) # avoid linking to C++ bindings - add_definitions("-DMPI_NO_CPPBIND=1") - add_definitions("-DOMPI_SKIP_MPICXX=1") - add_definitions("-DMPICH_SKIP_MPICXX=1") + add_compile_definitions(MPI_NO_CPPBIND=1 OMPI_SKIP_MPICXX=1 MPICH_SKIP_MPICXX=1) # Launching mpi executable with full path can mangle different python versions and libraries (see # issue #894). ${MPIEXEC_NAME} would reinsert the full path, but ${CMAKE_COMMAND} -E env @@ -345,7 +362,6 @@ if(NRN_ENABLE_MPI) endif() else() set(NRNMPI 0) - set(PARANEURON 0) endif() # ============================================================================= @@ -359,8 +375,8 @@ if(NRN_ENABLE_MUSIC) message(FATAL "MUSIC requires -DNRN_ENABLE_PYTHON=ON") endif() find_package(Cython REQUIRED) - if(NOT Cython_FOUND) - message(FATAL "MUSIC requires Cython") + if(CYTHON_VERSION VERSION_GREATER_EQUAL 3) + message(FATAL_ERROR "Cython 3+ is not supported") endif() find_package(MUSIC REQUIRED) set(NRN_MUSIC 1) @@ -413,30 +429,11 @@ endif() # ============================================================================= # Enable Python support # ============================================================================= - -find_package(PythonInterp 3.8 REQUIRED) if(NRN_ENABLE_PYTHON) - # start afresh with PythonLibsNew's find_library without this, -DPYTHON_EXECUTABLE=`which python3` - # -DNRN_PYTHON_DYNAMIC="python3.8;python3.9" would end up with a NRN_DEFAULT_PYTHON_LIBRARIES of - # /usr/lib/x86_64-linux-gnu/libpython3.8.so - unset(PYTHON_LIBRARY CACHE) - - find_package(PythonLibsNew ${PYTHON_VERSION_MAJOR} REQUIRED) - set(NRN_DEFAULT_PYTHON_EXECUTABLE ${PYTHON_EXECUTABLE}) - set(NRN_DEFAULT_PYTHON_LIBRARIES ${PYTHON_LIBRARIES}) - set(NRN_DEFAULT_PYTHON_INCLUDE_DIRS ${PYTHON_INCLUDE_DIRS}) - set(USE_PYTHON 1) -else() - set(USE_PYTHON 0) + # Make sure the USE_PYTHON macro is defined in the C++ code + list(APPEND NRN_COMPILE_DEFS USE_PYTHON) endif() -# ============================================================================= -# Enable sanitizer support if the NRN_SANITIZERS variable is set -# ============================================================================= -include(cmake/SanitizerHelper.cmake) -# Make a CMake list NRN_SANITIZERS_LIST -string(REPLACE "," ";" NRN_SANITIZERS_LIST "${NRN_SANITIZERS}") - # ============================================================================= # Enable Threads support # 
============================================================================= @@ -458,9 +455,9 @@ if(NRN_ENABLE_PROFILING) add_definitions("-DNRN_CALIPER") set(CALIPER_LIB "caliper") elseif(NRN_PROFILER STREQUAL "likwid") - find_package(likwid REQUIRED) - include_directories(${likwid_INCLUDE_DIRS}) - add_definitions("-DLIKWID_PERFMON") + include(GetLIKWID) + set(LIKWID_LIB nrn_likwid) + endif() endif() @@ -479,7 +476,6 @@ endif() # ============================================================================= include(NeuronFileLists) include(MPIDynamicHelper) -include(PythonDynamicHelper) # Set variable to include mpi headers set(NRN_INCLUDE_MPI_HEADERS OFF) @@ -488,6 +484,9 @@ if(NOT NRN_ENABLE_MPI_DYNAMIC OR ${num_mpi} EQUAL 1) set(NRN_INCLUDE_MPI_HEADERS ON) endif() +# initialize CLI11 submodule +cpp_cc_git_submodule(CLI11 BUILD PACKAGE CLI11 REQUIRED) + # ============================================================================= # Enable CoreNEURON support # ============================================================================= @@ -500,14 +499,10 @@ if(NRN_ENABLE_CORENEURON) list(APPEND CMAKE_MODULE_PATH ${PROJECT_SOURCE_DIR}/cmake/coreneuron ${PROJECT_SOURCE_DIR}/cmake/coreneuron/packages) - set(nrn_using_ext_corenrn FALSE) # If NEURON tests are enabled then enable CoreNEURON tests too set(CORENRN_ENABLE_UNIT_TESTS ${NRN_ENABLE_TESTS} CACHE BOOL "" FORCE) - set(CORENRN_ENABLE_LEGACY_UNITS - ${NRN_DYNAMIC_UNITS_USE_LEGACY} - CACHE BOOL "" FORCE) if(NRN_ENABLE_PROFILING) if(NRN_PROFILER STREQUAL "caliper") set(CORENRN_ENABLE_CALIPER_PROFILING ON) @@ -515,22 +510,7 @@ if(NRN_ENABLE_CORENEURON) set(CORENRN_ENABLE_LIKWID_PROFILING ON) endif() endif() - # Propagate NEURON MPI option to CoreNEURON if not set explicitly - if(NOT DEFINED CORENRN_ENABLE_MPI) - set(CORENRN_ENABLE_MPI - ${NRN_ENABLE_MPI} - CACHE BOOL "" FORCE) - endif() - if(NOT DEFINED CORENRN_ENABLE_MPI_DYNAMIC) - set(CORENRN_ENABLE_MPI_DYNAMIC - ${NRN_ENABLE_MPI_DYNAMIC} - CACHE BOOL "" FORCE) - endif() set(CORENEURON_DIR ${PROJECT_SOURCE_DIR}/src/coreneuron) - include(PythonDynamicHelper) - - # initialize CLI11 submodule - cpp_cc_git_submodule(CLI11 BUILD PACKAGE CLI11 REQUIRED) add_subdirectory(${PROJECT_SOURCE_DIR}/src/coreneuron) @@ -541,6 +521,7 @@ if(NRN_ENABLE_CORENEURON) get_property(CORENRN_LIB_LINK_FLAGS GLOBAL PROPERTY CORENRN_LIB_LINK_FLAGS) get_property(CORENRN_NEURON_LINK_FLAGS GLOBAL PROPERTY CORENRN_NEURON_LINK_FLAGS) get_property(CORENRN_ENABLE_SHARED GLOBAL PROPERTY CORENRN_ENABLE_SHARED) + get_property(CORENRN_NMODL_BINARY GLOBAL PROPERTY CORENRN_NMODL_BINARY) # NEURON tests that link against CoreNEURON need to depend on it. set(CORENEURON_TARGET_TO_DEPEND coreneuron-for-tests) @@ -585,6 +566,8 @@ endif() # ============================================================================= # Add project directories AFTER CMake modules # ============================================================================= +add_subdirectory(src/sparse13) +add_subdirectory(src/gnu) add_subdirectory(src/nrniv) # Collect the environment variables that are needed to execute NEURON from the build directory. This @@ -625,6 +608,10 @@ if(MINGW) endif() if(NRN_ENABLE_DOCS) + if(APPLE AND "address" IN_LIST NRN_SANITIZERS) + # IPython notebook execution doesn't have enough magic to preload ASan + message(FATAL_ERROR "macOS + ASan + docs is not supported") + endif() # Do we need to set extra environment variables to find NEURON? 
set(NRN_DOCS_COMMAND_PREFIX ${CMAKE_COMMAND} -E env) if(NOT NRN_ENABLE_DOCS_WITH_EXTERNAL_INSTALLATION) @@ -649,8 +636,8 @@ if(NRN_ENABLE_DOCS) # This is needed for ipython, which is pip installable but not importable. if("${docs_requirement}" STREQUAL "# do not check import of next line") set(skip_next TRUE) - elseif("${docs_requirement}" MATCHES "^([a-zA-Z_][a-zA-Z0-9]*)") - nrn_find_python_module(${CMAKE_MATCH_0} REQUIRED) + elseif("${docs_requirement}" MATCHES "^([a-zA-Z_][a-zA-Z0-9_]*)$") + nrn_find_python_module(MODULE ${CMAKE_MATCH_0} REQUIRED) endif() endforeach() @@ -690,7 +677,7 @@ if(NRN_ENABLE_DOCS) sphinx COMMAND ${NRN_DOCS_COMMAND_PREFIX} ${SPHINX_EXECUTABLE} -j auto -b html "${PROJECT_SOURCE_DIR}/docs" "${PROJECT_SOURCE_DIR}/docs/_build" - COMMAND echo "Copy/Paste to Browser ${PROJECT_SOURCE_DIR}/docs/_build/index.html" + COMMAND echo "Copy/Paste to Browser file://${PROJECT_SOURCE_DIR}/docs/_build/index.html" WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}/docs COMMENT "Generating documentation with Sphinx") @@ -775,7 +762,7 @@ if(NRN_ENABLE_PYTHON) add_custom_target( help_data_dat COMMAND ${CMAKE_COMMAND} -E make_directory ${PROJECT_BINARY_DIR}/lib/python/neuron - COMMAND ${PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/docs/parse_rst.py + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${PROJECT_SOURCE_DIR}/docs/parse_rst.py ${PROJECT_SOURCE_DIR}/docs/python ${PROJECT_BINARY_DIR}/lib/python/neuron/help_data.dat COMMENT "Generating help_data.dat" VERBATIM) @@ -822,13 +809,12 @@ if(NRN_ENABLE_TESTS) include(Catch) endif() include(CTest) - nrn_find_python_module(pytest) - nrn_find_python_module(pytest_cov) if(NRN_ENABLE_PYTHON) - if(NOT PYTEST_FOUND) - message(SEND_ERROR "pytest Python package is required.") - elseif(NOT PYTEST_COV_FOUND) - message(INFO "pytest-cov package not installed. Python coverage will not be generated.") + # Need this for *all* Python versions we try and run tests with + nrn_find_python_module(MODULE pytest ALL REQUIRED) + nrn_find_python_module(MODULE pytest_cov ALL) + if(NOT PYTEST_COV_FOUND) + message(STATUS "pytest-cov package not installed. Python coverage will not be generated.") endif() endif() add_dependencies(nrniv_lib copy_share_demo_to_build) @@ -842,10 +828,11 @@ if(NRN_ENABLE_TESTS) "${neurondemo_prefix}/${CMAKE_SHARED_LIBRARY_PREFIX}nrnmech${CMAKE_SHARED_LIBRARY_SUFFIX}") add_custom_command( OUTPUT ${neurondemo_files} - COMMAND - ${CMAKE_COMMAND} -E env ${NRN_RUN_FROM_BUILD_DIR_ENV} ${NRN_SANITIZER_ENABLE_ENVIRONMENT} - "${PROJECT_BINARY_DIR}/bin/neurondemo" "-nobanner" "-nogui" "-c" "quit()" + COMMAND ${CMAKE_COMMAND} -E env ${NRN_RUN_FROM_BUILD_DIR_ENV} + ${NRN_SANITIZER_ENABLE_ENVIRONMENT} "${PROJECT_BINARY_DIR}/bin/nrnivmodl" + WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/share/nrn/demo/release VERBATIM) + add_custom_target(generate-neurondemo-mechanism-library ALL DEPENDS ${neurondemo_files} nrniv copy_share_lib_to_build) # Initialize the submodule *before* including the test/CMakeLists.txt that uses it. 
This ensures @@ -872,11 +859,30 @@ install(FILES ${PROJECT_BINARY_DIR}/share/nrn/lib/nrnunits.lib install(PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/share/lib/cleanup DESTINATION ${NRN_INSTALL_SHARE_DIR}/lib) -# find neuron (excluding coreneuron) headers to install -nrn_find_project_files(NRN_HEADERS_PATHS ${HEADER_FILES_TO_INSTALL}) -list(FILTER NRN_HEADERS_PATHS EXCLUDE REGEX "src/coreneuron") -file(COPY ${NRN_HEADERS_PATHS} ${PROJECT_BINARY_DIR}/src/nrnoc/nrnsemanticversion.h - DESTINATION ${PROJECT_BINARY_DIR}/include) +# Copy NEURON headers that will be included in the installation into the build directory. +set(headers_in_build_dir) +foreach(header_below_src ${HEADER_FILES_TO_INSTALL}) + get_filename_component(header_name "${header_below_src}" NAME) + set(output_name "${PROJECT_BINARY_DIR}/include/${header_name}") + cpp_cc_build_time_copy( + INPUT "${PROJECT_SOURCE_DIR}/src/${header_below_src}" + OUTPUT "${output_name}" + NO_TARGET) + list(APPEND headers_in_build_dir "${output_name}") +endforeach() +foreach(header_below_src ${STRUCTURED_HEADER_FILES_TO_INSTALL}) + set(output_name "${PROJECT_BINARY_DIR}/include/${header_below_src}") + cpp_cc_build_time_copy( + INPUT "${PROJECT_SOURCE_DIR}/src/${header_below_src}" + OUTPUT "${output_name}" + NO_TARGET) + list(APPEND headers_in_build_dir "${output_name}") +endforeach() +add_custom_target( + copy_headers_to_build + COMMENT "Copying headers to build directory" + DEPENDS ${headers_in_build_dir}) +add_dependencies(nrniv_lib copy_headers_to_build) install(DIRECTORY ${PROJECT_BINARY_DIR}/include DESTINATION ${CMAKE_INSTALL_PREFIX}) if(NRN_MACOS_BUILD) @@ -927,7 +933,7 @@ if(BUILD_TYPE_UPPER MATCHES "CUSTOM") else() set(COMPILER_FLAGS "${CMAKE_CXX_FLAGS_${BUILD_TYPE_UPPER}}") endif() -string(JOIN " " COMPILER_FLAGS "${COMPILER_FLAGS}" ${NRN_COMPILE_FLAGS}) +string(JOIN " " COMPILER_FLAGS "${COMPILER_FLAGS}" ${NRN_COMPILE_FLAGS} ${CMAKE_CXX_FLAGS}) message(STATUS "") message(STATUS "Configured NEURON ${PROJECT_VERSION}") @@ -954,11 +960,6 @@ message(STATUS "CXX COMPILER | ${CMAKE_CXX_COMPILER}") message(STATUS "BUILD_TYPE | ${CMAKE_BUILD_TYPE} (allowed: ${allowableBuildTypes})") message(STATUS "COMPILE FLAGS | ${COMPILER_FLAGS}") message(STATUS "Shared | ${NRN_ENABLE_SHARED}") -if(NRN_DYNAMIC_UNITS_USE_LEGACY) - message(STATUS "Default units | legacy units") -else() - message(STATUS "Default units | modern units (2019 nist constants)") -endif() message(STATUS "MPI | ${NRN_ENABLE_MPI}") if(NRN_ENABLE_MPI) message(STATUS " DYNAMIC | ${NRN_ENABLE_MPI_DYNAMIC}") @@ -973,11 +974,11 @@ if(NRN_ENABLE_MPI) endforeach(val) else() if(NRN_INCLUDE_MPI_HEADERS) - message(STATUS " INC | ${MPI_INCLUDE_PATH}") + message(STATUS " INC | ${MPI_C_INCLUDE_DIRS}") else() message(STATUS " INC | N/A") endif() - message(STATUS " LIB | ${MPI_LIBRARY}") + message(STATUS " LIB | ${MPI_C_LIBRARIES}") endif() endif() if(NRN_ENABLE_MUSIC) @@ -985,28 +986,22 @@ if(NRN_ENABLE_MUSIC) endif() message(STATUS "Python | ${NRN_ENABLE_PYTHON}") if(NRN_ENABLE_PYTHON) - message(STATUS " EXE | ${NRN_DEFAULT_PYTHON_EXECUTABLE}") - message(STATUS " INC | ${NRN_DEFAULT_PYTHON_INCLUDE_DIRS}") - message(STATUS " LIB | ${NRN_DEFAULT_PYTHON_LIBRARIES}") - message(STATUS " MODULE | ${NRN_ENABLE_MODULE_INSTALL}") message(STATUS " DYNAMIC | ${NRN_ENABLE_PYTHON_DYNAMIC}") - if(NRN_ENABLE_PYTHON_DYNAMIC) - list(LENGTH NRN_PYTHON_EXE_LIST _num_pythons) - math(EXPR num_pythons "${_num_pythons} - 1") - foreach(val RANGE ${num_pythons}) - list(GET NRN_PYTHON_EXE_LIST ${val} exe) - list(GET 
NRN_PYTHON_VER_LIST ${val} version) - if(${version} LESS 3) - message(SEND_ERROR "Python 3 required. Please upgrade.") - endif() - list(GET NRN_PYTHON_INCLUDE_LIST ${val} include) - list(GET NRN_PYTHON_LIB_LIST ${val} lib) - message(STATUS " EXE | ${exe}") - message(STATUS " INC | ${include}") - message(STATUS " LIB | ${lib}") - - endforeach(val) - endif() + message(STATUS " MODULE | ${NRN_ENABLE_MODULE_INSTALL}") + foreach(val RANGE ${NRN_PYTHON_ITERATION_LIMIT}) + list(GET NRN_PYTHON_EXECUTABLES ${val} exe) + list(GET NRN_PYTHON_VERSIONS ${val} pyver) + list(GET NRN_PYTHON_INCLUDES ${val} pyinc) + list(GET NRN_PYTHON_LIBRARIES ${val} pylib) + unset(suffix) + if(val EQUAL 0) + set(suffix " (default)") + endif() + message(STATUS " python${pyver}${suffix}") + message(STATUS " EXE | ${exe}") + message(STATUS " INC | ${pyinc}") + message(STATUS " LIB | ${pylib}") + endforeach() endif() if(READLINE_FOUND) message(STATUS "Readline | ${Readline_LIBRARY}") @@ -1035,9 +1030,6 @@ message(STATUS "CoreNEURON | ${NRN_ENABLE_CORENEURON}") if(NRN_ENABLE_CORENEURON) message(STATUS " PATH | ${CORENEURON_DIR}") message(STATUS " LINK FLAGS | ${CORENRN_LIB_LINK_FLAGS}") - if(NOT coreneuron_FOUND) - message(STATUS " Legacy Units| ${CORENRN_ENABLE_LEGACY_UNITS}") - endif() endif() if(NRN_UNIVERSAL2_BUILD) message(STATUS "CMAKE_OSX_ARCH| ${CMAKE_OSX_ARCHITECTURES}") diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md index 1bf30fdfc4..ad0d7dccaa 100644 --- a/CONTRIBUTING.md +++ b/CONTRIBUTING.md @@ -26,7 +26,7 @@ favor of Python. The hoc code itself is in the nrn repository and also should no There are several NEURON-related repositories hosted elsewhere which also encourage contributions. Each of these will have its own *Contributing* document. 1. [NMODL](https://github.com/BlueBrain/nmodl) -- improved method for compiling and porting *.mod* files using *abstract syntax trees* -1. [CoreNEURON](https://github.com/BlueBrain/CoreNeuron) -- an optimized NEURON for running on high performance computers (HPCs) +1. [CoreNEURON](https://github.com/neuronsimulator/nrn/tree/master/src/coreneuron) -- an optimized NEURON for running on high performance computers (HPCs) 1. [NetPyNE](https://github.com/Neurosim-lab/netpyne) -- multiscale modeling tool for developing, simulating and analyzing networks which include complex cells, potentially with detailed molecular-modeling. 1. 
[NetPyNE-UI](https://github.com/MetaCell/NetPyNE-UI) -- graphical user interface for NetPyNE diff --git a/README.md b/README.md index 96d5b467f2..f6cba70ead 100644 --- a/README.md +++ b/README.md @@ -1,4 +1,4 @@ -[![Build Status](https://dev.azure.com/neuronsimulator/nrn/_apis/build/status/neuronsimulator.nrn?branchName=master)](https://dev.azure.com/neuronsimulator/nrn/_build/latest?definitionId=1&branchName=master) [![Actions Status](https://github.com/neuronsimulator/nrn/workflows/Windows%20Installer/badge.svg)](https://github.com/neuronsimulator/nrn/actions) [![Actions Status](https://github.com/neuronsimulator/nrn/workflows/NEURON%20CI/badge.svg)](https://github.com/neuronsimulator/nrn/actions) [![codecov](https://codecov.io/gh/neuronsimulator/nrn/branch/master/graph/badge.svg?token=T7PIDw6LrC)](https://codecov.io/gh/neuronsimulator/nrn) [![Documentation Status](https://readthedocs.org/projects/nrn/badge/?version=latest)](http://nrn.readthedocs.io/?badge=latest) +[![Build Status](https://dev.azure.com/neuronsimulator/nrn/_apis/build/status/neuronsimulator.nrn?branchName=master)](https://dev.azure.com/neuronsimulator/nrn/_build/latest?definitionId=1&branchName=master) [![Actions Status](https://github.com/neuronsimulator/nrn/actions/workflows/windows.yml/badge.svg?branch=master)](https://github.com/neuronsimulator/nrn/actions) [![Actions Status](https://github.com/neuronsimulator/nrn/workflows/NEURON%20CI/badge.svg)](https://github.com/neuronsimulator/nrn/actions) [![codecov](https://codecov.io/gh/neuronsimulator/nrn/branch/master/graph/badge.svg?token=T7PIDw6LrC)](https://codecov.io/gh/neuronsimulator/nrn) [![Documentation Status](https://readthedocs.org/projects/nrn/badge/?version=latest)](http://nrn.readthedocs.io/?badge=latest) # NEURON NEURON is a simulator for models of neurons and networks of neuron. 
See [http://neuron.yale.edu](http://neuron.yale.edu) for installers, source code, documentation, tutorials, announcements of diff --git a/azure-pipelines.yml b/azure-pipelines.yml index 6bb60036d0..4a237d6b43 100644 --- a/azure-pipelines.yml +++ b/azure-pipelines.yml @@ -29,7 +29,7 @@ stages: jobs: - job: 'ManyLinuxWheels' - timeoutInMinutes: 30 + timeoutInMinutes: 45 pool: vmImage: 'ubuntu-20.04' strategy: @@ -42,6 +42,8 @@ stages: python.version: '3.10' Python311: python.version: '3.11' + Python312: + python.version: '3.12' steps: # Secure files documentation: @@ -67,7 +69,7 @@ stages: -e NRN_RELEASE_UPLOAD \ -e SETUPTOOLS_SCM_PRETEND_VERSION \ -e NRN_BUILD_FOR_UPLOAD=1 \ - 'neuronsimulator/neuron_wheel:latest-gcc9-x86_64' \ + 'neuronsimulator/neuron_wheel:latest-x86_64' \ packaging/python/build_wheels.bash linux $(python.version) coreneuron displayName: 'Building ManyLinux Wheel' @@ -79,80 +81,32 @@ stages: - template: ci/azure-wheel-test-upload.yml - - job: 'ManyLinuxGPUWheels' - timeoutInMinutes: 30 - pool: - vmImage: 'ubuntu-20.04' - variables: - GPU_BUILD: 'true' - strategy: - matrix: - Python38: - python.version: '3.8' - Python39: - python.version: '3.9' - Python310: - python.version: '3.10' - Python311: - python.version: '3.11' - steps: - - # Secure files documentation: - # https://docs.microsoft.com/en-us/azure/devops/pipelines/library/secure-files?view=azure-devops - # NOTE: when uploading new secure files, access must be permitted from the Azure pipeline interface (check message there) - - task: DownloadSecureFile@1 - name: mpt_headersSF - displayName: 'Download mpt_headers secure file' - inputs: - secureFile: 'mpt_headears.2.21.tar.gz' - - # Note that mpt headers must be mounted in the docker imager under `/nrnwheel/mpt` - # This path is checked by `packaging/python/build_wheels.bash` when run in the image. 
- - script: | - sudo mkdir -p /opt/nrnwheel/mpt - sudo tar -zxf $(mpt_headersSF.secureFilePath) --directory /opt/nrnwheel/mpt - docker run --rm \ - -w /root/nrn \ - -v $PWD:/root/nrn \ - -v /opt/nrnwheel/mpt:/nrnwheel/mpt \ - -e NEURON_NIGHTLY_TAG \ - -e NRN_NIGHTLY_UPLOAD \ - -e NRN_RELEASE_UPLOAD \ - -e SETUPTOOLS_SCM_PRETEND_VERSION \ - -e NRN_BUILD_FOR_UPLOAD=1 \ - 'neuronsimulator/neuron_wheel_gpu:nvhpc-22.1-cuda-11.5-gcc9' \ - packaging/python/build_wheels.bash linux $(python.version) coreneuron-gpu - displayName: 'Building ManyLinux Wheel' - - - script: | - sudo apt update - sudo apt install -y mpich openmpi-bin libopenmpi-dev libmpich-dev - displayName: 'Install Test System Depdendencies' - - - template: ci/azure-wheel-test-upload.yml - # Jobs to build OSX wheels natively - job: 'MacOSWheels' - timeoutInMinutes: 40 + timeoutInMinutes: 60 pool: - vmImage: 'macOS-11' + vmImage: 'macOS-12' strategy: matrix: Python38: python.version: '3.8' python.org.version: '3.8.10' - python.installer.name: 'macosx10.9.pkg' + python.installer.name: 'macos11.pkg' Python39: python.version: '3.9' python.org.version: '3.9.13' - python.installer.name: 'macosx10.9.pkg' + python.installer.name: 'macos11.pkg' Python310: python.version: '3.10' - python.org.version: '3.10.5' + python.org.version: '3.10.11' python.installer.name: 'macos11.pkg' Python311: python.version: '3.11' - python.org.version: '3.11.1' + python.org.version: '3.11.7' + python.installer.name: 'macos11.pkg' + Python312: + python.version: '3.12' + python.org.version: '3.12.0' python.installer.name: 'macos11.pkg' steps: @@ -184,7 +138,7 @@ stages: name: readlineSF displayName: 'Download readline secure file' inputs: - secureFile: 'readline7.0.tar.gz' + secureFile: 'readline7.0-ncurses6.4.tar.gz' # 10.14 is required for full C++17 support according to # https://cibuildwheel.readthedocs.io/en/stable/cpp_standards, but it @@ -202,42 +156,6 @@ stages: - template: ci/azure-wheel-test-upload.yml - # Jobs to build NEURON installer natively - - job: 'WindowsInstaller' - timeoutInMinutes: 45 - pool: - vmImage: windows-latest - variables: - MSYS2_ROOT: C:\msys64 - - steps: - - checkout: self - submodules: 'true' - clean: 'true' - - - task: BatchScript@1 - inputs: - filename: ci/win_download_deps.cmd - displayName: "Download Dependencies" - condition: succeeded() - - - task: BatchScript@1 - inputs: - filename: ci/win_install_deps.cmd - displayName: "Install Dependencies" - condition: succeeded() - - # WSL is enabled by default in the image, with no installed linux distribution. - # WSL creates an unusable `bash.EXE` that can be resolved first from the environment. - # This is why we remove that file. 
- - script: | - rm.exe "C:/WINDOWS/system32/bash.EXE" - %MSYS2_ROOT%\usr\bin\bash -lc "$BUILD_SOURCESDIRECTORY/ci/win_build_cmake.sh" - displayName: "Build and Create Installer" - condition: succeeded() - - - template: ci/azure-win-installer-upload.yml - - stage: Final jobs: - job: AzureDropURL diff --git a/bin/CMakeLists.txt b/bin/CMakeLists.txt index 176ce13d73..a60c5888f4 100644 --- a/bin/CMakeLists.txt +++ b/bin/CMakeLists.txt @@ -2,6 +2,8 @@ # Set various variables used in template files # ============================================================================= # TODO: for nrn.defaults but these are repeated in cmake_config/CMakeLists.txt +include(${CODING_CONV_CMAKE}/build-time-copy.cmake) + set(modsubdir ${CMAKE_SYSTEM_PROCESSOR}) set(nrndef_unix "//") @@ -24,6 +26,7 @@ set(CMAKE_INSTALL_BINDIR bin) set(CMAKE_INSTALL_LIBDIR lib) set(CMAKE_INSTALL_INCLUDEDIR include) set(CMAKE_INSTALL_DATADIR share/nrn) +set(CMAKE_INSTALL_DATADIR_NMODL share/nmodl) # ============================================================================= # Include nrnivmodl makefile generator @@ -34,12 +37,13 @@ include(CMakeListsNrnMech) # nrnmech_makefile (based on coreneuron Configure templates) # ============================================================================= nrn_configure_file(nrngui bin) -configure_file(sortspike sortspike COPYONLY) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/sortspike ${CMAKE_CURRENT_BINARY_DIR}/sortspike COPYONLY) configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nrnivmodl_makefile_cmake.in ${PROJECT_BINARY_DIR}/bin/nrnmech_makefile @ONLY) +string(JOIN " " NRN_PYTHON_VERSIONS_STRING ${NRN_PYTHON_VERSIONS}) +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/nrnpyenv.sh.in ${PROJECT_BINARY_DIR}/bin/nrnpyenv.sh + @ONLY) -# if running from the build folder (e.g. make test) may need this. -file(COPY ${CMAKE_CURRENT_SOURCE_DIR}/nrnpyenv.sh DESTINATION ${PROJECT_BINARY_DIR}/bin) # Make sure nrnivmodl and neurondemo are executable in the build folder, so we can execute it to # prepare test files. This can be done more elegantly in newer CMake versions; v3.19+ have # file(CHMOD ...) and v3.20+ support setting permissions directly in configure_file(...). @@ -67,7 +71,6 @@ install(PROGRAMS ${PROJECT_BINARY_DIR}/bin/nrngui ${PROJECT_BINARY_DIR}/bin/neur ${PROJECT_BINARY_DIR}/bin/nrnivmodl DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) install(FILES ${PROJECT_BINARY_DIR}/bin/nrnmech_makefile DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) -install( - PROGRAMS ${CMAKE_CURRENT_SOURCE_DIR}/nrnpyenv.sh ${CMAKE_CURRENT_BINARY_DIR}/sortspike - ${CMAKE_CURRENT_SOURCE_DIR}/mkthreadsafe ${CMAKE_CURRENT_SOURCE_DIR}/nrnpyenv.sh - ${CMAKE_CURRENT_SOURCE_DIR}/set_nrnpyenv.sh DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) +install(PROGRAMS ${CMAKE_CURRENT_BINARY_DIR}/sortspike ${CMAKE_CURRENT_SOURCE_DIR}/mkthreadsafe + ${PROJECT_BINARY_DIR}/bin/nrnpyenv.sh ${CMAKE_CURRENT_SOURCE_DIR}/set_nrnpyenv.sh + DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) diff --git a/bin/nrn-enable-sanitizer.in b/bin/nrn-enable-sanitizer.in index ed7104b076..6f0488180f 100755 --- a/bin/nrn-enable-sanitizer.in +++ b/bin/nrn-enable-sanitizer.in @@ -10,7 +10,7 @@ if [[ "$1" == "--preload" ]]; then # shims. This is done from CMake, and PYTHON_EXECUTABLE contains the real # binary. 
shift - @NRN_SANITIZER_LD_PRELOAD@ @NRN_SANITIZER_ENABLE_ENVIRONMENT_STRING@ @PYTHON_EXECUTABLE@ "$@" + @NRN_SANITIZER_LD_PRELOAD@ @NRN_SANITIZER_ENABLE_ENVIRONMENT_STRING@ @NRN_DEFAULT_PYTHON_EXECUTABLE@ "$@" else @NRN_SANITIZER_LD_PRELOAD@ @NRN_SANITIZER_ENABLE_ENVIRONMENT_STRING@ "$@" fi diff --git a/bin/nrnivmodl-core.in b/bin/nrnivmodl-core.in index 742409d887..96726d4d9e 100755 --- a/bin/nrnivmodl-core.in +++ b/bin/nrnivmodl-core.in @@ -36,7 +36,7 @@ params_BUILD_TYPE="@COMPILE_LIBRARY_TYPE@" params_NRN_PRCELLSTATE="@CORENRN_NRN_PRCELLSTATE@" # prefix for common options : make sure to rename these if options are changed. -MAKE_OPTIONS="MECHLIB_SUFFIX MOD2CPP_BINARY MOD2CPP_RUNTIME_FLAGS DESTDIR INCFLAGS LINKFLAGS MODS_PATH VERBOSE BUILD_TYPE NRN_PRCELLSTATE" +MAKE_OPTIONS="MECHLIB_SUFFIX NMODL_BINARY NMODL_RUNTIME_FLAGS DESTDIR INCFLAGS LINKFLAGS MODS_PATH VERBOSE BUILD_TYPE NRN_PRCELLSTATE" # parse CLI args while getopts "n:m:a:d:i:l:Vp:r:b:h" OPT; do @@ -45,11 +45,11 @@ while getopts "n:m:a:d:i:l:Vp:r:b:h" OPT; do # suffix for mechanism library params_MECHLIB_SUFFIX="$OPTARG";; m) - # nmodl or mod2c binary to use - params_MOD2CPP_BINARY="$OPTARG";; + # nmodl binary to use + params_NMODL_BINARY="$OPTARG";; a) # additional nmodl flags to be used - params_MOD2CPP_RUNTIME_FLAGS="$OPTARG";; + params_NMODL_RUNTIME_FLAGS="$OPTARG";; d) # destination install directory params_DESTDIR="$OPTARG";; diff --git a/bin/nrnivmodl.in b/bin/nrnivmodl.in index 0c5379b7c4..100c173cc9 100755 --- a/bin/nrnivmodl.in +++ b/bin/nrnivmodl.in @@ -46,6 +46,8 @@ LinkCoreNEURON=false UserINCFLAGS="" UserLDFLAGS="" UserCOREFLAGS=() +UserNMODLBIN="" +UserNMODLFLAGS="" # - options come first but can be in any order. while [ "$1" ] ; do @@ -68,6 +70,17 @@ while [ "$1" ] ; do UserCOREFLAGS+=(-l "${2}") shift shift;; + -nmodl) + echo "[NMODL][warning] Code generation with NMODL is pre-alpha, lacks features and is intended only for development use" + UserNMODLBIN="$2" + UserNMODLFLAGS="--neuron $UserNMODLFLAGS" + shift + shift;; + -nmodlflags) + echo "[NMODL][warning] If sympy is enabled, NMODL needs to be found in PYTHONPATH" + UserNMODLFLAGS="$UserNMODLFLAGS $2" + shift + shift;; -*) echo "$1 unrecognized" exit 1;; @@ -159,7 +172,7 @@ for i in "${files[@]}" ; do echo "\ ./${base_name// /\\ }.cpp: ${f}.mod @printf \" -> \$(C_GREEN)NMODL\$(C_RESET) \$<\\\n\" - (cd \"$dir_name\"; @NRN_NOCMODL_SANITIZER_ENVIRONMENT_STRING@ MODLUNIT=\$(NRNUNITS) \$(NOCMODL) \"$base_name.mod\" -o \"$mdir\") + (cd \"$dir_name\"; @NRN_NOCMODL_SANITIZER_ENVIRONMENT_STRING@ MODLUNIT=\$(NRNUNITS) \$(NOCMODL) \"$base_name.mod\" -o \"$mdir\" $UserNMODLFLAGS) ./${base_name// /\\ }.o: ./${base_name// /\\ }.cpp @printf \" -> \$(C_GREEN)Compiling\$(C_RESET) \$<\\\n\" @@ -246,5 +259,5 @@ if [ "$LinkCoreNEURON" = true ] ; then fi fi -make -j 4 -f "${bindir}/nrnmech_makefile" "ROOT=${prefix}" "MODOBJFILES=$MODOBJS" "UserLDFLAGS=$UserLDFLAGS" "UserINCFLAGS=$UserINCFLAGS" "LinkCoreNEURON=$LinkCoreNEURON" special && +make -j 4 -f "${bindir}/nrnmech_makefile" "ROOT=${prefix}" "MODOBJFILES=$MODOBJS" "UserLDFLAGS=$UserLDFLAGS" "UserINCFLAGS=$UserINCFLAGS" "LinkCoreNEURON=$LinkCoreNEURON" "UserNMODLBIN=$UserNMODLBIN" "UserNMODLFLAGS=$UserNMODLFLAGS" special && echo "Successfully created $MODSUBDIR/special" diff --git a/bin/nrnivmodl_core_makefile.in b/bin/nrnivmodl_core_makefile.in index 1555ffb6e9..90acecb724 100644 --- a/bin/nrnivmodl_core_makefile.in +++ b/bin/nrnivmodl_core_makefile.in @@ -25,7 +25,7 @@ CORENRN_BIN_DIR := $(ROOT)/bin CORENRN_LIB_DIR := 
$(ROOT)/lib CORENRN_INC_DIR := $(ROOT)/include CORENRN_SHARE_CORENRN_DIR:= $(ROOT)/share/coreneuron -CORENRN_SHARE_MOD2CPP_DIR := $(ROOT)/share/mod2c +CORENRN_SHARE_NMODL_DIR := $(ROOT)/share/nmodl # name of the CoreNEURON binary SPECIAL_EXE = $(OUTPUT_DIR)/special-core @@ -43,7 +43,7 @@ LDFLAGS = $(LINKFLAGS) @CORENRN_COMMON_LDFLAGS@ # coreneuron/utils/randoms goes first because it needs to override the NEURON # directory in INCFLAGS INCLUDES = -I$(CORENRN_INC_DIR)/coreneuron/utils/randoms $(INCFLAGS) -I$(CORENRN_INC_DIR) -ifeq (@CORENRN_ENABLE_MPI_DYNAMIC@, OFF) +ifeq (@NRN_ENABLE_MPI_DYNAMIC@, OFF) INCLUDES += $(if @MPI_CXX_INCLUDE_PATH@, -I$(subst ;, -I,@MPI_CXX_INCLUDE_PATH@),) endif @@ -52,21 +52,11 @@ ifeq ($(origin CXX), default) CXX = @CMAKE_CXX_COMPILER@ endif -ifeq (@CORENRN_ENABLE_GPU@, ON) - ifneq ($(shell $(CXX) --version | grep -o nvc++), nvc++) - $(error GPU wheels are only compatible with the NVIDIA C++ compiler nvc++, but CXX=$(CXX) and --version gives $(shell $(CXX) --version)) - endif - # nvc++ -dumpversion is simpler, but only available from 22.2 - ifeq ($(findstring nvc++ @CORENRN_NVHPC_MAJOR_MINOR_VERSION@, $(shell $(CXX) --version)),) - $(error GPU wheels are currently not compatible across NVIDIA HPC SDK versions. You have $(shell $(CXX) -V | grep nvc++) but this wheel was built with @CORENRN_NVHPC_MAJOR_MINOR_VERSION@.) - endif -endif - # In case of wheel, python and perl exe paths are from the build machine. # First prefer env variables set by neuron's nrnivmodl wrapper then check # binary used during build. If they don't exist then simply use python and # perl as the name of binaries. -CORENRN_PYTHONEXE ?= @PYTHON_EXECUTABLE@ +CORENRN_PYTHONEXE ?= @NRN_DEFAULT_PYTHON_EXECUTABLE@ CORENRN_PERLEXE ?= @PERL_EXECUTABLE@ ifeq ($(wildcard $(CORENRN_PYTHONEXE)),) CORENRN_PYTHONEXE=python @@ -81,15 +71,12 @@ CXX_LINK_EXE_CMD = $(CXX) $(CXXFLAGS) @CMAKE_EXE_LINKER_FLAGS@ CXX_SHARED_LIB_CMD = $(CXX) $(CXXFLAGS) @CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS@ @CMAKE_SHARED_LIBRARY_CXX_FLAGS@ @CMAKE_SHARED_LINKER_FLAGS@ # env variables required for mod2c or nmodl -MOD2CPP_ENV_VAR = @CORENRN_SANITIZER_ENABLE_ENVIRONMENT_STRING@ PYTHONPATH=@CORENRN_NMODL_PYTHONPATH@:${CORENRN_LIB_DIR}/python MODLUNIT=$(CORENRN_SHARE_MOD2CPP_DIR)/nrnunits.lib - -# nmodl options -ifeq (@CORENRN_ENABLE_NMODL@, ON) - ifeq (@CORENRN_ENABLE_GPU@, ON) - nmodl_arguments_c=@NMODL_ACC_BACKEND_ARGS@ @NMODL_COMMON_ARGS@ - else - nmodl_arguments_c=@NMODL_CPU_BACKEND_ARGS@ @NMODL_COMMON_ARGS@ - endif +NMODL_ENV_VAR = @CORENRN_SANITIZER_ENABLE_ENVIRONMENT_STRING@ PYTHONPATH=@CORENRN_NMODL_PYTHONPATH@:${CORENRN_LIB_DIR}/python MODLUNIT=$(CORENRN_SHARE_NMODL_DIR)/nrnunits.lib + +ifeq (@CORENRN_ENABLE_GPU@, ON) + nmodl_arguments_c=@NMODL_ACC_BACKEND_ARGS@ @NMODL_COMMON_ARGS@ +else + nmodl_arguments_c=@NMODL_CPU_BACKEND_ARGS@ @NMODL_COMMON_ARGS@ endif # name of the mechanism library with suffix if provided @@ -110,12 +97,12 @@ else corenrnmech_lib_target = coremech_lib_shared endif -# Binary of MOD2C/NMODL depending on CMake option activated +# Binary of NMODL depending on CMake option activated ifeq (@nmodl_FOUND@, TRUE) - MOD2CPP_BINARY_PATH = $(if $(MOD2CPP_BINARY),$(MOD2CPP_BINARY), @CORENRN_MOD2CPP_BINARY@) - INCLUDES += -I@CORENRN_MOD2CPP_INCLUDE@ + NMODL_BINARY_PATH = $(if $(NMODL_BINARY),$(NMODL_BINARY), @CORENRN_NMODL_BINARY@) + INCLUDES += -I@CORENRN_NMODL_INCLUDE@ else - MOD2CPP_BINARY_PATH = $(if $(MOD2CPP_BINARY),$(MOD2CPP_BINARY), $(CORENRN_BIN_DIR)/@nmodl_binary_name@) + NMODL_BINARY_PATH = $(if 
$(NMODL_BINARY),$(NMODL_BINARY), $(CORENRN_BIN_DIR)/@nmodl_binary_name@) endif # MOD files with full path, without path and names without .mod extension @@ -145,15 +132,13 @@ ALL_OBJS = $(MOD_FUNC_OBJ) $(mod_cpp_objs) C_RESET := \033[0m C_GREEN := \033[32m -# Default nmodl flags. Override if MOD2CPP_RUNTIME_FLAGS is not empty -ifeq (@CORENRN_ENABLE_NMODL@, ON) - MOD2CPP_FLAGS_C = $(if $(MOD2CPP_RUNTIME_FLAGS),$(MOD2CPP_RUNTIME_FLAGS),$(nmodl_arguments_c)) -endif +# Default nmodl flags. Override if NMODL_RUNTIME_FLAGS is not empty +NMODL_FLAGS_C = $(if $(NMODL_RUNTIME_FLAGS),$(NMODL_RUNTIME_FLAGS),$(nmodl_arguments_c)) $(info Default NMODL flags: @nmodl_arguments_c@) -ifneq ($(MOD2CPP_RUNTIME_FLAGS),) - $(warning Runtime nmodl flags (they replace the default ones): $(MOD2CPP_RUNTIME_FLAGS)) +ifneq ($(NMODL_RUNTIME_FLAGS),) + $(warning Runtime nmodl flags (they replace the default ones): $(NMODL_RUNTIME_FLAGS)) endif # ======== MAIN BUILD RULES ============ @@ -200,7 +185,7 @@ $(MOD_OBJS_DIR)/%.o: $(MOD_TO_CPP_DIR)/%.cpp | $(MOD_OBJS_DIR) # translate MOD files to CPP using mod2c/NMODL $(mod_cpp_files): $(MOD_TO_CPP_DIR)/%.cpp: $(MODS_PATH)/%.mod | $(MOD_TO_CPP_DIR) - $(MOD2CPP_ENV_VAR) $(MOD2CPP_BINARY_PATH) $< -o $(MOD_TO_CPP_DIR)/ $(MOD2CPP_FLAGS_C) + $(NMODL_ENV_VAR) $(NMODL_BINARY_PATH) $< -o $(MOD_TO_CPP_DIR)/ $(NMODL_FLAGS_C) # generate mod registration function. Dont overwrite if it's not changed $(MOD_FUNC_CPP): build_always | $(MOD_TO_CPP_DIR) @@ -209,7 +194,7 @@ $(MOD_FUNC_CPP): build_always | $(MOD_TO_CPP_DIR) mv $(MOD_FUNC_CPP).tmp $(MOD_FUNC_CPP) # symlink to cpp files provided by coreneuron -$(MOD_TO_CPP_DIR)/%.cpp: $(CORENRN_SHARE_MOD2CPP_DIR)/%.cpp | $(MOD_TO_CPP_DIR) +$(MOD_TO_CPP_DIR)/%.cpp: $(CORENRN_SHARE_NMODL_DIR)/%.cpp | $(MOD_TO_CPP_DIR) ln -s $< $@ # create directories needed diff --git a/bin/nrnivmodl_makefile_cmake.in b/bin/nrnivmodl_makefile_cmake.in index 62733c5f51..0bbf3d9375 100644 --- a/bin/nrnivmodl_makefile_cmake.in +++ b/bin/nrnivmodl_makefile_cmake.in @@ -4,6 +4,8 @@ # UserLDFLAGS # UserINCFLAGS # LinkCoreNEURON +# UserNMODLBIN +# UserNMODLFLAGS # Rules to build MODOBJFILES from mod files are found in makemod2c_inc # Mechanisms version are by default 0.0, but should be overriden @@ -15,6 +17,7 @@ OUTPUT = . DESTDIR = UserINCFLAGS = UserLDFLAGS = +UserNMODLBIN = # install dirs bindir := ${ROOT}/@CMAKE_INSTALL_BINDIR@ @@ -40,29 +43,8 @@ endif # - @NRN_LINK_DEFS LDFLAGS = $(IV_LINK) $(READLINE_LINK) $(LINKFLAGS) $(UserLDFLAGS) @NRN_LINK_DEFS@ -# In GPU wheel distributions then the shipped libnrniv.so is linked against -# some NVIDIA runtime libraries that are shipped with the wheel. If we use -# nrnivmodl on the user machine then the NVIDIA compilers will link the local -# versions of these libraries too, causing duplication. -libnrniv_without_nvidia: $(libdir)/libnrniv.so - cp -v $(libdir)/libnrniv.so $(OUTPUT)/libnrniv-without-nvidia.so - patchelf $(OUTPUT)/libnrniv-without-nvidia.so --print-needed | grep '^libnv\(hpcatm\|omp\|cpumath\|cpumath-avx2\|c\)-[a-f0-9]\{8\}\.so' | xargs -t -r -n 1 patchelf $(OUTPUT)/libnrniv-without-nvidia.so --remove-needed - patchelf $(OUTPUT)/libnrniv-without-nvidia.so --set-soname libnrniv-without-nvidia.so - patchelf $(OUTPUT)/libnrniv-without-nvidia.so --print-rpath - ldd $(OUTPUT)/libnrniv-without-nvidia.so - -# In a GPU wheel build then we need to fudge libnrniv.so before linking to it. -# NEURONDEMO should be set when we run this as part of the wheel build, in -# which case we do *not* want this hack. 
-ifeq (@NRN_ENABLE_CORENEURON@@NRN_WHEEL_BUILD@@CORENRN_ENABLE_GPU@$(if $(NRNDEMO),OFF,ON), ONONONON) - NRNLIB_FLAGS = -L$(OUTPUT) -lnrniv-without-nvidia - NRNLIB_RPATH_FLAGS = -Wl,-rpath,\$$ORIGIN -Wl,-rpath,\$$ORIGIN/.. - nrn_lib = libnrniv_without_nvidia -else - NRNLIB_FLAGS = -L$(libdir) -lnrniv - NRNLIB_RPATH_FLAGS = -Wl,-rpath,$(libdir) - nrn_lib = -endif +NRNLIB_FLAGS = -L$(libdir) -lnrniv +NRNLIB_RPATH_FLAGS = -Wl,-rpath,$(libdir) OS_NAME := $(shell uname) _cm =, @@ -86,9 +68,13 @@ CFLAGS = @BUILD_TYPE_C_FLAGS@ @CMAKE_C_FLAGS@ @CMAKE_C17_STANDARD_COMPILE_OPTION CCOMPILE = $(CC) $(CFLAGS) @NRN_COMPILE_DEFS_STRING@ @NRN_COMPILE_FLAGS_STRING@ CXX_LINK_EXE = $(CXX) $(CXXFLAGS) @CMAKE_EXE_LINKER_FLAGS@ @NRN_LINK_FLAGS_STRING@ -CXX_LINK_SHARED = $(CXX) $(CXXFLAGS) @CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS@ @CMAKE_SHARED_LIBRARY_CXX_FLAGS@ @CMAKE_SHARED_LINKER_FLAGS@ +CXX_LINK_SHARED = $(CXX) $(CXXFLAGS) @CMAKE_SHARED_LIBRARY_CREATE_CXX_FLAGS@ @CMAKE_SHARED_LIBRARY_CXX_FLAGS@ @CMAKE_SHARED_LINKER_FLAGS@ @NRN_LINK_FLAGS_STRING@ +ifeq ($(UserNMODLBIN), ) NOCMODL = $(bindir)/nocmodl +else +NOCMODL = $(UserNMODLBIN) +endif NRNUNITS = $(datadir_lib)/nrnunits.lib # File path config (internal) @@ -142,13 +128,13 @@ special: $(mech_lib) $(mech_lib): $(mech_lib_type) -mech_lib_shared: mod_func.o $(mod_objs) $(nrn_lib) build_always +mech_lib_shared: mod_func.o $(mod_objs) build_always @printf " => $(C_GREEN)LINKING$(C_RESET) shared library $(mech_lib)\n" $(CXX_LINK_SHARED) -I $(incdir) -o ${mech_lib} ${_SONAME} \ $(mod_func_o) $(mod_objs) $(NRNLIB_FLAGS) $(NRNLIB_RPATH_FLAGS) $(LDFLAGS) rm -f $(OBJS_DIR)/.libs/libnrnmech.so ; mkdir -p $(OBJS_DIR)/.libs ; cp $(mech_lib) $(OBJS_DIR)/.libs/libnrnmech.so -mech_lib_static: mod_func.o $(mod_objs) $(nrn_lib) build_always +mech_lib_static: mod_func.o $(mod_objs) build_always @printf " => $(C_GREEN)LINKING$(C_RESET) static library $(mech_lib)\n" ar cq ${mech_lib} $(mod_func_o) $(mod_objs) $(cobjs); diff --git a/bin/nrnpyenv.sh b/bin/nrnpyenv.sh.in similarity index 71% rename from bin/nrnpyenv.sh rename to bin/nrnpyenv.sh.in index c6fa9b3b40..9294cd9e7a 100755 --- a/bin/nrnpyenv.sh +++ b/bin/nrnpyenv.sh.in @@ -5,7 +5,6 @@ # environment as python # May specify the python executable with explicit first argument. -# Without arg use python3 and if that does not exist then python # Overcome environment issues when --with-nrnpython=dynamic . @@ -14,7 +13,6 @@ # and not loading the correct python library. #Run python and generate the following on stdout -#export PYTHONHOME=... #export PYTHONPATH=... #export LD_LIBRARY_PATH=... #export PATH=... @@ -33,9 +31,21 @@ export originalPATH="$PATH" export originalPYTHONPATH="$PYTHONPATH" -export originalPYTHONHOME="$PYTHONHOME" export originalLDLIBRARYPATH="$LD_LIBRARY_PATH" +# list the Python versions that this NEURON build supports +# the format is {major}.{minor}, i.e. 3.8, 3.9, 3.10, ... +_this_neuron_py_versions=(@NRN_PYTHON_VERSIONS_STRING@) + +# order of preference is: -pyexe, $NRN_PYTHONEXE, python, python3, +# pythonX..pythonY, where X..Y are the versions in +# _this_neuron_py_versions. 
In this order, we accept the first one yielding a +# version we support +_pythons_to_try=(python python3) +for _ver_with_dot in "${_this_neuron_py_versions[@]}"; do + _pythons_to_try+=("python${_ver_with_dot}") +done + # get the last argument for last; do true; done @@ -47,10 +57,8 @@ export PATH=/cygdrive/${last//:/}/mingw/usr/bin:/cygdrive/${last//:/}/mingw/ming set -- "${@:1:$(($#-1))}" fi -if test "$PYTHONHOME" != "" ; then - echo "# Ignoring existing PYTHONHOME=$PYTHONHOME." - unset PYTHONHOME -fi +# The first argument passed to this script is the value of nrniv -pyexe +pyexe_arg="$1" WHICH=which @@ -75,98 +83,141 @@ function trypy { fi } -# On some windows systems python is an empty executable which, when -# launched in a Command Prompt, directs the user to the Microsoft Store. -# With bash, it returns a 128 exit status. So we loop until we -# find a working python (or no python). Each time a python is non-working -# we remove that path from the PATH. If not Windows, break out after first -# attempt at finding a Python. -while true ; do - PYTHON="" - # Priority is the argument -pyexe, NRN_PYTHONEXE, python3 and then python - - # check -pyexe option - if test "$1" != "" ; then - if $WHICH "$1" >& /dev/null ; then - PYTHON="$1" +unset PYTHON +unset PYTHON_VERSION +function try_python { + cmd_name="$1" + if [ -z "${cmd_name}" ]; then + return 1 + fi + ver_and_path=$("${cmd_name}" -c "import sys; print('{}.{} {}'.format(*sys.version_info[:2], sys.executable))" 2>&1) + code="$?" + if [ $code -ne 0 ]; then + echo "# failed to run ${cmd_name} (${ver_and_path})" + if [ $code -eq 128 ]; then + PYTHON_COMMANDS_THAT_RETURNED_CODE_128+=("${cmd_name}") fi - # If NRN_PYTHONEXE is set (e.g. from wheel wrapper) then use it - elif test "$NRN_PYTHONEXE" != ""; then - PYTHON=$NRN_PYTHONEXE - elif $WHICH python3 >& /dev/null ; then - PYTHON=python3 - elif $WHICH python >& /dev/null ; then - PYTHON=python + return 1 fi - - # do not do the following craziness if not Windows. - if test "$OS" != "Windows_NT" ; then - break + full_path=${ver_and_path#* } + version=${ver_and_path%" ${full_path}"} + if [[ ! " ${_this_neuron_py_versions[*]} " =~ " ${version} " ]]; then + echo "# ran ${cmd_name} (${full_path}) but Python ${version} is not supported by this NEURON installation (supported: ${_this_neuron_py_versions[*]})." + return 1 fi + PYTHON="${full_path}" + PYTHON_VERSION="${version}" +} - if test "$PYTHON" == "" ; then - break - else - if $PYTHON -c 'quit()' >& /dev/null ; then #working - break - else # remove from PATH - oldpath="$PATH" - a="`$WHICH $PYTHON`" - b="`dirname \"$a\"`" - PATH="`echo \"$PATH\" | sed \"s,:$b:,:,\"`" #remove b from path if internal - PATH="`echo \"$PATH\" | sed \"s,^$b:,,\"`" #remove b from path if begin - PATH="`echo \"$PATH\" | sed \"s,:$b\$,\",`" #remove b from path if end - export PATH - if test "$oldpath" = "$PATH" ; then - echo "\"$b\", that contained a failing Python, did not get removed from PATH=\"$PATH\"" 1>&2 - exit 1 - fi - fi +# If either -pyexe or NRN_PYTHONEXE was set, it is an immediate hard error if +# they do not point to a a valid Python +_explicit_pythons_to_try=("${pyexe_arg}" "${NRN_PYTHONEXE}") +for _python in "${_explicit_pythons_to_try[@]}"; do + if [ -z "${_python}" ]; then + # don't bother distinguishing between "not set" and "set to empty string" + continue + fi + if ! 
try_python "${_python}"; then + echo "Given the explicit instructions:" + echo " -pyexe=${pyexe_arg}" + echo " NRN_PYTHONEXE=${NRN_PYTHONEXE}" + echo "we determined that '${_python}' is not valid." + echo "Because this was an explicit request, this script is returning an" + echo "error code instead of falling back to other search strategies..." + exit 1 fi done -if test "$PYTHON" = "" ; then - # Often people install Anaconda on Windows without adding it to PATH - if test "$OS" = "Windows_NT" -a "$APPDATA" != "" ; then - smenu="$APPDATA/Microsoft/Windows/Start Menu/Programs" - if test "$PYTHON" = "" ; then - trypy "$smenu" "Anaconda3 (64-bit)" "Anaconda Prompt (anaconda3).lnk" activate.bat - # Anaconda3 2020 may need more PATH for numpy to work. - if test "$PYTHON" != "" ; then - if ! $PYTHON -c 'import numpy' >& /dev/null ; then - # first item added in trypy - a="`echo $PATH | sed 's/:.*//'`" - export PATH="$PATH:$a/Library/mingw-w64/bin:$a/Library/usr/bin:$a/Library/bin:$a/Scripts:$a/bin:$a/condabin" - # Actually get this PATH when scripts do a -- eval "`nrnpyenv.sh`" - echo "export PATH=\"$PATH\"" - fi +# Fall back to PATH-based searches if this explicit approach didn't work +if [ -z "${PYTHON}" ]; then + # On some windows systems python is an empty executable which, when + # launched in a Command Prompt, directs the user to the Microsoft Store. + # With bash, it returns a 128 exit status. So we loop until we + # find a working python (or no python). Each time a python is non-working + # we remove that path from the PATH. If not Windows, break out after first + # attempt at finding a Python. + while true ; do + # _pythons_to_try is a list of command names to be looked up in $PATH + PYTHON_COMMANDS_THAT_RETURNED_CODE_128=() # hack for Windows, see below + for _python in "${_pythons_to_try[@]}"; do + if try_python "${_python}"; then + break 2 # break out of the inner `for` and the outer `while` fi + done + + # do not do the following craziness if not Windows. 
+ if test "$OS" != "Windows_NT" ; then + break fi - if test "$PYTHON" = "" ; then - trypy "$smenu" Anaconda3 "Anaconda Prompt.lnk" activate.bat + + if [ ${#PYTHON_COMMANDS_THAT_RETURNED_CODE_128[@]} -eq 0 ]; then + # Don't bother messing with PATH if we didn't get any of the 128 status + # codes referred to above + break fi - if test "$PYTHON" = "" ; then - trypy "$smenu" Anaconda2 "Anaconda Prompt.lnk" activate.bat + # try and remove from PATH the location of the first command we tried that + # returned code 128 + echo "# ${PYTHON_COMMANDS_THAT_RETURNED_CODE_128[@]} returned code 128" + oldpath="${PATH}" + a=$($WHICH "${PYTHON_COMMANDS_THAT_RETURNED_CODE_128[0]}") + b=$(dirname "$a") + echo "# trying to remove ${b} from the PATH" + PATH="`echo \"$PATH\" | sed \"s,:$b:,:,\"`" #remove b from path if internal + PATH="`echo \"$PATH\" | sed \"s,^$b:,,\"`" #remove b from path if begin + PATH="`echo \"$PATH\" | sed \"s,:$b\$,\",`" #remove b from path if end + export PATH + if [ "$oldpath" = "$PATH" ]; then + echo "\"$b\", that contained a failing Python, did not get removed from PATH=\"$PATH\"" 1>&2 + exit 1 fi - if test "$PYTHON" = "" ; then - trypy "$smenu" Anaconda "Anaconda Prompt.lnk" activate.bat + unset PYTHON_COMMANDS_THAT_RETURNED_CODE_128 + done +fi + +# Searching PATH didn't work; there are even more hacks to try on Windows +if [ -z "${PYTHON}" -a "${OS}" = "Windows_NT" -a -n "${APPDATA}" ]; then + # Often people install Anaconda on Windows without adding it to PATH + smenu="${APPDATA}/Microsoft/Windows/Start Menu/Programs" + trypy "${smenu}" "Anaconda3 (64-bit)" "Anaconda Prompt (anaconda3).lnk" activate.bat + # Anaconda3 2020 may need more PATH for numpy to work. + if test "$PYTHON" != "" ; then + if ! $PYTHON -c 'import numpy' >& /dev/null ; then + # first item added in trypy + a="`echo $PATH | sed 's/:.*//'`" + export PATH="$PATH:$a/Library/mingw-w64/bin:$a/Library/usr/bin:$a/Library/bin:$a/Scripts:$a/bin:$a/condabin" + # Actually get this PATH when scripts do a -- eval "`nrnpyenv.sh`" + echo "export PATH=\"$PATH\"" fi - if test "$PYTHON" = "" ; then #brittle but try Enthought - a=`cygpath -U "$APPDATA/../local/enthought/canopy/edm/envs/user"` - if test -d "$a" ; then - export PATH="$a":"$PATH" - PYTHON=python - fi + fi + if test "$PYTHON" = "" ; then + trypy "$smenu" Anaconda3 "Anaconda Prompt.lnk" activate.bat + fi + if test "$PYTHON" = "" ; then + trypy "$smenu" Anaconda2 "Anaconda Prompt.lnk" activate.bat + fi + if test "$PYTHON" = "" ; then + trypy "$smenu" Anaconda "Anaconda Prompt.lnk" activate.bat + fi + if test "$PYTHON" = "" ; then #brittle but try Enthought + a=`cygpath -U "$APPDATA/../local/enthought/canopy/edm/envs/user"` + if test -d "$a" ; then + export PATH="$a":"$PATH" + PYTHON=python fi fi + if [ -n "${PYTHON}" -a -z "${PYTHON_VERSION}"]; then + # In case one of the last-resort Windows hacks worked + PYTHON_VERSION=$("${PYTHON}" -c "import sys; print(\"{}.{}\".format(*sys.version_info[:2]))") + fi fi if test "$PYTHON" = "" ; then - echo "Cannot find executable python3 or python" 1>&2 + echo "Cannot find a Python in ${_pythons_to_try[@]} that matches the versions this NEURON installation supports: ${_this_neuron_py_versions[@]}" 1>&2 exit 1; fi -echo "# PYTHON=`$WHICH $PYTHON`" +echo "export NRN_PYTHONEXE=\"${PYTHON}\"" +echo "export NRN_PYTHONVERSION=\"${PYTHON_VERSION}\"" # what is the python library for Darwin nrnpylib_provenance="" @@ -555,9 +606,6 @@ if "darwin" in sys.platform or "linux" in sys.platform or "win" in sys.platform: print("\n#NRN_PYLIB 
provenance: " + str(nrnpylib_provenance)) print ("\n# if launch nrniv, then likely need:") - if pythonhome: - pythonhome=u2d(pythonhome) - print ("export NRN_PYTHONHOME=" + dq + pythonhome + dq) if ldpath and nrn_pylib is None: print ("export LD_LIBRARY_PATH=" + dq + ldpath + upathsep + "$LD_LIBRARY_PATH" + dq) if nrn_pylib is not None: diff --git a/bldnrnmacpkgcmake.sh b/bldnrnmacpkgcmake.sh index e29c3f7738..2f0a56af8d 100644 --- a/bldnrnmacpkgcmake.sh +++ b/bldnrnmacpkgcmake.sh @@ -1,7 +1,7 @@ #!/usr/bin/env bash set -ex -default_pythons="python3.8 python3.9 python3.10" +default_pythons="python3.8 python3.9 python3.10 python3.11" # distribution built with # bash bldnrnmacpkgcmake.sh # without args, default are the pythons above. @@ -11,6 +11,10 @@ default_pythons="python3.8 python3.9 python3.10" # All pythons must have the same macos version and that will become # the MACOSX_DEPLOYMENT_TARGET +# On my machine, to build nrn-x.x.x-macosx-10.9-universal2-py-38-39-310-311.pkg +# I built my own versions of 3.8 in $HOME/soft/python3.8, and +export PATH=$HOME/soft/python3.8/bin:$PATH + CPU=`uname -m` universal="yes" # changes to "no" if any python not universal @@ -45,11 +49,15 @@ if test "$archs" != "universal2" ; then universal=no fi +# Arrgh. Recent changes to nrn source require at least 10.15! +macosver=10.15 +mac_platform=macosx-$macosver-$archs + export MACOSX_DEPLOYMENT_TARGET=$macosver echo "MACOSX_DEPLOYMENT_TARGET=$MACOSX_DEPLOYMENT_TARGET" if test "$NRN_SRC" == "" ; then - NRN_SRC=$HOME/neuron/nrn + NRN_SRC=`pwd` fi NRN_BLD=$NRN_SRC/build NSRC=$NRN_SRC @@ -64,7 +72,7 @@ mkdir -p $NRN_BLD rm -r -f $NRN_BLD/* cd $NRN_BLD -PYVS="py" # will be part of package file name, eg. py-37-38-39-310 +PYVS="py" # will be part of package file name, eg. py-38-39-310-311 pythons="" # will be arg value of NRN_PYTHON_DYNAMIC for i in $args ; do PYVER=`$i -c 'from sys import version_info as v ; print (str(v.major) + str(v.minor)); quit()'` @@ -83,7 +91,7 @@ fi # from brew install gedit). User installations are expected to have the # former and would only accidentally have the latter. -cmake .. -DCMAKE_INSTALL_PREFIX=$NRN_INSTALL \ +cmake .. -G Ninja -DCMAKE_INSTALL_PREFIX=$NRN_INSTALL \ -DNRN_ENABLE_MPI_DYNAMIC=ON \ -DPYTHON_EXECUTABLE=`which python3` -DNRN_ENABLE_PYTHON_DYNAMIC=ON \ -DNRN_PYTHON_DYNAMIC="$pythons" \ @@ -94,7 +102,7 @@ cmake .. -DCMAKE_INSTALL_PREFIX=$NRN_INSTALL \ -DCMAKE_PREFIX_PATH=/usr/X11 \ -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ -make -j install +ninja install if test "$universal" = "yes" ; then _temp="`lipo -archs $NRN_INSTALL/share/nrn/demo/release/$CPU/special`" @@ -121,32 +129,29 @@ chk () { ) } +# test basic functionality +for i in $args ; do + chk $i +done + #/Applications/Packages.app from # http://s.sudre.free.fr/Software/Packages/about.html # For mac to do a productsign, need my developerID_installer.cer # and Neurondev.p12 file. To add to the keychain, double click each # of those files. By default, I added my certificates to the login keychain. -make macpkg # will sign the binaries, construct below - # mentioned PACKAGE_FILE_NAME, and request notarization from - # Apple. At the end it will print a stapling request that you - # should run manually after receiving a "success" email from - # Apple. +ninja macpkg # will sign the binaries, construct below + # mentioned PACKAGE_FILE_NAME, request notarization from + # Apple, and staple the package. 
-# test basic functionality -for i in $args ; do - chk $i -done - -# upload package to neuron.yale.edu -ALPHADIR='hines@neuron.yale.edu:/home/htdocs/ftp/neuron/versions/alpha' +# Copy the package to $HOME/$PACKAGE_FULL_NAME +# You should then manually upload that to github. describe="`sh $NRN_SRC/nrnversion.sh describe`" macos=macos${MACOSX_DEPLOYMENT_TARGET} PACKAGE_FULL_NAME=nrn-${describe}-${mac_platform}-${PYVS}.pkg -PACKATE_DOWNLOAD_NAME=$ALPHADIR/$PACKAGE_FULL_NAME PACKAGE_FILE_NAME=$NRN_BLD/src/mac/build/NEURON.pkg + +cp $PACKAGE_FILE_NAME $HOME/$PACKAGE_FULL_NAME + echo " - Until we figure out how to automatically staple the notarization - the following two commands must be executed manually. - xcrun stapler staple $PACKAGE_FILE_NAME - cp $PACKAGE_FILE_NAME $HOME/$PACKAGE_FULL_NAME + Manually upload $HOME/$PACKAGE_FULL_NAME to github " diff --git a/build_osx_wheels.sh b/build_osx_wheels.sh new file mode 100755 index 0000000000..7633361710 --- /dev/null +++ b/build_osx_wheels.sh @@ -0,0 +1,46 @@ +#!/usr/bin/env bash + +# This script assumes that you have the prerequisites already installed +# See https://nrn.readthedocs.io/en/latest/install/python_wheels.html#installing-macos-prerequisites +# +# If you want the script to check the nrniv output (for version string checking), set the following environment variable +# Note that this will require manual intervention during script execution +# +# export INTERACTIVE_OK=yes + +set -x + +export BREW_PREFIX=$(brew --prefix) +export PATH=/opt/homebrew/opt/bison/bin:/opt/homebrew/opt/flex/bin:$PATH + +export NRN_RELEASE_UPLOAD=false +export NRN_NIGHTLY_UPLOAD=false +export NEURON_NIGHTLY_TAG="" + +export SKIP_EMBEDED_PYTHON_TEST=true + +if [ -n "$1" ] +then + # example: 9.0a + export SETUPTOOLS_SCM_PRETEND_VERSION=$1 +fi + +set -e + +packaging/python/build_wheels.bash osx 3.9 coreneuron &>3.9-output +packaging/python/build_wheels.bash osx 3.10 coreneuron &>3.10-output +packaging/python/build_wheels.bash osx 3.11 coreneuron &>3.11-output + +if [ -n "$INTERACTIVE_OK" ] +then + for py in 9 10 11 + do + python3.${py} -m venv venv3.${py} + venv3.${py}/bin/pip install wheelhouse/NEURON-${1}-cp3${py}-cp3${py}-macosx_11_0_arm64.whl + venv3.${py}/bin/nrniv + done +fi + +bash packaging/python/test_wheels.sh python3.9 wheelhouse/NEURON-*-cp39*.whl true &>3.9-test-output +bash packaging/python/test_wheels.sh python3.10 wheelhouse/NEURON-*-cp310*.whl true &>3.10-test-output +bash packaging/python/test_wheels.sh python3.11 wheelhouse/NEURON-*-cp311*.whl true &>3.11-test-output diff --git a/ci/azure-wheel-test-upload.yml b/ci/azure-wheel-test-upload.yml index 5f93d30593..c62902a706 100644 --- a/ci/azure-wheel-test-upload.yml +++ b/ci/azure-wheel-test-upload.yml @@ -24,43 +24,21 @@ steps: - task: TwineAuthenticate@1 inputs: pythonUploadServiceConnection: AzureNeuronPypiNightly - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), ne(variables.GPU_BUILD, 'true'), ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) + condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) - script: | python -m pip install twine python -m twine upload --verbose --skip-existing -r NeuronPypiNightly --config-file $(PYPIRC_PATH) wheelhouse/*.whl - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), ne(variables.GPU_BUILD, 'true'),
ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) + condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) displayName: 'Upload nightly wheel to pypi.org' - - task: TwineAuthenticate@1 - inputs: - pythonUploadServiceConnection: AzureNeuronGpuPypiNightly - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), eq(variables.GPU_BUILD, 'true'), ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) - - - script: | - python -m pip install twine - python -m twine upload --verbose --skip-existing -r NeuronGpuPypiNightly --config-file $(PYPIRC_PATH) wheelhouse/*.whl - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule'), eq(variables.GPU_BUILD, 'true'), ne(variables['NRN_NIGHTLY_UPLOAD'], 'false'), eq(variables['Build.SourceBranch'], 'refs/heads/master')) - displayName: 'Upload nightly GPU wheel to pypi.org' - - task: TwineAuthenticate@1 inputs: pythonUploadServiceConnection: AzureNeuronPypi - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), ne(variables.GPU_BUILD, 'true'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) + condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) - script: | python -m pip install twine python -m twine upload --verbose --skip-existing -r NeuronPypi --config-file $(PYPIRC_PATH) wheelhouse/*.whl - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), ne(variables.GPU_BUILD, 'true'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) + condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) displayName: 'Upload release wheel to pypi.org' - - - task: TwineAuthenticate@1 - inputs: - pythonUploadServiceConnection: AzureNeuronGpuPypi - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), eq(variables.GPU_BUILD, 'true'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) - - - script: | - python -m pip install twine - python -m twine upload --verbose --skip-existing -r NeuronGpuPypiProj --config-file $(PYPIRC_PATH) wheelhouse/*.whl - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual'), eq(variables.GPU_BUILD, 'true'), eq(variables['NRN_RELEASE_UPLOAD'], 'true')) - displayName: 'Upload release GPU wheel to pypi.org' diff --git a/ci/azure-win-installer-upload.yml b/ci/azure-win-installer-upload.yml deleted file mode 100644 index acfade5821..0000000000 --- a/ci/azure-win-installer-upload.yml +++ /dev/null @@ -1,44 +0,0 @@ -steps: - - - task: PublishBuildArtifacts@1 - inputs: - pathToPublish: '$(Build.SourcesDirectory)\nrn-nightly-AMD64.exe' - displayName: 'Publish windows installer as build artifact' - - - task: UsePythonVersion@0 - inputs: - versionSpec: '3.8' - displayName: "Use System Python" - - - task: BatchScript@1 - inputs: - filename: ci/win_install_neuron.cmd - displayName: "Run Installer and launch .hoc association test" - condition: succeeded() - - - task: BatchScript@1 - inputs: - filename: ci/win_test_installer.cmd - displayName: "Test Installer" - condition: succeeded() - - - task: GithubRelease@0 - inputs: - gitHubConnection: neuronsimulator-installers - repositoryName: neuronsimulator/installers - action: edit - # note : if previous release doesn't exist, as we are pushing - # to different repository, we must need 
to provide commit id. - target: '9d7b1e26717bd207f7ae6114f78f7ab7b958c998' - tagSource: manual - tag: nightly - title: 'Nightly NEURON Developer Snapshot' - releaseNotesSource: input - releaseNotes: "Last NEURON Commit: $(Build.SourceVersionMessage)" - isPreRelease: true - assetUploadMode: replace - addChangeLog: false - assets: | - $(Build.SourcesDirectory)\nrn-nightly-AMD64.exe - displayName: 'Upload installer to GitHub' - condition: and(succeeded(), in(variables['Build.Reason'], 'Manual', 'Schedule')) diff --git a/ci/win_build_cmake.sh b/ci/win_build_cmake.sh index 94844089f4..c3c84ad37e 100755 --- a/ci/win_build_cmake.sh +++ b/ci/win_build_cmake.sh @@ -9,6 +9,9 @@ export MINGW_CHOST=x86_64-w64-mingw32 export MSYSTEM_PREFIX=/mingw64 export PATH=/mingw64/bin:$PATH +# have compatible cython3 +python3 -m pip install "cython<3" + # if BUILD_SOURCESDIRECTORY not available, use te root of the repo if [ -z "$BUILD_SOURCESDIRECTORY" ]; then export BUILD_SOURCESDIRECTORY=$(git rev-parse --show-toplevel) @@ -26,9 +29,10 @@ cd $BUILD_SOURCESDIRECTORY/build -DNRN_ENABLE_PYTHON=ON \ -DNRN_ENABLE_RX3D=ON \ -DNRN_RX3D_OPT_LEVEL=2 \ + -DNRN_BINARY_DIST_BUILD=ON \ -DPYTHON_EXECUTABLE=/c/Python38/python.exe \ -DNRN_ENABLE_PYTHON_DYNAMIC=ON \ - -DNRN_PYTHON_DYNAMIC='c:/Python38/python.exe;c:/Python39/python.exe;c:/Python310/python.exe;c:/Python311/python.exe' \ + -DNRN_PYTHON_DYNAMIC='c:/Python38/python.exe;c:/Python39/python.exe;c:/Python310/python.exe;c:/Python311/python.exe;c:/Python312/python.exe' \ -DCMAKE_INSTALL_PREFIX='/c/nrn-install' \ -DMPI_CXX_LIB_NAMES:STRING=msmpi \ -DMPI_C_LIB_NAMES:STRING=msmpi \ diff --git a/ci/win_download_deps.cmd b/ci/win_download_deps.cmd index fc58ed4254..a006ffc0d2 100644 --- a/ci/win_download_deps.cmd +++ b/ci/win_download_deps.cmd @@ -7,6 +7,7 @@ pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.8.exe htt pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.9.exe https://www.python.org/ftp/python/3.9.0/python-3.9.0-amd64.exe || goto :error pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.10.exe https://www.python.org/ftp/python/3.10.0/python-3.10.0-amd64.exe || goto :error pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.11.exe https://www.python.org/ftp/python/3.11.1/python-3.11.1-amd64.exe || goto :error +pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile python-3.12.exe https://www.python.org/ftp/python/3.12.1/python-3.12.1-amd64.exe || goto :error :: mpi pwsh -command Invoke-WebRequest -MaximumRetryCount 4 -OutFile msmpisetup.exe https://download.microsoft.com/download/a/5/2/a5207ca5-1203-491a-8fb8-906fd68ae623/msmpisetup.exe || goto :error diff --git a/ci/win_install_deps.cmd b/ci/win_install_deps.cmd index 79a32119df..2284b911c0 100644 --- a/ci/win_install_deps.cmd +++ b/ci/win_install_deps.cmd @@ -7,6 +7,7 @@ python-3.8.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustFo python-3.9.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python39 || goto :error python-3.10.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python310 || goto :error python-3.11.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python311 || goto :error +python-3.12.exe /passive Include_pip=1 Include_test=0 PrependPath=1 DefaultJustForMeTargetDir=C:\Python312 || goto :error :: fix msvcc version for all python3 pwsh -command "(Get-Content 
C:\Python38\Lib\distutils\cygwinccompiler.py) -replace 'elif msc_ver == ''1600'':', 'elif msc_ver == ''1916'':' | Out-File C:\Python38\Lib\distutils\cygwinccompiler.py" @@ -22,10 +23,12 @@ pwsh -command "(Get-Content C:\Python310\Lib\distutils\cygwinccompiler.py) -repl pwsh -command "(Get-Content C:\Python311\Lib\distutils\cygwinccompiler.py) -replace 'msvcr100', 'msvcrt' | Out-File C:\Python311\Lib\distutils\cygwinccompiler.py" :: install numpy -C:\Python38\python.exe -m pip install numpy==1.17.5 cython || goto :error -C:\Python39\python.exe -m pip install numpy==1.19.3 cython || goto :error -C:\Python310\python.exe -m pip install numpy==1.21.3 cython || goto :error -C:\Python311\python.exe -m pip install numpy==1.23.5 cython || goto :error +C:\Python38\python.exe -m pip install numpy==1.17.5 "cython < 3" || goto :error +C:\Python39\python.exe -m pip install numpy==1.19.3 "cython < 3" || goto :error +C:\Python310\python.exe -m pip install numpy==1.21.3 "cython < 3" || goto :error +C:\Python311\python.exe -m pip install numpy==1.23.5 "cython < 3" || goto :error +C:\Python312\python.exe -m pip install numpy==1.26.3 "cython < 3" || goto :error +C:\Python312\python.exe -m pip install setuptools || goto :error :: install nsis nsis-3.05-setup.exe /S || goto :error @@ -64,7 +67,6 @@ mingw-w64-x86_64-ninja ^ mingw-w64-x86_64-ncurses ^ mingw-w64-x86_64-readline ^ mingw-w64-x86_64-python3 ^ -mingw64/mingw-w64-x86_64-cython ^ mingw-w64-x86_64-python3-setuptools ^ mingw-w64-x86_64-python3-packaging ^ mingw-w64-x86_64-python3-pip ^ diff --git a/ci/win_install_neuron.cmd b/ci/win_install_neuron.cmd index 890622568a..fcbf662ff0 100644 --- a/ci/win_install_neuron.cmd +++ b/ci/win_install_neuron.cmd @@ -7,4 +7,4 @@ tree /F C:\nrn_test\lib\python :: Test of association with hoc files. This test is very tricky to handle. 
We do it in two steps :: 1st step -> launch association.hoc here and test the output in another step -start /wait /REALTIME %cd%\ci\association.hoc +start /B /wait /REALTIME %cd%\ci\association.hoc diff --git a/ci/win_test_installer.cmd b/ci/win_test_installer.cmd index 136fdba9a5..e6a5ab967f 100644 --- a/ci/win_test_installer.cmd +++ b/ci/win_test_installer.cmd @@ -17,15 +17,15 @@ echo %NEURONHOME% if not exist association.hoc.out (start /wait /REALTIME %cd%\ci\association.hoc) :: test all pythons -C:\Python38\python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()" || set "errorfound=y" -C:\Python39\python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()" || set "errorfound=y" -C:\Python310\python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()" || set "errorfound=y" -C:\Python311\python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()" || set "errorfound=y" +C:\Python38\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" +C:\Python39\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" +C:\Python310\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" +C:\Python311\python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" :: install numpy dependency python -m pip install numpy :: run also using whatever is system python python --version -python -c "import neuron; neuron.test(); neuron.test_rxd(); quit()" || set "errorfound=y" +python -c "import neuron; neuron.test(); quit()" || set "errorfound=y" :: test python and nrniv python -c "from neuron import h; s = h.Section(); s.insert('hh'); quit()" || set "errorfound=y" @@ -51,8 +51,8 @@ call nrnivmodl echo "nrnivmodl successfull" python -c "import neuron; from neuron import h; s = h.Section(); s.insert('cacum'); print('cacum inserted'); quit()" || set "errorfound=y" -:: text rxd -python share\lib\python\neuron\rxdtests\run_all.py || set "errorfound=y" +:: text rxd, disable until #2585 is fixed +:: python share\lib\python\neuron\rxdtests\run_all.py || set "errorfound=y" :: Test of association with hoc files. This test is very tricky to handle. We do it in two steps. :: 2nd step -> check association.hoc output after we've launched 1step in previous CI step diff --git a/ci_requirements.txt b/ci_requirements.txt new file mode 100644 index 0000000000..f6bd81c65b --- /dev/null +++ b/ci_requirements.txt @@ -0,0 +1,2 @@ +plotly +ipywidgets>=7.0.0 diff --git a/cmake/BuildOptionDefaults.cmake b/cmake/BuildOptionDefaults.cmake index 18a5b99a6e..e3e1f3dd63 100644 --- a/cmake/BuildOptionDefaults.cmake +++ b/cmake/BuildOptionDefaults.cmake @@ -21,13 +21,14 @@ set(NRN_ENABLE_MODEL_TESTS_DEFAULT "") set(NRN_ENABLE_PERFORMANCE_TESTS_DEFAULT ON) set(NRN_ENABLE_MODULE_INSTALL_DEFAULT ON) set(NRN_ENABLE_PYTHON_DYNAMIC_DEFAULT OFF) +set(NRN_LINK_AGAINST_PYTHON_DEFAULT ${MINGW}) set(NRN_ENABLE_MPI_DYNAMIC_DEFAULT OFF) set(NRN_ENABLE_MOD_COMPATIBILITY_DEFAULT OFF) set(NRN_ENABLE_REL_RPATH_DEFAULT OFF) set(NRN_AVOID_ABSOLUTE_PATHS_DEFAULT OFF) -set(NRN_DYNAMIC_UNITS_USE_LEGACY_DEFAULT OFF) set(NRN_NMODL_CXX_FLAGS_DEFAULT "-O0") set(NRN_SANITIZERS_DEFAULT "") +set(NRN_ENABLE_MATH_OPT_DEFAULT OFF) # Some distributions may set the prefix. 
To avoid errors, unset it set(NRN_PYTHON_DYNAMIC_DEFAULT "") @@ -43,7 +44,7 @@ set(PYTHON_EXECUTABLE_DEFAULT "") set(IV_LIB_DEFAULT "") # For wheel deployment -set(NRN_WHEEL_BUILD_DEFAULT OFF) +set(NRN_BINARY_DIST_BUILD_DEFAULT OFF) set(NRN_WHEEL_STATIC_READLINE_DEFAULT OFF) # we add some coreneuron options in order to check support like GPU @@ -62,11 +63,11 @@ set(NRN_OPTION_NAME_LIST NRN_ENABLE_MODEL_TESTS NRN_ENABLE_MODULE_INSTALL NRN_ENABLE_PYTHON_DYNAMIC + NRN_LINK_AGAINST_PYTHON NRN_ENABLE_MPI_DYNAMIC NRN_MODULE_INSTALL_OPTIONS NRN_PYTHON_DYNAMIC NRN_MPI_DYNAMIC - NRN_DYNAMIC_UNITS_USE_LEGACY NRN_RX3D_OPT_LEVEL NRN_SANITIZERS CMAKE_BUILD_TYPE diff --git a/cmake/CMakeListsNrnMech.cmake b/cmake/CMakeListsNrnMech.cmake index 38ea5084c8..6d53842273 100644 --- a/cmake/CMakeListsNrnMech.cmake +++ b/cmake/CMakeListsNrnMech.cmake @@ -23,48 +23,60 @@ endif() # Interview might have linked to libnrniv but we don't want to link to special list(REMOVE_ITEM NRN_LINK_LIBS "interviews") -# CMake does some magic to transform sys libs to -l. We replicate it -foreach(link_lib ${NRN_LINK_LIBS}) - # skip static readline library as it will be linked to nrniv (e.g. with wheel) also stub libraries - # from OSX can be skipped - if("${link_lib}" MATCHES "(libreadline.a|/*.tbd)") - continue() - endif() +function(get_link_libraries libs) + # CMake does some magic to transform sys libs to -l. We replicate it + foreach(link_lib ${libs}) + # skip static readline library as it will be linked to nrniv (e.g. with wheel) also stub + # libraries from OSX can be skipped + if("${link_lib}" MATCHES "(libreadline.a|/*.tbd)") + continue() + endif() - get_filename_component(dir_path ${link_lib} DIRECTORY) - if(TARGET ${link_lib}) - get_property( - link_flag - TARGET ${link_lib} - PROPERTY INTERFACE_LINK_LIBRARIES) - set(description - "Extracting link flags from target '${link_lib}', beware that this can be fragile.") - # Not use it yet because it can be generator expressions get_property(compile_flag TARGET - # ${link_lib} PROPERTY INTERFACE_COMPILE_OPTIONS) string(APPEND NRN_COMPILE_DEFS - # ${compile_flag}) - elseif(NOT dir_path) - set(link_flag "-l${link_lib}") - set(description - "Generating link flags from name '${link_lib}', beware that this can be fragile.") - # avoid library paths from special directory /nrnwheel which used to build wheels under docker - # container - elseif("${dir_path}" MATCHES "^/nrnwheel") - continue() - elseif("${dir_path}" MATCHES "^(/lib|/lib64|/usr/lib|/usr/lib64)$") - # NAME_WLE not avaialble with CMake version < 3.14 - get_filename_component(libname ${link_lib} NAME) - string(REGEX REPLACE "\\.[^.]*$" "" libname_wle ${libname}) - string(REGEX REPLACE "^lib" "" libname_wle ${libname_wle}) - set(link_flag "-l${libname_wle}") - set(description - "Extracting link flags from path '${link_lib}', beware that this can be fragile.") - else() - set(link_flag "${link_lib} -Wl,-rpath,${dir_path}") - set(description "Generating link flags from path ${link_lib}") - endif() - message(NOTICE "${description} Got: ${link_flag}") - string(APPEND NRN_LINK_DEFS " ${link_flag}") -endforeach() + get_filename_component(dir_path ${link_lib} DIRECTORY) + if(TARGET ${link_lib}) + get_property( + sublink_flag + TARGET ${link_lib} + PROPERTY INTERFACE_LINK_LIBRARIES) + set(description + "Extracting link flags from target '${link_lib}', beware that this can be fragile.") + # Not use it yet because it can be generator expressions get_property(compile_flag TARGET + # ${link_lib} PROPERTY INTERFACE_COMPILE_OPTIONS) 
string(APPEND NRN_COMPILE_DEFS + # ${compile_flag}) + foreach(sublink_lib ${sublink_flag}) + if(TARGET ${sublink_lib}) + message(NOTICE "For '${link_lib}' going to see TARGET '${sublink_lib}' recursively.") + get_link_libraries(${sublink_lib}) + else() + set(link_flag "${link_flag} ${sublink_flag}") + endif() + endforeach() + elseif(NOT dir_path) + set(link_flag "-l${link_lib}") + set(description + "Generating link flags from name '${link_lib}', beware that this can be fragile.") + # avoid library paths from special directory /nrnwheel which used to build wheels under docker + # container + elseif("${dir_path}" MATCHES "^/nrnwheel") + continue() + elseif("${dir_path}" MATCHES "^(/lib|/lib64|/usr/lib|/usr/lib64)$") + # NAME_WLE not avaialble with CMake version < 3.14 + get_filename_component(libname ${link_lib} NAME) + string(REGEX REPLACE "\\.[^.]*$" "" libname_wle ${libname}) + string(REGEX REPLACE "^lib" "" libname_wle ${libname_wle}) + set(link_flag "-l${libname_wle}") + set(description + "Extracting link flags from path '${link_lib}', beware that this can be fragile.") + else() + set(link_flag "${link_lib} -Wl,-rpath,${dir_path}") + set(description "Generating link flags from path ${link_lib}") + endif() + message(NOTICE "${description} Got: ${link_flag}") + string(APPEND NRN_LINK_DEFS " ${link_flag}") + endforeach() +endfunction(get_link_libraries) + +get_link_libraries("${NRN_LINK_LIBS}") # Compiler flags depending on cmake build type from BUILD_TYPE__FLAGS string(TOUPPER "${CMAKE_BUILD_TYPE}" _BUILD_TYPE) diff --git a/cmake/CompilerFlagsHelpers.cmake b/cmake/CompilerFlagsHelpers.cmake index 942c614ff4..7272084057 100644 --- a/cmake/CompilerFlagsHelpers.cmake +++ b/cmake/CompilerFlagsHelpers.cmake @@ -14,7 +14,8 @@ set(SUPPORTED_COMPILER_LANGUAGE_LIST "C;CXX") foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "XL") set(CMAKE_${COMPILER_LANGUAGE}_COMPILER_IS_XLC ON) - elseif(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "Intel") + elseif(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "Intel" + OR CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "IntelLLVM") set(CMAKE_${COMPILER_LANGUAGE}_COMPILER_IS_ICC ON) elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") set(CMAKE_${COMPILER_LANGUAGE}_COMPILER_IS_MSVC) @@ -27,6 +28,10 @@ foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) endif() endforeach() +set(UNSAFE_MATH_FLAG + "-ffinite-math-only -fno-math-errno -funsafe-math-optimizations -fno-associative-math") +set(FASTDEBUG_FLAG "-g -O1") + # Set optimization flags for each compiler foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) @@ -39,6 +44,7 @@ foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) set(CMAKE_${COMPILER_LANGUAGE}_OPT_NONE "-O0") set(CMAKE_${COMPILER_LANGUAGE}_OPT_NORMAL "-O2") set(CMAKE_${COMPILER_LANGUAGE}_OPT_FAST "-O3") + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG ${FASTDEBUG_FLAG}) set(CMAKE_${COMPILER_LANGUAGE}_STACK_PROTECTION "-qstackprotect") set(CMAKE_${COMPILER_LANGUAGE}_POSITION_INDEPENDENT "-qpic=small") set(CMAKE_${COMPILER_LANGUAGE}_VECTORIZE "-qhot") @@ -59,9 +65,11 @@ foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) set(CMAKE_${COMPILER_LANGUAGE}_OPT_NONE "-O0") set(CMAKE_${COMPILER_LANGUAGE}_OPT_NORMAL "-O2") set(CMAKE_${COMPILER_LANGUAGE}_OPT_FAST "-O3") + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG ${FASTDEBUG_FLAG}) set(CMAKE_${COMPILER_LANGUAGE}_STACK_PROTECTION "-fstack-protector") set(CMAKE_${COMPILER_LANGUAGE}_POSITION_INDEPENDENT 
"-fPIC") set(CMAKE_${COMPILER_LANGUAGE}_VECTORIZE "-ftree-vectorize") + set(CMAKE_${COMPILER_LANGUAGE}_UNSAFE_MATH ${UNSAFE_MATH_FLAG}) set(IGNORE_UNKNOWN_PRAGMA_FLAGS "-Wno-unknown-pragmas") if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_VERSION VERSION_GREATER "4.7.0") @@ -82,6 +90,11 @@ foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) set(CMAKE_${COMPILER_LANGUAGE}_OPT_NONE "-O0") set(CMAKE_${COMPILER_LANGUAGE}_OPT_NORMAL "-O2") set(CMAKE_${COMPILER_LANGUAGE}_OPT_FAST "-O3") + if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "IntelLLVM") + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG "${FASTDEBUG_FLAG} -fp-model precise") + else() + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG "${FASTDEBUG_FLAG} -fp-model consistent") + endif() set(CMAKE_${COMPILER_LANGUAGE}_STACK_PROTECTION "-fstack-protector") set(CMAKE_${COMPILER_LANGUAGE}_POSITION_INDEPENDENT "-fpic") set(CMAKE_${COMPILER_LANGUAGE}_VECTORIZE "") @@ -94,11 +107,18 @@ foreach(COMPILER_LANGUAGE ${SUPPORTED_COMPILER_LANGUAGE_LIST}) set(CMAKE_${COMPILER_LANGUAGE}_OPT_NONE "-O0") set(CMAKE_${COMPILER_LANGUAGE}_OPT_NORMAL "-O2") set(CMAKE_${COMPILER_LANGUAGE}_OPT_FAST "-O3") + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG "-g -O1") set(CMAKE_${COMPILER_LANGUAGE}_STACK_PROTECTION "") set(CMAKE_${COMPILER_LANGUAGE}_POSITION_INDEPENDENT "-fPIC") set(CMAKE_${COMPILER_LANGUAGE}_VECTORIZE "") if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "PGI") set(CMAKE_${COMPILER_LANGUAGE}_WARNING_ALL "") endif() + if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "Clang") + set(CMAKE_${COMPILER_LANGUAGE}_UNSAFE_MATH ${UNSAFE_MATH_FLAG}) + endif() + if(CMAKE_${COMPILER_LANGUAGE}_COMPILER_ID STREQUAL "NVHPC") + set(CMAKE_${COMPILER_LANGUAGE}_OPT_FASTDEBUG "${FASTDEBUG_FLAG} -fno-omit-frame-pointer") + endif() endif() endforeach() diff --git a/cmake/CompilerHelper.cmake b/cmake/CompilerHelper.cmake index 490ad3a09a..e50b120351 100644 --- a/cmake/CompilerHelper.cmake +++ b/cmake/CompilerHelper.cmake @@ -16,6 +16,15 @@ if(CMAKE_C_COMPILER_ID MATCHES "PGI" OR CMAKE_C_COMPILER_ID MATCHES "NVHPC") if(${CMAKE_VERSION} VERSION_GREATER_EQUAL "3.20" AND ${CMAKE_VERSION} VERSION_LESS "3.20.3") string(APPEND CMAKE_DEPFILE_FLAGS_CXX "-MD") endif() + + # CMake versions <3.19 used to add -A when using NVHPC/PGI, which makes the compiler excessively + # pedantic. See https://gitlab.kitware.com/cmake/cmake/-/issues/20997. Also, stdinit.h include + # behaviour is different with -A (ANSI C++) and result into an error mentioned in + # https://github.com/neuronsimulator/nrn/issues/2563 + if(CMAKE_VERSION VERSION_LESS 3.19) + list(REMOVE_ITEM CMAKE_CXX17_STANDARD_COMPILE_OPTION -A) + endif() + if(${CMAKE_C_COMPILER_VERSION} VERSION_GREATER_EQUAL 20.7) # https://forums.developer.nvidia.com/t/many-all-diagnostic-numbers-increased-by-1-from-previous-values/146268/3 # changed the numbering scheme in newer versions. 
The following list is from a clean start 16 @@ -28,17 +37,13 @@ if(CMAKE_C_COMPILER_ID MATCHES "PGI" OR CMAKE_C_COMPILER_ID MATCHES "NVHPC") # "src/modlunit/units.cpp", warning #170-D: pointer points outside of underlying object # "src/nrnpython/grids.cpp", warning #174-D: expression has no effect # "src/nmodl/nocpout.cpp", warning #177-D: variable "j" was declared but never referenced - # "src/mesch/conjgrad.c", warning #180-D: argument is incompatible with formal parameter # "src/nrniv/partrans.cpp", warning #186-D: pointless comparison of unsigned integer with zero - # "src/mesch/machine.h", warning #301-D: typedef name has already been declared (with same type) # "src/nrnpython/rxdmath.cpp", warning #541-D: allowing all exceptions is incompatible with previous function # "src/nmodl/nocpout.cpp", warning #550-D: variable "sion" was set but never used # "src/gnu/neuron_gnu_builtin.h", warning #816-D: type qualifier on return type is meaningless" - # "src/oc/fmenu.cpp", warning #941-D: missing return statement at end of non-void function "ibmgetc" # "src/modlunit/consist.cpp", warning #2465-D: conversion from a string literal to "char *" is deprecated # ~~~ - list(APPEND NRN_COMPILE_FLAGS - --diag_suppress=1,47,111,128,170,174,177,180,186,301,541,550,816,941,2465) + list(APPEND NRN_COMPILE_FLAGS --diag_suppress=1,47,111,128,170,174,177,186,541,550,816,2465) endif() list(APPEND NRN_COMPILE_FLAGS -noswitcherror) list(APPEND NRN_LINK_FLAGS -noswitcherror) @@ -46,7 +51,7 @@ if(CMAKE_C_COMPILER_ID MATCHES "PGI" OR CMAKE_C_COMPILER_ID MATCHES "NVHPC") # Random123 does not play nicely with NVHPC 21.11+'s detection of ABM features, see: # https://github.com/BlueBrain/CoreNeuron/issues/724 and # https://github.com/DEShawResearch/random123/issues/6. - list(APPEND NRN_COMPILE_DEFS R123_USE_INTRIN_H=0) + list(APPEND NRN_R123_COMPILE_DEFS R123_USE_INTRIN_H=0) endif() else() set(NRN_HAVE_NVHPC_COMPILER OFF) diff --git a/cmake/ConfigFileSetting.cmake b/cmake/ConfigFileSetting.cmake index 554ba9de6a..668ea44548 100644 --- a/cmake/ConfigFileSetting.cmake +++ b/cmake/ConfigFileSetting.cmake @@ -13,13 +13,11 @@ set(UNQUOTED_PACKAGE_VERSION "${PROJECT_VERSION}") # ~~~ nrn_set_string(PACKAGE "nrn") nrn_set_string(NRNHOST "${CMAKE_SYSTEM_PROCESSOR}-${CMAKE_SYSTEM_NAME}") -nrn_set_string(NRNHOSTCPU "${CMAKE_SYSTEM_PROCESSOR}") nrn_set_string(PACKAGE_STRING "nrn ${PROJECT_VERSION}") nrn_set_string(PACKAGE_VERSION "${PROJECT_VERSION}") nrn_set_string(VERSION "${PROJECT_VERSION}") nrn_set_string(NRN_LIBDIR "${CMAKE_INSTALL_PREFIX}/lib") nrn_set_string(NEURON_DATA_DIR "${CMAKE_INSTALL_PREFIX}/share/nrn") -nrn_set_string(LT_OBJDIR ".libs/") nrn_set_string(DLL_DEFAULT_FNAME "${CMAKE_SYSTEM_PROCESSOR}/.libs/libnrnmech.so") # indicate nmodl config is used @@ -27,11 +25,7 @@ add_definitions(-DHAVE_CONFIG_H) set(YYTEXT_POINTER 1) set(TIME_WITH_SYS_TIME 1) -set(HAVE_NAMESPACES "/**/") -set(HAVE_STTY 0) # below two are universal nowadays -set(IVOS_FABS "::fabs") -set(HAVE_STL "/**/") set(prefix ${CMAKE_INSTALL_PREFIX}) set(host_cpu ${CMAKE_SYSTEM_PROCESSOR}) set(exec_prefix ${prefix}) @@ -98,24 +92,6 @@ else() set(DISCRETE_EVENT_OBSERVER 0) endif() -# No longer a user option. Default modern units. Controlled at launch by the environment variable -# NRNUNIT_USE_LEGACY, and dynamically after launch by h.nrnunit_use_legacy(0or1). Left here solely -# to obtain a nrnunits.lib file for modlunit. Nmodl uses the nrnunits.lib.in file. 
-set(NRN_ENABLE_LEGACY_FR 0) -if(NRN_ENABLE_LEGACY_FR) - set(LegacyFR 1) - set(LegacyY "") - set(LegacyN "/") - set(LegacyYPy "") - set(LegacyNPy "#") -else() - set(LegacyFR 0) - set(LegacyY "/") - set(LegacyN "") - set(LegacyYPy "#") - set(LegacyNPy "") -endif() - if(NRN_ENABLE_MECH_DLL_STYLE) set(NRNMECH_DLL_STYLE 1) else() @@ -129,14 +105,7 @@ else() endif() if(NRN_ENABLE_PYTHON_DYNAMIC) - # the value needs to be made not to matter - set(NRNPYTHON_DYNAMICLOAD 3) -endif() - -if(NRN_DYNAMIC_UNITS_USE_LEGACY) - set(DYNAMIC_UNITS_USE_LEGACY_DEFAULT 1) -else() - unset(DYNAMIC_UNITS_USE_LEGACY_DEFAULT) + list(APPEND NRN_COMPILE_DEFS NRNPYTHON_DYNAMICLOAD) endif() # ============================================================================= @@ -148,85 +117,44 @@ set(SUNDIALS_USE_GENERIC_MATH 1) # ============================================================================= # Similar to check_include_files but also construct NRN_HEADERS_INCLUDE_LIST # ============================================================================= -nrn_check_include_files(alloca.h HAVE_ALLOCA_H) nrn_check_include_files(dlfcn.h HAVE_DLFCN_H) nrn_check_include_files(execinfo.h HAVE_EXECINFO_H) -nrn_check_include_files(fcntl.h HAVE_FCNTL_H) nrn_check_include_files(fenv.h HAVE_FENV_H) -nrn_check_include_files(float.h HAVE_FLOAT_H) -nrn_check_include_files(inttypes.h HAVE_INTTYPES_H) -nrn_check_include_files(limits.h HAVE_LIMITS_H) -nrn_check_include_files(locale.h HAVE_LOCALE_H) nrn_check_include_files(malloc.h HAVE_MALLOC_H) -nrn_check_include_files(math.h HAVE_MATH_H) -nrn_check_include_files(memory.h HAVE_MEMORY_H) -nrn_check_include_files(sgtty.h HAVE_SGTTY_H) -nrn_check_include_files(stdarg.h HAVE_STDARG_H) -nrn_check_include_files(stdint.h HAVE_STDINT_H) -nrn_check_include_files(stdlib.h HAVE_STDLIB_H) -nrn_check_include_files(stream.h HAVE_STREAM_H) nrn_check_include_files(strings.h HAVE_STRINGS_H) -nrn_check_include_files(string.h HAVE_STRING_H) -nrn_check_include_files(stropts.h HAVE_STROPTS_H) -nrn_check_include_files(sys/conf.h HAVE_SYS_CONF_H) -nrn_check_include_files(sys/file.h HAVE_SYS_FILE_H) -nrn_check_include_files(sys/ioctl.h HAVE_SYS_IOCTL_H) -nrn_check_include_files(sys/stat.h HAVE_SYS_STAT_H) -nrn_check_include_files(sys/time.h HAVE_SYS_TIME_H) nrn_check_include_files(sys/types.h HAVE_SYS_TYPES_H) -nrn_check_include_files(sys/wait.h HAVE_SYS_WAIT_H) -nrn_check_include_files(termio.h HAVE_TERMIO_H) nrn_check_include_files(unistd.h HAVE_UNISTD_H) -nrn_check_include_files(varargs.h HAVE_VARARGS_H) -nrn_check_include_files(sys/timeb.h HAVE_SYS_TIMEB_H) # ============================================================================= # Check for standard headers # ============================================================================= check_include_files("dlfcn.h;stdint.h;stddef.h;inttypes.h;stdlib.h;strings.h;string.h;float.h" STDC_HEADERS) -check_include_file_cxx("_G_config.h" HAVE__G_CONFIG_H) # ============================================================================= # Check symbol using check_cxx_symbol_exists but use ${NRN_HEADERS_INCLUDE_LIST} # ============================================================================= # note that this must be called after all *check_include_files because we use # NRN_HEADERS_INCLUDE_LIST is second argument (headers) is empty. 
-nrn_check_symbol_exists("alloca" "" HAVE_ALLOCA) -nrn_check_symbol_exists("bcopy" "" HAVE_BCOPY) -nrn_check_symbol_exists("bzero" "" HAVE_BZERO) -nrn_check_symbol_exists("doprnt" "" HAVE_DOPRNT) -nrn_check_symbol_exists("ftime" "" HAVE_FTIME) -nrn_check_symbol_exists("getcwd" "" HAVE_GETCWD) -nrn_check_symbol_exists("gethostname" "" HAVE_GETHOSTNAME) -nrn_check_symbol_exists("gettimeofday" "" HAVE_GETTIMEOFDAY) -nrn_check_symbol_exists("index" "" HAVE_INDEX) -nrn_check_symbol_exists("isatty" "" HAVE_ISATTY) +nrn_check_symbol_exists("bcopy" "strings.h" HAVE_BCOPY) +nrn_check_symbol_exists("bzero" "strings.h" HAVE_BZERO) +nrn_check_symbol_exists("gettimeofday" "sys/time.h" HAVE_GETTIMEOFDAY) +nrn_check_symbol_exists("index" "strings.h" HAVE_INDEX) +nrn_check_symbol_exists("isatty" "unistd.h" HAVE_ISATTY) nrn_check_symbol_exists("iv" "" HAVE_IV) -nrn_check_symbol_exists("lockf" "" HAVE_LOCKF) -nrn_check_symbol_exists("mallinfo" "" HAVE_MALLINFO) -nrn_check_symbol_exists("mallinfo2" "" HAVE_MALLINFO2) -nrn_check_symbol_exists("mkdir" "" HAVE_MKDIR) -nrn_check_symbol_exists("mkstemp" "" HAVE_MKSTEMP) -nrn_check_symbol_exists("namespaces" "" HAVE_NAMESPACES) -nrn_check_symbol_exists("posix_memalign" "" HAVE_POSIX_MEMALIGN) -nrn_check_symbol_exists("realpath" "" HAVE_REALPATH) -nrn_check_symbol_exists("select" "" HAVE_SELECT) -nrn_check_symbol_exists("setenv" "" HAVE_SETENV) -nrn_check_symbol_exists("setitimer" "" HAVE_SETITIMER) +nrn_check_symbol_exists("mallinfo" "malloc.h" HAVE_MALLINFO) +nrn_check_symbol_exists("mallinfo2" "malloc.h" HAVE_MALLINFO2) +nrn_check_symbol_exists("mkstemp" "stdlib.h" HAVE_MKSTEMP) +nrn_check_symbol_exists("posix_memalign" "stdlib.h" HAVE_POSIX_MEMALIGN) +nrn_check_symbol_exists("realpath" "stdlib.h" HAVE_REALPATH) +nrn_check_symbol_exists("setenv" "stdlib.h" HAVE_SETENV) +nrn_check_symbol_exists("setitimer" "sys/time.h" HAVE_SETITIMER) nrn_check_symbol_exists("sigaction" "signal.h" HAVE_SIGACTION) nrn_check_symbol_exists("sigprocmask" "signal.h" HAVE_SIGPROCMASK) nrn_check_symbol_exists("SIGBUS" "signal.h" HAVE_SIGBUS) -nrn_check_symbol_exists("SIGSEGV" "signal.h" HAVE_SIGSEGV) -nrn_check_symbol_exists("strdup" "" HAVE_STRDUP) -nrn_check_symbol_exists("strstr" "" HAVE_STRSTR) nrn_check_symbol_exists("stty" "" HAVE_STTY) -nrn_check_symbol_exists("vprintf" "" HAVE_VPRINTF) -nrn_check_cxx_symbol_exists("getpw" "sys/types.h;pwd.h" HAVE_GETPW) -nrn_check_cxx_symbol_exists("fesetround" "" HAVE_FESETROUND) -nrn_check_cxx_symbol_exists("feenableexcept" "" HAVE_FEENABLEEXCEPT) -# not necessary to check as it should be always there -set(HAVE_SSTREAM /**/) +nrn_check_cxx_symbol_exists("fesetround" "fenv.h" HAVE_FESETROUND) +nrn_check_cxx_symbol_exists("feenableexcept" "fenv.h" HAVE_FEENABLEEXCEPT) # ============================================================================= # Check data types @@ -245,13 +173,7 @@ nrn_check_signal_return_type(RETSIGTYPE) # ============================================================================= # Check direcotry manipulation header # ============================================================================= -nrn_check_dir_exists(dirent.h HAVE_DIRENT_H) -nrn_check_dir_exists(ndir.h HAVE_NDIR_H) -nrn_check_dir_exists(sys/dir.h HAVE_SYS_DIR_H) nrn_check_dir_exists(sys/ndir.h HAVE_SYS_NDIR_H) -if(HAVE_DIRENT_H) - set(HAVE_SYS_DIR_H 0) -endif() # ============================================================================= # Copy cmake specific template files @@ -263,18 +185,18 @@ endif() # 
============================================================================= # Generate file from file.in template # ============================================================================= +set(version_strs ${NRN_PYTHON_VERSIONS}) +list(TRANSFORM version_strs APPEND "\"") +list(TRANSFORM version_strs PREPEND "\"") +string(JOIN ", " NRN_DYNAMIC_PYTHON_LIST_OF_VERSION_STRINGS ${version_strs}) nrn_configure_dest_src(nrnconf.h . cmake_nrnconf.h .) nrn_configure_dest_src(nmodlconf.h . cmake_nrnconf.h .) nrn_configure_file(nrnmpiuse.h src/oc) nrn_configure_file(nrnconfigargs.h src/nrnoc) -nrn_configure_file(nrnpython_config.h src/nrnpython) -nrn_configure_file(bbsconf.h src/parallel) nrn_configure_file(nrnneosm.h src/nrncvode) nrn_configure_file(sundials_config.h src/sundials) -nrn_configure_dest_src(nrnunits.lib share/nrn/lib nrnunits.lib share/lib) nrn_configure_dest_src(nrn.defaults share/nrn/lib nrn.defaults share/lib) -# NRN_DYNAMIC_UNITS requires nrnunits.lib.in be in same places as nrnunits.lib -file(COPY ${PROJECT_SOURCE_DIR}/share/lib/nrnunits.lib.in +file(COPY ${PROJECT_SOURCE_DIR}/share/lib/nrnunits.lib DESTINATION ${PROJECT_BINARY_DIR}/share/nrn/lib) if(NRN_MACOS_BUILD) @@ -293,8 +215,6 @@ if(MINGW) set(nrnskip_rebase "#") nrn_configure_file(mknrndll.mak src/mswin/lib) endif() -# TODO temporary workaround for mingw -file(COPY ${PROJECT_BINARY_DIR}/share/nrn/lib/nrnunits.lib.in DESTINATION ${PROJECT_BINARY_DIR}/lib) # ============================================================================= # If Interviews is not provided, configure local files diff --git a/cmake/Coverage.cmake b/cmake/Coverage.cmake index 7515e5a857..aa2bae3bf5 100644 --- a/cmake/Coverage.cmake +++ b/cmake/Coverage.cmake @@ -27,17 +27,16 @@ if(NRN_ENABLE_COVERAGE) find_program(LCOV lcov) - if(LCOV-NOTFOUND) - message(ERROR "lcov is not installed.") + if(LCOV STREQUAL "LCOV-NOTFOUND") + message(ERROR "lcov is required with NRN_ENABLE_COVERAGE=ON and it was not found.") endif() - set(NRN_COVERAGE_FLAGS_UNQUOTED --coverage -O0 -fno-inline -g) - set(NRN_COVERAGE_FLAGS "--coverage -O0 -fno-inline -g") - set(NRN_COVERAGE_LIB gcov) - - if(NRN_MACOS_BUILD) - unset(NRN_COVERAGE_LIB) - add_link_options(-fprofile-arcs) + string(TOUPPER ${CMAKE_BUILD_TYPE} BUILD_TYPE_UPPER) + if(NOT BUILD_TYPE_UPPER STREQUAL "DEBUG") + message(WARNING "Using CMAKE_BUILD_TYPE=Debug is recommended with NRN_ENABLE_COVERAGE") endif() + set(NRN_COVERAGE_FLAGS_UNQUOTED --coverage -fno-inline) + string(JOIN " " NRN_COVERAGE_FLAGS ${NRN_COVERAGE_FLAGS_UNQUOTED}) + set(NRN_COVERAGE_LINK_FLAGS --coverage) if(NRN_COVERAGE_FILES) # ~~~ @@ -59,9 +58,13 @@ if(NRN_ENABLE_COVERAGE) set(NRN_ADDED_COVERAGE_FLAGS "${NRN_COVERAGE_FLAGS}" CACHE INTERNAL "Remind that this is always in effect from now on" FORCE) - add_compile_options(${NRN_COVERAGE_FLAGS_UNQUOTED}) - link_libraries(${NRN_COVERAGE_LIB}) + list(APPEND NRN_COMPILE_FLAGS ${NRN_COVERAGE_FLAGS_UNQUOTED}) + list(APPEND CORENRN_EXTRA_CXX_FLAGS ${NRN_COVERAGE_FLAGS_UNQUOTED}) + list(APPEND CORENRN_EXTRA_MECH_CXX_FLAGS ${NRN_COVERAGE_FLAGS_UNQUOTED}) endif() + list(APPEND NRN_LINK_FLAGS ${NRN_COVERAGE_LINK_FLAGS}) + list(APPEND CORENRN_EXTRA_LINK_FLAGS ${NRN_COVERAGE_LINK_FLAGS}) + list(APPEND NRN_COMPILE_DEFS NRN_COVERAGE_ENABLED) else() unset(NRN_COVERAGE_FLAGS) unset(NRN_COVERAGE_FILES CACHE) @@ -74,21 +77,42 @@ else() endif() if(NRN_ENABLE_COVERAGE) - + set(cover_clean_command find "${PROJECT_BINARY_DIR}" "-name" "*.gcda" "-type" "f" "-delete") + set(cover_baseline_command + "${LCOV}" "--capture" 
"--initial" "--no-external" "--directory" "${PROJECT_SOURCE_DIR}" + "--directory" "${PROJECT_BINARY_DIR}" "--output-file" "coverage-base.info") + set(cover_collect_command + "${LCOV}" "--capture" "--no-external" "--directory" "${PROJECT_SOURCE_DIR}" "--directory" + "${PROJECT_BINARY_DIR}" "--output-file" "coverage-run.info") + set(cover_combine_command "${LCOV}" "--add-tracefile" "coverage-base.info" "--add-tracefile" + "coverage-run.info" "--output-file" "coverage-combined.info") + set(cover_html_command genhtml "coverage-combined.info" "--output-directory" html) + add_custom_target( + cover_clean + COMMAND ${cover_clean_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") + add_custom_target( + cover_baseline + COMMAND ${cover_baseline_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") add_custom_target( cover_begin - COMMAND find "${PROJECT_BINARY_DIR}" "-name" "*.gcda" "-type" "f" "-delete" - COMMAND "${LCOV}" "--capture" "--initial" "--no-external" "--directory" "${PROJECT_SOURCE_DIR}" - "--directory" "${PROJECT_BINARY_DIR}" "--output-file" "coverage-base.info" - WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) - + COMMAND ${cover_clean_command} + COMMAND ${cover_baseline_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") + add_custom_target( + cover_collect + COMMAND ${cover_collect_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") + add_custom_target( + cover_combine + COMMAND ${cover_combine_command} + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") add_custom_target( cover_html - COMMAND ${LCOV} "--capture" "--no-external" "--directory" "${PROJECT_SOURCE_DIR}" "--directory" - ${PROJECT_BINARY_DIR} "--output-file" "coverage-run.info" - COMMAND "${LCOV}" "--add-tracefile" "coverage-base.info" "--add-tracefile" "coverage-run.info" - "--output-file" "coverage-combined.info" - COMMAND genhtml "coverage-combined.info" "--output-directory" html + COMMAND ${cover_collect_command} + COMMAND ${cover_combine_command} + COMMAND ${cover_html_command} COMMAND echo "View in browser at file://${PROJECT_BINARY_DIR}/html/index.html" - WORKING_DIRECTORY ${PROJECT_BINARY_DIR}) + WORKING_DIRECTORY "${PROJECT_BINARY_DIR}") endif() diff --git a/cmake/ExecuteFindPython/CMakeLists.txt b/cmake/ExecuteFindPython/CMakeLists.txt new file mode 100644 index 0000000000..d56d5d913e --- /dev/null +++ b/cmake/ExecuteFindPython/CMakeLists.txt @@ -0,0 +1,9 @@ +cmake_minimum_required(VERSION 3.15 FATAL_ERROR) +# This is called from PythonHelper.cmake in a subprocess, to allow multiple Python versions to be +# searched for in the project without CACHE variable hackery. 
+project(ExecuteFindPython LANGUAGES C) +find_package(Python3 COMPONENTS ${Python3_COMPONENTS}) +message(STATUS "Python3_INCLUDE_DIRS=${Python3_INCLUDE_DIRS}") +message(STATUS "Python3_LIBRARIES=${Python3_LIBRARIES}") +message(STATUS "Python3_VERSION_MAJOR=${Python3_VERSION_MAJOR}") +message(STATUS "Python3_VERSION_MINOR=${Python3_VERSION_MINOR}") diff --git a/cmake/ExternalProjectHelper.cmake b/cmake/ExternalProjectHelper.cmake index 4e7205ac16..3abadf7e58 100644 --- a/cmake/ExternalProjectHelper.cmake +++ b/cmake/ExternalProjectHelper.cmake @@ -42,7 +42,8 @@ function(nrn_add_external_project name) find_path( ${name}_PATH NAMES CMakeLists.txt - PATHS "${THIRD_PARTY_DIRECTORY}/${name}") + PATHS "${THIRD_PARTY_DIRECTORY}/${name}" + NO_DEFAULT_PATH) if(NOT EXISTS ${${name}_PATH}) nrn_submodule_file_not_found("${THIRD_PARTY_DIRECTORY}/${name}") nrn_initialize_submodule("${THIRD_PARTY_DIRECTORY}/${name}") diff --git a/cmake/GetLIKWID.cmake b/cmake/GetLIKWID.cmake new file mode 100644 index 0000000000..2a393c8b83 --- /dev/null +++ b/cmake/GetLIKWID.cmake @@ -0,0 +1,8 @@ +include_guard(DIRECTORY) + +find_package(likwid REQUIRED) + +add_library(nrn_likwid IMPORTED SHARED) +set_target_properties(nrn_likwid PROPERTIES IMPORTED_LOCATION ${LIKWID_LIBRARY}) +target_include_directories(nrn_likwid INTERFACE ${LIKWID_INCLUDE_DIRS}) +target_compile_definitions(nrn_likwid INTERFACE LIKWID_PERFMON) diff --git a/cmake/MacroHelper.cmake b/cmake/MacroHelper.cmake index eec1f43a20..482345cacb 100644 --- a/cmake/MacroHelper.cmake +++ b/cmake/MacroHelper.cmake @@ -173,20 +173,6 @@ macro(nrn_create_file_list list_name prefix) endforeach(name) endmacro() -# ============================================================================= -# Copy file from source to destination in noclobber mode (i.e. 
no overwrite) -# ============================================================================= -macro(nrn_copy_file_without_overwrite source destination) - execute_process(COMMAND cp -n ${source} ${destination}) -endmacro() - -# ============================================================================= -# Copy file from source to destination only if different -# ============================================================================= -macro(nrn_copy_file_if_different source destination) - configure_file(${source} ${destination} COPYONLY) -endmacro() - # ============================================================================= # Set string with double quotes # ============================================================================= @@ -203,20 +189,6 @@ macro(dospath path var) set(${var} ${var1}) endmacro() -# ============================================================================= -# Given list of file names, find their path in project source tree -# ============================================================================= -macro(nrn_find_project_files list_name) - foreach(name ${ARGN}) - file(GLOB_RECURSE filepath "${PROJECT_SOURCE_DIR}/src/*${name}") - if(filepath STREQUAL "") - message(FATAL_ERROR " ${name} not found in ${PROJECT_SOURCE_DIR}/src") - else() - list(APPEND ${list_name} ${filepath}) - endif() - endforeach(name) -endmacro() - # ============================================================================= # Utility macro to print all matching CMake variables # ============================================================================= @@ -235,15 +207,24 @@ endmacro() # Run nocmodl to convert NMODL to C # ============================================================================= macro(nocmodl_mod_to_cpp modfile_basename) + set(NOCMODL_SED_EXPR "s/_reg()/_reg_()/") + if(NOT MSVC) + set(NOCMODL_SED_EXPR "'${NOCMODL_SED_EXPR}'") + endif() + set(REMOVE_CMAKE_COMMAND "rm") + if(CMAKE_VERSION VERSION_LESS "3.17") + set(REMOVE_CMAKE_COMMAND "remove") + endif() add_custom_command( OUTPUT ${PROJECT_BINARY_DIR}/${modfile_basename}.cpp COMMAND ${CMAKE_COMMAND} -E env "MODLUNIT=${PROJECT_BINARY_DIR}/share/nrn/lib/nrnunits.lib" - ${NRN_NOCMODL_SANITIZER_ENVIRONMENT} ${PROJECT_BINARY_DIR}/bin/nocmodl + ${NRN_NOCMODL_SANITIZER_ENVIRONMENT} $ ${PROJECT_SOURCE_DIR}/${modfile_basename}.mod - COMMAND sed "'s/_reg()/_reg_()/'" ${PROJECT_SOURCE_DIR}/${modfile_basename}.cpp > + COMMAND sed ${NOCMODL_SED_EXPR} ${PROJECT_SOURCE_DIR}/${modfile_basename}.cpp > ${PROJECT_BINARY_DIR}/${modfile_basename}.cpp - COMMAND rm ${PROJECT_SOURCE_DIR}/${modfile_basename}.cpp + COMMAND ${CMAKE_COMMAND} -E ${REMOVE_CMAKE_COMMAND} + ${PROJECT_SOURCE_DIR}/${modfile_basename}.cpp DEPENDS nocmodl ${PROJECT_SOURCE_DIR}/${modfile_basename}.mod WORKING_DIRECTORY ${PROJECT_BINARY_DIR}/src/nrniv) endmacro() diff --git a/cmake/NeuronFileLists.cmake b/cmake/NeuronFileLists.cmake index 3e81988551..9141aa4562 100644 --- a/cmake/NeuronFileLists.cmake +++ b/cmake/NeuronFileLists.cmake @@ -1,91 +1,92 @@ -# ============================================================================= -# Lists of header files to install -# ============================================================================= +# ======================================================================================= +# Lists of header files to install. Difference is whether the dir structure is preserved. 
+# +# * HEADER_FILES_TO_INSTALL: ${src}/src/dir/header.h -> {bld}/include/header.h +# * STRUCTURED_HEADER_FILES_TO_INSTALL: {src}/src/a/b.h -> {bld}/include/a/b.h +# ======================================================================================= +set(STRUCTURED_HEADER_FILES_TO_INSTALL + neuron/cache/mechanism_range.hpp neuron/container/data_handle.hpp + neuron/container/generic_data_handle.hpp neuron/container/non_owning_soa_identifier.hpp + neuron/model_data_fwd.hpp) set(HEADER_FILES_TO_INSTALL - bbsavestate.h - cabvars.h - crout.hpp - crout_thread.hpp - cspmatrix.h - cspredef.h - deflate.hpp - dimplic.hpp - errcodes.hpp - euler.hpp - euler_thread.hpp - hoc.h - hoc_membf.h - hocassrt.h - hocdec.h - hocgetsym.h - hoclist.h - hocparse.h - mcran4.h - md1redef.h - md2redef.h - mech_api.h - membdef.h - membfunc.h - multicore.h - multisplit.h - neuron.h - newton.hpp - newton_struct.h - newton_thread.hpp - nmodlmutex.h - nrn_ansi.h - nrnapi.h - nrnassrt.h - nrncvode.h - nrnisaac.h - nrniv_mf.h - nrnoc_ml.h - nrnmpi.h - nrnmpidec.h - nrnrandom.h - nrnran123.h - nrnredef.h - nrnversionmacros.h - oc_ansi.h - ocfunc.h - ocmisc.h - options.h - parse_with_deps.hpp - runge.hpp - scoplib.h - section.h - simeq.hpp - sparse.hpp - sparse_thread.hpp - spconfig.h - spmatrix.h - ssimplic.hpp - ssimplic_thread.hpp - treeset.h - wrap_sprintf.h) + gnu/mcran4.h + gnu/nrnisaac.h + gnu/nrnran123.h + nrniv/backtrace_utils.h + nrniv/bbsavestate.h + nrnmpi/nrnmpidec.h + nrnoc/cabvars.h + nrnoc/md1redef.h + nrnoc/md2redef.h + nrnoc/membdef.h + nrnoc/membfunc.h + nrnoc/multicore.h + nrnoc/multisplit.h + nrnoc/neuron.h + nrnoc/nmodlmutex.h + nrnoc/nrn_ansi.h + nrnoc/nrncvode.h + nrnoc/nrniv_mf.h + nrnoc/nrnoc_ml.h + nrnoc/nrnredef.h + nrnoc/nrnversionmacros.h + nrnoc/options.h + nrnoc/section_fwd.hpp + nrnoc/treeset.h + oc/hoc.h + oc/hoc_membf.h + oc/hocassrt.h + oc/hocdec.h + oc/hocgetsym.h + oc/hoclist.h + oc/hocparse.h + oc/mech_api.h + oc/memory.hpp + oc/nrnapi.h + oc/nrnassrt.h + oc/nrnmpi.h + oc/nrnrandom.h + oc/oc_ansi.h + oc/ocfunc.h + oc/ocmisc.h + oc/parse_with_deps.hpp + oc/wrap_sprintf.h + scopmath/crout.hpp + scopmath/crout_thread.hpp + scopmath/deflate.hpp + scopmath/dimplic.hpp + scopmath/errcodes.hpp + scopmath/euler.hpp + scopmath/euler_thread.hpp + scopmath/newton.hpp + scopmath/newton_struct.h + scopmath/newton_thread.hpp + scopmath/row_view.hpp + scopmath/runge.hpp + scopmath/scoplib.h + scopmath/simeq.hpp + scopmath/sparse.hpp + scopmath/sparse_thread.hpp + scopmath/ssimplic.hpp + scopmath/ssimplic_thread.hpp + sparse13/spconfig.h + sparse13/spmatrix.h) # ============================================================================= # Lists of headers populated using check_include_files # ============================================================================= set(NRN_HEADERS_INCLUDE_LIST) -# ============================================================================= -# Lists of random number related files -# ============================================================================= -set(RAN_FILE_LIST isaac64.cpp mcran4.cpp nrnisaac.cpp nrnran123.cpp) - # ============================================================================= # Files in oc directory # ============================================================================= set(OC_FILE_LIST - ${RAN_FILE_LIST} audit.cpp axis.cpp code.cpp code2.cpp debug.cpp fileio.cpp - fmenu.cpp ftime.cpp functabl.cpp getsym.cpp @@ -95,12 +96,13 @@ set(OC_FILE_LIST hoc_oop.cpp list.cpp math.cpp + oc_mcran4.cpp + memory.cpp mswinprt.cpp 
nonlin.cpp ocerf.cpp plot.cpp plt.cpp - regexp.cpp scoprand.cpp settext.cpp symbol.cpp @@ -115,6 +117,7 @@ set(NRNOC_FILE_LIST cabcode.cpp capac.cpp clamp.cpp + container.cpp eion.cpp extcelln.cpp fadvance.cpp @@ -123,6 +126,7 @@ set(NRNOC_FILE_LIST init.cpp ldifus.cpp membfunc.cpp + memblist.cpp nrnnemo.cpp nrntimeout.cpp nrnversion.cpp @@ -208,12 +212,10 @@ set(NRNIV_FILE_LIST bbslsrv2.cpp bbsrcli.cpp bbssrv.cpp - cachevec.cpp classreg.cpp cxprop.cpp datapath.cpp finithnd.cpp - geometry3d.cpp glinerec.cpp hocmech.cpp impedanc.cpp @@ -222,9 +224,11 @@ set(NRNIV_FILE_LIST linmod.cpp linmod1.cpp matrixmap.cpp + memory_usage.cpp multisplit.cpp ndatclas.cpp netpar.cpp + nmodlrandom.cpp nonlinz.cpp nrncore_write.cpp nrncore_write/callbacks/nrncore_callbacks.cpp @@ -306,93 +310,8 @@ nrn_create_file_list( sundialsmath.c) set(NRN_SUNDIALS_SRC_FILES ${SUNDIALS_CVODES} ${SUNDIALS_IDA} ${SUNDIALS_SHARED}) -# meschach matrix sources -set(MESCH_FILES_LIST - arnoldi.c - bdfactor.c - bkpfacto.c - chfactor.c - arnoldi.c - bdfactor.c - bkpfacto.c - chfactor.c - conjgrad.c - copy.c - dmacheps.c - err.c - extras.c - fft.c - givens.c - hessen.c - hsehldr.c - init.c - iter0.c - iternsym.c - itersym.c - ivecop.c - lanczos.c - lufactor.c - machine.c - matlab.c - matop.c - matrixio.c - meminfo.c - memory.c - memstat.c - mfunc.c - norm.c - otherio.c - pxop.c - qrfactor.c - schur.c - solve.c - sparse.c - sparseio.c - spbkp.c - spchfctr.c - splufctr.c - sprow.c - spswap.c - submat.c - svd.c - symmeig.c - update.c - vecop.c - version.c - zcopy.c - zfunc.c - zgivens.c - zhessen.c - zhsehldr.c - zlufctr.c - zmachine.c - zmatio.c - zmatlab.c - zmatop.c - zmemory.c - znorm.c - zqrfctr.c - zschur.c - zsolve.c - zvecop.c) - set(SPARSE_FILES_LIST bksub.cpp getelm.cpp lineq.cpp prmat.cpp subrows.cpp) -# sparse13 matrix sources -set(SPARSE13_FILES_LIST - spalloc.cpp - spbuild.cpp - spfactor.cpp - spoutput.cpp - spsolve.cpp - sputils.cpp - cspalloc.cpp - cspbuild.cpp - cspfactor.cpp - cspoutput.cpp - cspsolve.cpp - csputils.cpp) - # scopmath sources set(SCOPMATH_FILES_LIST abort.cpp @@ -432,25 +351,7 @@ set(SCOPMATH_FILES_LIST threshol.cpp tridiag.cpp) -set(NRNMPI_FILES_LIST nrnmpi.cpp bbsmpipack.cpp mpispike.cpp) - -set(NRNGNU_FILES_LIST - ACG.cpp - Binomial.cpp - DiscUnif.cpp - Erlang.cpp - Geom.cpp - HypGeom.cpp - LogNorm.cpp - MLCG.cpp - NegExp.cpp - Normal.cpp - Poisson.cpp - RNG.cpp - Random.cpp - RndInt.cpp - Uniform.cpp - Weibull.cpp) +set(NRNMPI_FILES_LIST nrnmpi.cpp memory_usage.cpp bbsmpipack.cpp mpispike.cpp) # nrnpython sources (only if ${NRN_ENABLE_PYTHON_DYNAMIC} is OFF} set(NRNPYTHON_FILES_LIST @@ -520,7 +421,7 @@ set(NMODL_FILES_LIST units.cpp version.cpp) -set(IVOS_FILES_LIST listimpl.cpp string.cpp observe.cpp regexp.cpp resource.cpp) +set(IVOS_FILES_LIST observe.cpp resource.cpp) set(MPI_DYNAMIC_INCLUDE nrnmpi_dynam.h nrnmpi_dynam_cinc nrnmpi_dynam_wrappers.inc) @@ -553,14 +454,10 @@ nrn_create_file_list(NRN_PARALLEL_SRC_FILES ${PROJECT_SOURCE_DIR}/src/nrniv nvector_nrnparallel_ld.cpp) nrn_create_file_list(NRN_PARALLEL_SRC_FILES ${PROJECT_SOURCE_DIR}/src/sundials/shared nvector_parallel.c) -nrn_create_file_list(NRN_MESCH_SRC_FILES ${PROJECT_SOURCE_DIR}/src/mesch ${MESCH_FILES_LIST}) nrn_create_file_list(NRN_SPARSE_SRC_FILES ${PROJECT_SOURCE_DIR}/src/sparse ${SPARSE_FILES_LIST}) -nrn_create_file_list(NRN_SPARSE13_SRC_FILES ${PROJECT_SOURCE_DIR}/src/sparse13 - ${SPARSE13_FILES_LIST}) nrn_create_file_list(NRN_SCOPMATH_SRC_FILES ${PROJECT_SOURCE_DIR}/src/scopmath ${SCOPMATH_FILES_LIST}) 
nrn_create_file_list(NRN_NRNMPI_SRC_FILES ${PROJECT_SOURCE_DIR}/src/nrnmpi ${NRNMPI_FILES_LIST}) -nrn_create_file_list(NRN_NRNGNU_SRC_FILES ${PROJECT_SOURCE_DIR}/src/gnu ${NRNGNU_FILES_LIST}) nrn_create_file_list(NRN_NRNPYTHON_SRC_FILES ${PROJECT_SOURCE_DIR}/src/nrnpython ${NRNPYTHON_FILES_LIST}) nrn_create_file_list(NRN_MODFILE_BASE_NAMES src/nrnoc ${MODFILE_BASE_NAMES}) diff --git a/cmake/NeuronTestHelper.cmake b/cmake/NeuronTestHelper.cmake index b57d4e1b09..35a8ec5e6c 100644 --- a/cmake/NeuronTestHelper.cmake +++ b/cmake/NeuronTestHelper.cmake @@ -432,7 +432,7 @@ function(nrn_add_test) # https://tobywf.com/2021/02/python-ext-asan/ list(APPEND test_env NRN_SANITIZER_PRELOAD_VAR=${NRN_SANITIZER_PRELOAD_VAR}) list(APPEND test_env NRN_SANITIZER_PRELOAD_VAL=${NRN_SANITIZER_LIBRARY_PATH}) - list(APPEND test_env NRN_PYTHON_EXECUTABLE=${PYTHON_EXECUTABLE}) + list(APPEND test_env NRN_PYTHON_EXECUTABLE=${NRN_DEFAULT_PYTHON_EXECUTABLE}) endif() list(APPEND test_env ${NRN_SANITIZER_ENABLE_ENVIRONMENT}) # Add the actual test job, including the `special` and `special-core` binaries in the path. TODOs: @@ -456,7 +456,7 @@ function(nrn_add_test) list(APPEND test_names ${test_name}::preparation) set_tests_properties(${test_name} PROPERTIES DEPENDS ${test_name}::preparation) endif() - set_tests_properties(${test_names} PROPERTIES TIMEOUT 300) + set_tests_properties(${test_names} PROPERTIES TIMEOUT 500) if(DEFINED NRN_ADD_TEST_PROCESSORS) set_tests_properties(${test_names} PROPERTIES PROCESSORS ${NRN_ADD_TEST_PROCESSORS}) endif() diff --git a/cmake/PythonDynamicHelper.cmake b/cmake/PythonDynamicHelper.cmake deleted file mode 100644 index d25e1d4900..0000000000 --- a/cmake/PythonDynamicHelper.cmake +++ /dev/null @@ -1,122 +0,0 @@ -# ============================================================================= -# Configure support for dynamic Python to use multiple Python versions -# ============================================================================= -# ~~~ -# NEURON can be built with python modules that can be usable from multiple -# versions of Python. Here we check if NRN_ENABLE_PYTHON_DYNAMIC is valid -# and determine an include directory for version 3 to build -# libnrnpython.so. For now only NRNPYTHON_INCLUDE3 will be defined. -# -# The above is good for mac and linux. Sadly, for MINGW, a distinct -# NRNPYTHON_INCLUDE is needed for each python in the -# NRN_PYTHON_DYNAMIC list. This is because libnrnpython.dll -# must be linked against the specfic libpython to avoid undefined name errors. -# Thus, at least for MINGW, parallel to the NRN_PYTHON_DYNAMIC list -# we construct the lists NRN_PYTHON_VER_LIST, NRN_PYTHON_INCLUDE_LIST, -# and NRN_PYTHON_LIB_LIST -# ~~~ - -set(LINK_AGAINST_PYTHON ${MINGW}) -set(NRN_PYTHON_VER_LIST - "" - CACHE INTERNAL "" FORCE) -set(NRN_PYTHON_INCLUDE_LIST - "" - CACHE INTERNAL "" FORCE) -set(NRN_PYTHON_LIB_LIST - "" - CACHE INTERNAL "" FORCE) - -# ~~~ -# Inform setup.py and nrniv/nrnpy.cpp whether libnrnpython name is libnrnpython -# or libnrnpython . The latter is required for mingw. 
-# This is here instead of in src/nrnpython/CMakeLists.txt as src/nrniv/CMakeLists -# needs it for nrniv/nrnpy.cpp -# ~~~ -set(USE_LIBNRNPYTHON_MAJORMINOR 0) -if(LINK_AGAINST_PYTHON) - set(USE_LIBNRNPYTHON_MAJORMINOR 1) -endif() - -if(NRN_ENABLE_PYTHON) - if(NRN_ENABLE_PYTHON_DYNAMIC) - if(NRN_PYTHON_DYNAMIC STREQUAL "") - # use the default python already determined - if(LINK_AGAINST_PYTHON) - set(PYVER "${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}") - else() - set(PYVER ${PYTHON_VERSION_MAJOR}) - endif() - # NB: we are constructing here a variable name NRNPYTHON_INCLUDE${PYVER} - set(NRNPYTHON_INCLUDE${PYVER} ${PYTHON_INCLUDE_DIRS}) - list(APPEND NRN_PYTHON_VER_LIST "${PYVER}") - list(APPEND NRN_PYTHON_INCLUDE_LIST "${PYTHON_INCLUDE_DIRS}") - list(APPEND NRN_PYTHON_LIB_LIST "${PYTHON_LIBRARIES}") - else() - # run each python provided by user to determine major and include directory - message(STATUS "Dynamic Python support") - foreach(pyexe ${NRN_PYTHON_DYNAMIC}) - message(STATUS "Checking if ${pyexe} is a working python") - if(LINK_AGAINST_PYTHON) - set(pr_pyver "print('%d.%d' % (sys.version_info[0], sys.version_info[1]))") - else() - set(pr_pyver "print(sys.version_info[0])") - endif() - execute_process( - COMMAND - ${pyexe} -c - "import sysconfig; print(sysconfig.get_path('include')); import sys; ${pr_pyver}; quit()" - RESULT_VARIABLE result - OUTPUT_VARIABLE std_output - ERROR_VARIABLE err_output - OUTPUT_STRIP_TRAILING_WHITESPACE) - if(result EQUAL 0) - # cmake-format: off - string(REGEX MATCH [0-9.]*$ PYVER ${std_output}) - string(REGEX MATCH ^[^\n]* incval ${std_output}) - # cmake-format: on - if(NOT NRNPYTHON_INCLUDE${PYVER}) - set(NRNPYTHON_INCLUDE${PYVER} ${incval}) - endif() - # Only needed to find include and library paths if LINK_AGAINST_PYTHON but useful for - # build report. Unset the variables set by PythonLibsNew so we can start afresh. - set(PYTHON_EXECUTABLE ${pyexe}) - unset(PYTHON_INCLUDE_DIR CACHE) - unset(PYTHON_LIBRARY CACHE) - set(PYTHON_PREFIX "") - set(PYTHON_LIBRARIES "") - set(PYTHON_INCLUDE_DIRS "") - set(PYTHON_MODULE_EXTENSION "") - set(PYTHON_MODULE_PREFIX "") - - find_package(PythonLibsNew ${PYVER} REQUIRED) - # convert major.minor to majorminor - string(REGEX REPLACE [.] "" PYVER ${PYVER}) - list(APPEND NRN_PYTHON_VER_LIST "${PYVER}") - list(APPEND NRN_PYTHON_INCLUDE_LIST "${incval}") - list(APPEND NRN_PYTHON_LIB_LIST "${PYTHON_LIBRARIES}") - else() - message( - FATAL_ERROR "Error while checking ${pyexe} : ${result}\n${std_output}\n${err_output}") - endif() - endforeach() - endif() - endif() -endif() - -# check Python.h exists under provided include directory -macro(check_python_include python_include major_version) - if(python_include) - if(NOT EXISTS "${${python_include}}/Python.h") - message( - FATAL_ERROR - " ${${python_include}}/Python.h does not exist, set proper ${python_include} to include directory" - ) - endif() - endif() -endmacro() - -if(NOT LINK_AGAINST_PYTHON) - # make sure provided python have Python.h header - check_python_include(NRNPYTHON_INCLUDE3 3) -endif() diff --git a/cmake/PythonHelper.cmake b/cmake/PythonHelper.cmake new file mode 100644 index 0000000000..3ea57760ef --- /dev/null +++ b/cmake/PythonHelper.cmake @@ -0,0 +1,252 @@ +# ================================================================================================= +# Organise which Python versions are to be built against, and find their versions, include +# directories and library paths. 
This is used both for dynamic Python (>= 1 libnrnpythonX.Y) and +# standard Python (libnrniv linked against one Python version) builds. To avoid the restrictions +# inherent in Python's limited API / stable ABI (see +# https://docs.python.org/3/c-api/stable.html#stable-application-binary-interface), we build +# Python-related NEURON code separately for each version of Python: libnrnpythonX.Y. Historically +# macOS and Linux were built ignoring the minor version, but this is unsafe without the limited API +# ================================================================================================= + +# Parse commandline options so that: +# +# * NRN_DEFAULT_PYTHON_EXECUTABLE is the default Python, which is used for running tests and so on. +# * NRN_PYTHON_EXECUTABLES is a list of all the Pythons that we are building against. This will only +# have a length > 1 if NRN_ENABLE_PYTHON_DYNAMIC is defined. +if(NOT PYTHON_EXECUTABLE AND (NOT NRN_ENABLE_PYTHON_DYNAMIC OR NOT NRN_PYTHON_DYNAMIC)) + # Haven't been explicitly told about any Python versions, set PYTHON_EXECUTABLE by searching PATH + message(STATUS "No python executable specified. Looking for `python3` in the PATH...") + # Since PythonInterp module prefers system-wide python, if PYTHON_EXECUTABLE is not set, look it + # up in the PATH exclusively. Need to set PYTHON_EXECUTABLE before calling SanitizerHelper.cmake + find_program( + PYTHON_EXECUTABLE python3 + PATHS ENV PATH + NO_DEFAULT_PATH) + if(PYTHON_EXECUTABLE STREQUAL "PYTHON_EXECUTABLE-NOTFOUND") + message(FATAL_ERROR "Could not find Python, please set PYTHON_EXECUTABLE or NRN_PYTHON_DYNAMIC") + endif() +endif() +if(NRN_ENABLE_PYTHON_DYNAMIC AND NRN_PYTHON_DYNAMIC) + list(GET NRN_PYTHON_DYNAMIC 0 NRN_PYTHON_DYNAMIC_0) + if(PYTHON_EXECUTABLE AND NOT PYTHON_EXECUTABLE STREQUAL NRN_PYTHON_DYNAMIC_0) + # When NRN_ENABLE_PYTHON_DYNAMIC and NRN_PYTHON_DYNAMIC are set, the first entry of + # NRN_PYTHON_DYNAMIC is taken to be the default python version + message(WARNING "Default python version is ${NRN_PYTHON_DYNAMIC_0} (from NRN_PYTHON_DYNAMIC)," + " not ${PYTHON_EXECUTABLE} (from PYTHON_EXECUTABLE)," + " because NRN_ENABLE_PYTHON_DYNAMIC=ON") + endif() + set(python_executables ${NRN_PYTHON_DYNAMIC}) +else() + # In other cases, there is just one Python and it's PYTHON_EXECUTABLE. + set(python_executables "${PYTHON_EXECUTABLE}") +endif() + +# Given a name (e.g. python3.11) find the include directories, full executable path, libraries and +# version information. +# ~~~ +# Usage: nrn_find_python(NAME python3.11 PREFIX nrnpy) +# ~~~ +# Sets: +# +# * nrnpy_EXECUTABLE +# * nrnpy_INCLUDES +# * nrnpy_LIBRARIES +# * nrnpy_VERSION_MAJOR +# * nrnpy_VERSION_MINOR +# +# If NRN_ENABLE_PYTHON is *not* set then only nrnpy_EXECUTABLE will be set. There is some special +# handling on macOS when sanitizers are enabled: +# +# * if the Python executable does *not* belong to a virtual environment but *is* a shim (as is often +# the case with binaries like /usr/bin/python on macOS) then nrnpy_EXECUTABLE will be set to the +# real (non-shim) binary. +# * if the Python executable *does* point to a virtual environment that was configured using a +# Python shim, an error is emitted with advice on how to re-create the virtual environment using +# the real (non-shim) binary. 
+function(nrn_find_python) + set(oneVal NAME PREFIX) + cmake_parse_arguments(opt "" "${oneVal}" "" ${ARGN}) + if(opt_UNPARSED_ARGUMENTS) + message(FATAL_ERROR "Unexpected arguments: ${opt_UNPARSED_ARGUMENTS}") + endif() + if(opt_KEYWORDS_MISSING_VALUES) + message(FATAL_ERROR "${opt_KEYWORDS_MISSING_VALUES} values are required") + endif() + if(NOT IS_ABSOLUTE "${opt_NAME}") + # Find the full path to ${opt_NAME} as Python3_EXECUTABLE does not accept relative paths. + find_program( + "${opt_NAME}_full" "${opt_NAME}" + PATHS ENV PATH + NO_DEFAULT_PATH) + if(${opt_NAME}_full STREQUAL "${opt_NAME}_full-NOTFOUND") + set("${opt_PREFIX}_EXECUTABLE" + "${opt_PREFIX}_EXECUTABLE-NOTFOUND" + PARENT_SCOPE) + return() + endif() + set(opt_NAME "${${opt_NAME}_full}") + endif() + # Only bother finding version/include/library information if NRN_ENABLE_PYTHON is set. + if(NRN_ENABLE_PYTHON) + # Run find_package(Python3 ...) in a subprocess, so there is no pollution of CMakeCache.txt and + # so on. Our desire to include multiple Python versions in one build means we have to handle + # lists of versions/libraries/... manually. Unfortunately one cannot safely use find_package in + # CMake script mode, so we configure an extra project. + string(SHA1 pyexe_hash "${opt_NAME}") + string(SUBSTRING "${pyexe_hash}" 0 6 pyexe_hash) + # Which attributes we're trying to learn about this Python + set(python_vars Python3_INCLUDE_DIRS Python3_VERSION_MAJOR Python3_VERSION_MINOR) + if(NRN_ENABLE_PYTHON_DYNAMIC AND NOT NRN_LINK_AGAINST_PYTHON) + # Do not link against Python, so we don't need the library -- just as well, it's not available + # in manylinux + if(${CMAKE_VERSION} VERSION_LESS 3.18) + message( + FATAL_ERROR + "NRN_ENABLE_PYTHON_DYNAMIC=ON and NRN_LINK_AGAINST_PYTHON=OFF requires CMake >= 3.18 for the Development.Module component in FindPython" + ) + endif() + set(dev_component "Development.Module") + set(Python3_LIBRARIES "do-not-link-against-libpython-in-dynamic-python-builds") + else() + set(dev_component "Development") + list(APPEND python_vars Python3_LIBRARIES) + endif() + execute_process( + COMMAND + ${CMAKE_COMMAND} "-DPython3_EXECUTABLE:STRING=${opt_NAME}" + "-DPython3_COMPONENTS=${dev_component};Interpreter" -S + ${CMAKE_SOURCE_DIR}/cmake/ExecuteFindPython -B + ${CMAKE_BINARY_DIR}/ExecuteFindPython_${pyexe_hash} + RESULT_VARIABLE result + OUTPUT_VARIABLE stdout + ERROR_VARIABLE stderr) + if(NOT result EQUAL 0) + message(FATAL_ERROR "find_package could not discover information about ${opt_NAME}\n" + "status=${result}\n" "stdout:\n${stdout}\n" "stderr:\n${stderr}\n") + endif() + # Parse out the variables printed by ExecuteFindPython.cmake + foreach(var ${python_vars}) + string(REGEX MATCH "-- ${var}=([^\n]*)\n" _junk "${stdout}") + if(NOT _junk OR NOT CMAKE_MATCH_1) + message(FATAL_ERROR "Could not extract ${var} from\n===\n${stdout}\n===") + endif() + set(${var} "${CMAKE_MATCH_1}") + endforeach() + set("${opt_PREFIX}_INCLUDES" + "${Python3_INCLUDE_DIRS}" + PARENT_SCOPE) + set("${opt_PREFIX}_LIBRARIES" + "${Python3_LIBRARIES}" + PARENT_SCOPE) + set("${opt_PREFIX}_VERSION_MAJOR" + "${Python3_VERSION_MAJOR}" + PARENT_SCOPE) + set("${opt_PREFIX}_VERSION_MINOR" + "${Python3_VERSION_MINOR}" + PARENT_SCOPE) + endif() + # Finally do our special treatment for macOS + sanitizers + if(APPLE AND NRN_SANITIZERS) + # Detect if the binary we have in opt_NAME points to a virtual environment. 
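+    # (Inside a virtual environment sys.prefix differs from sys.base_prefix, so the one-liner
+    # below prints "True"; outside a venv it prints "False".)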
+ execute_process( + COMMAND "${opt_NAME}" -c "import sys; print(sys.prefix != sys.base_prefix)" + RESULT_VARIABLE code + OUTPUT_VARIABLE isvenv_str + OUTPUT_STRIP_TRAILING_WHITESPACE) + if(code EQUAL 0 AND isvenv_str STREQUAL "True") + # opt_NAME points into a virtual environment. This will only work with sanitizers if the + # {venv}/bin/python symlink does *not* point to a shim. + file(REAL_PATH "${opt_NAME}" pyexe_without_symlinks) + cpp_cc_strip_python_shims(EXECUTABLE "${pyexe_without_symlinks}" OUTPUT deshimmed) + if(NOT deshimmed STREQUAL pyexe_without_symlinks) + # this is the sad case: a virtual environment sitting on top of a shim + message( + FATAL_ERROR + "${opt_NAME} points into a virtual environment that was configured using a Python " + "shim. This will not work with sanitizers enabled on macOS.\nTry re-creating your " + "virtual environment using the real Python binary:\n" + "PYTHONEXECUTABLE=${deshimmed} ${deshimmed} -mvenv new_venv_path") + endif() + # the virtual environment sits on top of the real (non-shimmed) Python, so it should all work: + # opt_NAME is correct + elseif(code EQUAL 0 AND isvenv_str STREQUAL "False") + # opt_NAME does not point into a virtual environment, so we can safely strip out any shims + cpp_cc_strip_python_shims(EXECUTABLE "${opt_NAME}" OUTPUT opt_NAME) + else() + message(FATAL_ERROR "Could not determine if ${opt_NAME} points into a virtual environment " + "(code=${code} isvenv_str=${isvenv_str})") + endif() + endif() + set("${opt_PREFIX}_EXECUTABLE" + "${opt_NAME}" + PARENT_SCOPE) +endfunction() + +# For each Python in NRN_PYTHON_EXECUTABLES, find its version number, its include directory, and its +# library path. Store those in the new lists NRN_PYTHON_VERSIONS, NRN_PYTHON_INCLUDES and +# NRN_PYTHON_LIBRARIES. Set NRN_PYTHON_COUNT to be the length of those lists, and +# NRN_PYTHON_ITERATION_LIMIT to be NRN_PYTHON_COUNT - 1. +set(NRN_PYTHON_EXECUTABLES) +set(NRN_PYTHON_VERSIONS) +set(NRN_PYTHON_INCLUDES) +set(NRN_PYTHON_LIBRARIES) +foreach(pyexe ${python_executables}) + message(STATUS "Checking if ${pyexe} is a working python") + nrn_find_python(NAME "${pyexe}" PREFIX nrnpy) + if(NRN_ENABLE_PYTHON) + # If NRN_ENABLE_PYTHON=OFF then we're only using Python to run build scripts etc. + set(nrnpy_VERSION "${nrnpy_VERSION_MAJOR}.${nrnpy_VERSION_MINOR}") + if(${nrnpy_VERSION} VERSION_LESS NRN_MINIMUM_PYTHON_VERSION) + message(FATAL_ERROR "${pyexe} too old (${nrnpy_VERSION} < ${NRN_MINIMUM_PYTHON_VERSION})") + endif() + # Now nrnpy_INCLUDES and friends correspond to ${pyexe}. Assert that there is only one value per + # Python version for now, as otherwise we'd need to handle a list of lists... + list(LENGTH nrnpy_INCLUDES num_include_dirs) + list(LENGTH nrnpy_LIBRARIES num_lib_dirs) + if(NOT num_include_dirs EQUAL 1) + message(FATAL_ERROR "Cannot handle multiple Python include dirs: ${nrnpy_INCLUDES}") + endif() + if(NOT num_lib_dirs EQUAL 1) + message(FATAL_ERROR "Cannot handle multiple Python libraries: ${Python3_LIBRARIES}") + endif() + if(nrnpy_VERSION IN_LIST NRN_PYTHON_VERSIONS) + # We cannot build against multiple copies of the same pythonX.Y version. 
+ message(FATAL_ERROR "Got duplicate version ${nrnpy_VERSION} from ${pyexe}") + endif() + list(APPEND NRN_PYTHON_VERSIONS "${nrnpy_VERSION}") + list(APPEND NRN_PYTHON_INCLUDES "${nrnpy_INCLUDES}") + list(APPEND NRN_PYTHON_LIBRARIES "${nrnpy_LIBRARIES}") + endif() + list(APPEND NRN_PYTHON_EXECUTABLES "${nrnpy_EXECUTABLE}") +endforeach() +# In any case, the default (NRN_DEFAULT_PYTHON_EXECUTABLE) should always be the zeroth entry in the +# list of Pythons, and we need to set it even if NRN_ENABLE_PYTHON=OFF -- for use in build scripts. +list(GET NRN_PYTHON_EXECUTABLES 0 NRN_DEFAULT_PYTHON_EXECUTABLE) +list(GET NRN_PYTHON_VERSIONS 0 NRN_DEFAULT_PYTHON_VERSION) +if(NRN_ENABLE_PYTHON) + list(GET NRN_PYTHON_INCLUDES 0 NRN_DEFAULT_PYTHON_INCLUDES) + list(GET NRN_PYTHON_LIBRARIES 0 NRN_DEFAULT_PYTHON_LIBRARIES) + list(LENGTH NRN_PYTHON_EXECUTABLES NRN_PYTHON_COUNT) + math(EXPR NRN_PYTHON_ITERATION_LIMIT "${NRN_PYTHON_COUNT} - 1") +endif() +if(NRN_ENABLE_TESTS AND NRN_ENABLE_PYTHON) + # Make sure that, if NRN_PYTHON_EXTRA_FOR_TESTS is set, none of its versions clash with versions + # we're building against + set(NRN_PYTHON_EXTRA_FOR_TESTS_EXECUTABLES) + set(NRN_PYTHON_EXTRA_FOR_TESTS_VERSIONS) + foreach(pyexe ${NRN_PYTHON_EXTRA_FOR_TESTS}) + nrn_find_python(NAME "${pyexe}" PREFIX nrnpy) + set(nrnpy_VERSION "${nrnpy_VERSION_MAJOR}.${nrnpy_VERSION_MINOR}") + if(nrnpy_VERSION IN_LIST NRN_PYTHON_VERSIONS) + string(JOIN ", " versions ${NRN_PYTHON_VERSIONS}) + message(FATAL_ERROR "NRN_PYTHON_EXTRA_FOR_TESTS=${NRN_PYTHON_EXTRA_FOR_TESTS} cannot contain" + " Python versions that NEURON *is* being built against (${versions})") + endif() + list(APPEND NRN_PYTHON_EXTRA_FOR_TESTS_EXECUTABLES "${nrnpy_EXECUTABLE}") + list(APPEND NRN_PYTHON_EXTRA_FOR_TESTS_VERSIONS "${nrnpy_VERSION}") + endforeach() + list(LENGTH NRN_PYTHON_EXTRA_FOR_TESTS NRN_PYTHON_EXTRA_FOR_TESTS_COUNT) + if(NRN_PYTHON_EXTRA_FOR_TESTS) + math(EXPR NRN_PYTHON_EXTRA_FOR_TESTS_ITERATION_LIMIT "${NRN_PYTHON_EXTRA_FOR_TESTS_COUNT} - 1") + endif() +endif() diff --git a/cmake/ReleaseDebugAutoFlags.cmake b/cmake/ReleaseDebugAutoFlags.cmake index 61565e60f4..747b8801e6 100644 --- a/cmake/ReleaseDebugAutoFlags.cmake +++ b/cmake/ReleaseDebugAutoFlags.cmake @@ -21,6 +21,9 @@ endif() # Release : Release mode, no debuginfo # RelWithDebInfo : Distribution mode, basic optimizations for portable code with debuginfos # Fast : Maximum level of optimization. Target native architecture, not portable code +# FastDebug: Similar to Debug with a bit higher level optimisations (-O1) and other compiler +# flags so that it's faster than -O0 but still produces consistent results for +# testing and debugging purposes. 
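+# A usage note (illustrative, not from the original comment): one of these build types is chosen
+# at configure time in the standard CMake way, e.g. by passing -DCMAKE_BUILD_TYPE=FastDebug to
+# cmake.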
# ~~~ include(CompilerFlagsHelpers) @@ -33,17 +36,56 @@ set(CMAKE_CXX_FLAGS_DEBUG "${CMAKE_CXX_DEBUGINFO_FLAGS} ${CMAKE_CXX_OPT_NONE} ${CMAKE_CXX_STACK_PROTECTION} ${CMAKE_CXX_IGNORE_WARNINGS}" ) -set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_OPT_NORMAL} ${CMAKE_C_IGNORE_WARNINGS}") -set(CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_OPT_NORMAL} ${CMAKE_CXX_IGNORE_WARNINGS}") +set(C_UNSAFE_MATH_FLAGS "") +set(CXX_UNSAFE_MATH_FLAGS "") +if(NRN_ENABLE_MATH_OPT) + set(C_UNSAFE_MATH_FLAGS ${CMAKE_C_UNSAFE_MATH}) + set(CXX_UNSAFE_MATH_FLAGS ${CMAKE_CXX_UNSAFE_MATH}) +endif() + +set(CMAKE_C_FLAGS_RELEASE "${CMAKE_C_OPT_NORMAL} ${CMAKE_C_IGNORE_WARNINGS} ${C_UNSAFE_MATH_FLAGS}") +set(CMAKE_CXX_FLAGS_RELEASE + "${CMAKE_CXX_OPT_NORMAL} ${CMAKE_CXX_IGNORE_WARNINGS} ${CXX_UNSAFE_MATH_FLAGS}") set(CMAKE_C_FLAGS_RELWITHDEBINFO - "${CMAKE_C_DEBUGINFO_FLAGS} ${CMAKE_C_OPT_NORMAL} ${CMAKE_C_IGNORE_WARNINGS}") + "${CMAKE_C_DEBUGINFO_FLAGS} ${CMAKE_C_OPT_NORMAL} ${CMAKE_C_IGNORE_WARNINGS} ${C_UNSAFE_MATH_FLAGS}" +) set(CMAKE_CXX_FLAGS_RELWITHDEBINFO - "${CMAKE_CXX_DEBUGINFO_FLAGS} ${CMAKE_CXX_OPT_NORMAL} ${CMAKE_CXX_IGNORE_WARNINGS}") + "${CMAKE_CXX_DEBUGINFO_FLAGS} ${CMAKE_CXX_OPT_NORMAL} ${CMAKE_CXX_IGNORE_WARNINGS} ${CXX_UNSAFE_MATH_FLAGS}" +) set(CMAKE_C_FLAGS_FAST "${CMAKE_C_OPT_FAST} ${CMAKE_C_LINK_TIME_OPT} ${CMAKE_C_GEN_NATIVE} ${CMAKE_C_IGNORE_WARNINGS}") set(CMAKE_CXX_FLAGS_FAST "${CMAKE_CXX_OPT_FAST} ${CMAKE_CXX_LINK_TIME_OPT} ${CMAKE_CXX_GEN_NATIVE} ${CMAKE_CXX_IGNORE_WARNINGS}" ) + +set(CMAKE_C_FLAGS_FASTDEBUG "${CMAKE_C_OPT_FASTDEBUG} ${CMAKE_C_IGNORE_WARNINGS}") +set(CMAKE_CXX_FLAGS_FASTDEBUG "${CMAKE_CXX_OPT_FASTDEBUG} ${CMAKE_CXX_IGNORE_WARNINGS}") # ~~~ + +# for binary distributions, avoid addition of OpenMP specific flag as compiler on end-user machine +# may not support it. 
+if(NOT DEFINED NRN_BINARY_DIST_BUILD OR NOT NRN_BINARY_DIST_BUILD) + include(CheckCXXCompilerFlag) + # Check support for OpenMP SIMD constructs + set(SIMD_FLAGS "") + + if("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel") + set(SIMD_FLAGS "-qopenmp-simd") + elseif("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC") + set(SIMD_FLAGS "-openmp:experimental") + elseif(CMAKE_CXX_COMPILER_ID MATCHES "Clang") + set(SIMD_FLAGS "-openmp-simd") + else() # not ICC, MSVC, or Clang => GCC and others + set(SIMD_FLAGS "-fopenmp-simd") + endif() + + check_cxx_compiler_flag("${SIMD_FLAGS}" COMPILER_SUPPORT_SIMD) + if(COMPILER_SUPPORT_SIMD) + set(CMAKE_C_FLAGS "${CMAKE_C_FLAGS} ${SIMD_FLAGS}") + set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} ${SIMD_FLAGS}") + else() + message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no support for OpenMP SIMD construct") + endif() +endif() diff --git a/cmake/SanitizerHelper.cmake b/cmake/SanitizerHelper.cmake index f7ee11f6ad..e88a1db7ea 100644 --- a/cmake/SanitizerHelper.cmake +++ b/cmake/SanitizerHelper.cmake @@ -8,11 +8,14 @@ set(${CODING_CONV_PREFIX}_SANITIZERS_UNDEFINED_EXCLUSIONS float-divide-by-zero implicit-signed-integer-truncation unsigned-integer-overflow CACHE STRING "" FORCE) include("${CODING_CONV_CMAKE}/sanitizers.cmake") +include(${CODING_CONV_CMAKE}/build-time-copy.cmake) + # Propagate the sanitizer flags to the NEURON sources list(APPEND NRN_COMPILE_FLAGS ${NRN_SANITIZER_COMPILER_FLAGS}) list(APPEND NRN_LINK_FLAGS ${NRN_SANITIZER_COMPILER_FLAGS}) # And to CoreNEURON as we don't have CORENRN_SANITIZERS any more list(APPEND CORENRN_EXTRA_CXX_FLAGS ${NRN_SANITIZER_COMPILER_FLAGS}) +list(APPEND CORENRN_EXTRA_MECH_CXX_FLAGS ${NRN_SANITIZER_COMPILER_FLAGS}) list(APPEND CORENRN_EXTRA_LINK_FLAGS ${NRN_SANITIZER_COMPILER_FLAGS}) if(NRN_SANITIZER_LIBRARY_DIR) # At least Clang 14 does not add rpath entries for the sanitizer runtime libraries.
Adding this @@ -32,6 +35,9 @@ if(NRN_SANITIZERS) if("address" IN_LIST nrn_sanitizers) list(APPEND NRN_COMPILE_DEFS NRN_ASAN_ENABLED) endif() + if("thread" IN_LIST nrn_sanitizers) + list(APPEND NRN_COMPILE_DEFS NRN_TSAN_ENABLED) + endif() # generate and install a launcher script called nrn-enable-sanitizer [--preload] that sets # *SAN_OPTIONS variables and, optionally, LD_PRELOAD -- this is useful both in CI configuration # and when using the sanitizers "downstream" of NEURON @@ -42,8 +48,8 @@ if(NRN_SANITIZERS) # directory foreach(sanitizer ${nrn_sanitizers}) if(EXISTS "${PROJECT_SOURCE_DIR}/.sanitizers/${sanitizer}.supp") - configure_file(".sanitizers/${sanitizer}.supp" "share/nrn/sanitizers/${sanitizer}.supp" - COPYONLY) + configure_file("${PROJECT_SOURCE_DIR}/.sanitizers/${sanitizer}.supp" + "${PROJECT_BINARY_DIR}/share/nrn/sanitizers/${sanitizer}.supp" COPYONLY) install(FILES "${PROJECT_BINARY_DIR}/share/nrn/sanitizers/${sanitizer}.supp" DESTINATION "${CMAKE_INSTALL_PREFIX}/share/nrn/sanitizers") endif() @@ -56,10 +62,4 @@ if(NRN_SANITIZERS) if(NRN_SANITIZER_LIBRARY_PATH) set(NRN_SANITIZER_LD_PRELOAD "${NRN_SANITIZER_PRELOAD_VAR}=${NRN_SANITIZER_LIBRARY_PATH}") endif() - # Needed for using sanitizers on macOS - cpp_cc_strip_python_shims(EXECUTABLE "${PYTHON_EXECUTABLE}" OUTPUT PYTHON_EXECUTABLE) - set(NRN_DEFAULT_PYTHON_EXECUTABLE "${PYTHON_EXECUTABLE}") - configure_file(bin/nrn-enable-sanitizer.in bin/nrn-enable-sanitizer @ONLY) - install(PROGRAMS ${PROJECT_BINARY_DIR}/bin/nrn-enable-sanitizer - DESTINATION ${CMAKE_INSTALL_PREFIX}/bin) endif() diff --git a/cmake/coreneuron/AddMod2cSubmodule.cmake b/cmake/coreneuron/AddMod2cSubmodule.cmake deleted file mode 100644 index 9c1f505569..0000000000 --- a/cmake/coreneuron/AddMod2cSubmodule.cmake +++ /dev/null @@ -1,29 +0,0 @@ -# ============================================================================= -# Copyright (C) 2016-2021 Blue Brain Project -# -# See top-level LICENSE file for details. -# ============================================================================= - -find_package(FindPkgConfig QUIET) - -find_path( - MOD2C_PROJ - NAMES CMakeLists.txt - PATHS "${PROJECT_SOURCE_DIR}/external/mod2c") - -find_package_handle_standard_args(MOD2C REQUIRED_VARS MOD2C_PROJ) - -if(NOT MOD2C_FOUND) - find_package(Git 1.8.3 QUIET) - if(NOT ${GIT_FOUND}) - message(FATAL_ERROR "git not found, clone repository with --recursive") - endif() - message(STATUS "Sub-module mod2c missing : running git submodule update --init --recursive") - execute_process( - COMMAND ${GIT_EXECUTABLE} submodule update --init --recursive -- - ${PROJECT_SOURCE_DIR}/external/mod2c WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) -else() - message(STATUS "Using mod2c submodule from ${MOD2C_PROJ}") -endif() - -add_subdirectory(${PROJECT_SOURCE_DIR}/external/mod2c ${CMAKE_BINARY_DIR}/external/mod2c) diff --git a/cmake/coreneuron/AddNmodlSubmodule.cmake b/cmake/coreneuron/AddNmodlSubmodule.cmake deleted file mode 100644 index 021297f7ed..0000000000 --- a/cmake/coreneuron/AddNmodlSubmodule.cmake +++ /dev/null @@ -1,29 +0,0 @@ -# ============================================================================= -# Copyright (C) 2016-2021 Blue Brain Project -# -# See top-level LICENSE file for details. 
-# ============================================================================= - -find_package(FindPkgConfig QUIET) - -find_path( - NMODL_PROJ - NAMES CMakeLists.txt - PATHS "${PROJECT_SOURCE_DIR}/external/nmodl") - -find_package_handle_standard_args(NMODL REQUIRED_VARS NMODL_PROJ) - -if(NOT NMODL_FOUND) - find_package(Git 1.8.3 QUIET) - if(NOT ${GIT_FOUND}) - message(FATAL_ERROR "git not found, clone repository with --recursive") - endif() - message(STATUS "Sub-module nmodl missing : running git submodule update --init") - execute_process( - COMMAND ${GIT_EXECUTABLE} submodule update --init -- ${PROJECT_SOURCE_DIR}/external/nmodl - WORKING_DIRECTORY ${PROJECT_SOURCE_DIR}) -else() - message(STATUS "Using nmodl submodule from ${NMODL_PROJ}") -endif() - -add_subdirectory(${PROJECT_SOURCE_DIR}/external/nmodl ${CMAKE_BINARY_DIR}/external/nmodl) diff --git a/cmake/coreneuron/MakefileBuildOptions.cmake b/cmake/coreneuron/MakefileBuildOptions.cmake index a95a03e21c..1ab2de8e92 100644 --- a/cmake/coreneuron/MakefileBuildOptions.cmake +++ b/cmake/coreneuron/MakefileBuildOptions.cmake @@ -149,8 +149,8 @@ string( ${CORENRN_EXTRA_COMPILE_FLAGS}) # ============================================================================= -# nmodl/mod2c related options : TODO +# nmodl related options : TODO # ============================================================================= -# name of nmodl/mod2c binary -get_filename_component(nmodl_name ${CORENRN_MOD2CPP_BINARY} NAME) +# name of nmodl binary +get_filename_component(nmodl_name ${CORENRN_NMODL_BINARY} NAME) set(nmodl_binary_name ${nmodl_name}) diff --git a/cmake/coreneuron/OpenAccHelper.cmake b/cmake/coreneuron/OpenAccHelper.cmake index a21f8b5239..113ab18703 100644 --- a/cmake/coreneuron/OpenAccHelper.cmake +++ b/cmake/coreneuron/OpenAccHelper.cmake @@ -30,9 +30,6 @@ endfunction() # Prepare compiler flags for GPU target # ============================================================================= if(CORENRN_ENABLE_GPU) - # Get the NVC++ version number for use in nrnivmodl_core_makefile.in - cnrn_parse_version(${CMAKE_CXX_COMPILER_VERSION} OUTPUT_MAJOR_MINOR - CORENRN_NVHPC_MAJOR_MINOR_VERSION) # Enable cudaProfiler{Start,Stop}() behind the Instrumentor::phase... APIs list(APPEND CORENRN_COMPILE_DEFS CORENEURON_CUDA_PROFILING CORENEURON_ENABLE_GPU) # Plain C++ code in CoreNEURON may need to use CUDA runtime APIs for, for example, starting and @@ -133,11 +130,6 @@ if(CORENRN_HAVE_NVHPC_COMPILER) # "include/Random123/features/sse.h", warning #550-D: variable "edx" was set but never used # ~~~ set(CORENEURON_CXX_WARNING_SUPPRESSIONS --diag_suppress=111,550) - # This one can be a bit more targeted - # ~~~ - # "boost/test/unit_test_log.hpp", warning #612-D: overloaded virtual function "..." is only partially overridden in class "..." - # ~~~ - set(CORENEURON_BOOST_UNIT_TEST_COMPILE_FLAGS --diag_suppress=612) # Extra suppressions for .cpp files translated from .mod files. 
# ~~~ # "x86_64/corenrn/mod2c/pattern.cpp", warning #161-D: unrecognized #pragma diff --git a/cmake/coreneuron/packages/Findnmodl.cmake b/cmake/coreneuron/packages/Findnmodl.cmake index 6a1dbcee50..c68ff64c7e 100644 --- a/cmake/coreneuron/packages/Findnmodl.cmake +++ b/cmake/coreneuron/packages/Findnmodl.cmake @@ -35,7 +35,7 @@ find_program( NAMES nmodl${CMAKE_EXECUTABLE_SUFFIX} HINTS "${CORENRN_NMODL_DIR}/bin" QUIET) -find_path(nmodl_INCLUDE "nmodl/fast_math.hpp" HINTS "${CORENRN_NMODL_DIR}/include") +find_path(nmodl_INCLUDE "nmodl.hpp" HINTS "${CORENRN_NMODL_DIR}/include") find_path(nmodl_PYTHONPATH "nmodl/__init__.py" HINTS "${CORENRN_NMODL_DIR}/lib") # Checks 'REQUIRED', 'QUIET' and versions. diff --git a/cmake/modules/FindCython.cmake b/cmake/modules/FindCython.cmake index b18c16a8a3..dc77f361f6 100644 --- a/cmake/modules/FindCython.cmake +++ b/cmake/modules/FindCython.cmake @@ -22,16 +22,11 @@ # ============================================================================= # Use the Cython executable that lives next to the Python executable if it is a local installation. -find_package(PythonInterp) -if(PYTHONINTERP_FOUND) - get_filename_component(_python_path ${PYTHON_EXECUTABLE} PATH) - find_program( - CYTHON_EXECUTABLE - NAMES cython cython.bat cython3 - HINTS ${_python_path}) -else() - find_program(CYTHON_EXECUTABLE NAMES cython cython.bat cython3) -endif() +get_filename_component(_python_path ${NRN_DEFAULT_PYTHON_EXECUTABLE} PATH) +find_program( + CYTHON_EXECUTABLE + NAMES cython cython.bat cython3 + HINTS ${_python_path}) if(NOT CYTHON_EXECUTABLE STREQUAL "CYTHON_EXECUTABLE-NOTFOUND") execute_process( diff --git a/cmake/modules/FindPythonLibsNew.cmake b/cmake/modules/FindPythonLibsNew.cmake deleted file mode 100644 index 01a7eb2b99..0000000000 --- a/cmake/modules/FindPythonLibsNew.cmake +++ /dev/null @@ -1,209 +0,0 @@ -# ~~~ -# ============================================================================= -# Find libraries corresponding to Python interpreter -# ============================================================================= -# Using this module from Pybind11, see: github.com/pybind/pybind11/pull/207 - -# * Find python libraries This module finds the libraries corresponding to the Python interpreter -# FindPythonInterp provides. This code sets the following variables: -# -# PYTHONLIBS_FOUND - have the Python libs been found PYTHON_PREFIX - path to -# the Python installation PYTHON_LIBRARIES - path to the python library -# PYTHON_INCLUDE_DIRS - path to where Python.h is found PYTHON_MODULE_EXTENSION - lib -# extension, e.g. '.so' or '.pyd' PYTHON_MODULE_PREFIX - lib name prefix: usually an empty -# string PYTHON_SITE_PACKAGES - path to installation site-packages PYTHON_IS_DEBUG - whether -# the Python interpreter is a debug build -# -# Thanks to talljimbo for the patch adding the 'LDVERSION' config variable usage. - -# ============================================================================= -# Copyright 2001-2009 Kitware, Inc. Copyright 2012 Continuum Analytics, Inc. -# -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without modification, are permitted -# provided that the following conditions are met: -# -# * Redistributions of source code must retain the above copyright notice, this list of conditions -# and the following disclaimer. 
-# -# * Redistributions in binary form must reproduce the above copyright notice, this list of -# conditions and the following disclaimer in the documentation and/or other materials provided -# with the distribution. -# -# * Neither the names of Kitware, Inc., the Insight Software Consortium, nor the names of their -# contributors may be used to endorse or promote products derived from this software without -# specific prior written permission. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR -# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND -# FITNESS FOR # A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR -# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL -# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, -# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER -# IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT -# OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -# ============================================================================= - -# Checking for the extension makes sure that `LibsNew` was found and not just `Libs`. -if(PYTHONLIBS_FOUND AND PYTHON_MODULE_EXTENSION) - return() -endif() - -# Use the Python interpreter to find the libs. -if(PythonLibsNew_FIND_REQUIRED) - find_package(PythonInterp ${PythonLibsNew_FIND_VERSION} REQUIRED) -else() - find_package(PythonInterp ${PythonLibsNew_FIND_VERSION}) -endif() - -if(NOT PYTHONINTERP_FOUND) - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# According to http://stackoverflow.com/questions/646518/python-how-to-detect-debug-interpreter -# testing whether sys has the gettotalrefcount function is a reliable, cross-platform way to detect -# a CPython debug interpreter. -# -# The library suffix is from the config var LDVERSION sometimes, otherwise VERSION. VERSION will -# typically be like "2.7" on unix, and "27" on windows. 
-execute_process( - COMMAND - "${PYTHON_EXECUTABLE}" "-c" "import sysconfig;import sys;import struct; -print('.'.join(str(v) for v in sys.version_info)); -print(sys.prefix); -print(sysconfig.get_path('include')); -print(sysconfig.get_path('platlib')); -print(sysconfig.get_config_var('EXT_SUFFIX')); -print(hasattr(sys, 'gettotalrefcount')+0); -print(struct.calcsize('@P')); -print(sysconfig.get_config_var('LDVERSION') or sysconfig.get_config_var('VERSION')); -print(sysconfig.get_config_var('LIBDIR') or ''); -print(sysconfig.get_config_var('MULTIARCH') or ''); -" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE _PYTHON_VALUES - ERROR_VARIABLE _PYTHON_ERROR_VALUE) - -if(NOT _PYTHON_SUCCESS MATCHES 0) - if(PythonLibsNew_FIND_REQUIRED) - message(FATAL_ERROR "Python config failure:\n${_PYTHON_VALUES}\n${_PYTHON_ERROR_VALUE}") - endif() - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# Convert the process output into a list -if(WIN32) - string(REGEX REPLACE "\\\\" "/" _PYTHON_VALUES ${_PYTHON_VALUES}) -endif() -string(REGEX REPLACE ";" "\\\\;" _PYTHON_VALUES ${_PYTHON_VALUES}) -string(REGEX REPLACE "\n" ";" _PYTHON_VALUES ${_PYTHON_VALUES}) -list(GET _PYTHON_VALUES 0 _PYTHON_VERSION_LIST) -list(GET _PYTHON_VALUES 1 PYTHON_PREFIX) -if(NOT DEFINED PYTHON_INCLUDE_DIR) - list(GET _PYTHON_VALUES 2 PYTHON_INCLUDE_DIR) -endif() -list(GET _PYTHON_VALUES 3 PYTHON_SITE_PACKAGES) -list(GET _PYTHON_VALUES 4 PYTHON_MODULE_EXTENSION) -list(GET _PYTHON_VALUES 5 PYTHON_IS_DEBUG) -list(GET _PYTHON_VALUES 6 PYTHON_SIZEOF_VOID_P) -list(GET _PYTHON_VALUES 7 PYTHON_LIBRARY_SUFFIX) -list(GET _PYTHON_VALUES 8 PYTHON_LIBDIR) -list(GET _PYTHON_VALUES 9 PYTHON_MULTIARCH) - -# Make sure the Python has the same pointer-size as the chosen compiler Skip if CMAKE_SIZEOF_VOID_P -# is not defined -if(CMAKE_SIZEOF_VOID_P AND (NOT "${PYTHON_SIZEOF_VOID_P}" STREQUAL "${CMAKE_SIZEOF_VOID_P}")) - if(PythonLibsNew_FIND_REQUIRED) - math(EXPR _PYTHON_BITS "${PYTHON_SIZEOF_VOID_P} * 8") - math(EXPR _CMAKE_BITS "${CMAKE_SIZEOF_VOID_P} * 8") - message(FATAL_ERROR "Python config failure: Python is ${_PYTHON_BITS}-bit, " - "chosen compiler is ${_CMAKE_BITS}-bit") - endif() - set(PYTHONLIBS_FOUND FALSE) - return() -endif() - -# The built-in FindPython didn't always give the version numbers -string(REGEX REPLACE "\\." ";" _PYTHON_VERSION_LIST ${_PYTHON_VERSION_LIST}) -list(GET _PYTHON_VERSION_LIST 0 PYTHON_VERSION_MAJOR) -list(GET _PYTHON_VERSION_LIST 1 PYTHON_VERSION_MINOR) -list(GET _PYTHON_VERSION_LIST 2 PYTHON_VERSION_PATCH) - -# Make sure all directory separators are '/' -string(REGEX REPLACE "\\\\" "/" PYTHON_PREFIX ${PYTHON_PREFIX}) -string(REGEX REPLACE "\\\\" "/" PYTHON_INCLUDE_DIR ${PYTHON_INCLUDE_DIR}) -string(REGEX REPLACE "\\\\" "/" PYTHON_SITE_PACKAGES ${PYTHON_SITE_PACKAGES}) - -if(CMAKE_HOST_WIN32) - set(PYTHON_LIBRARY "${PYTHON_PREFIX}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib") - - # when run in a venv, PYTHON_PREFIX points to it. But the libraries remain in the original python - # installation. They may be found relative to PYTHON_INCLUDE_DIR. - if(NOT EXISTS "${PYTHON_LIBRARY}") - get_filename_component(_PYTHON_ROOT ${PYTHON_INCLUDE_DIR} DIRECTORY) - set(PYTHON_LIBRARY "${_PYTHON_ROOT}/libs/Python${PYTHON_LIBRARY_SUFFIX}.lib") - endif() - - # raise an error if the python libs are still not found. 
- if(NOT EXISTS "${PYTHON_LIBRARY}") - message(FATAL_ERROR "Python libraries not found") - endif() - -else() - if(PYTHON_MULTIARCH) - set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}/${PYTHON_MULTIARCH}" "${PYTHON_LIBDIR}") - else() - set(_PYTHON_LIBS_SEARCH "${PYTHON_LIBDIR}") - endif() - # message(STATUS "Searching for Python libs in ${_PYTHON_LIBS_SEARCH}") Probably this needs to be - # more involved. It would be nice if the config information the python interpreter itself gave us - # were more complete. - find_library( - PYTHON_LIBRARY - NAMES "python${PYTHON_LIBRARY_SUFFIX}" - PATHS ${_PYTHON_LIBS_SEARCH} - NO_DEFAULT_PATH) - - # If all else fails, just set the name/version and let the linker figure out the path. - if(NOT PYTHON_LIBRARY) - set(PYTHON_LIBRARY python${PYTHON_LIBRARY_SUFFIX}) - # Since this isn't very robust. One more try with find_libpython.py - execute_process( - COMMAND "${PYTHON_EXECUTABLE}" "${CMAKE_SOURCE_DIR}/cmake/find_libpython.py" - RESULT_VARIABLE _PYTHON_SUCCESS - OUTPUT_VARIABLE _PYTHON_VALUES - ERROR_VARIABLE _PYTHON_ERROR_VALUE) - if(_PYTHON_SUCCESS MATCHES 0) - if(_PYTHON_VALUES) - set(PYTHON_LIBRARY "${_PYTHON_VALUES}") - message(STATUS "PYTHON_LIBRARY from find_libpython.py: \"${PYTHON_LIBRARY}\"") - endif() - endif() - endif() -endif() - -mark_as_advanced(PYTHON_LIBRARY PYTHON_INCLUDE_DIR) - -# Make sure Python includes exist -if(NOT EXISTS ${PYTHON_INCLUDE_DIR}) - message( - FATAL_ERROR - "Could not find Python.h in ${PYTHON_INCLUDE_DIR}, install python-dev package (e.g. On Ubuntu : apt-get install python${PYTHON_VERSION_MAJOR}.${PYTHON_VERSION_MINOR}-dev)" - ) -endif() - -# We use PYTHON_INCLUDE_DIR, PYTHON_LIBRARY and PYTHON_DEBUG_LIBRARY for the cache entries because -# they are meant to specify the location of a single library. We now set the variables listed by the -# documentation for this module. -set(PYTHON_INCLUDE_DIRS "${PYTHON_INCLUDE_DIR}") -set(PYTHON_LIBRARIES "${PYTHON_LIBRARY}") -set(PYTHON_DEBUG_LIBRARIES "${PYTHON_DEBUG_LIBRARY}") - -find_package_message(PYTHON "Found PythonLibs: ${PYTHON_LIBRARY}" - "${PYTHON_EXECUTABLE}${PYTHON_VERSION}") - -set(PYTHONLIBS_FOUND TRUE) -# ~~~ diff --git a/cmake/modules/FindPythonModule.cmake b/cmake/modules/FindPythonModule.cmake index 2146efa4c0..9ea86a3972 100644 --- a/cmake/modules/FindPythonModule.cmake +++ b/cmake/modules/FindPythonModule.cmake @@ -1,6 +1,14 @@ -# * Macro to find a python module +# Usage: nrn_find_python_module(MODULE module [VERSION version] [REQUIRED] [ALL]) # -# Usage: find_python_module (module [VERSION] [REQUIRED]) +# * MODULE: specifies the Python module to try and import +# * ALL: if specified, checks that the module exists in *all* of the Python executables +# ${NRN_PYTHON_EXECUTABLES}, otherwise checks ${NRN_DEFAULT_PYTHON_EXECUTABLE} +# * REQUIRED: if specified, emit a FATAL_ERROR if the module cannot be found +# * VERSION: if specified, require the package version is at least `version` +# +# Sets UPPERCASEMODULENAME_FOUND if the module was found. 
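+#
+# For example (an illustrative call; the module name and version are placeholders):
+# nrn_find_python_module(MODULE numpy VERSION 1.9 ALL) sets NUMPY_FOUND to ON only if numpy >= 1.9
+# can be imported by every Python in ${NRN_PYTHON_EXECUTABLES}; adding REQUIRED turns a failure
+# into a FATAL_ERROR.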
+# +# Extensively modified for NEURON, but original license information below: # # Copyright 2005-2018 Airbus-EDF-IMACS-Phimeca # @@ -9,63 +17,90 @@ # # https://github.com/openturns/otsubsetinverse/blob/master/cmake/FindPythonModule.cmake -macro(nrn_find_python_module module) - - string(TOUPPER ${module} module_upper) - if(NOT ${module_upper}_FOUND) - - # parse arguments - set(${module}_FIND_OPTIONAL TRUE) - if(${ARGC} EQUAL 2) - if(${ARGV1} MATCHES REQUIRED) - set(${module}_FIND_OPTIONAL FALSE) - else() - set(${module}_FIND_VERSION ${ARGV1}) - endif() - elseif(${ARGC} EQUAL 3) - if(${ARGV2} MATCHES REQUIRED) - set(${module}_FIND_OPTIONAL FALSE) - endif() - set(${module}_FIND_VERSION ${ARGV1}) - endif() - +macro(nrn_find_python_module) + cmake_parse_arguments(_find_module "ALL;REQUIRED" "MODULE;VERSION" "" ${ARGN}) + if(NOT _find_module_MODULE) + message(FATAL_ERROR "The MODULE option is required") + endif() + # Which Pythons are we going to check + if(_find_module_ALL) + set(_find_module_pythons ${NRN_PYTHON_EXECUTABLES}) + else() + set(_find_module_pythons "${NRN_DEFAULT_PYTHON_EXECUTABLE}") + endif() + # Reset these (we're a macro, after all) + set(_find_module_versions) + set(_find_module_not_found) + set(_find_module_insufficient_version) + # Loop through Pythons, track lists of which Pythons had missing/insufficient versions + foreach(_find_module_python ${_find_module_pythons}) # A module's location is usually a directory, but for binary modules it's a .so file. + string(JOIN "; " _find_module_python_code "import re, ${_find_module_MODULE}" + "print(re.compile('/__init__.py.*').sub('',${_find_module_MODULE}.__file__))") execute_process( - COMMAND "${PYTHON_EXECUTABLE}" "-c" - "import re, ${module}; print(re.compile('/__init__.py.*').sub('',${module}.__file__))" - RESULT_VARIABLE _${module}_status - OUTPUT_VARIABLE _${module}_location + COMMAND "${_find_module_python}" "-c" "${_find_module_python_code}" + RESULT_VARIABLE _find_module_status + OUTPUT_VARIABLE _find_module_location ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) - if(NOT _${module}_status) - set(${module_upper}_LOCATION - ${_${module}_location} - CACHE STRING "Location of Python module ${module}") - # retrieve version - execute_process( - COMMAND "${PYTHON_EXECUTABLE}" "-c" "import ${module}; print(${module}.__version__)" - RESULT_VARIABLE _${module}_status - OUTPUT_VARIABLE _${module}_version - ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) - - set(_${module_upper}_VERSION_MATCH TRUE) - if(NOT _${module}_status) - set(${module_upper}_VERSION_STRING ${_${module}_version}) - if(${module}_FIND_VERSION) - if(${module}_FIND_VERSION VERSION_GREATER ${module_upper}_VERSION_STRING) - set(_${module_upper}_VERSION_MATCH FALSE) - endif() - endif() - mark_as_advanced(${module_upper}_VERSION_STRING) + if(_find_module_status) + # module not found with this Python + list(APPEND _find_module_not_found "${_find_module_python}") + # keep going for the sake of a good error + continue() + endif() + # module *was* found with this Python; retrieve version + execute_process( + COMMAND "${_find_module_python}" "-c" + "import ${_find_module_MODULE}; print(${_find_module_MODULE}.__version__)" + RESULT_VARIABLE _find_module_status + OUTPUT_VARIABLE _find_module_version + ERROR_QUIET OUTPUT_STRIP_TRAILING_WHITESPACE) + if(_find_module_status) + # module version could not be determined, some packages don't define __version__ if VERSION + # was passed, we may need to fail because of this + if(_find_module_VERSION) + list(APPEND 
_find_module_insufficient_version "${_find_module_python}-UNKNOWN") endif() + # keep going, this may not be fatal + continue() endif() - - find_package_handle_standard_args( - ${module} - REQUIRED_VARS ${module_upper}_LOCATION _${module_upper}_VERSION_MATCH - VERSION_VAR ${module_upper}_VERSION_STRING) - if(NOT ${module}_FIND_OPTIONAL AND NOT _${module_upper}_VERSION_MATCH) - message(FATAL_ERROR "Missing python module ${module}") + # module version is in _find_module_version + list(APPEND _find_module_versions "${_find_module_version}") + if(_find_module_VERSION AND _find_module_VERSION VERSION_GREATER "${_find_module_version}") + # a minimum version was specified, and it wasn't met with this Python + list(APPEND _find_module_insufficient_version + "${_find_module_python}-${_find_module_version}") + # keep going for the sake of a good error + continue() + endif() + endforeach() + string(TOUPPER "${_find_module_MODULE}" _find_module_upper) + if(_find_module_not_found OR _find_module_insufficient_version) + set("${_find_module_upper}_FOUND" OFF) + if(_find_module_REQUIRED) + set(_find_module_message_type FATAL_ERROR) + else() + set(_find_module_message_type STATUS) + endif() + # Print a message explaining why the module wasn't found with the given constraints + set(_find_module_msg "Could not find Python module '${_find_module_MODULE}'") + if(_find_module_not_found) + string(JOIN " " _find_module_msg "${_find_module_msg}" "with:" ${_find_module_not_found}) + endif() + if(_find_module_insufficient_version) + string(JOIN " " _find_module_msg "${_find_module_msg}" "insufficient version with:" + ${_find_module_insufficient_version}) + endif() + message(${_find_module_message_type} "${_find_module_msg}") + else() + set("${_find_module_upper}_FOUND" ON) + # Print a quick summary of what we found with which versions: _find_module_versions + list(REMOVE_DUPLICATES _find_module_versions) + set(_find_module_msg "Found Python module '${_find_module_MODULE}'") + if(_find_module_versions) + string(JOIN " " _find_module_msg ${_find_module_msg} "with version(s):" + ${_find_module_versions}) endif() - mark_as_advanced(${module_upper}_LOCATION) - endif(NOT ${module_upper}_FOUND) + message(STATUS "${_find_module_msg}") + endif() endmacro(nrn_find_python_module) diff --git a/cmake_nrnconf.h.in b/cmake_nrnconf.h.in index e79d8e04d4..c70ddbfa85 100644 --- a/cmake_nrnconf.h.in +++ b/cmake_nrnconf.h.in @@ -13,41 +13,21 @@ */ #undef CRAY_STACKSEG_END -/* Define to 1 if using `alloca.c'. */ -#undef C_ALLOCA - /* if mac os x */ #undef DARWIN -/* Define to 1 if you have `alloca', as a function or macro. */ -#undef HAVE_ALLOCA - -/* Define to 1 if you have and it should be used (not on Ultrix). - */ -#undef HAVE_ALLOCA_H - /* Define to 1 if you have the `bcopy' function. */ #undef HAVE_BCOPY /* Define to 1 if you have the `bzero' function. */ #undef HAVE_BZERO -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -#undef HAVE_DIRENT_H - /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H -/* Define to 1 if you don't have `vprintf' but do have `_doprnt.' */ -#undef HAVE_DOPRNT - /* Define to 1 if you have the header file. */ #undef HAVE_EXECINFO_H -/* Define to 1 if you have the header file. */ -#undef HAVE_FCNTL_H - /* Define to 1 if you have the header file. */ #undef HAVE_FENV_H @@ -57,45 +37,18 @@ /* Define to 1 if you have the `feenableexcept' function. */ #undef HAVE_FEENABLEEXCEPT -/* Define to 1 if you have the header file. 
*/ -#undef HAVE_FLOAT_H - -/* Define to 1 if you have the `ftime' function. */ -#undef HAVE_FTIME - -/* Define to 1 if you have the `getcwd' function. */ -#undef HAVE_GETCWD - -/* Define to 1 if you have the `gethostname' function. */ -#undef HAVE_GETHOSTNAME - -/* Define to 1 if you have the `getpw' function. */ -#undef HAVE_GETPW - /* Define to 1 if you have the `gettimeofday' function. */ #undef HAVE_GETTIMEOFDAY /* Define to 1 if you have the `index' function. */ #undef HAVE_INDEX -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - /* Define to 1 if you have the `isatty' function. */ #undef HAVE_ISATTY /* define if using InterViews */ #undef HAVE_IV -/* Define to 1 if you have the header file. */ -#undef HAVE_LIMITS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_LOCALE_H - -/* Define to 1 if you have the `lockf' function. */ -#undef HAVE_LOCKF - /* Define to 1 if you have the `mallinfo' function. */ #undef HAVE_MALLINFO @@ -105,42 +58,21 @@ /* Define to 1 if you have the header file. */ #undef HAVE_MALLOC_H -/* Define to 1 if you have the header file. */ -#undef HAVE_MATH_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the `mkdir' function. */ -#undef HAVE_MKDIR - /* Define to 1 if you have the `mkstemp' function. */ #undef HAVE_MKSTEMP -/* define if the compiler implements namespaces */ -#undef HAVE_NAMESPACES - -/* Define to 1 if you have the header file, and it defines `DIR'. */ -#undef HAVE_NDIR_H - /* Define to 1 if you have the `posix_memalign' function. */ #undef HAVE_POSIX_MEMALIGN /* Define to 1 if you have the `realpath' function. */ #undef HAVE_REALPATH -/* Define to 1 if you have the `select' function. */ -#undef HAVE_SELECT - /* Define to 1 if you have the `setenv' function. */ #undef HAVE_SETENV /* Define to 1 if you have the `setitimer' function. */ #undef HAVE_SETITIMER -/* Define to 1 if you have the header file. */ -#undef HAVE_SGTTY_H - /* Define to 1 if you have the `sigaction' function. */ #undef HAVE_SIGACTION @@ -150,101 +82,15 @@ /* (Define if this signal exists) */ #undef HAVE_SIGBUS -/* (Define if this signal exists) */ -#undef HAVE_SIGSEGV - -/* define if the compiler has stringstream */ -#undef HAVE_SSTREAM - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDARG_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the `strdup' function. */ -#undef HAVE_STRDUP - -/* Define to 1 if you have the header file. */ -#undef HAVE_STREAM_H - /* Define to 1 if you have the header file. */ #undef HAVE_STRINGS_H -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STROPTS_H - -/* Define to 1 if you have the `strstr' function. */ -#undef HAVE_STRSTR - -/* Define to 1 if you have the `stty' function. */ -#undef HAVE_STTY - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_CONF_H - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -#undef HAVE_SYS_DIR_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_FILE_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_IOCTL_H - -/* Define to 1 if you have the header file, and it defines `DIR'. - */ -#undef HAVE_SYS_NDIR_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. 
*/ -#undef HAVE_SYS_TIME_H - /* Define to 1 if you have the header file. */ #undef HAVE_SYS_TYPES_H -/* Define to 1 if you have that is POSIX.1 compatible. */ -#undef HAVE_SYS_WAIT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_TERMIO_H - /* Define to 1 if you have the header file. */ #undef HAVE_UNISTD_H -/* Define to 1 if you have the header file. */ -#undef HAVE_VARARGS_H - -/* Define to 1 if you have the `vprintf' function. */ -#undef HAVE_VPRINTF - -/* Define to 1 if you have the <_G_config.h> header file. */ -#undef HAVE__G_CONFIG_H - -/* define if can declare inline float abs(float) */ -#undef INLINE_FLOAT_ABS - -/* define if can declare inline long abs(long) */ -#undef INLINE_LONG_ABS - -/* undefined or ::fabs or std::fabs */ -#undef IVOS_FABS - -/* Define to the sub-directory where libtool stores uninstalled libraries. */ -#undef LT_OBJDIR - -/* 1 for legacy, undef for NIST (as of 2017), for FARADAY and R */ -#undef LegacyFR - /* define if using mingw */ #undef MINGW @@ -257,9 +103,6 @@ /* host triplet */ #undef NRNHOST -/* cpu type consistent with nrnivmodl */ -#undef NRNHOSTCPU - /* if 1 then dlopen nrnmech instead of special */ #undef NRNMECH_DLL_STYLE @@ -361,8 +204,13 @@ * mesch (and maybe others) */ #if defined(__cplusplus) +#include #include namespace neuron::config { +#ifdef USE_PYTHON + constexpr std::string_view default_python_executable{R"(@NRN_DEFAULT_PYTHON_EXECUTABLE@)"}; + constexpr std::array supported_python_versions{@NRN_DYNAMIC_PYTHON_LIST_OF_VERSION_STRINGS@}; +#endif constexpr std::string_view shared_library_prefix{"@CMAKE_SHARED_LIBRARY_PREFIX@"}; constexpr std::string_view shared_library_suffix{"@CMAKE_SHARED_LIBRARY_SUFFIX@"}; constexpr std::string_view system_processor{"@CMAKE_SYSTEM_PROCESSOR@"}; diff --git a/codecov.yml b/codecov.yml index a4f26e18e8..bfdc9877d9 100644 --- a/codecov.yml +++ b/codecov.yml @@ -1,4 +1,8 @@ -codecov: - require_ci_to_pass: false - notify: - wait_for_ci: false +coverage: + status: + project: + default: + informational: true + patch: + default: + informational: true diff --git a/docs/README.md b/docs/README.md index c6e5081ddf..4e9f479312 100644 --- a/docs/README.md +++ b/docs/README.md @@ -12,23 +12,37 @@ Contents: ### Local build -#### Setup +#### Virtual environment It is recommended to use a Python virtual environment, for example: -``` +```bash python3 -m venv venv source venv/bin/activate ``` + In order to build documentation locally, you need to pip install the ``docs_requirements.txt`` : ``` pip3 install -r docs/docs_requirements.txt --upgrade ``` Also, make sure to have `Doxygen` and `pandoc` installed, and the dependencies listed in [conda_environment.yml](conda_environment.yml) -Note that this file is tailored to the ReadTheDocs setup, but lists all desired requirements. +Note that this conda environment file is tailored to the online ReadTheDocs setup (but it lists out all desired requirements, so make sure to check it out). + +#### Anaconda environment + +After installing Anaconda, create a new environment with the following command: + +```bash +conda env create --quiet --name rtd --file docs/conda_environment.yml +conda activate rtd +``` + +This will install all dependencies needed to build the documentation locally, in a similar way as on ReadTheDocs. However ReadTheDocs has a different setup, so it is of interest to head over and check the build logs for additional information. 
+ +#### Configuring the build -With all dependencies installed, configure project with CMake as described in [CMake Build Options](cmake_doc/options.rst#nrn_enable_docsbooloff). +With all dependencies installed, configure the project with CMake (>= v3.17) as described in [CMake Build Options](./cmake_doc/options.rst#nrn-enable-docs-bool-off). e.g. in your CMake build folder: @@ -57,7 +71,7 @@ make sphinx When working locally on documentation, depending on what you work on, be aware of the following targets to speed up the building process: -* `doxygen` - build the API documentation only. Ends up in [_generated](_generated) +* `doxygen` - build the API documentation only. Ends up in the ``_generated`` folder under ``docs``. * `notebooks` - execute & embed outputs in-place into jupyter notebooks, see [notebooks.sh](notebooks.sh) * `notebooks-clean` - clears outputs from notebooks. Remember that executing notebooks will add outputs in-place, and we don't want those committed to the repo. * `sphinx` - build Sphinx documentation diff --git a/docs/changelog.md b/docs/changelog.md index abc5ee7381..ab3b1002c5 100644 --- a/docs/changelog.md +++ b/docs/changelog.md @@ -1,5 +1,25 @@ # NEURON 8.2 +## 8.2.3 +_Release Date_ : 15-09-2023 + +### What's New + +- The primary purpose of 8.2.3 is to fix the HOC cursor control problems of + the wheel and Windows installed versions. +- Many fragments from current master to allow building of installers + with current compiler toolchains and GitHub Actions. + + +### Bug Fixes + +- Fix MacOS/Linux wheel HOC backspace. +- Fix Windows HOC cursor issues. +- Fix Windows 11 HOC icon. +- Fix Windows and MacOS segfault on multiline HOC statements input from terminal. +- Fix build issues with current compiler toolchains and GitHub Actions. +- Deal with .inputrc if missing on Windows. + ## 8.2.2 _Release Date_ : 15-12-2022 diff --git a/docs/cmake_doc/options.rst b/docs/cmake_doc/options.rst index 8183a0d738..1d9bfba9e6 100644 --- a/docs/cmake_doc/options.rst +++ b/docs/cmake_doc/options.rst @@ -1,25 +1,72 @@ Introduction ============ -The NEURON build system now uses cmake as of version 7.8 circa Nov 2019. +The NEURON build system now uses CMake as of version 7.8 circa Nov 2019. The previous autotools (./configure) build system has been removed after 8.0 release. +The NEURON simulator as well as Interviews, CoreNEURON and NMODL can be installed +together using the following instructions: + +1. Clone the latest version or specific release: + .. code-block:: shell - git clone https://github.com/neuronsimulator/nrn nrn - cd nrn - mkdir build - cd build - cmake .. # default install to /usr/local - make -j - sudo make -j install + git clone https://github.com/neuronsimulator/nrn # latest development branch + git clone https://github.com/neuronsimulator/nrn -b 8.2.3 # specific release version 8.2.3 + cd nrn + +.. -The ``-j`` option to make invokes a parallel make using all available cores. -This is often very much faster than a single process make. One can add a number -after the ``-j`` (e.g. ``make -j 6``) to specify the maximum number of processes -to use. This can be useful if there is the possibility of running out of memory. + .. warning:: To build NEURON from source you either need to clone the + NEURON Git repository or download a source code archive that includes + Git submodules, such as the ``nrn-full-src-package-X.Y.Z.tar.gz`` file in + the `NEURON + releases `__ on + GitHub. The tarballs like ``Source code (tar.gz)`` or + ``Source code (zip)`` created by GitHub are incomplete.
-The make targets that are made available by cmake can be listed with +2. Create a build directory: + +.. code-block:: shell + + mkdir build + cd build + +3. Run ``cmake`` with the appropriate options (see below for a list of + common options). A full list of options can be found in + ``nrn/CMakeLists.txt`` and defaults are shown in + ``nrn/cmake/BuildOptionDefaults.cmake``. e.g. a bare-bones + installation: + +.. code-block:: shell + + cmake .. \ + -DNRN_ENABLE_INTERVIEWS=OFF \ + -DNRN_ENABLE_MPI=OFF \ + -DNRN_ENABLE_RX3D=OFF \ + -DPYTHON_EXECUTABLE=$(which python3) \ + -DCMAKE_INSTALL_PREFIX=/path/to/install/directory + +4. Build the code: + +.. code-block:: shell + + cmake --build . --parallel 8 --target install + +Feel free to set the number of parallel jobs (e.g. 8) according to your +system using the ``--parallel`` option. + + .. warning:: When ``NEURON`` is installed with the ``CoreNEURON`` option enabled, ``NMODL`` is also installed together with its Python bindings, which considerably increase the compilation complexity and memory requirements. It is therefore recommended either to disable this option with the ``CMake`` option ``-DNMODL_ENABLE_PYTHON_BINDINGS=OFF`` if the Python bindings are not needed, or to restrict the number of parallel build jobs via the ``--parallel`` option, e.g. on a machine with 8 threads use ``cmake --build . --parallel 4``. + +5. Set PATH and PYTHONPATH environment variables to use the + installation: + +.. code-block:: shell + + export PATH=/path/to/install/directory/bin:$PATH + export PYTHONPATH=/path/to/install/directory/lib/python:$PYTHONPATH + +The make targets that are made available by CMake can be listed with .. code-block:: shell @@ -190,8 +237,8 @@ IV_ENABLE_X11_DYNAMIC_MAKE_HEADERS:BOOL=OFF If it is ever necessary to remake the X11 dynamic .h files, I will do so and push them to the https://github.com/neuronsimulator/iv repository. -MPI options: -============ +MPI options +=========== NRN_ENABLE_MPI:BOOL=ON ---------------------- @@ -248,14 +295,15 @@ NRN_ENABLE_MUSIC:BOOL=OFF only for binary distributions of NEURON (e.g. wheels) where NEURON may be installed and used prior to installing music.) -Python options: -=============== +Python options +============== NRN_ENABLE_PYTHON:BOOL=ON ------------------------- Enable Python interpreter support (default python, fallback to python3, but see PYTHON_EXECUTABLE below) +.. _cmake_nrn_enable_python_dynamic: NRN_ENABLE_PYTHON_DYNAMIC:BOOL=OFF ---------------------------------- Enable dynamic Python version support @@ -263,21 +311,31 @@ NRN_ENABLE_PYTHON_DYNAMIC:BOOL=OFF This is mostly useful for binary distributions where it is unknown which version, if any, of python exists on the target machine. +.. _cmake_nrn_python_dynamic: NRN_PYTHON_DYNAMIC:STRING= -------------------------- - semicolon (;) separated list of python executables to create interfaces. (default python3) + Semicolon (;) separated list of Python executables to build support for. - If the string is empty use the python specified by PYTHON_EXECUTABLE - or else the default python. Binary distributions often specify a list - of python versions so that if any one of them is available on the - target machine, NEURON + Python will be fully functional. Eg. the - mac package build script on my machine, nrn/bldnrnmacpkgcmake.sh uses + If the string is empty, use the python specified by ``PYTHON_EXECUTABLE``, + or else the default python (``python3`` in the ``$PATH``).
+ Binary distributions often specify a list of python versions so that if any + one of them is available on the target machine, NEURON + Python will be fully + functional. + You must specify exactly one executable for each minor version of Python that + you would like to support. + For example: .. code-block:: shell -DNRN_PYTHON_DYNAMIC="python3.8;python3.9;python3.10;python3.11" - This option is ignored unless NRN_ENABLE_PYTHON_DYNAMIC=ON + The first entry in the list is considered to be the default version, followed + by alternatives in decreasing order of preference. + The default version is used to execute build scripts, and many tests are only + executed using this version. + + This option is ignored unless ``NRN_ENABLE_PYTHON_DYNAMIC=ON``, in which case + ``PYTHON_EXECUTABLE`` is ignored. PYTHON_EXECUTABLE:PATH= ----------------------- @@ -315,8 +373,8 @@ NRN_RX3D_OPT_LEVEL:STRING=0 -DNRN_RX3D_OPT_LEVEL=2 -CoreNEURON options: -=================== +CoreNEURON options +================== NRN_ENABLE_CORENEURON:BOOL=OFF ------------------------------ @@ -337,10 +395,23 @@ NRN_ENABLE_MOD_COMPATIBILITY:BOOL=OFF Other CoreNEURON options: ------------------------- There are 20 or so cmake arguments specific to a CoreNEURON - build that are listed in https://github.com/BlueBrain/CoreNeuron/blob/master/CMakeLists.txt. + build that are listed in https://github.com/neuronsimulator/nrn/blob/master/src/coreneuron/CMakeLists.txt. The ones of particular interest that can be used on the NEURON CMake configure line are `CORENRN_ENABLE_NMODL` and `CORENRN_ENABLE_GPU`. +NMODL options +============= + +All of the NMODL CMake options are listed in https://github.com/BlueBrain/nmodl/blob/master/CMakeLists.txt. + +NMODL_ENABLE_PYTHON_BINDINGS:BOOL=ON +------------------------------------ + Enable pybind11-based Python bindings + + With this option enabled, the NMODL Python package can be used to drive NMODL from Python. For more information, see + the NMODL documentation at https://bluebrain.github.io/nmodl/html/notebooks/nmodl-python-tutorial.html. + + Occasionally useful advanced options: ===================================== @@ -508,12 +579,29 @@ NRN_COVERAGE_FILES:STRING= ``-DNRN_COVERAGE_FILES="src/nrniv/partrans.cpp;src/nmodl/parsact.cpp;src/nrnpython/nrnpy_hoc.cpp"`` + For a list of all the cpp files changed in a pull request, consider + copy/pasting the ``;`` separated list obtained with + + .. code-block:: shell + + a=`git diff --name-only master | grep '\.cpp'` + echo $a | sed 's/ /;/g' + + NRN_SANITIZERS:STRING= ---------------------- - Enable some combination of AddressSanitizer, LeakSanitizer and - UndefinedBehaviorSanitizer. Accepts a comma-separated list of ``address``, - ``leak`` and ``undefined``. See the "Diagnosis and Debugging" section for more - information. + Enable some combination of AddressSanitizer, LeakSanitizer, ThreadSanitizer + and UndefinedBehaviorSanitizer. Accepts a comma-separated list of ``address``, + ``leak``, ``thread`` and ``undefined``. + See the "Diagnosis and Debugging" section for more information. + Note that on macOS it can be a little intricate to combine + ``-DNRN_SANITIZERS=address`` with the use of Python virtual environments; if + you attempt this then the CMake code should recommend a solution. + + Note: the ``address`` sanitizer also prints leak information when a + launch exits.
That can be avoided with + + ``export ASAN_OPTIONS=detect_leaks=0`` Miscellaneous Rarely used options specific to NEURON: ===================================================== @@ -523,20 +611,6 @@ NRN_ENABLE_DISCRETE_EVENT_OBSERVER:BOOL=ON Enable Observer to be a subclass of DiscreteEvent Can save space but a lot of component destruction may not notify other components that are watching it to no longer use that component. Useful only if one builds a model without needing to eliminate pieces of the model. -NRN_DYNAMIC_UNITS_USE_LEGACY:BOOL=OFF ----------------------------- - Default is to use modern faraday, R, etc. from 2019 nist constants. - When Off or ON, and in the absence of the ``NRNUNIT_USE_LEGACY=0or1`` - environment variable, the default dynamic value of ``h.nrnunit_use_legacy()`` - will be 0 or 1 respectively. - - At launch time (or import neuron), - use of legacy or modern units can be specified with the - ``NRNUNIT_USE_LEGACY=0or1`` environment variable. The use of legacy or - modern units can be dynamically specified after launch with the - ``h.nrnunit_use_legacy(0or1)`` function (with no args, returns the - current use flag). - NRN_ENABLE_MECH_DLL_STYLE:BOOL=ON --------------------------------- Dynamically load nrnmech shared library @@ -572,3 +646,31 @@ NRN_ENABLE_BACKTRACE:BOOL=OFF Does not work with python. Note: floating exceptions are turned on with :func:`nrn_feenableexcept`. + +NRN_LINK_AGAINST_PYTHON:BOOL=OFF +-------------------------------- + When ``NRN_ENABLE_PYTHON_DYNAMIC=ON`` then link the NEURON-Python interface + libraries ``libnrnpythonX.Y.so`` against the corresponding Python library + that was found at configuration time (``libpythonX.Y.so``). + This is enabled by default on Windows, but is not generally needed on macOS + and Linux, where the Python library is found and loaded dynamically at + runtime. + +NRN_PYTHON_EXTRA_FOR_TESTS:STRING= +---------------------------------- + Semicolon (;) separated list of Python executables that NEURON is **not** + built with support for, for use in tests of error messages and reporting. + For these purposes, minor versions (3.X and 3.Y) are considered different + and patch versions (3.8.X and 3.8.Y) are considered to be the same. + +NRN_ENABLE_MATH_OPT:BOOL=OFF +------------------------------------- + Enable extra math optimisations. + + When using compilers like GCC and Clang, one needs to explicitly use compiler + flags like `-funsafe-math-optimizations` in order to generate SIMD/vectorised + code using vector math library. This flag adds these extra compiler flags + to enable SIMD code. + + Note: Compilers like Intel, NVHPC, Cray etc enable such optimisations + by default. 
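+
+  For example (an illustrative configure line; the install path is a placeholder), the
+  option is passed like any other CMake flag:
+
+  .. code-block:: shell
+
+     cmake .. -DNRN_ENABLE_MATH_OPT=ON -DCMAKE_INSTALL_PREFIX=/path/to/install/directory
+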
diff --git a/docs/conda_environment.yml b/docs/conda_environment.yml index 735d980278..d13bdc8775 100644 --- a/docs/conda_environment.yml +++ b/docs/conda_environment.yml @@ -3,13 +3,15 @@ channels: - conda-forge - defaults dependencies: - - python=3.10 + - packaging=21.3 + - python=3.11 - bison - cmake - xorg-libxcomposite - ffmpeg - - cython + - cython<3 - pandoc + - mpich - pip - pip: - -r docs_requirements.txt diff --git a/docs/conf.py b/docs/conf.py index c9e1f5c8ae..cd71853651 100644 --- a/docs/conf.py +++ b/docs/conf.py @@ -36,7 +36,7 @@ "sphinx.ext.autodoc", "sphinx.ext.autosummary", "sphinx.ext.autosectionlabel", - "recommonmark", + "myst_parser", "sphinx.ext.mathjax", "nbsphinx", "sphinx_design", @@ -57,6 +57,8 @@ def setup(app): hocdomain.setup(app) +myst_heading_anchors = 3 + # Add any paths that contain templates here, relative to this directory. templates_path = ["_templates"] diff --git a/docs/coreneuron/_static/bluebrain_coreneuron.jpg b/docs/coreneuron/_static/bluebrain_coreneuron.jpg new file mode 100644 index 0000000000..e169cb1e02 Binary files /dev/null and b/docs/coreneuron/_static/bluebrain_coreneuron.jpg differ diff --git a/docs/coreneuron/index.rst b/docs/coreneuron/index.rst index 53757ca533..6ec92f6ae1 100644 --- a/docs/coreneuron/index.rst +++ b/docs/coreneuron/index.rst @@ -1,7 +1,7 @@ CoreNEURON ########## -`CoreNEURON `_ is a compute engine for the NEURON simulator optimised for both memory usage and computational speed on modern CPU/GPU architectures. +`CoreNEURON `_ is a compute engine for the NEURON simulator optimised for both memory usage and computational speed on modern CPU/GPU architectures. Some of its key goals are to: * Efficiently simulate large network models diff --git a/docs/coreneuron/installation.rst b/docs/coreneuron/installation.rst index 961484dcfe..0eb00f3fc4 100644 --- a/docs/coreneuron/installation.rst +++ b/docs/coreneuron/installation.rst @@ -6,20 +6,25 @@ CoreNEURON is integrated into the NEURON repository, and it is straightforward t Starting with version 8.1, NEURON also provides Python wheels that include CoreNEURON and, optionally, GPU support. These binary distributions are described in :ref:`Installing Binary Distribution`. -.. warning:: - These wheels are not yet released, and must currently be installed using ``pip install neuron-nightly`` and/or ``pip install neuron-gpu-nightly``. - Installing with ``pip`` *********************** -This should be as simple as ``pip install neuron-nightly``. +This should be as simple as ``pip install neuron``, for the latest +release, or ``pip install neuron-nightly`` to install a snapshot of the +development branch. You may want to use ``virtualenv`` to manage your Python package installations. -If you want to use the GPU-enabled wheel then you should run ``pip install neuron-gpu-nightly``. -This binary wheel does not include all the NVIDIA dependencies that are required to build and execute GPU code, so you should install the `NVIDIA HPC SDK `_ on your machine. - .. warning:: - It is safest to use the same version of the HPC SDK as was used to build the binary wheels. - This is currently defined `in this file `_ in the NEURON repository. + + Between versions 8.1 and 8.2.2 a GPU-enabled wheel was available via ``pip install neuron-gpu`` + and ``pip install neuron-gpu-nightly``. + Due to ease-of-use and maintainability concerns, this has been removed in `#2378 + `_ until person-power is available to pursue a + more robust solution. 
+ Using the released GPU wheels with custom mechanism files (``nrnivmodl``) requires that you have + the same version of the `NVIDIA HPC SDK `_ installed on + your system as was used to build the wheels. + For example, in 8.2.2 this was version 22.1, as can be seen `in this file + `_. Installing from source @@ -36,7 +41,14 @@ Compiler Selection ================== CoreNEURON relies on compiler `auto-vectorisation `_ to achieve better performance on modern CPUs. With this release we recommend compilers from **Intel**, **Cray** and, for GPU support, **NVIDIA** (formerly **PGI**). -These compilers are able to vectorise the code better than **GCC** or **Clang**, achieving the best possible performance gains. +These compilers are often able to vectorise the code better than +**GCC** or **Clang**, achieving the best possible performance gains. + +.. note:: + To benefit from auto-vectorisation it is important to ensure that + your compiler flags allow the compiler to assume that vector CPU + instructions are available. See the discussion of compiler flags + below. Computer clusters will typically provide the Intel and/or Cray compilers as modules. You can also install the Intel compiler by downloading the `oneAPI HPC Toolkit `_. @@ -128,18 +140,23 @@ For example, -DCMAKE_CXX_COMPILER=nvc++ .. note:: - ``nvcc`` is provided both by the NVIDIA HPC SDK and by CUDA toolkit + ``nvcc`` is provided both by the NVIDIA HPC SDK and by CUDA toolkit installations, which can lead to fragile and surprising behaviour. See, for example, `this issue `_. On some systems it is necessary to load the ``nvhpc`` module before the ``cuda`` module, thereby ensuring that ``nvcc`` comes from a CUDA toolkit installation, but your mileage may vary. -By default the GPU code will be compiled for NVIDIA devices with compute capability 7.0 or 8.0. -This can be steered by passing, for example, ``-DCMAKE_CUDA_ARCHITECTURES:STRING=60;70;80`` to CMake. +By default the GPU code will be compiled for NVIDIA devices with +compute capability 7.0 (Volta) or 8.0 (Ampere). +This can be steered by passing, for example, +``-DCMAKE_CUDA_ARCHITECTURES:STRING=60;70;80`` to CMake. -You can change C/C++ optimisation flags using the ``-DCMAKE_C_FLAGS``, ``-DCMAKE_CUDA_FLAGS`` and ``-DCMAKE_CXX_FLAGS`` options. -To make sure your custom flags are not modified, you should also set ``-DCMAKE_BUILD_TYPE=Custom``, for example: +You can change C/C++ optimisation flags using the ``-DCMAKE_C_FLAGS``, +``-DCMAKE_CUDA_FLAGS`` and ``-DCMAKE_CXX_FLAGS`` options. +These will be appended to the default flags for the CMake build type. +If you need to override the default flags, you can also set +``-DCMAKE_BUILD_TYPE=Custom``, for example: .. code-block:: @@ -151,6 +168,44 @@ To make sure your custom flags are not modified, you should also set ``-DCMAKE_B .. warning:: If the CMake command fails, make sure to delete temporary CMake cache files (``CMakeCache.txt`` and ``CMakeFiles``, or the entire build directory) before re-running CMake. +To enable support for the vector instructions available on modern CPUs +and auto-vectorisation optimisations, you may need to pass additional +flags to your compiler. + +For compilers that accept GCC-like options, this often involves setting +the ``-march`` and ``-mtune`` options. +Other compilers may vary. 
+If you are building on the same machine that you will be running NEURON +on, you may be able to use ``-march=native`` and ``-mtune=native``, in +which case many compilers will detect the CPU features that are +available on the machine that is compiling NEURON. +Alternatively, you may need to set this explicitly, for example: +``-march=skylake-avx512 -mtune=skylake-avx512``. +Note that compute clusters may contain a mix of CPU types. + +Please also note the following observations about different compilers, +but ultimately refer to the documentation of the compiler version that +you are using: + +* The handling of ``-march=native`` in GCC `can be surprising `_. + +* The NVIDIA HPC compiler ``nvc++`` uses the equivalent of + ``-march=native`` by default + (`nvc++ documentation `_). + +* The Intel C++ compilers ``icpc`` and ``icpx`` support an ``-x`` + option that enables even more specialised optimisations for Intel + CPUs + (`icpc documentation `_, + `icpx documentation `_); + this has been seen to give modest performance improvements when using + the ``mod2c``, but not ``NMODL``, transpiler. + +.. warning:: + If you tell the compiler to target a more modern CPU than you have + available, your NEURON installation may crash with illegal + instruction errors and/or ``SIGILL`` signals. + Once the configure step is done, you can build and install the project by running .. code-block:: diff --git a/docs/coreneuron/running-a-simulation.rst b/docs/coreneuron/running-a-simulation.rst index 310cb2704d..19d006ba4c 100644 --- a/docs/coreneuron/running-a-simulation.rst +++ b/docs/coreneuron/running-a-simulation.rst @@ -28,21 +28,15 @@ that is linked to CoreNEURON (here ``x86_64`` is the architecture name of your system). If you see any compilation error then one of the MOD files might be incompatible with CoreNEURON. -In this case, you should first consult the :ref:`CoreNEURON compatibility` section, and if that does not provide a clear explanation then you should `open an issue `_ with an example of your MOD file. +In this case, you should first consult the :ref:`CoreNEURON compatibility` section, and if that does not provide a clear explanation then you should `open an issue `_ with an example of your MOD file. +.. _enabling_coreneuron: Enabling CoreNEURON ******************* With CoreNEURON, existing NEURON models can be run with minimal changes. For a given NEURON model, the following steps are usually required: -First, enable cache efficiency: - -.. code-block:: python - - from neuron import h - h.CVode().cache_efficient(1) - -Second, enable CoreNEURON: +First, enable CoreNEURON: .. code-block:: python diff --git a/docs/courses/electrotonic_analysis.rst b/docs/courses/electrotonic_analysis.rst index e63b052617..092b6c0078 100644 --- a/docs/courses/electrotonic_analysis.rst +++ b/docs/courses/electrotonic_analysis.rst @@ -51,7 +51,7 @@ NEURON's tools for electrotonic analysis are gathered into four different "style - :ref:`log(A) vs. X ` -- :ref:`Shape ` +- :ref:`Shape ` They are accessible through :menuselection:`NEURON Main Menu --> Tools --> Impedance`. In this exercise you will start to learn how to use each of them. To save screen space, close a tool when you are done with it. @@ -217,7 +217,7 @@ But synapses aren't voltage sources. They're much more like current sources. In
This is the phenomenon that David Jaffe and I call passive normalization : variation of somatic PSP amplitude with synaptic distance is reduced ("normalization"), and it doesn't require active currents to happen ("passive"). For more information, see our :ref:`paper `. -.. _shape: +.. _electrotonic_shape_tool: The Shape Tool ++++++++++++++ diff --git a/docs/courses/intro_to_gui.rst b/docs/courses/intro_to_gui.rst index e43a0a8e80..756550ae28 100644 --- a/docs/courses/intro_to_gui.rst +++ b/docs/courses/intro_to_gui.rst @@ -222,7 +222,7 @@ Halve the duration and double the amplitude. Change the number of Points plotted/ms to 100 and dt to 0.01 ms and run again. 5. - **Increase the amplitude to 1e4 nA, cut the duration to 1e-5 ms, inrease Points plotted/ms to 1e5, and start a simulation...** + **Increase the amplitude to 1e4 nA, cut the duration to 1e-5 ms, increase Points plotted/ms to 1e5, and start a simulation...** After a few seconds of eternity, stop the simulation by clicking on RunControl / stop @@ -242,7 +242,7 @@ Halve the duration and double the amplitude. :download:`bilayer.hoc ` contains a properly configured CellBuilder, plus a custom interface for running simulations. The one item it doesn't have is a VariableStepControl. - :download:`bilayer.hoc ` is actually a session file that was given the "hoc" extension so that MSWin users could launch it by double clickin on the file name. For didactic reasons we prefer that you load it from Python instead. + :download:`bilayer.hoc ` is actually a session file that was given the "hoc" extension so that MSWin users could launch it by double clicking on the file name. For didactic reasons we prefer that you load it from Python instead. 1. Save :download:`bilayer.hoc ` to your machine, open up in a terminal, then cd to the directory that contains bilayer.hoc diff --git a/docs/courses/multithread_parallelization.rst b/docs/courses/multithread_parallelization.rst index 7b72ba9b00..0a30710548 100644 --- a/docs/courses/multithread_parallelization.rst +++ b/docs/courses/multithread_parallelization.rst @@ -1,6 +1,6 @@ .. _multithread_parallelization: -Multithreaded paralellization +Multithreaded parallelization ============================= If a model has more than a few thousand states, it may run faster with multiple threads. diff --git a/docs/dealing_simulations_generate_lot.rst b/docs/dealing_simulations_generate_lot.rst index ec44cc9069..8f974beb63 100644 --- a/docs/dealing_simulations_generate_lot.rst +++ b/docs/dealing_simulations_generate_lot.rst @@ -12,4 +12,4 @@ The answer to your problem is to break the simulation into shorter segments, eac Comments and suggestions ------------- -As always, these examples illustrate modular code organization, and iterative refinement and testing of code, both of which are useful strategies for productive programming. If you run into keywords that are new to you, or unfamilar usage of familiar keywords, be sure to look them up in the :ref:`programmer's reference `. \ No newline at end of file +As always, these examples illustrate modular code organization, and iterative refinement and testing of code, both of which are useful strategies for productive programming. If you run into keywords that are new to you, or unfamiliar usage of familiar keywords, be sure to look them up in the :ref:`programmer's reference `. 
\ No newline at end of file diff --git a/docs/dev/HOCInterpreter/HOCInterpreter.md b/docs/dev/HOCInterpreter/HOCInterpreter.md index 2df729672d..72a17665a2 100644 --- a/docs/dev/HOCInterpreter/HOCInterpreter.md +++ b/docs/dev/HOCInterpreter/HOCInterpreter.md @@ -4,8 +4,8 @@ - [**Source Code**](#source-code) - [**HOC Grammar**](#hoc-grammar) - [**HOC Initialization**](#hoc-initialization) - - [****HOC Interpreter - executing the machine****](#hoc-interpreter-executing-the-machine) - - [****HOC Interpreter example - printf****](#hoc-interpreter-example-printf) + - [****HOC Interpreter - executing the machine****](#hoc-interpreter---executing-the-machine) + - [****HOC Interpreter example - printf****](#hoc-interpreter-example---printf) --- diff --git a/docs/dev/circuit_creation.svg b/docs/dev/circuit_creation.svg new file mode 100644 index 0000000000..494e050d8b --- /dev/null +++ b/docs/dev/circuit_creation.svg @@ -0,0 +1,3 @@ + + +add_section()
Circuit creation
Circuit cr...
new_sections()
new_section()
This function sets "tree_changed = 1;" which is used for shuffling sections later
It also calls "sec_alloc()" which allocates a `SectionPool`
It also allocates the `Node`s by calling cable_prop_assign()->nrn_change_nseg()->node_alloc():
there is one Node per segment
new_section()...
Segments can also be connected/disconnected with various functions that set "tree_changed = 1;"
 nrn_disconnect(): called by functions that disconnect the section
connectsec_impl(): connection between two sections -> "tree_changed = 1"
nrn_change_nseg(): change the number of segments in section. It also sets `tree_changed = 1;`, `diam_changed = 1;` and `sec->recalc_area_ = 1;`
Segments can also be connected/disconnected with various functions that set "tree_changed = 1;"...
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/dev/data-structures.rst b/docs/dev/data-structures.rst new file mode 100644 index 0000000000..b5406372ff --- /dev/null +++ b/docs/dev/data-structures.rst @@ -0,0 +1,956 @@ +.. include:: ../rst_substitutions.txt + +Data structures +############### +This section provides higher-level documentation of the data structures used in NEURON, +complementing the lower-level Doxygen documentation of the +`neuron::container:: <../doxygen/namespaceneuron_1_1container.html>`_ namespace. + +|neuron_with_soa_data| contains substantial changes to the organisation of model data, of which the +most notable is a transposition from an array-of-structs (AoS) layout to a struct-of-arrays (SoA) +layout, following the model of CoreNEURON. +These changes were introduced in GitHub pull request +`#2027 `_. + +As well as adopting an SoA layout, this work also introduces new "**data handle**" types that +enable persistent references to elements in the data structures, which automatically remain valid +even when the underlying storage arrays are re-allocated, or their elements are reordered +(permuted). + +The basic idea is to allow us to refer to logical elements of a NEURON model (*e.g.* "the Node at +the centre of this Section") via struct-like "handle" objects that abstract away both the size of +the underlying arrays containing the Node data, and the current index of a given (logical) Node in +those arrays. + +Performance-sensitive code, such as the Node matrix solver algorithm and "current" and "state" +functions that are generated from MOD files, can operate directly on the underlying array storage, +taking advantage of improved cache efficiency and (in some cases) vectorisation, without paying for +the relatively slow indirection inherent to the "**data handle**" and "**Node handle**" types +introduced above. + +Overview +-------- +|neuron_with_soa_data|'s SoA data structures are based on the +`neuron::container::soa <../doxygen/structneuron_1_1container_1_1soa.html>`_ variadic template +class. +Here is an example of its use: + +.. image:: soa-architecture.svg + +This defines an SoA data structure (``ab_store``) with two **data** arrays for variables +imaginatively named ``A`` (red) and ``B`` (blue). +There is an implicit extra "index" array (purple) that is needed for the implementation of the +"handle" types introduced above; no matter how many variables are added to the structure, there is +still just one index array. + +Let's unpick this example a little more, starting with the definition of our ``ab_store`` type: + +.. code-block:: c++ + + struct ab_store: soa<ab_store, field::A, field::B> {}; + +The ``neuron::container::soa<...>`` template uses +`CRTP `_, which is why the +first template argument to ``soa<...>`` is the derived class name ``ab_store``; the reasons for +this are not important for a high-level overview and it can be ignored for the moment. + +All the remaining template arguments, ``field::A, field::B`` in this case, are **tag types** that +define the **fields** of our data structure. +A minimal definition of these would be something like: + +.. code-block:: c++ + + namespace field { + + struct A { + using type = double; + }; + + struct B { + using type = int; + }; + + } // namespace field + +Which would specify that the ``A`` values shown above (red ``a0``, ``a1``, *etc.*) are of type +``double``, while the ``B`` values (blue ``b0``, ``b1``, *etc.*) are of type ``int``.
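For contrast, a minimal sketch (illustrative only, not NEURON code) of the array-of-structs (AoS) layout that this design moves away from is shown below: each entry bundles its ``A`` and ``B`` values together, whereas the ``soa``-based container above keeps one contiguous array per field.

.. code-block:: c++

    #include <vector>

    // AoS sketch: one struct per logical entry, so a0 and b0 sit next to each
    // other in memory and the A values are strided rather than contiguous.
    struct ab_entry {
        double a;
        int b;
    };
    using ab_store_aos = std::vector<ab_entry>;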
+Certain additional functions and variables can be added to the tag types to control, for example, +pretty-printing of data handles, non-zero default values, and non-scalar fields. + +In essence, our struct ``ab_store``, if we wrote it out manually, would look something like: + +.. code-block:: c++ + + struct ab_store_manual { + std::vector<double> a_values; + std::vector<int> b_values; + std::vector<unsigned long*> indices; + }; + +As with any other ``std::vector``-based type, the currently allocated capacity and the current size +are different to one another; in the illustration above there are five elements (the size), but +there are two unused elements (``...``) in each array, so the capacity is seven. + +The index column has so far been glossed over, but you may have noticed that, for example, +the 0\ :sup:`th` entry is shown as "→ 0" in the figure above. +In essence, the index column type is "pointer to integer", and the pointed-to integers are kept up +to date so they always hold the **current** index into the storage array of a given logical entry. +Expressed as code, this means that: + +.. code-block:: c++ + + for (auto i = 0; i < indices.size(); ++i) { + assert(i == *indices[i]); + } + +should never trigger an error. + +.. note:: + + While a large part of the motivation for allowing the underlying arrays to be reordered is that + it allows explicit permutation of the data for performance reasons, it also permits other + optimisations. For example, deletion from NEURON's data structures is :math:`\mathcal{O}(1)`, + whereas erasing from a regular ``std::vector`` is :math:`\mathcal{O}(N)`. This is because the + index mechanism allows deletion to be implemented by swapping the last element of the array + into the deleted position and reducing the size by one. + +The data handle type described above essentially holds a pair of pointers: one that can be +dereferenced to get the address of the 0\ :sup:`th` entry in the data array, and one +pointer-to-integer taken from the index column: + +.. code-block:: c++ + + struct data_handle_double_manual { + double* const* ptr_to_array_base; + unsigned long* ptr_to_current_row; + double& get_value() { + return (*ptr_to_array_base)[*ptr_to_current_row]; + } + }; + +This is enough indirection that neither re-ordering nor re-allocating the actual data storage +invalidates any instances of ``data_handle_double_manual``. +The real type used in the NEURON code-base is the +`neuron::container::data_handle <../doxygen/structneuron_1_1container_1_1data__handle.html>`_ +template, *i.e.* we use ``data_handle<double>`` in place of ``data_handle_double_manual``. + +.. note:: + + You may wonder what happens when an entry is deleted from the data structures. In this case the + storage for the **data** of the deleted element (*i.e.* its ``a`` and ``b`` values) is released + and made available for re-use, but its entry in the index vector is not freed and the + pointed-to integer is updated with a sentinel value. This means that data handles that referred + to now-deleted entries (``a`` and ``b`` values) can detect that they are no longer valid and + will not return invalid values. + +Of course, this indirection also means that these data handles are not especially performant, but +in general they are intended to solve otherwise-tedious bookkeeping problems, and +performance-critical code is expected to operate directly on the underlying vectors.
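To make the performance trade-off concrete, here is a small sketch (the names are invented for illustration; this is not NEURON's actual API) contrasting access through a handle-like object with direct access to the underlying array:

.. code-block:: c++

    #include <cstddef>
    #include <vector>

    // Toy stand-in for the handle described above: two indirections per access.
    struct toy_handle {
        std::vector<double>* array;   // stays valid across re-allocation
        std::size_t* current_row;     // kept up to date across permutation
        double& value() const { return (*array)[*current_row]; }
    };

    // Bookkeeping code can afford the indirection of a handle.
    double read_via_handle(const toy_handle& h) {
        return h.value();
    }

    // Hot loops read the contiguous column directly: cache-friendly and vectorisable.
    double sum_column(const std::vector<double>& a_values) {
        double sum = 0.0;
        for (double a: a_values) {
            sum += a;
        }
        return sum;
    }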
+In other cases, such as ``POINTER`` variables in MOD files, data handles are used while the model +is being built in memory, but they are "flattened" into plain ``double*`` for use during the actual +simulation, where performance **is** important **and** it is known that no re-allocation or +re-ordering will occur that could invalidate those raw pointers. + +The "data handle" type just discussed is the right tool for the job if we want to refer to a single +value of a type that is known at compile time, but there are a few other types of "handle" that are +also supported: + +* `neuron::container::generic_data_handle <../doxygen/structneuron_1_1container_1_1generic__data__handle.html>`_ + is a type-erased version of ``neuron::container::data_handle``, similar to + `std::any `_. +* Handles to higher-level objects. For example, if the entity that has an "a" [side] and a "b" + [side] is a vinyl, we can also have "vinyl handles", which provide accessors ``a()`` and ``b()``. + These handles come in two flavours: + + * non-owning: like the "data handle" types above, these refer to an entry in the ``ab_store`` + container and provide access to both "a" and "b" [these are currently not used outside tests] + * owning: like non-owning handles, these refer to an entry in the ``ab_store`` container and + provide access to both "a" and "b" values. The key difference is that owning handles have + owning semantics: creating an owning handle appends a new entry to the underlying data arrays + and destroying an owning handle deletes that (owned) entry from the arrays. + +The following code snippet illustrates the use of owning handles: + +.. code-block:: c++ + + ab_store my_data{}; + data_handle<int> dh{}; + assert(!dh); // not pointing to a valid value + { + // my_data.size() == 0 + owning_vinyl_handle heroes{my_data}; + // now my_data.size() == 1 + heroes.a() = 19.817; // runtime in minutes + heroes.b() = 5; // track count + // higher-level handles-to-entities can produce lower-level handles-to-values + dh = heroes.b_handle(); + assert(dh); // now pointing to a valid value + assert(*dh == 5); + *dh = 6; // bonus track + assert(heroes.b() == 6); // `dh` and `heroes.b()` refer to the same value + } // `heroes` is destroyed when it goes out of scope at this closing brace + // my_data.size() == 0 again + assert(!dh); // pointed-to value is no longer valid + + + +Advanced field features +------------------------- +You should now be familiar with the key components of the new NEURON data structures; however, there +are -- of course -- some additional features that were not included in the overview above. + +In the example above, we used a simple tag type: + +.. code-block:: c++ + + struct A { + using type = double; + }; + +to define a scalar field. +There are several additional features in ``neuron::container::soa<...>`` that are not enabled in +the example above. +Additional features are generally enabled by using tag types that have +additional member functions and variables. There are three advanced tag types: +optional fields; array-valued fields, i.e. fields for which each row has +multiple values; and tags which contain multiple, possibly array-valued, +fields. All three are shown schematically in the figure below and described in +detail in the following subsections. In the figure, fields with the same color +must have the same scalar type. The zigzag line represents the memory layout +for array-valued fields. + +.. 
image:: soa-advanced-tag-types.svg + + +Runtime-variable field counts +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +In the introductory example, the ``ab_store`` structure has precisely two data fields: ``A`` and +``B``. +This can only be changed at compile time, by adding or removing tag types from the definition of +``ab_store``. +Sometimes, more flexibility is required: in this case, a member function named ``num_variables()`` +can be added to the relevant tag type, for example: + +.. code-block:: c++ + + namespace field { + + struct C { + C(std::size_t count) : m_count{count} {} + using type = float; + [[nodiscard]] int num_variables() const { return static_cast<int>(m_count); } + private: + std::size_t m_count; + }; + + } // namespace field + + struct some_cs: soa<some_cs, field::C> { + using base_type = soa<some_cs, field::C>; + some_cs(std::size_t count) : base_type{field::C{count}} {} + }; + +In this case, the ``some_cs`` struct will be similar to: + +.. code-block:: c++ + + struct some_cs_manual { + some_cs_manual(std::size_t count) { + c_data.resize(count); + } + std::vector<std::vector<float>> c_data; + std::vector<unsigned long*> indices; + }; + +and the "inner" vectors will all be the same size as each other, *i.e.* +``c_data[i].size() == indices.size()`` for ``i`` in ``0 .. count-1``. + +This involves slightly more indirection and runtime checking than the original example with +everything fixed at compile time. + +The canonical example of usage of this is the storage for ``RANGE`` variables in MOD files, where a +single tag type (``neuron::container::Mechanism::field::FloatingPoint``) with a ``num_variables()`` +method is used to generate one ``std::vector<double>`` per ``RANGE`` variable. + +Note that this also demonstrates how non-empty tag types can be used to hold extra information: the +``neuron::container::soa`` template has a constructor taking ``Tags...``, and it +stores an instance of each tag type internally that can be accessed via the ``get_tag()`` +member function. + +.. note:: + + If you use this mechanism, note that you must provide the ``soa<...>`` constructor with + instances of tag types in the same order as they appear in the declaration. Also, repeated tag + types are not allowed. + +Optional fields +^^^^^^^^^^^^^^^ +If it is known that the number of fields will be zero or one, *i.e.* the field is optional, then a +simplified tag type can be used: + +.. code-block:: c++ + + struct OptionalA { + static constexpr bool optional = true; + using type = double; + }; + +This defines a simple field like ``A`` above, with ``std::vector<double>`` backing storage, but it +can be toggled on and off at runtime using the ``set_field_status`` method, and its status can be +queried using the ``field_active`` method. + +The real-world use of this is the data for the fast membrane current calculation, which is only +filled if it has been enabled at runtime using :meth:`CVode.use_fast_imem`. + +Explicit default values +^^^^^^^^^^^^^^^^^^^^^^^ +If you do not explicitly specify otherwise, new values in the storage arrays -- produced by, as +shown above, creating owning handles -- will be zero-initialised. +If the relevant tag type has a member function called ``default_value()``, new values will be +initialised with the value returned by that function. + +.. code-block:: c++ + + struct D { + using type = double; + [[nodiscard]] constexpr type default_value() const { return 4.2; } + }; + +Real-world examples of this include Node voltages (which default to -65) and Node areas (which +default to 100).
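As a concrete illustration, a Node-voltage-like tag could be written as follows (a sketch only; the real tag type lives in the NEURON sources and may differ in detail):

.. code-block:: c++

    namespace field {

    // Sketch of a tag type whose newly created entries default to the resting potential.
    struct Voltage {
        using type = double;
        [[nodiscard]] constexpr type default_value() const { return -65.0; }
    };

    } // namespace field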
+ +Array variables +^^^^^^^^^^^^^^^ +In the examples shown so far, the fields have all had scalar types such as ``double`` and ``int``. +Sometimes, it is necessary to represent vector data with multiple contiguous values for each +instance of an entity. +If the vector size was known at compile time, this could in principle be done with a tag type along +the lines of: + +.. code-block:: c++ + + #include <array> + + struct E { + using type = std::array<float, 42>; + }; + +However, if the vector size is only known at runtime this would not work; a workaround along the +lines of: + +.. code-block:: c++ + + #include <vector> + + struct F { + using type = std::vector<float>; + }; + +would be ill-advised, as it would force a number of independent allocations that scales linearly in +the container size. + +Instead, we can signal to the ``neuron::container::soa`` helper that each time we add an instance +to the container we would like multiple values to be allocated: + +.. code-block:: c++ + + struct G { + using type = float; + [[nodiscard]] int array_dimension() const { + return 42; + } + }; + +gives essentially the same memory layout as the example using ``std::array``, but it has +the major advantage that ``42`` can be replaced with a value that is only known at runtime. + +The memory layout when ``array_dimension()`` returns two is shown below. +In the figure, the container ``size()`` is three. + +.. image:: soa-array-var-layout.svg + +To use ``array_dimension()`` in conjunction with ``num_variables()``, the signature has to be +modified slightly: + +.. code-block:: c++ + + struct H { + using type = float; + [[nodiscard]] int num_variables() { return /* unspecified */; } + [[nodiscard]] int array_dimension(int i) { return i + 1; } + }; + +where the integer argument ``i`` lies in ``0 .. num_variables()-1`` and the function should, as you +might expect, return the desired array dimension for the ``i``\ :sup:`th` copy of the field +represented by this tag type. + +Real-world usage of this feature is, again, ``RANGE`` variables in MOD files, as each individual +``RANGE`` variable can have an array dimension. +The array dimension must be global to all instances of the mechanism (entity); there is no support +for arrays of per-instance-variable size. + +Pretty printing +^^^^^^^^^^^^^^^ +The ``data_handle`` and ``generic_data_handle`` handle-to-value types support pretty-printing to +``std::ostream&`` via ``operator<<`` overloading, yielding results like: + +.. code-block:: + + data_handle{Node::field::Voltage row=0/1 val=42} + +By default, the printed names are derived from the names of the tag types; however, when +``num_variables()`` is used then the best that this method can yield are names like +``Entity::field::FloatingPoint#7``. +Once again, an extra method defined in the tag type allows more human-friendly names to be given: + +.. code-block:: c++ + + struct I { + using type = short; + [[nodiscard]] int num_variables() { return /* unspecified */; } + [[nodiscard]] const char* name(int i) const { + assert(i >= 0 && i < num_variables()); + return name_table[i]; + } + // ... name_table definition ... + }; + +Once again, the real-world usage of this comes from ``RANGE`` variables in MOD files; the NMODL +variable names are passed in to the relevant tag type and returned from the ``name`` method. + +.. _advanced-soa-container-features: + +Advanced container features +--------------------------- +You should now have a reasonable idea of what kinds of data can be represented, and how to express +the desired fields using tag types.
+In this section we will explore some more advanced runtime features of containers implemented using +``neuron::container::soa<...>``. + +Let's take a simplified three-stage view of how NEURON is used: + +1. We construct our model; this basically means creating Node and Mechanism instances, although it + may also involve deleting them. It is driven by the user and consists of a very large number of + independent calls from Python (or HOC) user code into the NEURON library. +2. We prepare the model data for simulation. This is done by NEURON internally and implicitly; the + user just asks for the simulation to run. +3. We run the simulation. + +During step 1, the ordering of the underlying data arrays is essentially unknown; the data are +"unsorted". +Step 3 is generally where the majority of the computational time is spent, so we want the relevant +algorithms there to operate directly on the underlying data arrays, and to be able to make +assumptions about the ordering of those data. + +.. note:: + + In general terms, this is important because some of these algorithms reduce to loops with data + dependencies that inhibit more **generic** optimisations such as autovectorisation. + +.. note:: + + In principle, then, most dynamic memory [re-]allocation in step 1 can be avoided if a **good** + estimate of the final model size is available. + There is currently no way of providing this estimate, but it would be a relatively simple + change to NEURON to expose a `std::vector::reserve + `_-like method (pay attention to + the "Notes" section of that page). + Note that calling this *repeatedly* is likely to be a bad idea, as it can inhibit the standard + exponential growth of the allocated storage size, but a single call to ``reserve`` before + constructing the model with an estimate of the complete model size may be worthwhile. + +Step 2 is thus responsible for sorting the unordered data of step 1 into sorted data suitable for +step 3. +How the ordering scheme is defined will be described in :ref:`soa-data-integration-in-neuron` +below; for now, let's just note that ``neuron::container::soa`` provides a method named +``apply_reverse_permutation`` that takes a [reverse] permutation vector, that is, a range of +integers such that ``perm_vector[old_position]`` is the destination index for the value that +initially sits at index ``old_position``. + +.. warning:: + + The algorithm used to apply the permutation uses the given vector as temporary storage, so + **this method modifies its argument**. NB: the "reverse" terminology is consistent with Boost; + a "forward" permutation would move ``perm_vector[new_position]`` to ``new_position``. + +It should be apparent from the discussion so far that immediately after step 2, the data are +"sorted", but that this is a fragile state: creating or destroying a single Node or Mechanism +instance would leave the data "unsorted" once more. + +The ``soa<...>`` utility provides some additional functionality to manage this fragility in an +efficient and straightforward-to-reason-about way, which is the topic of this section. + +First, the containers maintain a "sorted" flag: if you flag a container as "sorted" then it will +remain flagged "sorted" until one of the following happens: + +* You explicitly flag it as "unsorted". +* You add or erase an element from it. +* You apply a permutation to it. + +This allows step 2 to be short-circuited.
+ +Second, it is possible to forbid (at runtime) operations that would cause the container to become +"unsorted" by marking it "frozen". +This is done using a token type: + +.. code-block:: c++ + + void whatever(my_soa_container& data) { // assume `data` was not already "frozen" + owning_handle foo{data}; // adding an element is OK + auto token = data.issue_frozen_token(); // mark container "frozen" + owning_handle disappointing_foo{data}; // this will throw + } // `token`` is destroyed here; `data` ceases to be "frozen" + +The container maintains a count of how many tokens are controlling it and is "frozen" whenever that +count is nonzero. + +.. warning:: + + An unfortunate edge case of the current implementation is that trying to delete an owning + handle that refers to a "frozen" container will cause the program to terminate. + This is because trying to erase from a "frozen" container is forbidden, and will throw an + exception, leaving the destructor of the owning handle no better option than to immediately + terminate. See also: `E.16: Destructors, deallocation, swap, and exception type copy/move + construction must never fail + `_ from the C++ + Core Guidelines. + + So far this does not appear to be a problem. + One, so far theoretical, issue is that any code which takes a callback from the user that is + executed during the simulation cannot realistically prevent that user callback from attempting + to delete part of the model data. + It is also imaginable that the current or future GUI implementation could fall foul of this, + but it has not been seen in practice. + +Finally, it is possible to register a callback using the ``set_unsorted_callback`` method, which +is executed whenever the container transitions from "sorted" to "unsorted". +This is used in NEURON to invalidate temporary data that is derived from, and whose validity is +linked to, the "sorted" data generated in step 2 above. + +.. _porting-mechanisms-to-new-data-structures: + +Compatibility with older MOD files +---------------------------------- +Given the substantial changes behind the scenes, migrating to using the data structures described +on this page is not 100% backwards compatible. +In the vast majority of cases, issues stem from the use of ``VERBATIM`` blocks in MOD files, +although it can also occur that references to model data based on the new data handles are +"stickier" than the old explicit-pointer-update based techniques and manage to continue, for +example, recording data to a vector where older versions failed to. +This latter case is unlikely to cause a major problem, so ``VERBATIM`` blocks are most likely all +that you need to worry about. + +.. note:: + If you have a model that stopped compiling when you upgraded to or beyond + |neuron_with_soa_data|, the first thing that you should check is + whether the relevant MOD files have already been updated in ModelDB or in the + GitHub repository of that model. You can check the repository name with the + model accession number under ``_. + An updated version may already be available! + + The following section also contains links to the models that were updated in preparation for the + new data structures, which may serve as useful references. + +.. note:: + Moving from NEURON 8 or earlier to |neuron_with_soa_data| also switches MOD files from being + translated into C to being translated into C++ code, which is also not 100% backwards + compatible. 
+ If you are migrating between these versions and encounter issues, you should also check: + :ref:`porting-mechanisms-to-cpp`. + +This section aims to summarise a few of the anti-patterns that cropped up repeatedly in the ModelDB +models listed above. + +Storing addresses of RANGE variables +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``VERBATIM`` code along the lines of + +.. code-block:: c++ + + some_double_pointer_saved_for_later = &some_range_variable; + +where ``some_double_pointer_saved_for_later`` has a lifetime longer than one +method in your MOD file is not a good idea. +If the model data is re-allocated (*e.g.* because a new instance of the mechanism is created) or +re-ordered (*e.g.* because you request processing with a different number of threads) then the +saved pointer will either become invalid, or point to unrelated data. + +It is best to completely avoid logic like this. +Mechanisms that refer to data in other parts of the model can use constructs such as ``POINTER``. + +Examples in ModelDB: `7399 `_, +`12631 `_ and +`52034 `_. + +.. _assuming-neuron-data-types-are-visible-in-generated-cpp-code: + +Assuming NEURON data types have visible definitions +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``VERBATIM`` code along the lines of + +.. code-block:: c++ + + Section* sec = chk_access(); // OK-ish, declared in nrn_ansi.h + Section* child = sec->child; // now an error, Section is a forward-declaration + +will no longer compile, because the ``Section`` struct is only visible to the C++ code generated +from MOD files as a forward declaration (``struct Section;``), so member access (``sec->child``) +will fail. + +The example used here is ``Section``, but the same applies to ``Prop`` and ``Node``. + +Insofar as these patterns are really necessary, free-standing accessor functions should be used. + +Examples in ModelDB: `83344 `_, +`97917 `_, +`106891 `_, +`116838 `_, +`136095 `_, +`138379 `_, +`140881 `_, +`141505 `_, +`144538 `_, +`146949 `_, +`150245 `_, +`244262 `_, +`267067 `_ and +`267384 `_. + +.. _assuming-aos-layout-in-generated-cpp-code: + +Assuming array-of-structs data layout +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``VERBATIM`` code along the lines of + +.. code-block:: c++ + + Prop* prop = /* unspecified */; + double* mech_data = prop->param; // member access doesn't compile anymore, and... + double third_range_var = mech_data[3]; // assumes struct-of-arrays layout + +is guilty both of :ref:`assuming-neuron-data-types-are-visible-in-generated-cpp-code` **and** +of assuming array-of-structs layout. + +In AoS layout, the value of the 2\ :sup:`nd` ``RANGE`` variable for a particular mechanism instance +is immediately after the value of the 1\ :sup:`st` ``RANGE`` variable for that instance. +In SoA layout, those two values are in general nowhere near each other, and the offset is certainly +not a compile time constant that one can write down. + +Insofar as this pattern is really necessary, accessor functions provided by NEURON should be used +so that the NEURON library code can correctly find the n\ :sup:`th` ``RANGE`` variable for a +particular mechanism instance. + +Example in ModelDB: `106891 `_, + +Out-of-bounds access +^^^^^^^^^^^^^^^^^^^^ +If you declare ``RANGE x[3], y`` then do not access ``x[3]``, ``x[4]`` and so on. +This was always a bad idea and always generally led to incorrect results, but the transposition to +SoA layout will in general lead to differently-wrong results. 
+In AoS layout, ``x[3]`` was probably the same as ``y``, with SoA it is probably ``x[0]`` of a +different instance of the mechanism. + +Do not do this. + +Example in ModelDB: `113446 `_, + +Assuming you know how the code generation works +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +``VERBATIM`` code along the lines of + +.. code-block:: c++ + + // restore pointers each time + Prop* prop = /* unspecified */; + _p = prop->param; + _ppvar = prop->dparam; + some_function(range_var /* a RANGE variable name */); + +is guilty both of :ref:`assuming-neuron-data-types-are-visible-in-generated-cpp-code` **and** +of assuming things about how the C++ code generation from MOD files works. + +In the ``nocmodl`` translator, ``range_var`` is a preprocessor macro whose definition used to use +a variable ``double* _p``. +We have already seen in :ref:`assuming-aos-layout-in-generated-cpp-code` that this no longer makes +sense. + +Do not assume that you know how the code generation works and how to "reset" which mechanism +instance ``range_var`` refers to. + +Examples in ModelDB: `116838 `_, +`136095 `_, +`138379 `_, +`140881 `_, +`141505 `_, +`144538 `_, +`146949 `_ and +`150245 `_. + +.. _soa-data-integration-in-neuron: + +Integration in NEURON +--------------------- +This section aims to give some more high-level details about how the data structures described +above are actually integrated into NEURON. + +Summary of migrated data structures +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +In brief, Node data -- excluding that for the fast membrane current calculation (``fast_imem``) -- +uses the data structures described above. +Mechanisms, whether defined via MOD files or at runtime using components like KSChan, manage their +floating-point (``double``) data using the new data structures: essentially data for ``RANGE`` +variables. +Other mechanism data, such as that stored in ``pdata`` for ``POINTER`` variables, has not been +modified. +However, the data type for ``pdata``, which used to be a union type named ``Datum``, is now the +``generic_data_handle`` type introduced above. + +In both cases, the struct that represented a Node (``Node``) or Mechanism instances (``Prop``) in +older versions of NEURON has been augmented to have a member variable that is an owning handle to +a row of the relevant new-style data structure. + +Integration of data handles +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +There have been far-reaching efforts to replace raw pointers (``double*``) that may refer to model +data with data handles (``data_handle``) that are stable with respect to permutations of +the underlying data structures. +The data handle type is aware of the global Node and Mechanism data structures and is able to scan +them to promote a raw pointer that **does** point into the model data into a data handle that will +stably refer to that value. + +.. note:: + + This scan is not especially fast and is mainly intended to allow incremental migration towards + using data handles directly everywhere. + +The data handle type also supports a fall-back mode where it simply wraps a raw pointer that does +not point into the model data structures described on this page. +Thanks to this fall-back mode, ``double*`` can safely be replaced with ``data_handle`` +without loss of generality, and this substitution has been made in the HOC stack machine. + +.. 
_soa-data-integration-in-neuron-sorting-algorithms: + +Sorting algorithms +^^^^^^^^^^^^^^^^^^ +To date no novel sort orders for the model data have been introduced and when the data are prepared +for simulation they simply follow the old ordering scheme. +One difference is that previously some data arrays were allocated separately for each ``NrnThread`` +object, while now there is a single array shared by all ``NrnThread`` objects, each of which is +assigned a (disjoint) contiguous range to process. + +.. note:: + + This means that changing the number of ``NrnThread`` objects in use, via + :meth:`ParallelContext.nthread`, does not trigger significant re-allocation and simply modifies + the sort order. + +For Node data, this is handled by the ``nrn_sort_node_data`` internal function which simply takes +the order saved to ``NrnThread::_v_node`` by the old implementation. + +For Mechanism data, this is handled by the ``nrn_sort_mech_data`` internal function, which follows +a similar order to ``nrn_sort_node_data``. + +.. warning:: + + When using CVODE in some configurations the current implementation is not sophisticated enough + to ensure that the Mechanism instances that CVODE will want to execute in one batch are + contiguous in memory. + To keep the implementation reasonably simple, in this case the Mechanism instances will be + executed in N batches of 1 instead of, as we would prefer for performance reasons, 1 batch of + N instances. + This can, presumably, be addressed with a more sophisticated sort order in this case. + The relevant code can be identified by searching for cases where the ``CvMembList::ml`` vector + has a size greater than one. + ModelDB entries 156120 and 267666 are some fairly arbitrary examples that follow this codepath. + +Transient cache +^^^^^^^^^^^^^^^ +It can be useful, when preparing the model data for simulation, to save some extra outputs from the +sort algorithm that are made available to the algorithms that execute the simulation. +For example: + +* For each ``NrnThread``, what is the offset in the global Node data at which its Nodes start? +* For a Mechanism with ``POINTER`` variable, what is the raw pointer value into the [frozen] data? + +These are data that can be computed when calculating how to sort the data, and which only remain +valid for as long as the data remain sorted. + +The cache structures that hold these data are defined in the +`neuron::cache:: <../doxygen/namespaceneuron_1_1cache.html>`_ namespace. +Normally the entry point to the cache and data sorting algorithms from within the NEURON codebase +is a single function: ``nrn_ensure_model_data_are_sorted()``. +This coordinates sorting and populating the caches for both Node and Mechanism data, returning a +token of type ``neuron::model_sorted_token`` that uses the tokens described in +:ref:`advanced-soa-container-features` to guarantee that the model data remain sorted, and also +provides access to the cache. + +.. code-block:: c++ + + // triggers model data sorting if it's not already sorted + auto const token = nrn_ensure_model_data_are_sorted(); + // creating a new Node would now fail; data structures are frozen + NrnThread& nt = /* ... */; + auto const offset = token.thread_cache(nt.id).node_data_offset; + // nt's nodes start at `offset` in the global Node data structure + +The ``set_unsorted_callback`` feature described above ensures that the cache does not outlive its +validity. 
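As a purely illustrative sketch (the types and names below are invented for the example; they are not NEURON's API), the callback-based invalidation pattern looks roughly like this:

.. code-block:: c++

    #include <cstddef>
    #include <functional>
    #include <optional>

    // Toy container: remembers a callback and fires it when it becomes unsorted.
    struct toy_container {
        void set_unsorted_callback(std::function<void()> cb) { m_on_unsorted = std::move(cb); }
        void mark_unsorted() { if (m_on_unsorted) { m_on_unsorted(); } }
      private:
        std::function<void()> m_on_unsorted;
    };

    // Toy cache of offsets that were computed while the data were sorted.
    struct toy_cache {
        std::size_t node_data_offset{};
    };

    int main() {
        toy_container data;
        std::optional<toy_cache> cache{toy_cache{42}};
        // Drop the cached offsets as soon as the data stop being sorted...
        data.set_unsorted_callback([&cache] { cache.reset(); });
        data.mark_unsorted();  // ...e.g. because a Node was created or deleted.
        // `cache` is now empty and must be recomputed before it is used again.
    }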
+ +Future work +----------- +This section contains a brief summary of future work that would be beneficial on these topics. + +Elimination of "legacy indices" +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +As explained above, in the presence of array ``RANGE`` variables, *e.g.* ``RANGE x[2]``, a +"struct of arrays of structs" (SoAoS) layout is used, with ``x[0]`` and ``x[1]`` contiguous in +memory for a given mechanism instance. + +.. note:: + + If this isn't what you want for a particular mechanism, you can replace ``RANGE x[2]`` with + ``RANGE x0, x1``... + +A consequence of this is that indices into the ``RANGE`` variables of a mechanism and indices into +array ``RANGE`` variables are in different directions/dimensions. +Consider a MOD file with + +.. code-block:: + + RANGE x, y[2], z + +Then we have the following indices. For example, ``y[1]`` has :math:`i=1` because ``y`` is the second +``RANGE`` variable and :math:`j=1` because of the subscript ``[1]``; it has legacy index +:math:`k=2` because there are two values "before" it (``x`` and ``y[0]``). + ++----------+--------------------------+-----------------------+------------------------+ +| Data | Variable index :math:`i` | Array index :math:`j` | Legacy index :math:`k` | ++==========+==========================+=======================+========================+ +| ``x`` | 0 | 0 | 0 | ++----------+--------------------------+-----------------------+------------------------+ +| ``y[0]`` | 1 | 0 | 1 | ++----------+--------------------------+-----------------------+------------------------+ +| ``y[1]`` | 1 | 1 | 2 | ++----------+--------------------------+-----------------------+------------------------+ +| ``z`` | 2 | 0 | 3 | ++----------+--------------------------+-----------------------+------------------------+ + +The legacy index is the position in the old ``Prop::param`` vector. +When using the new SoAoS layout, one needs :math:`i` and :math:`j` separately. +Note that, in general, :math:`i + j \ne k`. + +This task is to eliminate usage of the legacy index. +Searching for ``legacy_index`` in the codebase is an excellent starting point. + +Completing migration of ``Node`` and ``Prop`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +As explained above in :ref:`soa-data-integration-in-neuron`, the ``Node`` and ``Prop`` structs are +still explicit structs that hold owning handles to Node / Mechanism data as member variables. + +This should be viewed as a transition measure, which is quite verbose because it requires accessor +functions like ``rhs()`` to be implemented twice: once in +``neuron::container::Node::handle_interface`` and a forwarding version in ``Node`` itself. + +The intent is that once the migration is complete, the ``Node`` and ``Prop`` structs will no longer +be needed, and they can simply become aliases for, respectively, ``Node::owning_handle`` and +``Mechanism::owning_handle``. + +Eliminate the need for ``CvMembList::ml`` to be a vector +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +See the description above under :ref:`soa-data-integration-in-neuron-sorting-algorithms`. + +Eliminate the need for ``literal_value`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +As introduced above in :ref:`soa-data-integration-in-neuron`, the old ``Datum`` union has been +replaced by the new type-erased data handle ``neuron::container::generic_data_handle``. + +To match the flexibility of the old ``Datum``, this had to be augmented to allow small literal +values (``int``, ``double``, ``void*``, ...)
to be stored inside it and accessed via either +``get<T>()`` (if only the value is required) or ``literal_value<T>()`` if, even worse, the address +of the wrapped value needs to be taken. + +This should be viewed as a hacky transition measure that can be removed, and which should provide +a performance benefit: storing an ``int`` as a literal value inside a ``generic_data_handle`` is +less space efficient, and involves more indirection, than simply storing an array of ``int`` values +via a tag type with ``using type = int``. + +.. note:: + + While detailed benchmarks have not been performed, it has been noted that storing the ``int`` + ``iontype`` variable as a literal value inside ``generic_data_handle`` is visible in profiles. + +In principle all usage of the old ``pdata``, and of ``Datum`` AKA ``generic_data_handle``, should be +removed using the same prescription, i.e. new fields with the relevant tag types. +The current implementation allows an unnecessary amount of freedom, namely that the n\ :sup:`th` +``pdata`` field could refer to a different-typed value for each instance of the mechanism. + +.. note:: + + If, for example, ``POINTER`` variables were managed using a new field with a tag type + + .. code-block:: c++ + + struct PointerVariables { + using type = data_handle<double>; + // ... + }; + + that ultimately generates ``std::vector<data_handle<double>>`` then note that this structure + could be benchmarked + optimised using the knowledge that all the data handles will(?) be + pointing into the same container. + +See also: `#2312 `_. + +Eliminating ``pdata`` in a less invasive way +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +The previous sub-section is the "ideal" way of removing the old ``pdata`` structure, which is +desirable because at present it takes some care to ensure that it is permuted in the same way as +the other Mechanism data. + +An alternative stepping stone would be to retain ``generic_data_handle`` for the moment, but to +transpose ``pdata`` from AoS to SoA. + +Reduce indirection when MOD files use ``diam`` +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +MOD files can access the section diameter using the special ``diam`` variable name. +Examples of this include ``share/examples/nrniv/nmodl/nadifl.mod`` and ModelDB entry 184054. +This is handled explicitly in the codebase, and the underlying storage for ``diam`` values is +managed via a special pseudomechanism called ``MORPHOLOGY``. +When ``diam`` is used in the generated code, the values are indirectly looked up using +``data_handle`` during the simulation, which is rather slow and indirect. +There are (at least?) two possibilities for how the situation can be improved: + +* Adopt the same caching technique that is used for ion variables, *i.e.* don't change the data + layout but do reduce the indirection down to loading a pointer and dereferencing it. + To pursue this, look at ``neuron::cache::indices_to_cache`` and modify the code generation to use + the cached pointer. +* Revisit whether the ``MORPHOLOGY`` pseudomechanism is still needed, or whether the diameter could + be stored directly as a ``Node`` data field? See `#2312 + `_ for more information. + +Similarly, usage of ``area`` in generated code may be able to be simplified. +Most likely, the best approach is to uniformly handle ``area`` and ``diam`` in the same way as Node +voltages, both in terms of the underlying data structure and how they are accessed in the generated +code.
+ +Analyze the bookkeeping overhead +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ +There is a crude printf-based tool for assessing the memory usage of the data structures: +``print_local_memory_usage`` and ``print_memory_stats``. +This provides some kind of breakdown between the actual data, the "active" bookkeeping costs +(the currently-used index columns, as explained above), and also the "wasted" overhead of values +that have their deletion deferred in order to prevent any data handles still "in the wild" from +accidentally dereferencing freed pointers. + +The need to "leak" the stable identifiers could be avoided by replacing the +"raw pointer to integer" idea with a reference-counted integer, with bitpacking +and all. + +Alternatively, this "wasted" storage could be recovered after a full traversal of all data +structures that hold ``data_handle`` or ``generic_data_handle`` that collapses handles that are +in previously-valid-but-not-any-more (once valid?) state into "null" (never valid?) state. + +Reporting and monitoring the scale of this "waste" is much easier than recovering it, which should +only be done **if** this is **shown** to be a real problem. + +Measurements at BBP have shown that under certain conditions the amount of +"leaked" stable identifiers adds up. diff --git a/docs/dev/execution_flow.svg b/docs/dev/execution_flow.svg new file mode 100644 index 0000000000..bada460ad2 --- /dev/null +++ b/docs/dev/execution_flow.svg @@ -0,0 +1,3 @@ + + +
Circuit creation
Circuit creation
Register mechanisms
Register mechanisms
Setup reports
Setup reports
Initialization
Initialization
Simulation
Simulation
Execution flow
Execution...
Insert mechanisms
Insert mechanisms
Text is not SVG - cannot display
\ No newline at end of file diff --git a/docs/dev/gpu-testing.rst b/docs/dev/gpu-testing.rst index 6cf4f9e0a7..87ecdf2011 100644 --- a/docs/dev/gpu-testing.rst +++ b/docs/dev/gpu-testing.rst @@ -8,7 +8,7 @@ Other sections of the documentation that may be relevant are: - The :ref:`Running GPU benchmarks` section, which outlines how to use profiling tools such as Caliper, NVIDIA NSight Systems, and NVIDIA NSight Compute. This section aims to add some basic information about how to test if GPU execution is working. -This might be useful if, for example, you need to test a change to the GPU wheel building, or test GPU execution on a new system. +This might be useful if, for example, you need to test GPU execution on a new system. Accessing GPU resources *********************** diff --git a/docs/dev/hocdomain-sphinx.md b/docs/dev/hocdomain-sphinx.md new file mode 100644 index 0000000000..1fe0061327 --- /dev/null +++ b/docs/dev/hocdomain-sphinx.md @@ -0,0 +1,47 @@ +# HOC Sphinx Domain + +## Overview + +The HOC Sphinx Domain is a Sphinx extension that makes it possible to document HOC constructs in Sphinx. +Given the extra effort needed to create a full-fledged domain, we've decided to hack it from the Python domain. +Ideally we'd have a proper HOC domain, but that is an extra workload and we lack the required knowledge to build one. +See https://github.com/neuronsimulator/nrn/issues/1540 + +## Hacking the Python Domain + +It is sometimes required to re-hack the domain to make it work with the latest version of Sphinx. +To that end, the following script can be used to generate the domain from the Python domain: + +```bash +cd docs +python3 generate_hocdomain.py +``` + +This script generates a HOC domain from the one available in the sphinx package and writes it to: + + docs/domains/hocdomain.py + +A comment is added at the top of the file to indicate that it is a generated file and the Sphinx version used to generate it. + + +## Sphinx Setup + +As with any Sphinx extension, the HOC Sphinx Domain is registered in ``docs/conf.py`` + +```python +# 1st step: make docs/domains available to Sphinx +sys.path.insert(0, os.path.abspath("./domains")) + +# .... + +# 2nd step: import hocdomain +import hocdomain # Sphinx HOC domain (hacked from the Python domain via docs/generate_hocdomain.py) + +# .... + +# 3rd step: setup the HOC domain in Sphinx +def setup(app): + # ... + # Set-up HOC domain + hocdomain.setup(app) +``` \ No newline at end of file diff --git a/docs/dev/index.rst b/docs/dev/index.rst index 0ff34af608..6838979739 100644 --- a/docs/dev/index.rst +++ b/docs/dev/index.rst @@ -6,4 +6,10 @@ NEURON Development topics ./HOCInterpreter/HOCInterpreter.md ./how-do-i/how-do-i.rst + data-structures.rst gpu-testing.rst + workflow-code-paths.rst + ./setuptools/setuptools.md + ./morphology/morphology.md + hocdomain-sphinx.md + diff --git a/docs/dev/initialization.svg b/docs/dev/initialization.svg new file mode 100644 index 0000000000..b65b8e4376 --- /dev/null +++ b/docs/dev/initialization.svg @@ -0,0 +1,3 @@ + + +
Initialization (diagram text):
Call to finitialize(): finitialize passes the initial voltage value to nrn_finitialize(int, double)
nrn_fihexec(3); calls the finitialize handler kernels with type == 3
verify_structure(): takes care of recalculating circuit-related geometry such as topology, v vectors and diameters; also calls nrn_solver_prepare
if (tree_changed) -> setup_topology(): sets up pointers to the parents of the sections
if (v_structure_change) -> v_setup_vectors(): frees and sets up NrnThreads and memb_list; calls functions that also set up auxiliary structs to keep track of pointers of C++ objects, vectors of NrnThread, etc.; sets up longitudinal diffusion with a call to long_difus_solve(); sets up CVode with a call to nrn_nonvint_block_setup()
if (diam changed) -> recalc_diam(): reruns v_setup_vectors(); allocates and sets the Hines matrix data structures in nrn_matrix_node_alloc() and connection_coef(); stim_prepare(), synapse_prepare() and clamp_prepare() set up recording/stimulus/synapses
nrn_solver_prepare(): prepares CVode for execution; sets up fornetcon objects and calls NetCvode::init_global, which does a final check that topology, vectors and recalc_diam are set
nrn_ensure_model_data_are_sorted()
\ No newline at end of file diff --git a/docs/dev/insert_mechanism.svg b/docs/dev/insert_mechanism.svg new file mode 100644 index 0000000000..2a31aa34a8 --- /dev/null +++ b/docs/dev/insert_mechanism.svg @@ -0,0 +1,3 @@ + + +
Insert mechanism (diagram text):
From Python: NPySecObj_insert; from HOC: MechanismType::insert
mech_insert1(): inserts the mechanism into the Section (meaning all the nodes of the section) by calling prop_alloc()
prop_alloc(): sets `v_structure_change = 1;`, allocates a new `Prop` struct for the mechanism and calls the `<mech_name>_alloc` function defined in the mechanism file
The `Prop` constructor calls the constructor of `Mechanism::owning_handle`, which inserts a new element into each of the underlying data vectors (`field_data<Tag, FieldImplementation::RuntimeVariable>::m_storage[i]` for all `i`)
See "Memory allocation of field_data"
\ No newline at end of file diff --git a/docs/dev/mechanism_registration.svg b/docs/dev/mechanism_registration.svg new file mode 100644 index 0000000000..35d095841e --- /dev/null +++ b/docs/dev/mechanism_registration.svg @@ -0,0 +1,3 @@ + + +
Mechanism registration (diagram text):
Every mechanism defines `<mech_name>_reg_(void)`.
`<mech_name>_reg_(void)` calls `register_mech(mechanism, <mech_name>_alloc, <mech_name>_cur, <mech_name>_jacob, nullptr, nullptr, -1, 1);` and `neuron::mechanism::register_data_fields(mechtype, field<double>{"<var_name"}, field<double>{"<array_var_name", <Array_size>}, fields...);`
`register_mech` registers the allocator, the nrn_cur function, the jacob function, etc.
`register_data_fields` takes care of creating the storage underneath by calling `neuron::mechanism::detail::register_data_fields(int type, std::vector<std::pair<const char*, int>> const& param_info, std::vector<std::pair<const char*, const char*>> const& d...`
neuron::container::Mechanism::storage(short mech_type, std::string name, std::vector<Variable> floating_point_fields): floating_point_fields is a vector of structs that hold the information for the various RANGE variables of the mechanism
We pass a `std::vector<Variable>` to a wrapper `neuron::container::Mechanism::field::FloatingPoint`. Because the wrapper has a `num_instances` non-static member function, we need to allocate vectors for each one of the RANGE variables in the `m_var_info` member of `FloatingPoint`.
neuron::container::Mechanism::storage sets `using base_type = soa<storage, field::FloatingPoint>;`
neuron::Model::add_mechanism()
field_data of each variable: creation of the `soa` storage struct, which holds a tuple of detail::field_data structs that form the underlying storage
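To make the shape of the registration data more concrete, here is a small standalone C++ sketch of the kind of per-field information that `register_data_fields` receives according to the signature quoted above, i.e. one (name, array size) pair per RANGE variable. The struct, the mechanism type value and the example field names are purely illustrative, not NEURON's actual types.

```cpp
#include <cstdio>
#include <string>
#include <utility>
#include <vector>

// Toy sketch (not NEURON's API): the essence of what register_data_fields
// conceptually receives -- for each RANGE variable a (name, array size) pair,
// from which the per-mechanism storage columns can be created.
struct mechanism_fields_sketch {
    int type;                                              // illustrative mechanism type id
    std::vector<std::pair<std::string, int>> param_info;   // {"name", array size} per field
};

int main() {
    mechanism_fields_sketch hh{3, {{"gnabar", 1}, {"gkbar", 1}, {"m", 1}}};
    for (auto const& [name, width] : hh.param_info) {
        std::printf("field %s has %d element(s) per instance\n", name.c_str(), width);
    }
}
```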
\ No newline at end of file diff --git a/docs/dev/memory_allocation.svg b/docs/dev/memory_allocation.svg new file mode 100644 index 0000000000..bfb97f41e3 --- /dev/null +++ b/docs/dev/memory_allocation.svg @@ -0,0 +1,3 @@ + + +
Memory allocation of field_data (SoAoS) (diagram text):
Node has a neuron::container::Node::owning_handle member and Prop has a neuron::container::Mechanism::owning_handle member; the constructor of owning_handle calls acquire_owning_identifier for both.
By calling for_all_vectors<detail::may_cause_reallocation::Yes>() we append an element to the m_storage of each field_data. We also set the m_ptr (std::unique_ptr<std::size_t, deleter>) of the owning_identifier to the old size; this identifier is used as the offset to access the corresponding element from other parts of the code. Because the detail::may_cause_reallocation::Yes template parameter is used, calling for_all_vectors() also updates the m_data_ptrs member of the field_data for mechanisms to the up-to-date pointers to the m_storage vectors.
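The reason the `m_data_ptrs` refresh matters can be shown with a standalone snippet (plain C++ with illustrative names, not NEURON's actual types): growing a `std::vector` may reallocate its buffer, so any cached `data()` pointer has to be recomputed after an append.

```cpp
#include <cstdio>
#include <vector>

// Standalone illustration (not NEURON code): caching a data() pointer to a
// per-field vector, then refreshing the cache after an append that may have
// reallocated the underlying buffer.
struct field_cache_sketch {
    std::vector<double> storage;   // stands in for one field's m_storage
    double* cached_ptr = nullptr;  // stands in for an entry of m_data_ptrs

    void refresh() { cached_ptr = storage.data(); }

    void append(double value) {
        storage.push_back(value);  // may reallocate and move the buffer
        refresh();                 // so the cached pointer must be updated
    }
};

int main() {
    field_cache_sketch field;
    field.refresh();
    for (int i = 0; i < 1000; ++i) {
        field.append(i * 0.1);     // cached_ptr stays valid after every append
    }
    std::printf("%f\n", field.cached_ptr[999]);
}
```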
\ No newline at end of file diff --git a/docs/dev/morphology/Import3D-hoc-morphio-backward.png b/docs/dev/morphology/Import3D-hoc-morphio-backward.png new file mode 100644 index 0000000000..9d5cfcd8e9 Binary files /dev/null and b/docs/dev/morphology/Import3D-hoc-morphio-backward.png differ diff --git a/docs/dev/morphology/Import3D-hoc.png b/docs/dev/morphology/Import3D-hoc.png new file mode 100644 index 0000000000..ced9f1e73e Binary files /dev/null and b/docs/dev/morphology/Import3D-hoc.png differ diff --git a/docs/dev/morphology/morphology.rst b/docs/dev/morphology/morphology.rst new file mode 100644 index 0000000000..1e4dd845eb --- /dev/null +++ b/docs/dev/morphology/morphology.rst @@ -0,0 +1,131 @@ +.. _morphology_loading: + +Morphology loading in NEURON +============================ + +Traditionally, reading morphologies with NEURON has been coupled to the ``Import3D GUI`` tool. +The ``Import3D GUI`` tool is a graphical tool that allows the user to load a morphology and then interactively modify it. +:ref:`import3d_tutorial` provides a tutorial on how to use the ``Import3D GUI`` tool. + +The underlying implementation is mostly HOC and the GUI constructs can in principle be disregarded, since loading numerous morphologies for simulation does not make any use of them. +Note that it is possible to load not only HOC-bound morphologies but also Python-bound morphologies (refer to :ref:`bio_faq`). + +HOC Morphology Loading - Legacy Import3D_GUI +-------------------------------------------- + +A HOC ``CellType`` needs to be instantiated and the Import3D stack will be used to load the morphology. + +Please refer to `Cell.hoc from BlueBrain/neurodamus `_ for an example of a HOC ``CellType``. + +.. image:: Import3D-hoc.png + +The ``instantiate`` method of the Import3D stack will create a list of HOC commands that will be executed to load the morphology.
+ +Given a simple single point soma simple Neurolucida morphology :: + + ; This is the same morphology as simple.swc + ; + ; (0, 5) + ; (-5, 5)----- ------ (6, 5) + ; | + ; | + ; | + ; | Type = 3 + ; | + ; o origin + ; | + ; | Type = 2 + ; | + ; | + ;(-5, -4)----- ------ (6, -4) + ; (0, -4) + + + ("CellBody" + (Color Red) + (CellBody) + (0 0 0 2) + ) + + ((Dendrite) + (0 0 0 2) + (0 5 0 2) + ( + (-5 5 0 3) + | + (6 5 0 3) + ) + ) + + + ((Axon) + (0 0 0 2) + (0 -4 0 2) + ( + (6 -4 0 4) + | + (-5 -4 0 4) + ) + ) + +``instantiate`` will parse the file and create the following HOC commands +that will be executed in the HOC interpreter, thus loading the morphology: :: + + create soma[1] + forsec "soma" somatic.append + create axon[3] + forsec "axon" axonal.append + create dend[3] + forsec "dend" basal.append + forall all.append + soma { pt3dadd(1, 1.7484555e-07, 0, 1) } + soma { pt3dadd(0.94581723, -0.32469952, 0, 1) } + soma { pt3dadd(0.78914064, -0.61421257, 0, 1) } + soma { pt3dadd(0.54694813, -0.83716649, 0, 1) } + soma { pt3dadd(0.24548566, -0.96940023, 0, 1) } + soma { pt3dadd(-0.082579389, -0.99658448, 0, 1) } + soma { pt3dadd(-0.40169525, -0.91577339, 0, 1) } + soma { pt3dadd(-0.67728162, -0.73572391, 0, 1) } + soma { pt3dadd(-0.87947375, -0.47594735, 0, 1) } + soma { pt3dadd(-0.98636132, -0.16459456, 0, 1) } + soma { pt3dadd(-0.98636132, 0.16459462, 0, 1) } + soma { pt3dadd(-0.87947375, 0.47594741, 0, 1) } + soma { pt3dadd(-0.67728156, 0.73572391, 0, 1) } + soma { pt3dadd(-0.4016954, 0.91577333, 0, 1) } + soma { pt3dadd(-0.08257933, 0.99658448, 0, 1) } + soma { pt3dadd(0.2454855, 0.96940029, 0, 1) } + soma { pt3dadd(0.54694819, 0.83716649, 0, 1) } + soma { pt3dadd(0.78914052, 0.61421269, 0, 1) } + soma { pt3dadd(0.94581723, 0.32469946, 0, 1) } + soma { pt3dadd(1, 0, 0, 1) } + soma connect axon[0](0), 0.5 + axon[0] { pt3dadd(0, 0, 0, 2) } + axon[0] { pt3dadd(0, -4, 0, 2) } + axon[0] connect axon[1](0), 1 + axon[1] { pt3dadd(0, -4, 0, 2) } + axon[1] { pt3dadd(6, -4, 0, 4) } + axon[0] connect axon[2](0), 1 + axon[2] { pt3dadd(0, -4, 0, 2) } + axon[2] { pt3dadd(-5, -4, 0, 4) } + soma connect dend[0](0), 0.5 + dend[0] { pt3dadd(0, 0, 0, 2) } + dend[0] { pt3dadd(0, 5, 0, 2) } + dend[0] connect dend[1](0), 1 + dend[1] { pt3dadd(0, 5, 0, 2) } + dend[1] { pt3dadd(-5, 5, 0, 3) } + dend[0] connect dend[2](0), 1 + dend[2] { pt3dadd(0, 5, 0, 2) } + dend[2] { pt3dadd(6, 5, 0, 3) + + + +HOC Morphology Loading with MorphIO +----------------------------------- + +Given that legacy morphology loading is implemented in HOC, it does incur a performance penalty. +To that end, we are implementing a new mechanism for loading morphologies in NEURON, using the `MorphIO `_ library. + +The goal is to provide an API that will be backward compatible with the legacy mechanism, but will be implemented in C++ and will be more performant. +Ultimately, it would even be possible to remove the legacy mechanism, but that is not straightforward, as we need to couple it to the Import3D GUI/CellBuilder(see:ref:`cell_builder`). + +.. 
image:: Import3D-hoc-morphio-backward.png diff --git a/docs/dev/setuptools/images/setup-py.png b/docs/dev/setuptools/images/setup-py.png new file mode 100644 index 0000000000..2b6145637d Binary files /dev/null and b/docs/dev/setuptools/images/setup-py.png differ diff --git a/docs/dev/setuptools/setuptools.md b/docs/dev/setuptools/setuptools.md new file mode 100644 index 0000000000..56cc37f5e2 --- /dev/null +++ b/docs/dev/setuptools/setuptools.md @@ -0,0 +1,34 @@ +# setuptools + +## **setup.py** + +Traditionally **distutils** has been used, and **setuptools** ended up having its own internal copy. A lot of effort has been put into coping with the deprecation of **distutils** and accommodating **setuptools**. Furthermore, **setup.py** will also be discontinued and we will probably have to move on to **setuptools.build_meta** or find another way to package wheels and perform CMake builds. + NEURON has several Python extensions: +* HOC module (setuptools Extension with our CMake sauce on top) +* three Rx3D extensions (Cython extensions) +* MUSIC (Cython extension) + +## **Operational Modes** + +We use [setup.py](../../../setup.py) in two operational modes: + +1) **wheel building** + It boils down to + + setup.py build_ext bdist_wheel + + We configure the HOC extension along with a CMake configure, build all extensions and collect them for the wheel. This is called via [build_wheels.bash](../../../packaging/python/build_wheels.bash). + +2) **CMake build** + It boils down to + + setup.py build_ext build + + We provide the CMake build folder; in this mode we do not run the CMake configure step, we build all extensions and make sure they are integrated into the CMake build and install. This is called via CMake in [src/nrnpython/CMakeLists.txt](../../../src/nrnpython/CMakeLists.txt) by passing **--cmake-build-dir** (the folder where we configured NEURON with CMake), along with other CMake options. + + +## **Activity Diagram** + +![](images/setup-py.png) \ No newline at end of file diff --git a/docs/dev/soa-advanced-tag-types.svg b/docs/dev/soa-advanced-tag-types.svg new file mode 100644 index 0000000000..074a2aade6 --- /dev/null +++ b/docs/dev/soa-advanced-tag-types.svg @@ -0,0 +1,459 @@ (SVG markup; diagram text labels: SoA container, Tag, field_index, Tag, Tag, offset, array_index) diff --git a/docs/dev/soa-architecture.svg b/docs/dev/soa-architecture.svg new file mode 100644 index 0000000000..1e1747ab56 --- /dev/null +++ b/docs/dev/soa-architecture.svg @@ -0,0 +1,4 @@ + + + +
soa-architecture.svg (diagram text): struct ab_store: soa<ab_store, field::A, field::B>
Field A: a0, a1, a2, a3, a4, ...
Field B: b0, b1, b2, b3, b4, ...
Indices: → 0, → 1, → 2, → 3, → 4, ...
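A heavily simplified, standalone C++ sketch of the structure-of-arrays idea shown in this diagram (not the real `neuron::container::soa<>` template; names are illustrative): one vector per field, with a plain row index playing the role of the "Indices" column.

```cpp
#include <cstddef>
#include <vector>

// Much-simplified sketch of an SoA store with two fields, A and B.
struct ab_store_sketch {
    std::vector<double> field_a;       // "Field A" column: a0, a1, a2, ...
    std::vector<double> field_b;       // "Field B" column: b0, b1, b2, ...

    std::size_t add_row(double a, double b) {
        field_a.push_back(a);
        field_b.push_back(b);
        return field_a.size() - 1;     // the row index plays the role of "Indices"
    }

    double& a(std::size_t row) { return field_a[row]; }
    double& b(std::size_t row) { return field_b[row]; }
};

int main() {
    ab_store_sketch store;
    std::size_t row = store.add_row(/*a=*/1.0, /*b=*/2.0);
    store.b(row) += store.a(row);      // access both fields of one logical row
    return store.b(row) == 3.0 ? 0 : 1;
}
```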
\ No newline at end of file diff --git a/docs/dev/soa-array-var-layout.svg b/docs/dev/soa-array-var-layout.svg new file mode 100644 index 0000000000..521d2e6fe4 --- /dev/null +++ b/docs/dev/soa-array-var-layout.svg @@ -0,0 +1,4 @@ + + + +
soa-array-var-layout.svg (diagram text): struct a_store: soa<ab_store, field::A>
Field A[2]: a[0]0, a[1]0, a[0]1, a[1]1, a[0]2, a[1]2
Indices: → 0, → 1, → 2
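The layout shown above can be expressed as a small standalone C++ sketch (illustrative only, not NEURON's actual code): for an array variable of dimension `array_dim`, the element `(row, array_index)` lives at flat offset `row * array_dim + array_index`.

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

// Sketch of the flattened array-variable layout from the diagram above:
// a[0]0, a[1]0, a[0]1, a[1]1, ... i.e. the array elements of one row are
// stored contiguously, row after row.
struct array_field_sketch {
    std::size_t array_dim;             // e.g. 2 for A[2]
    std::vector<double> storage;       // flattened column for all rows

    std::size_t add_row() {
        storage.resize(storage.size() + array_dim, 0.0);
        return storage.size() / array_dim - 1;
    }

    double& at(std::size_t row, std::size_t array_index) {
        return storage[row * array_dim + array_index];
    }
};

int main() {
    array_field_sketch a{/*array_dim=*/2, {}};
    std::size_t r0 = a.add_row();      // row 0 -> slots {a[0]0, a[1]0}
    std::size_t r1 = a.add_row();      // row 1 -> slots {a[0]1, a[1]1}
    a.at(r0, 1) = 4.2;
    a.at(r1, 0) = -1.0;
    assert(a.storage.size() == 4);
    return 0;
}
```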
\ No newline at end of file diff --git a/docs/dev/workflow-code-paths.rst b/docs/dev/workflow-code-paths.rst new file mode 100644 index 0000000000..498557894d --- /dev/null +++ b/docs/dev/workflow-code-paths.rst @@ -0,0 +1,63 @@ +Workflow Code Paths +################### + +The intention of the following section is to give an overview of the important functions and code paths executed to perform certain actions. + +NEURON simulations +------------------ + +A summary of the workflow of a typical ``NEURON`` simulation is given by the following graph: + +.. image:: execution_flow.svg + +The parts of the simulation can be described as follows: + +Register Mechanisms +^^^^^^^^^^^^^^^^^^^ + +Mechanisms in ``NEURON`` are defined in ``.mod`` files. Apart from some built-in mechanisms, any additional mechanisms the user wants to use need to be compiled using the ``nrnivmodl`` executable. + +The ``nrnivmodl`` executable translates the ``MOD`` files from the ``NMODL`` language to ``C++``. Apart from the computation kernels, those ``C++`` files also include a function named ``_reg_(void)`` that is essential for registering the mechanism functions in ``NEURON`` so that the user can later ``insert`` the mechanism in the needed compartments. + +.. raw:: html + :file: mechanism_registration.svg + +Circuit creation +^^^^^^^^^^^^^^^^ + +During this part of the simulation the segments, sections and their connections are created. All of these are created based on the circuit, which is defined by the user in Python or HOC scripts with the respective API. + +See: :ref:`topology` and :ref:`hoc_topology`. + +During this part all the ``Node`` s are created as well as the ``Section`` s. + +See C++ documentation: `Node `_, `Section `_ and `SectionPool `_. + +The call graph of this part can be found below: + +.. raw:: html + :file: circuit_creation.svg + +Insert Mechanisms +^^^^^^^^^^^^^^^^^ + +After registering the mechanisms the user can insert them in the needed compartments. To do so, they need to call the ``insert`` function on the proper section. + +.. raw:: html + :file: insert_mechanism.svg + +Memory allocation of field data +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +During mechanism insertion the data structures needed to hold the mechanism data are also allocated. This is done by appending a new element for the newly inserted mechanism to the SoA data structures. + +.. raw:: html + :file: memory_allocation.svg + +Initialization +^^^^^^^^^^^^^^ + +After all the simulation ``Section`` s have been created, along with the data structures assigned to them and their mechanisms, all the underlying data structures are ordered and initialized. At this stage, the ``INITIAL`` blocks of the inserted ``MOD`` files are also executed. + +..
raw:: html + :file: initialization.svg diff --git a/docs/docs_requirements.txt b/docs/docs_requirements.txt index faee0c968a..7a4ea6dd3f 100644 --- a/docs/docs_requirements.txt +++ b/docs/docs_requirements.txt @@ -1,8 +1,9 @@ sphinx +# do not check import of next line sphinx_rtd_theme jupyter nbconvert -recommonmark +myst_parser matplotlib # bokeh 3 seems to break docs notebooks bokeh<3 @@ -14,3 +15,4 @@ plotly nbsphinx jinja2 sphinx-design +packaging==21.3 diff --git a/docs/domains/hocdomain.py b/docs/domains/hocdomain.py index 4fbc4ecb56..73ae11df5d 100644 --- a/docs/domains/hocdomain.py +++ b/docs/domains/hocdomain.py @@ -1,25 +1,15 @@ -# generated from 'sphinx/domains/python.py' @ Sphinx 5.0.1 +# generated from 'sphinx/domains/python.py' @ Sphinx 6.1.3 """The HOC domain.""" +from __future__ import annotations + +import ast import builtins import inspect import re -import sys import typing -import warnings from inspect import Parameter -from typing import ( - Any, - Dict, - Iterable, - Iterator, - List, - NamedTuple, - Optional, - Tuple, - Type, - cast, -) +from typing import Any, Iterable, Iterator, List, NamedTuple, Tuple, cast from docutils import nodes from docutils.nodes import Element, Node @@ -30,19 +20,21 @@ from sphinx.addnodes import desc_signature, pending_xref, pending_xref_condition from sphinx.application import Sphinx from sphinx.builders import Builder -from sphinx.deprecation import RemovedInSphinx60Warning from sphinx.directives import ObjectDescription from sphinx.domains import Domain, Index, IndexEntry, ObjType from sphinx.environment import BuildEnvironment from sphinx.locale import _, __ -from sphinx.pycode.ast import ast -from sphinx.pycode.ast import parse as ast_parse from sphinx.roles import XRefRole from sphinx.util import logging from sphinx.util.docfields import Field, GroupedField, TypedField -from sphinx.util.docutils import SphinxDirective +from sphinx.util.docutils import SphinxDirective, switch_source_input from sphinx.util.inspect import signature_from_str -from sphinx.util.nodes import find_pending_xref_condition, make_id, make_refnode +from sphinx.util.nodes import ( + find_pending_xref_condition, + make_id, + make_refnode, + nested_parse_with_titles, +) from sphinx.util.typing import OptionSpec, TextlikeNode logger = logging.getLogger(__name__) @@ -88,7 +80,7 @@ class ModuleEntry(NamedTuple): def parse_reftarget( reftarget: str, suppress_prefix: bool = False -) -> Tuple[str, str, str, bool]: +) -> tuple[str, str, str, bool]: """Parse a type string and return (reftype, reftarget, title, refspecific flag)""" refspecific = False if reftarget.startswith("."): @@ -115,7 +107,7 @@ def parse_reftarget( def type_to_xref( - target: str, env: BuildEnvironment = None, suppress_prefix: bool = False + target: str, env: BuildEnvironment | None = None, suppress_prefix: bool = False ) -> addnodes.pending_xref: """Convert a type string to a cross reference node.""" if env: @@ -133,7 +125,7 @@ def type_to_xref( # nested classes. But python domain can't access the real python object because this # module should work not-dynamically. 
shortname = title.split(".")[-1] - contnodes: List[Node] = [ + contnodes: list[Node] = [ pending_xref_condition("", shortname, condition="resolved"), pending_xref_condition("", title, condition="*"), ] @@ -151,14 +143,14 @@ def type_to_xref( ) -def _parse_annotation(annotation: str, env: BuildEnvironment) -> List[Node]: +def _parse_annotation(annotation: str, env: BuildEnvironment | None) -> list[Node]: """Parse type annotation.""" - def unparse(node: ast.AST) -> List[Node]: + def unparse(node: ast.AST) -> list[Node]: if isinstance(node, ast.Attribute): - return [nodes.Text("%s.%s" % (unparse(node.value)[0], node.attr))] + return [nodes.Text(f"{unparse(node.value)[0]}.{node.attr}")] elif isinstance(node, ast.BinOp): - result: List[Node] = unparse(node.left) + result: list[Node] = unparse(node.left) result.extend(unparse(node.op)) result.extend(unparse(node.right)) return result @@ -168,7 +160,7 @@ def unparse(node: ast.AST) -> List[Node]: addnodes.desc_sig_punctuation("", "|"), addnodes.desc_sig_space(), ] - elif isinstance(node, ast.Constant): # type: ignore + elif isinstance(node, ast.Constant): if node.value is Ellipsis: return [addnodes.desc_sig_punctuation("", "...")] elif isinstance(node.value, bool): @@ -206,6 +198,8 @@ def unparse(node: ast.AST) -> List[Node]: elif isinstance(node, ast.Name): return [nodes.Text(node.id)] elif isinstance(node, ast.Subscript): + if getattr(node.value, "id", "") in {"Optional", "Union"}: + return _unparse_pep_604_annotation(node) result = unparse(node.value) result.append(addnodes.desc_sig_punctuation("", "[")) result.extend(unparse(node.slice)) @@ -236,23 +230,33 @@ def unparse(node: ast.AST) -> List[Node]: return result else: - if sys.version_info < (3, 8): - if isinstance(node, ast.Bytes): - return [addnodes.desc_sig_literal_string("", repr(node.s))] - elif isinstance(node, ast.Ellipsis): - return [addnodes.desc_sig_punctuation("", "...")] - elif isinstance(node, ast.NameConstant): - return [nodes.Text(node.value)] - elif isinstance(node, ast.Num): - return [addnodes.desc_sig_literal_string("", repr(node.n))] - elif isinstance(node, ast.Str): - return [addnodes.desc_sig_literal_string("", repr(node.s))] - raise SyntaxError # unsupported syntax + def _unparse_pep_604_annotation(node: ast.Subscript) -> list[Node]: + subscript = node.slice + if isinstance(subscript, ast.Index): + # py38 only + subscript = subscript.value # type: ignore[assignment] + + flattened: list[Node] = [] + if isinstance(subscript, ast.Tuple): + flattened.extend(unparse(subscript.elts[0])) + for elt in subscript.elts[1:]: + flattened.extend(unparse(ast.BitOr())) + flattened.extend(unparse(elt)) + else: + # e.g. a Union[] inside an Optional[] + flattened.extend(unparse(subscript)) + + if getattr(node.value, "id", "") == "Optional": + flattened.extend(unparse(ast.BitOr())) + flattened.append(nodes.Text("None")) + + return flattened + try: - tree = ast_parse(annotation) - result: List[Node] = [] + tree = ast.parse(annotation, type_comments=True) + result: list[Node] = [] for node in unparse(tree): if isinstance(node, nodes.literal): result.append(node[0]) @@ -274,7 +278,7 @@ def unparse(node: ast.AST) -> List[Node]: def _parse_arglist( - arglist: str, env: BuildEnvironment = None + arglist: str, env: BuildEnvironment | None = None ) -> addnodes.desc_parameterlist: """Parse a list of arguments using AST parser""" params = addnodes.desc_parameterlist(arglist) @@ -340,7 +344,7 @@ def _pseudo_parse_arglist(signode: desc_signature, arglist: str) -> None: string literal (e.g. 
default argument value). """ paramlist = addnodes.desc_parameterlist() - stack: List[Element] = [paramlist] + stack: list[Element] = [paramlist] try: for argument in arglist.split(","): argument = argument.strip() @@ -390,11 +394,11 @@ def make_xref( rolename: str, domain: str, target: str, - innernode: Type[TextlikeNode] = nodes.emphasis, - contnode: Node = None, - env: BuildEnvironment = None, - inliner: Inliner = None, - location: Node = None, + innernode: type[TextlikeNode] = nodes.emphasis, + contnode: Node | None = None, + env: BuildEnvironment | None = None, + inliner: Inliner | None = None, + location: Node | None = None, ) -> Node: # we use inliner=None to make sure we get the old behaviour with a single # pending_xref node @@ -439,13 +443,13 @@ def make_xrefs( rolename: str, domain: str, target: str, - innernode: Type[TextlikeNode] = nodes.emphasis, - contnode: Node = None, - env: BuildEnvironment = None, - inliner: Inliner = None, - location: Node = None, - ) -> List[Node]: - delims = r"(\s*[\[\]\(\),](?:\s*or\s)?\s*|\s+or\s+|\s*\|\s*|\.\.\.)" + innernode: type[TextlikeNode] = nodes.emphasis, + contnode: Node | None = None, + env: BuildEnvironment | None = None, + inliner: Inliner | None = None, + location: Node | None = None, + ) -> list[Node]: + delims = r"(\s*[\[\]\(\),](?:\s*o[rf]\s)?\s*|\s+o[rf]\s+|\s*\|\s*|\.\.\.)" delims_re = re.compile(delims) sub_targets = re.split(delims, target) @@ -473,7 +477,7 @@ def make_xrefs( ) ) - if sub_target in ("Literal", "typing.Literal"): + if sub_target in ("Literal", "typing.Literal", "~typing.Literal"): in_literal = True return results @@ -502,6 +506,7 @@ class HOCObject(ObjectDescription[Tuple[str, str]]): option_spec: OptionSpec = { "noindex": directives.flag, "noindexentry": directives.flag, + "nocontentsentry": directives.flag, "module": directives.unchanged, "canonical": directives.unchanged, "annotation": directives.unchanged, @@ -556,7 +561,7 @@ class HOCObject(ObjectDescription[Tuple[str, str]]): allow_nesting = False - def get_signature_prefix(self, sig: str) -> List[nodes.Node]: + def get_signature_prefix(self, sig: str) -> list[nodes.Node]: """May return a prefix to put before the object name in the signature. """ @@ -568,7 +573,7 @@ def needs_arglist(self) -> bool: """ return False - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: """Transform a HOC signature into RST nodes. Return (fully qualified name of the thing, classname if any). @@ -614,16 +619,11 @@ def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str] sig_prefix = self.get_signature_prefix(sig) if sig_prefix: if type(sig_prefix) is str: - warnings.warn( + raise TypeError( "HOC directive method get_signature_prefix()" - " returning a string is deprecated." - " It must now return a list of nodes." - " Return value was '{}'.".format(sig_prefix), - RemovedInSphinx60Warning, + " must return a list of nodes." + f" Return value was '{sig_prefix}'." 
) - signode += addnodes.desc_annotation( - sig_prefix, "", nodes.Text(sig_prefix) # type: ignore - ) # type: ignore else: signode += addnodes.desc_annotation(str(sig_prefix), "", *sig_prefix) @@ -663,12 +663,23 @@ def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str] return fullname, prefix - def get_index_text(self, modname: str, name: Tuple[str, str]) -> str: + def _object_hierarchy_parts(self, sig_node: desc_signature) -> tuple[str, ...]: + if "fullname" not in sig_node: + return () + modname = sig_node.get("module") + fullname = sig_node["fullname"] + + if modname: + return (modname, *fullname.split(".")) + else: + return tuple(fullname.split(".")) + + def get_index_text(self, modname: str, name: tuple[str, str]) -> str: """Return the text for the index entry of the object.""" raise NotImplementedError("must be implemented in subclasses") def add_target_and_index( - self, name_cls: Tuple[str, str], sig: str, signode: desc_signature + self, name_cls: tuple[str, str], sig: str, signode: desc_signature ) -> None: modname = self.options.get("module", self.env.ref_context.get("hoc:module")) fullname = (modname + "." if modname else "") + name_cls[0] @@ -749,6 +760,25 @@ def after_content(self) -> None: else: self.env.ref_context.pop("hoc:module") + def _toc_entry_name(self, sig_node: desc_signature) -> str: + if not sig_node.get("_toc_parts"): + return "" + + config = self.env.app.config + objtype = sig_node.parent.get("objtype") + if config.add_function_parentheses and objtype in {"function", "method"}: + parens = "()" + else: + parens = "" + *parents, name = sig_node["_toc_parts"] + if config.toc_object_entries_show_parents == "domain": + return sig_node.get("fullname", name) + parens + if config.toc_object_entries_show_parents == "hide": + return name + parens + if config.toc_object_entries_show_parents == "all": + return ".".join(parents + [name + parens]) + return "" + class HOCFunction(HOCObject): """Description of a function.""" @@ -760,7 +790,7 @@ class HOCFunction(HOCObject): } ) - def get_signature_prefix(self, sig: str) -> List[nodes.Node]: + def get_signature_prefix(self, sig: str) -> list[nodes.Node]: if "async" in self.options: return [addnodes.desc_sig_keyword("", "async"), addnodes.desc_sig_space()] else: @@ -770,7 +800,7 @@ def needs_arglist(self) -> bool: return True def add_target_and_index( - self, name_cls: Tuple[str, str], sig: str, signode: desc_signature + self, name_cls: tuple[str, str], sig: str, signode: desc_signature ) -> None: super().add_target_and_index(name_cls, sig, signode) if "noindexentry" not in self.options: @@ -782,10 +812,10 @@ def add_target_and_index( text = _("%s() (in module %s)") % (name, modname) self.indexnode["entries"].append(("single", text, node_id, "", None)) else: - text = "%s; %s()" % (pairindextypes["builtin"], name) + text = f'{pairindextypes["builtin"]}; {name}()' self.indexnode["entries"].append(("pair", text, node_id, "", None)) - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str | None: # add index in own add_target_and_index() instead. 
return None @@ -793,12 +823,12 @@ def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: class HOCDecoratorFunction(HOCFunction): """Description of a decorator.""" - def run(self) -> List[Node]: + def run(self) -> list[Node]: # a decorator function is a function after all self.name = "hoc:function" return super().run() - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: ret = super().handle_signature(sig, signode) signode.insert(0, addnodes.desc_addname("@", "@")) return ret @@ -818,7 +848,7 @@ class HOCVariable(HOCObject): } ) - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: fullname, prefix = super().handle_signature(sig, signode) typ = self.options.get("type") @@ -845,7 +875,7 @@ def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str] return fullname, prefix - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: name, cls = name_cls if modname: return _("%s (HOC in module %s)") % (name, modname) @@ -867,7 +897,7 @@ class HOCClasslike(HOCObject): allow_nesting = True - def get_signature_prefix(self, sig: str) -> List[nodes.Node]: + def get_signature_prefix(self, sig: str) -> list[nodes.Node]: if "final" in self.options: return [ nodes.Text("final"), @@ -878,7 +908,7 @@ def get_signature_prefix(self, sig: str) -> List[nodes.Node]: else: return [nodes.Text(self.objtype), addnodes.desc_sig_space()] - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: if self.objtype == "class": if not modname: return _("%s (HOC built-in class)") % name_cls[0] @@ -899,19 +929,15 @@ class HOCMethod(HOCObject): "async": directives.flag, "classmethod": directives.flag, "final": directives.flag, - "property": directives.flag, "staticmethod": directives.flag, } ) def needs_arglist(self) -> bool: - if "property" in self.options: - return False - else: - return True + return True - def get_signature_prefix(self, sig: str) -> List[nodes.Node]: - prefix: List[nodes.Node] = [] + def get_signature_prefix(self, sig: str) -> list[nodes.Node]: + prefix: list[nodes.Node] = [] if "final" in self.options: prefix.append(nodes.Text("final")) prefix.append(addnodes.desc_sig_space()) @@ -924,15 +950,12 @@ def get_signature_prefix(self, sig: str) -> List[nodes.Node]: if "classmethod" in self.options: prefix.append(nodes.Text("classmethod")) prefix.append(addnodes.desc_sig_space()) - if "property" in self.options: - prefix.append(nodes.Text("property")) - prefix.append(addnodes.desc_sig_space()) if "staticmethod" in self.options: prefix.append(nodes.Text("static")) prefix.append(addnodes.desc_sig_space()) return prefix - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: name, cls = name_cls try: clsname, methname = name.rsplit(".", 1) @@ -946,8 +969,6 @@ def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: if "classmethod" in self.options: return _("%s() (HOC %s class method)") % (methname, clsname) - elif "property" in self.options: - return _("%s (HOC %s property)") % (methname, clsname) elif "staticmethod" in self.options: return _("%s() (HOC 
%s static method)") % (methname, clsname) else: @@ -959,7 +980,7 @@ class HOCClassMethod(HOCMethod): option_spec: OptionSpec = HOCObject.option_spec.copy() - def run(self) -> List[Node]: + def run(self) -> list[Node]: self.name = "hoc:method" self.options["classmethod"] = True @@ -971,7 +992,7 @@ class HOCStaticMethod(HOCMethod): option_spec: OptionSpec = HOCObject.option_spec.copy() - def run(self) -> List[Node]: + def run(self) -> list[Node]: self.name = "hoc:method" self.options["staticmethod"] = True @@ -981,11 +1002,11 @@ def run(self) -> List[Node]: class HOCDecoratorMethod(HOCMethod): """Description of a decoratormethod.""" - def run(self) -> List[Node]: + def run(self) -> list[Node]: self.name = "hoc:method" return super().run() - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: ret = super().handle_signature(sig, signode) signode.insert(0, addnodes.desc_addname("@", "@")) return ret @@ -1005,7 +1026,7 @@ class HOCAttribute(HOCObject): } ) - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: fullname, prefix = super().handle_signature(sig, signode) typ = self.options.get("type") @@ -1032,7 +1053,7 @@ def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str] return fullname, prefix - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: name, cls = name_cls try: clsname, attrname = name.rsplit(".", 1) @@ -1059,7 +1080,7 @@ class HOCProperty(HOCObject): } ) - def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str]: + def handle_signature(self, sig: str, signode: desc_signature) -> tuple[str, str]: fullname, prefix = super().handle_signature(sig, signode) typ = self.options.get("type") @@ -1075,8 +1096,8 @@ def handle_signature(self, sig: str, signode: desc_signature) -> Tuple[str, str] return fullname, prefix - def get_signature_prefix(self, sig: str) -> List[nodes.Node]: - prefix: List[nodes.Node] = [] + def get_signature_prefix(self, sig: str) -> list[nodes.Node]: + prefix: list[nodes.Node] = [] if "abstractmethod" in self.options: prefix.append(nodes.Text("abstract")) prefix.append(addnodes.desc_sig_space()) @@ -1088,7 +1109,7 @@ def get_signature_prefix(self, sig: str) -> List[nodes.Node]: prefix.append(addnodes.desc_sig_space()) return prefix - def get_index_text(self, modname: str, name_cls: Tuple[str, str]) -> str: + def get_index_text(self, modname: str, name_cls: tuple[str, str]) -> str: name, cls = name_cls try: clsname, attrname = name.rsplit(".", 1) @@ -1108,7 +1129,7 @@ class HOCModule(SphinxDirective): Directive to mark description of a new module. 
""" - has_content = False + has_content = True required_arguments = 1 optional_arguments = 0 final_argument_whitespace = False @@ -1116,16 +1137,24 @@ class HOCModule(SphinxDirective): "platform": lambda x: x, "synopsis": lambda x: x, "noindex": directives.flag, + "nocontentsentry": directives.flag, "deprecated": directives.flag, } - def run(self) -> List[Node]: + def run(self) -> list[Node]: domain = cast(HOCDomain, self.env.get_domain("hoc")) modname = self.arguments[0].strip() noindex = "noindex" in self.options self.env.ref_context["hoc:module"] = modname - ret: List[Node] = [] + + content_node: Element = nodes.section() + with switch_source_input(self.state, self.content): + # necessary so that the child nodes get the right source/line set + content_node.document = self.state.document + nested_parse_with_titles(self.state, self.content, content_node) + + ret: list[Node] = [] if not noindex: # note module to the domain node_id = make_id(self.env, self.state.document, "module", modname) @@ -1145,9 +1174,10 @@ def run(self) -> List[Node]: # the platform and synopsis aren't printed; in fact, they are only # used in the modindex currently ret.append(target) - indextext = "%s; %s" % (pairindextypes["module"], modname) + indextext = f'{pairindextypes["module"]}; {modname}' inode = addnodes.index(entries=[("pair", indextext, node_id, "", None)]) ret.append(inode) + ret.extend(content_node.children) return ret def make_old_id(self, name: str) -> str: @@ -1173,7 +1203,7 @@ class HOCCurrentModule(SphinxDirective): final_argument_whitespace = False option_spec: OptionSpec = {} - def run(self) -> List[Node]: + def run(self) -> list[Node]: modname = self.arguments[0].strip() if modname == "None": self.env.ref_context.pop("hoc:module", None) @@ -1190,7 +1220,7 @@ def process_link( has_explicit_title: bool, title: str, target: str, - ) -> Tuple[str, str]: + ) -> tuple[str, str]: refnode["hoc:module"] = env.ref_context.get("hoc:module") refnode["hoc:class"] = env.ref_context.get("hoc:class") if not has_explicit_title: @@ -1238,11 +1268,11 @@ class HOCModuleIndex(Index): shortname = _("modules") def generate( - self, docnames: Iterable[str] = None - ) -> Tuple[List[Tuple[str, List[IndexEntry]]], bool]: - content: Dict[str, List[IndexEntry]] = {} + self, docnames: Iterable[str] | None = None + ) -> tuple[list[tuple[str, list[IndexEntry]]], bool]: + content: dict[str, list[IndexEntry]] = {} # list of prefixes to ignore - ignores: List[str] = self.domain.env.config["modindex_common_prefix"] + ignores: list[str] = self.domain.env.config["modindex_common_prefix"] ignores = sorted(ignores, key=len, reverse=True) # list of all modules, sorted by module name modules = sorted( @@ -1319,7 +1349,7 @@ class HOCDomain(Domain): name = "hoc" label = "HOC" - object_types: Dict[str, ObjType] = { + object_types: dict[str, ObjType] = { "function": ObjType(_("function"), "func", "obj"), "data": ObjType(_("data"), "data", "obj"), "class": ObjType(_("class"), "class", "exc", "obj"), @@ -1358,7 +1388,7 @@ class HOCDomain(Domain): "mod": HOCXRefRole(), "obj": HOCXRefRole(), } - initial_data: Dict[str, Dict[str, Tuple[Any]]] = { + initial_data: dict[str, dict[str, tuple[Any]]] = { "objects": {}, # fullname -> docname, objtype "modules": {}, # modname -> docname, synopsis, platform, deprecated } @@ -1367,7 +1397,7 @@ class HOCDomain(Domain): ] @property - def objects(self) -> Dict[str, ObjectEntry]: + def objects(self) -> dict[str, ObjectEntry]: return self.data.setdefault("objects", {}) # fullname -> ObjectEntry def 
note_object( @@ -1404,7 +1434,7 @@ def note_object( self.objects[name] = ObjectEntry(self.env.docname, node_id, objtype, aliased) @property - def modules(self) -> Dict[str, ModuleEntry]: + def modules(self) -> dict[str, ModuleEntry]: return self.data.setdefault("modules", {}) # modname -> ModuleEntry def note_module( @@ -1426,7 +1456,7 @@ def clear_doc(self, docname: str) -> None: if mod.docname == docname: del self.modules[modname] - def merge_domaindata(self, docnames: List[str], otherdata: Dict) -> None: + def merge_domaindata(self, docnames: list[str], otherdata: dict[str, Any]) -> None: # XXX check duplicates? for fullname, obj in otherdata["objects"].items(): if obj.docname in docnames: @@ -1441,9 +1471,9 @@ def find_obj( modname: str, classname: str, name: str, - type: str, + type: str | None, searchmode: int = 0, - ) -> List[Tuple[str, ObjectEntry]]: + ) -> list[tuple[str, ObjectEntry]]: """Find a HOC object for "name", perhaps using the given module and/or classname. Returns a list of (name, object entry) tuples. """ @@ -1454,7 +1484,7 @@ def find_obj( if not name: return [] - matches: List[Tuple[str, ObjectEntry]] = [] + matches: list[tuple[str, ObjectEntry]] = [] newname = None if searchmode == 1: @@ -1520,7 +1550,7 @@ def resolve_xref( target: str, node: pending_xref, contnode: Element, - ) -> Optional[Element]: + ) -> Element | None: modname = node.get("hoc:module") clsname = node.get("hoc:class") searchmode = 1 if node.hasattr("refspecific") else 0 @@ -1577,14 +1607,21 @@ def resolve_any_xref( target: str, node: pending_xref, contnode: Element, - ) -> List[Tuple[str, Element]]: + ) -> list[tuple[str, Element]]: modname = node.get("hoc:module") clsname = node.get("hoc:class") - results: List[Tuple[str, Element]] = [] + results: list[tuple[str, Element]] = [] # always search in "refspecific" mode with the :any: role matches = self.find_obj(env, modname, clsname, target, None, 1) + multiple_matches = len(matches) > 1 + for name, obj in matches: + + if multiple_matches and obj.aliased: + # Skip duplicated matches + continue + if obj[2] == "module": results.append( ( @@ -1627,7 +1664,7 @@ def _make_module_refnode( builder, fromdocname, module.docname, module.node_id, contnode, title ) - def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]: + def get_objects(self) -> Iterator[tuple[str, str, str, str, str, int]]: for modname, mod in self.modules.items(): yield (modname, modname, "module", mod.docname, mod.node_id, 0) for refname, obj in self.objects.items(): @@ -1638,7 +1675,7 @@ def get_objects(self) -> Iterator[Tuple[str, str, str, str, str, int]]: else: yield (refname, refname, obj.objtype, obj.docname, obj.node_id, 1) - def get_full_qualified_name(self, node: Element) -> Optional[str]: + def get_full_qualified_name(self, node: Element) -> str | None: modname = node.get("hoc:module") clsname = node.get("hoc:class") target = node.get("reftarget") @@ -1650,7 +1687,7 @@ def get_full_qualified_name(self, node: Element) -> Optional[str]: def builtin_resolver( app: Sphinx, env: BuildEnvironment, node: pending_xref, contnode: Element -) -> Element: +) -> Element | None: """Do not emit nitpicky warnings for built-in types.""" def istyping(s: str) -> bool: @@ -1675,7 +1712,7 @@ def istyping(s: str) -> bool: return None -def setup(app: Sphinx) -> Dict[str, Any]: +def setup(app: Sphinx) -> dict[str, Any]: app.setup_extension("sphinx.directives") app.add_domain(HOCDomain) diff --git a/docs/guide/cellbuilder.rst b/docs/guide/cellbuilder.rst index ee0c56829f..99d4e97233 
100644 --- a/docs/guide/cellbuilder.rst +++ b/docs/guide/cellbuilder.rst @@ -1,3 +1,5 @@ +.. _cell_builder: + Using the Cell Builder GUI ========================== diff --git a/docs/guide/faq.rst b/docs/guide/faq.rst index dd289869b9..7a41135860 100644 --- a/docs/guide/faq.rst +++ b/docs/guide/faq.rst @@ -208,6 +208,8 @@ Where can I find examples of mod files? See the NMODL tops on :ref:`the "getting started" page. ` + +.. _FAQ_how_do_I_compile_mod_files: How do I compile mod files? ---------------- diff --git a/docs/guide/hoc_chapter_11_old_reference.rst b/docs/guide/hoc_chapter_11_old_reference.rst index 02b17a9b3f..c09930d940 100644 --- a/docs/guide/hoc_chapter_11_old_reference.rst +++ b/docs/guide/hoc_chapter_11_old_reference.rst @@ -303,7 +303,7 @@ Keywords sin cos atan log log10 exp sqrt int abs erf erfc system prmat solve wqinit plt axis plot plotx ploty regraph symbols printf xred sred ropen wopen xopen fprint fscan - graph graphmode fmenu lw getstr strcmp setcolor startsw + graph graphmode lw getstr strcmp setcolor startsw stopsw object_id allobjectvars allobjexts xpanel xbutton xcheckbox xstatebutton xlabel xmenu xvalue xpvalue xradiobutton xfixedvalue xvarlabel xslider boolean_dialog continue_dialog diff --git a/docs/guide/how_to_get_started.rst b/docs/guide/how_to_get_started.rst index 5073d4c251..ad954f2e92 100644 --- a/docs/guide/how_to_get_started.rst +++ b/docs/guide/how_to_get_started.rst @@ -23,6 +23,8 @@ Basic NEURON Usage * Use the GUI tools as much as possible. You'll get more done, faster, and you won't have to write any code. Some of the GUI tools are described in the tutorials; others are demoed in the :ref:`course videos `. Save the GUI tools to session files; these files contain HOC and can be modified, adapted, and reused. * Examine `ModelDB `_ and the list of :ref:`publications about NEURON ` to find models of interest. Many authors have deposited their model code in ModelDB, posted it somewhere else on the WWW, or will provide code upon request. +.. _extending_neuron_with_nmodl: + Using NMODL to add new mechanisms to NEURON ------------------------------------------- @@ -40,3 +42,11 @@ Using NMODL to add new mechanisms to NEURON NMODL code needs to be updated to work with |neuron_with_cpp_mechanisms|. This is principally an issue for MOD files that include ``VERBATIM`` blocks. For more information, see :ref:`porting-mechanisms-to-cpp`. + +.. note:: + Starting in |neuron_with_soa_data|, the model data structures used by NEURON + have been completely overhauled. + This change is not fully backwards compatible, and you may find that older + NMODL code needs to be updated to work with |neuron_with_soa_data|. + This is principally an issue for MOD files that include ``VERBATIM`` blocks. + For more information, see :ref:`porting-mechanisms-to-new-data-structures`. diff --git a/docs/guide/porting_mechanisms_to_cpp.rst b/docs/guide/porting_mechanisms_to_cpp.rst index 5b2df166ec..5835725a31 100644 --- a/docs/guide/porting_mechanisms_to_cpp.rst +++ b/docs/guide/porting_mechanisms_to_cpp.rst @@ -324,3 +324,27 @@ If your MOD files produce these deprecation warnings, make sure that the relevant method (``vector_capacity`` in this example) is being called with an argument of the correct type (``IvocVect*``), and not a type that is implicitly converted to ``void*``. + +Legacy random number generators and API +--------------------------------------- + +Various changes have also been done in the API of NEURON functions related to random +number generators. 
+ +First, in |neuron_with_cpp_mechanisms| parameters passed to the functions need to be +of the correct type, as already mentioned in :ref:`function-decls-with-incorrect-types`. +The most common consequence is that NEURON random API functions that used to take a +``void*`` argument now need to be called with a ``Rand*``. An example of the changes needed +to fix this issue is given in `182129 `_. + +Another related change concerns the ``scop_rand()`` function, which is usually used for defining a +``URAND`` ``FUNCTION`` in mod files that returns a number drawn from a uniform distribution between 0 and 1. +This function no longer takes any argument. An example of this change can also be found in +`182129 `_. + +Finally, the preferred random number generator is ``Random123``. You can find more information +about that in :meth:`Random.Random123` and :ref:`Randomness in NEURON models`. An example of the +usage of ``Random123`` can be seen in `netstim.mod `_ +and its `corresponding test `_. +Another important aspect of ``Random123`` is that it is supported in CoreNEURON as well. For more +information about this, see :ref:`Random Number Generators: Random123 vs MCellRan4`. \ No newline at end of file diff --git a/docs/guide/units.rst b/docs/guide/units.rst index f1a4341d8d..e551925e0e 100644 --- a/docs/guide/units.rst +++ b/docs/guide/units.rst @@ -54,9 +54,11 @@ The following table lists the units that NEURON uses by default. Point processes - ``Ra`` - [ohm cm] * - Resistance - - ``Ri( )`` + - ``ri( )`` - [10 :superscript:`6` ohm] +.. _units_within_nmodl: + Units within NMODL files ------------------------ diff --git a/docs/hoc/index.rst b/docs/hoc/index.rst index 8dd3fe1c46..a5c37de329 100644 --- a/docs/hoc/index.rst +++ b/docs/hoc/index.rst @@ -44,7 +44,6 @@ Basic Programming .. toctree:: :maxdepth: 1 - programming/python.rst programming/hoc.rst programming/mathematics.rst programming/strings.rst @@ -55,6 +54,7 @@ Basic Programming programming/errors.rst programming/dynamiccode.rst programming/projectmanagement.rst + programming/python-from-hoc.rst programming/internals.rst Model Specification diff --git a/docs/hoc/modelspec/programmatic/mechanisms/nmodl.rst b/docs/hoc/modelspec/programmatic/mechanisms/nmodl.rst index 0a58b9a3e4..67010c3a56 100644 --- a/docs/hoc/modelspec/programmatic/mechanisms/nmodl.rst +++ b/docs/hoc/modelspec/programmatic/mechanisms/nmodl.rst @@ -285,12 +285,7 @@ Description: the UNIX units database. This can increase legibility and convenience, and is helpful both as a reminder to the user and as a means for automating the process of checking for consistency of units. - The UNIX units database taken into account is defined in the `nrnunits.lib file `_. - This file includes two versions of the units due to the updates in the values of their base - units. Currently there are legacy and modern units that contain the changes after the updates - introduced on 2019 to the nist constants. The selection between those two versions can be done - using the ``NRN_DYNAMIC_UNITS_USE_LEGACY`` CMake variable or a call to - ``h.nrnunit_use_legacy(bool)`` during runtime. + The UNIX units database (based on the 2019 updated NIST constants) taken into account is defined in the `nrnunits.lib file `_. New units can be defined in terms of default units and previously defined units by placing definitions in the UNITS block. e.g.
diff --git a/docs/hoc/modelspec/programmatic/network/parcon.rst b/docs/hoc/modelspec/programmatic/network/parcon.rst index 93e6a70122..700d5e4b04 100644 --- a/docs/hoc/modelspec/programmatic/network/parcon.rst +++ b/docs/hoc/modelspec/programmatic/network/parcon.rst @@ -2740,9 +2740,6 @@ Parallel Transfer a single cpu. It does not matter if a one sid subtree is declared short or not; it is solved exactly in any case. - Note: using multisplit automatically sets - ``CVode.cache_efficient(1)`` - .. warning:: Implemented only for fixed step methods. Cannot presently be used with variable step @@ -3057,8 +3054,8 @@ Parallel Transfer and instead the pc.nthread() gidgroup values for the rank will be returned in the Vector. - This function requires cvode.cache_efficient(1) . Multisplit is not - supported. The model cannot be more complicated than a spike or gap + Multisplit is not supported. + The model cannot be more complicated than a spike or gap junction coupled parallel network model of real and artificial cells. Real cells must have gids, Artificial cells without gids connect only to cells in the same thread. No POINTER to data outside of the diff --git a/docs/hoc/programming/gui/textmenus.rst b/docs/hoc/programming/gui/textmenus.rst deleted file mode 100644 index ca5f07dbea..0000000000 --- a/docs/hoc/programming/gui/textmenus.rst +++ /dev/null @@ -1,40 +0,0 @@ - -.. _hoc_lw_doc: - -Obsolete Text Menus -------------------- - -The functions above have been superseded by the graphical user interface -but are available for use on unix machines and in the DOS version. -See :hoc:class:`Graph`. - ----- - - - -.. hoc:function:: fmenu - - - Description: - This is an old terminal based menu system that has been superseded by the - :ref:`hoc_GUI`. - - Fmenu creates, displays, and allows user to move within a menu to - select and change - a displayed variable value or to execute a command. - - The user can create space for - a series of menus and execute individual menus with each menu consisting of - lists of - variables and commands. Menus can execute commands which call other - menus and in this way a hierarchical menu system can be constructed. - Menus can be navigated by using arrow keys or by typing the first character - of a menu item. To exit a menu, either press the :kbd:`Esc` key, execute the - "Exit" item, or execute a command which has a "stop" statement. - A command item is executed by pressing the Return key. A variable item - is changed by typing the new number followed by a Return. - - See the file :file:`$NEURONHOME/doc/man/oc/menu.tex` for a complete description - of this function. - - diff --git a/docs/hoc/programming/guidesign.rst b/docs/hoc/programming/guidesign.rst index 2e9d70165a..8fa2c3d429 100644 --- a/docs/hoc/programming/guidesign.rst +++ b/docs/hoc/programming/guidesign.rst @@ -13,7 +13,6 @@ GUI Design gui/vfe.rst gui/pwman.rst gui/nfunc.rst - gui/textmenus.rst gui/dialog.rst gui/misc.rst diff --git a/docs/hoc/programming/math/matrix.rst b/docs/hoc/programming/math/matrix.rst index 15a31632b2..617ad13fa4 100644 --- a/docs/hoc/programming/math/matrix.rst +++ b/docs/hoc/programming/math/matrix.rst @@ -53,20 +53,17 @@ Matrix By default, a new Matrix is of type MFULL (= 1) and allocates storage for all nrow*ncol elements. Scaffolding is in place for matrices of storage type MSPARSE (=2) and MBAND (=3) but not many methods have been interfaced - to the meschach library at this time. If a method is called on a matrix type + to the eigen library at this time. 
If a method is called on a matrix type whose method has not been implemented, an error message will be printed. It is intended that implemented methods will be transparent to the user, eg m*x=b (``x = m.solv(b)`` ) will solve the linear system regardless of the type of m and v1 = m*v2 (``v1 = m.mulv(v2)`` ) will perform the vector multiplication. - Matrix is implemented using the - `meschach c library by David E. Stewart `_ - (discovered at http://www.netlib.org/c/index.html\ ) which contains a large collection - of routines for sparse, banded, and full matrices. Many of the useful - routines have not - been interfaced with the hoc interpreter but can be easily added on request - or you can add it yourself + Matrix is implemented using the `eigen3 library `_ + which contains a large collection of routines for sparse, banded, and full matrices. + Many of the useful routines have not been interfaced with the hoc + interpreter but can be easily added on request or you can add it yourself by analogy with the code in ``nrn/src/ivoc/(matrix.c ocmatrix.[ch])`` At this time the MFULL matrix type is complete enough to do useful work and MSPARSE can be used to multiply a matrix by a vector and solve diff --git a/docs/hoc/programming/neuronpython.rst b/docs/hoc/programming/neuronpython.rst deleted file mode 100644 index b207a3e5f3..0000000000 --- a/docs/hoc/programming/neuronpython.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. automodule:: neuron - :members: - -.. seealso:: - - .. toctree:: - :maxdepth: 2 - - ../modelspec/programmatic/rxd.rst - diff --git a/docs/hoc/programming/oop.rst b/docs/hoc/programming/oop.rst index 9740de0fc0..0cb3dae349 100644 --- a/docs/hoc/programming/oop.rst +++ b/docs/hoc/programming/oop.rst @@ -6,6 +6,10 @@ Object Oriented Programming See `Object Oriented Programming `_ in the reference manual. +.. note:: + + Classes defined in HOC may be accessed in Python via ``h.ClassName``. + .. index:: begintemplate (keyword) diff --git a/docs/hoc/programming/pointers/ptrvector.rst b/docs/hoc/programming/pointers/ptrvector.rst index 8e6a9db8b9..270d82fc86 100644 --- a/docs/hoc/programming/pointers/ptrvector.rst +++ b/docs/hoc/programming/pointers/ptrvector.rst @@ -16,8 +16,8 @@ PtrVector all pointers point to an internal dummy variable. So it is possible to scatter from a larger Vector into a smaller Vector. - If :hoc:meth:`CVode.cache_efficient` is used, a callback should be registered - with the :hoc:meth:`PtrVector.ptr_update_callback` method in order to prevent + A callback should be registered with the + :hoc:meth:`PtrVector.ptr_update_callback` method in order to prevent memory segfaults when internal memory is reallocated. Python Example: @@ -125,7 +125,6 @@ PtrVector Description: The statement or pythoncallback is executed whenever range variables - are re-allocated in order to establish cache efficiency. - (see :hoc:meth:`CVode.cache_efficient`) Within the callback, the + are re-allocated. Within the callback, the :hoc:meth:`PtrVector.resize` method may be called but the PtrVector should not be destroyed. diff --git a/docs/hoc/programming/python-from-hoc.rst b/docs/hoc/programming/python-from-hoc.rst new file mode 100644 index 0000000000..911647a4f5 --- /dev/null +++ b/docs/hoc/programming/python-from-hoc.rst @@ -0,0 +1,118 @@ +.. _hoc_python_accessing_hoc: + +HOC accessing Python +~~~~~~~~~~~~~~~~~~~~ + +This section describes how one can interact with Python from HOC code. +For more information about the Python interface to NEURON, see +:ref:`the Python documentation `. + +.. 
_hoc_function_nrnpython: +.. hoc:function:: nrnpython + + + Syntax: + ``nrnpython("any python statement")`` + + + Description: + Executes any python statement. Returns 1 on success; 0 if an exception + was raised or if python support is not available. + + In particular, ``python_available = nrnpython("")`` is 1 (true) if + python support is available and 0 (false) if python support is not + available. + + Example: + + .. code-block:: + python + + nrnpython("import sys") + nrnpython("print sys.path") + nrnpython("a = [1,2,3]") + nrnpython("print a") + nrnpython("import hoc") + nrnpython("hoc.execute('print PI')") + + + + +---- + + + +.. hoc:class:: PythonObject + + + Syntax: + ``p = new PythonObject()`` + + + Description: + Accesses any python object. Almost equivalent to :hoc:class:`~neuron.hoc.HocObject` in the + python world but because of some hoc syntax limitations, ie. hoc does not + allow an object to be a callable function, and top level indices have + different semantics, we sometimes need to use a special idiom, ie. the '_' + method. Strings and double numbers move back and forth between Python and + Hoc (but Python integers, etc. become double values in Hoc, and when they + get back to the Python world, they are doubles). + + + .. code-block:: + + objref p + p = new PythonObject() + nrnpython("ev = lambda arg : eval(arg)") // interprets the string arg as an + //expression and returns the value + objref tup + print p.ev("3 + 4") // prints 7 + print p.ev("'hello' + 'world'") // prints helloworld + tup = p.ev("('xyz',2,3)") // tup is a PythonObject wrapping a Python tuple + print tup // prints PythonObject[1] + print tup._[2] // the 2th tuple element is 3 + print tup._[0] // the 0th tuple element is xyz + + nrnpython("import hoc") // back in the Python world + nrnpython("h = hoc.HocObject()") // tup is a Python Tuple object + nrnpython("print h.tup") // prints ('xyz', 2, 3) + + Note that one needs the '_' method, equivalent to 'this', because trying to + get at an element through the built-in python method name via + + .. code-block:: python + + tup.__getitem__(0) + + gives the error "TypeError: tuple indices must be integers" since + the Hoc 0 argument is a double 0.0 when it gets into Python. + It is difficult to pass an integer to a Python function from the hoc world. + The only time Hoc doubles appear as integers in Python, is when they are + the value of an index. If the index is not an integer, e.g. a string, use + the __getitem__ idiom. + + .. code-block:: + + objref p + p = new PythonObject() + nrnpython("ev = lambda arg : eval(arg)") + objref d + d = p.ev("{'one':1, 'two':2, 'three':3}") + print d.__getitem__("two") // prints 2 + + objref dg + dg = d.__getitem__ + print dg._("two") // prints 2 + + + To assign a value to a python variable that exists in a module use + + .. code-block:: + + nrnpython("a = 10") + p = new PythonObject() + p.a = 25 + p.a = "hello" + p.a = new Vector(4) + nrnpython("b = []") + p.a = p.b diff --git a/docs/hoc/programming/python.rst b/docs/hoc/programming/python.rst deleted file mode 100644 index 35686fcbe6..0000000000 --- a/docs/hoc/programming/python.rst +++ /dev/null @@ -1,877 +0,0 @@ - -.. _hoc_python: - - -Python Language ---------------- - -This document describes installation and basic use of NEURON's Python interface. For information on the modules in the ``neuron`` namespace, see: - -.. 
toctree:: :maxdepth: 1 - - neuronpython.rst - - -Installation -~~~~~~~~~~~~ - - -Syntax: - ``./configure --with-nrnpython ...`` - - ``make`` - - ``make install`` - - -Description: - Builds NEURON with Python embedded as an alternative interpreter to HOC. - The python version used is that found from ``which python``. - - NEURON can be used as an extension to Python if, after building as above, - one goes to the src/nrnpython directory containing the Makefile and types - something analogous to - - .. code-block:: - none - - python setup.py install --home=$HOME - - Which on my machine installs in :file:`/home/hines/lib64/python/neuron` - and can be imported into NEURON with - - .. code-block:: - python - - ipython - import sys - sys.path.append("/home/hines/lib64/python") - import neuron - - It is probably better to avoid the incessant ``import sys``... and instead - add to your shell environment something analogous to - - .. code-block:: - none - - export PYTHONPATH=$PYTHONPATH:/home/hines/lib64/python - - since when launching NEURON and embedding Python, the path is automatically - defined so that ``import neuron`` does not require any prerequisites. - If there is a ``@/.libs/libnrnmech.so`` file in your working - directory, those nmodl mechanisms will be loaded as well. - After this, you will probably want to: - - .. code-block:: - python - - h = neuron.h # neuron imports hoc and does a h = hoc.HocObject() - - In the past we also recommended an "import nrn" but this is no longer - necessary as everything in that module is also directly available from - the "h" object. - You can use the hoc function :hoc:func:`nrn_load_dll` to load mechanism files - as well, e.g. if neurondemo was used earlier so the shared object exists, - - .. code-block:: - python - - h = hoc.HocObject() - h('nrn_load_dll("$(NEURONHOME)/demo/release/x86_64/.libs/libnrnmech.so")') - - - -.. _hoc_python_accessing_hoc: - -Python Accessing HOC -~~~~~~~~~~~~~~~~~~~~ - - - -Syntax: - ``nrniv -python [file.hoc file.py -c "python_statement"]`` - - ``nrngui -python ...`` - - ``neurondemo -python ...`` - - -Description: - Launches NEURON with Python as the command line interpreter. - File arguments with a .hoc suffix are interpreted using the - Hoc interpreter. File arguments with the .py suffix are interpreted - using the Python interpreter. The -c statement causes python to - execute the statement. - The import statements allow use of the following - - - ----- - - - -.. hoc:method:: neuron.hoc.execute - - - Syntax: - ``import neuron`` - - ``neuron.hoc.execute('any hoc statement')`` - - - Description: - Execute any statement or expression using the Hoc interpreter. This is - obsolete since the same thing can be accomplished with HocObject with - less typing. - Note that triple quotes can be used for multiple line statements. - A '\n' should be escaped as '\\n'. - - .. code-block:: - python - - hoc.execute('load_file("nrngui.hoc")') - - - .. seealso:: - :hoc:func:`nrnpython` - - - ----- - - - -.. hoc:class:: neuron.hoc.HocObject - - - Syntax: - ``import neuron`` - - ``h = neuron.hoc.HocObject()`` - - - Description: - Allow access to anything in the Hoc interpreter. - Note that ``h = neuron.h`` is the typical statement used since the - neuron module creates an h field. - When created via ``hoc.HocObject()`` its print string is "TopLevelHocInterpreter". - - .. code-block:: - python - - h("any hoc statement") - - is the same as hoc.execute(...) 
- - Any hoc variable or string in the Hoc world can be accessed - in the Python world: - - .. code-block:: - python - - h('strdef s') - h('{x = 3 s = "hello"}') - print h.x # prints 3.0 - print h.s # prints hello - - And if it is assigned a value in the python world it will be that value - in the Hoc world. (Note that any numeric python type becomes a double - in Hoc.) - - .. code-block:: - python - - h.x = 25 - h.s = 'goodbye' - h('print x, s') #prints 25 goodbye - - - Any hoc object can be handled in Python. - - .. code-block:: - python - - h('objref vec') - h('vec = new Vector(5)') - print h.vec # prints Vector[0] - print h.vec.size() # prints 5.0 - - Note that any hoc object method or field may be called, or evaluated/assigned - using the normal dot notation which is consistent between hoc and python. - However, hoc object methods MUST have the parentheses or else the Python - object is not the return value of the method but a method object. ie. - - .. code-block:: - python - - x = h.vec.size # not 5 but a python callable object - print x # prints: Vector[0].size() - print x() # prints 5.0 - - This is also true for indices - - .. code-block:: - python - - h.vec.indgen().add(10) # fills elements with 10, 11, ..., 14 - print h.vec.x[2] # prints 12.0 - x = h.vec.x # a python indexable object - print x # prints Vector[0].x[?] - print x[2] # prints 12.0 - - The hoc object can be created directly in Python. E.g. - - .. code-block:: - python - - v = h.Vector(10).indgen.add(10) - - - Iteration over hoc Vector, List, and arrays is supported. e.g. - - .. code-block:: - python - - v = h.Vector(4).indgen().add(10) - for x in v : - print x - - l = h.List() ; l.append(v); l.append(v); l.append(v) - for x in l : - print x - - h('objref o[2][3]') - for x in h.o : - for y in x : - print x, y - - - - Any hoc Section can be handled in Python. E.g. - - .. code-block:: - python - - h('create soma, axon') - ax = h.axon - - makes ax a Python :hoc:class:`~neuron.h.Section` which references the hoc - axon section. Many hoc functions require a currently accessed section - and for these a typical idiom is - - .. code-block:: - python - - ax.push() ; print secname() ; h.pop_section() - - More compact is to use the "sec" keyword parameter after the last positional - parameter which makes the Section value the currently accessed section during - the scope of the function call. e.g - - .. code-block:: - python - - print secname(sec=ax) - - - Point processes are handled by direct object creation as in - - .. code-block:: - python - - stim = IClamp(1.0, sec = ax) - // or - stim = IClamp(ax(1.0)) - - The latter is a somewhat simpler idiom that uses the Segment object which knows both the - section and the location in the section and can also be used with the - stim.loc function. - - Many hoc functions use call by reference and return information by - changing the value of an argument. These are called from the python - world by passing a HocObject.ref() object. Here is an example that - changes a string. - - .. code-block:: - python - - h('proc chgstr() { $s1 = "goodbye" }') - s = h.ref('hello') - print s[0] # notice the index to dereference. prints hello - h.chgstr(s) - print s[0] # prints goodbye - h.sprint(s, 'value is %d', 2+2) - print s[0] # prints value is 4 - - and here is an example that changes a pointer to a double - - .. 
code-block:: - python - - h('proc chgval() { $&1 = $2 }') - x = h.ref(5) - print x[0] # prints 5.0 - h.chgval(x, 1+1) - print x[0] # prints 2.0 - - Finally, here is an example that changes a objref arg. - - .. code-block:: - python - - h('proc chgobj() { $o1 = new List() }') - v = h.ref([1,2,3]) # references a Python object - print v[0] # prints [1, 2, 3] - h.chgobj(v) - print v[0] # prints List[0] - - Unfortunately, the HocObject.ref() is not often useful since it is not really - a pointer to a variable. For example consider - - .. code-block:: - python - - h('x = 1') - y = h.ref(h.x) - print y # prints hoc ref value 1 - print h.x, y[0] # prints 1.0 1.0 - h.x = 2 - print h.x, y[0] # prints 2.0 1.0 - - and thus in not what is needed in the most common - case of a hoc function holding a pointer to a variable such as - :hoc:meth:`Vector.record` or :hoc:meth:`Vector.play`. For this one needs the :samp:`_ref_{varname}` idiom - which works for any hoc variable and acts exactly like a c pointer. eg: - - .. code-block:: - python - - h('x = 1') - y = h._ref_x - print y # prints pointer to hoc value 1 - print h.x, y[0] # prints 1.0 1.0 - h.x = 2 - print h.x, y[0] # prints 2.0 2.0 - y[0] = 3 - print h.x, y[0] # prints 3.0 3.0 - - Of course, this works only for hoc variables, not python variables. For - arrays, use all the index arguments and prefix the name with _ref_. The - pointer will be to the location indexed and one may access any element - beyond the location by giving one more non-negative index. No checking - is done with regard to array bounds errors. e.g - - .. code-block:: - python - - v = h.Vector(4).indgen().add(10) - y = v._ref_x[1] # holds pointer to second element of v - print v.x[2], y[1] # prints 12.0 12.0 - y[1] = 50 - v.printf() # prints 10 11 50 13 - - The idiom is used to record from (or play into) voltage and mechanism variables. eg - - .. code-block:: - python - - v = h.Vector() - v.record(h.soma(.5)._ref_v, sec = h.soma) - pi = h.Vector() - pi.record(h.soma(.5).pas._ref_i, sec = h.soma) - ip = h.Vector() - ip.record(h.soma(.5)._ref_i_pas, sec = h.soma) - - - The factory idiom is one way to create Hoc objects and use them - in Python. - - .. code-block:: - python - - h('obfunc newvec() { return new Vector($1) }') - v = h.newvec(10).indgen().add(10) - v.printf() # prints 10 11 ... 19 (not 10.0 ... since printf is a hoc function) - - but that idiom is more or less obsolete as the same thing can be accomplished - directly as shown a few fragments back. Also consider the minimalist - - .. code-block:: - python - - vt = h.Vector - v = vt(4).indgen().add(10) - - Any Python object can be stored in a Hoc List. It is more efficient - when navigating the List to use a python callable that avoids repeated - lookup of a Hoc method symbol. Note that in the Hoc world a python object - is of type PythonObject but python strings and scalars are translated back - and forth as strdef and scalar doubles respectively. - - .. code-block:: - python - - h('obfunc newlist() { return new List() }') - list = h.newlist() - apnd = list.append - apnd([1,2,3]) # Python list in hoc List - apnd(('a', 'b', 'c')) # Python tuple in hoc List - apnd({'a':1, 'b':2, 'c':3}) # Python dictionary in hoc List - item = list.object - for i in range(0, int(list.count())) : # notice the irksome cast to int. - print item(i) - - h('for i=0, List[0].count-1 print List[0].object(i)') - - - To see all the methods available for a hoc object, use, for example, - - .. 
code-block:: - python - - dir(h.Vector) - - - h.anyclass can be subclassed with - - .. code-block:: - python - - class MyVector(neuron.hclass(neuron.h.Vector)) : - pass - v = MyVector(10) - v.zzz = 'hello' # a new attribute - print v.size() # call any base method - - If you override a base method such as 'size' use - - .. code-block:: - python - - v.baseattr('size')() - - to access the base method. Multiple inheritance involving hoc classes - probably does not make sense. - If you override the __init__ procedure when subclassing a Section, - be sure to explicitly - initialize the Section part of the instance with - - .. code-block:: - python - - nrn.Section.__init__() - - - Since nrn.Section is a standard Python class one can - subclass it normally with - - .. code-block:: - python - - class MySection(neuron.nrn.Section): - pass - - - The hoc setpointer statement is effected in Python as a function call - with a syntax for POINT_PROCESS and SUFFIX (density)mechanisms respectively - of - - .. code-block:: - python - - h.setpointer(_ref_hocvar, 'POINTER_name', point_proces_object) - h.setpointer(_ref_hocvar, 'POINTER_name', nrn.Mechanism_object) - - See :file:`nrn/share/examples/nrniv/nmodl/`\ (:file:`tstpnt1.py` and :file:`tstpnt2.py`) for - examples of usage. For a density mechanism, the 'POINTER_name' cannot - have the SUFFIX appended. For example if a mechanism with suffix foo has - a POINTER bar and you want it to point to t use - - .. code-block:: - python - - h.setpointer(_ref_t, 'bar', sec(x).foo) - - - - .. seealso:: - :hoc:meth:`Vector.to_python`, :hoc:meth:`Vector.from_python` - - - ----- - - - -.. hoc:method:: neuron.hoc.hoc_ac - - - Syntax: - ``import hoc`` - - ``double_value = hoc.hoc_ac()`` - - ``hoc.hoc_ac(double_value)`` - - - Description: - Get and set the hoc global scalar, :hoc:data:`hoc_ac_`-variables. - This is obsolete since HocObject - is far more general. - - .. code-block:: - python - - import hoc - hoc.hoc_ac(25) - hoc.execute('print hoc_ac_') # prints 25 - hoc.execute('hoc_ac_ = 17') - print hoc.hoc_ac() # prints 17 - - - - ----- - - - -.. hoc:method:: neuron.h.cas - - - Syntax: - ``sec = h.cas()`` - - ``or`` - - ``import nrn`` - - ``sec = nrn.cas()`` - - - Description: - Returns the :ref:`currently accessed section ` as a Python - :hoc:class:`~neuron.h.Section` object. - - .. code-block:: - python - - import neuron - neuron.h(''' - create soma, dend[3], axon - access dend[1] - ''') - - sec = h.cas() - print sec, sec.name() - - - - ----- - - - -.. hoc:class:: neuron.h.Section - - - Syntax: - ``sec = h.Section()`` - - ``sec = h.Section([name='string', [cell=self])`` - - ``or`` - - ``import nrn`` - - ``sec = nrn.Section()`` - - - Description: - The Python Section object allows modification and evaluation of the - information associated with a NEURON :ref:`hoc_geometry_section`. The typical way to get - a reference to a Section in Python is with :hoc:meth:`neuron.h.cas` or - by using the hoc section name as in ``asec = h.dend[4]``. - The ``sec = Section()`` will create an anonymous Section with a hoc name - constructed from "Section" and the Python reference address. - Access to Section variables is through standard dot notation. - The "anonymous" python section can be given a name with the named - parameter and/or associated with a cell object using the named cell parameter. - Note that a cell association is required if one anticipates using the - :hoc:meth:`~ParallelContext.gid2cell` method of :hoc:class:`ParallelContext`. - - .. 
code-block:: - python - - import neuron - h = neuron.h - sec = h.Section() - print sec # prints - print sec.name() # prints PySec_2a96982108 - sec.nseg = 3 # section has 3 segments (compartments) - sec.insert("hh") # all compartments have the hh mechanism - sec.L = 20 # Length of the entire section is 20 um. - for seg in sec : # iterates over the section compartments - for mech in seg : # iterates over the segment mechanisms - print sec.name(), seg.x, mech.name() - - A Python Section can be made the currently accessed - section by using its push method. Be sure to use :hoc:func:`pop_section` - when done with it to restore the previous currently accessed section. - I.e, given the above fragment, - - .. code-block:: - python - - from neuron import h - h(''' - objref p - p = new PythonObject() - {p.sec.push() psection() pop_section()} - ''') - #or - sec.push() - h.secname() - h.psection() - h.pop_section() - - When calling a hoc function it is generally preferred to named sec arg style - to automatically push and pop the section stack during the scope of the - hoc function. ie - - .. code-block:: - python - - h.psection(sec=sec) - - - With a :hoc:class:`SectionRef` one can, for example, - - .. code-block:: - python - - h.dend[2].push() ; sr = h.SectionRef() ; h.pop_section() - sr.root.push() ; print h.secname() ; h.pop_section() - - or, more compactly, - - .. code-block:: - python - - sr = h.SectionRef(sec=h.dend[2]) - print sr.root.name(), h.secname(sec=sr.root) - - - Iteration over sections is accomplished with - - .. code-block:: - python - - for s in h.allsec() : - print h.secname() - - sl = h.SectionList() ; sl.wholetree() - for s in sl : - print h.secname() - - - - Connecting a child section to a parent section uses the connect method - using either - - .. code-block:: - python - - childsec.connect(parentsec, parentx, childx) - childsec.connect(parentsegment, childx) - - In the first form parentx and childx are optional with default values of - 1 and 0 respectively. Parentx must be 0 or 1. In the second form, childx - is optional and by default is 0. The parentsegment must be either - parentsec(0) or parentsec(1). - - sec.cell() returns the cell object that 'owns' the section. The return - value is None if no object owns the section (a top level section), the - instance of the hoc template that created the section, or the python - object specified by the named cell parameter - when the python section was created. - - ----- - - - -Segment -======= - - Syntax: - ``seg = section(x)`` - - - Description: - A Segment object is obtained from a Section with the function notation where - the argument is 0 <= x <= 1 an the segment is the compartment that contains - the location x. The x value of the segment is seg.x and the section is - seg.sec . From a Segment one can obtain a Mechanism. - - - ----- - - - -Mechanism -========= - - - Syntax: - ``mech = segment.mechname`` - - - Description: - A Mechanism object is obtained from a Segment. From a Mechanism one can - obtain a range variable. The range variable can also be obtained from the - segment by using the hoc range variable name that has the mechanism suffix. - - - ----- - - -.. _hoc_Hoc_accessing_Python: - -HOC accessing Python -~~~~~~~~~~~~~~~~~~~~ - - - Syntax: - ``nrniv [file.hoc...]`` - - - Description: - The absence of a -python argument causes NEURON to launch with Hoc - as the command line interpreter. At present, no :file:`file.py` arguments - are allowed as all named files are treated as hoc files. 
Nevertheless, - from the hoc world any python statement can be executed and anything - in the python world can be assigned or evaluated. - - ----- - - - -.. hoc:function:: nrnpython - - - Syntax: - ``nrnpython("any python statement")`` - - - Description: - Executes any python statement. Returns 1 on success; 0 if an exception - was raised or if python support is not available. - - In particular, ``python_available = nrnpython("")`` is 1 (true) if - python support is available and 0 (false) if python support is not - available. - - Example: - - .. code-block:: - python - - nrnpython("import sys") - nrnpython("print sys.path") - nrnpython("a = [1,2,3]") - nrnpython("print a") - nrnpython("import hoc") - nrnpython("hoc.execute('print PI')") - - - - ----- - - - -.. hoc:class:: PythonObject - - - Syntax: - ``p = new PythonObject()`` - - - Description: - Accesses any python object. Almost equivalent to :hoc:class:`~neuron.hoc.HocObject` in the - python world but because of some hoc syntax limitations, ie. hoc does not - allow an object to be a callable function, and top level indices have - different semantics, we sometimes need to use a special idiom, ie. the '_' - method. Strings and double numbers move back and forth between Python and - Hoc (but Python integers, etc. become double values in Hoc, and when they - get back to the Python world, they are doubles). - - - .. code-block:: - python - - objref p - p = new PythonObject() - nrnpython("ev = lambda arg : eval(arg)") // interprets the string arg as an - //expression and returns the value - objref tup - print p.ev("3 + 4") // prints 7 - print p.ev("'hello' + 'world'") // prints helloworld - tup = p.ev("('xyz',2,3)") // tup is a PythonObject wrapping a Python tuple - print tup // prints PythonObject[1] - print tup._[2] // the 2th tuple element is 3 - print tup._[0] // the 0th tuple element is xyz - - nrnpython("import hoc") // back in the Python world - nrnpython("h = hoc.HocObject()") // tup is a Python Tuple object - nrnpython("print h.tup") // prints ('xyz', 2, 3) - - Note that one needs the '_' method, equivalent to 'this', because trying to - get at an element through the built-in python method name via - - .. code-block:: - python - - tup.__getitem__(0) - - gives the error "TypeError: tuple indices must be integers" since - the Hoc 0 argument is a double 0.0 when it gets into Python. - It is difficult to pass an integer to a Python function from the hoc world. - The only time Hoc doubles appear as integers in Python, is when they are - the value of an index. If the index is not an integer, e.g. a string, use - the __getitem__ idiom. - - .. code-block:: - python - - objref p - p = new PythonObject() - nrnpython("ev = lambda arg : eval(arg)") - objref d - d = p.ev("{'one':1, 'two':2, 'three':3}") - print d.__getitem__("two") // prints 2 - - objref dg - dg = d.__getitem__ - print dg._("two") // prints 2 - - - To assign a value to a python variable that exists in a module use - - .. code-block:: - python - - nrnpython("a = 10") - p = new PythonObject() - p.a = 25 - p.a = "hello" - p.a = new Vector(4) - nrnpython("b = []") - p.a = p.b - - - - diff --git a/docs/hoc/simctrl/cvode.rst b/docs/hoc/simctrl/cvode.rst index 00940f6a26..078ef6ab37 100644 --- a/docs/hoc/simctrl/cvode.rst +++ b/docs/hoc/simctrl/cvode.rst @@ -1347,21 +1347,14 @@ CVode Description: - When set, G*v = R matrix and vectors are reallocated in tree order so that - all the elements of each type are contiguous in memory. 
Pointers to these - elements used by the GUI, Vector, Pointer, etc. are updated. - - Much of the implementation was contributed by Hubert Eichner - - .. code-block:: - none - - - - - :hoc:meth:`ParallelContext.multisplit` automatically sets cache_efficient(1) + Deprecated method. + This used to cause the G*v = R matrix and vectors to be reallocated in + tree order so that all the elements of each type are contiguous in + memory. + This is no longer required because this scheme is now used all the time + and cannot be disabled. + Pointers to these elements used by the GUI, Vector, Pointer, etc. are updated. - ---- diff --git a/docs/index.rst b/docs/index.rst index 01b4183711..00d8791a9e 100644 --- a/docs/index.rst +++ b/docs/index.rst @@ -8,7 +8,8 @@ NEURON is a simulator for neurons and networks of neurons that runs efficiently Build and simulate models using Python, HOC, and/or NEURON's graphical interface. From this page you can watch :ref:`recorded NEURON classes `, read the :ref:`Python ` or :ref:`HOC ` programmer's references, `browse the NEURON forum `_, -explore the `source code for over 750 NEURON models on ModelDB `_, and more (use the links on the side or search). +explore the `source code for over 800 NEURON models on ModelDB `_, +and more (use the links on the side or search). .. toctree:: :maxdepth: 1 @@ -38,6 +39,7 @@ explore the `source code for over 750 NEURON models on ModelDB `_. + Alternatively, you can use the `PKG installer `_. - For troubleshooting, see the `detailed installation instructions `_. + For troubleshooting, see the `detailed installation instructions `_. .. tab-item:: Linux - The recommended installation is to: + The recommended installation is to open a terminal and type: .. code:: pip3 install neuron - For troubleshooting, see the `detailed installation instructions `_. + For troubleshooting, see the `detailed installation instructions `_. .. tab-item:: Windows - `Download the Windows Installer `_. + `Download the Windows Installer `_. You can also install the Linux wheel via the Windows Subsystem for Linux (WSL). See `instructions `_. - For troubleshooting, see the `detailed installation instructions `_. + For troubleshooting, see the `detailed installation instructions `_. .. tab-item:: Cloud diff --git a/docs/install/code_coverage.md b/docs/install/code_coverage.md index dca1669dd1..ea63d2ec03 100644 --- a/docs/install/code_coverage.md +++ b/docs/install/code_coverage.md @@ -17,7 +17,7 @@ Clone the nrn repository and get ready to build. ``` Note: A simplified workflow is supported via the cmake option -[-DNRN_ENABLE_COVERAGE=ON](../cmake_doc/options.html#nrn-enable-coverage-bool-off) +[-DNRN_ENABLE_COVERAGE=ON](../cmake_doc/options.rst#nrn-enable-coverage-bool-off) that removes the need to be concerned with COVERAGE_FLAGS and explicit use of lcov and genhtml by providing the make targets ``make cover_begin`` and ``make cover_html``. See [Simplified Workflow](#simplified-workflow) below. diff --git a/docs/install/debug.md b/docs/install/debug.md index 098fb577f4..876193a609 100644 --- a/docs/install/debug.md +++ b/docs/install/debug.md @@ -25,14 +25,14 @@ while continuing to experience the error, it may be worthwhile to look into [LLVM address sanitizer](https://github.com/neuronsimulator/nrn/issues/1213). 
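When the bug is reproducible from a source build, a minimal sanitizer-instrumented configuration might look like the sketch below (the `NRN_SANITIZERS` CMake variable is described in the Sanitizers section further down; build directory and install prefix are illustrative):

```bash
# sketch: debug build with AddressSanitizer instrumentation enabled
mkdir build && cd build
cmake .. \
  -DCMAKE_BUILD_TYPE=Debug \
  -DNRN_SANITIZERS=address \
  -DCMAKE_INSTALL_PREFIX=$PWD/install
cmake --build . --parallel
```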
#### NaN or Inf values -Use [h.nrn_feenableexcept(1)](../python/programming/errors.html#nrn_fenableexcept) +Use [h.nrn_feenableexcept(1)](../python/programming/errors.rst#nrn_feenableexcept) to generate floating point exception for DIVBYZERO, INVALID, OVERFLOW, exp(700). [GDB](#GDB) can then be used to show where the SIGFPE occurred. #### Different results with different nhost or nthread. What is the gid and spiketime of the earliest difference? -Use [ParallelContext.prcellstate](../python/modelspec/programmatic/network/parcon.html?highlight=prcellstate#ParallelContext.prcellstate) +Use [ParallelContext.prcellstate](../python/modelspec/programmatic/network/parcon.rst#ParallelContext.prcellstate) for that gid at various times before spiketime to see why and when the prcellstate files become different. Time 0 after initialization is often a good place to start. @@ -128,8 +128,8 @@ the next valgrind error. #### Sanitizers -The `AddressSanitizer` (ASan), `LeakSanitizer` (LSan), `ThreadSanitizer` (TSan, -see below) and `UndefinedBehaviorSanitizer` (UBSan) are a collection of tools +The `AddressSanitizer` (ASan), `LeakSanitizer` (LSan), `ThreadSanitizer` (TSan) +and `UndefinedBehaviorSanitizer` (UBSan) are a collection of tools that rely on compiler instrumentation to catch dangerous behaviour at runtime. Compiler support is widespread but not ubiquitous, but both Clang and GCC provide support. @@ -147,13 +147,14 @@ The typical example of this case is loading NEURON from Python, where the As of [#1842](https://github.com/neuronsimulator/nrn/pull/1842), the NEURON build system is aware of ASan, LSan and UBSan, and it tries to configure the sanitizers correctly based on the `NRN_SANITIZERS` CMake variable. +In [#2034](https://github.com/neuronsimulator/nrn/pull/2034) this was extended +to TSan via `-DNRN_SANITIZERS=thread`, and support for GCC was added. For example, `cmake -DNRN_SANITIZERS=address,leak ...` will enable ASan and LSan, while `-DNRN_SANITIZERS=undefined` will enable UBSan. -Not all combinations of sanitizers are possible, so far ASan, ASan+LSan and -UBSan have been tested with Clang, GCC has not been tested and likely needs -minor changes. -Support for standalone LSan and for TSan (see below for manual instructions) -should be possible without major difficulties, but is not yet implemented. +Not all combinations of sanitizers are possible, so far ASan, ASan+LSan, TSan +and UBSan have been tested with Clang. +Support for standalone LSan should be possible without major difficulties, but +is not yet implemented. Depending on your system, you may also need to set the `LLVM_SYMBOLIZER_PATH` variable to point to the `llvm-symbolizer` executable matching your Clang. @@ -176,59 +177,30 @@ use `nrn-enable-sanitizer special -python path/to/script.py` or because the `python` binary is (presumably) not linked against the sanitizer runtime library. -LSan and UBSan support suppression files, which can be used to prevent tests -failing due to known issues. -NEURON includes a suppression file for UBSan under `.sanitizers/undefined.supp` -in the GitHub repository, no LSan equivalent exists for the moment. +LSan, TSan and UBSan support suppression files, which can be used to prevent +tests failing due to known issues. +NEURON includes a suppression file for TSan under `.sanitizers/thread.supp` and +one for UBSan under `.sanitizers/undefined.supp` in the GitHub repository, no +LSan equivalent exists for the moment. 
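At run time, the suppression files are consumed through the standard sanitizer environment variables. The following is only an illustrative sketch; the CI scripts may already export equivalent settings for you:

```bash
# sketch: point the sanitizer runtimes at the shipped suppression files before running tests
export UBSAN_OPTIONS="suppressions=$PWD/.sanitizers/undefined.supp:print_stacktrace=1"
export TSAN_OPTIONS="suppressions=$PWD/.sanitizers/thread.supp"
ctest --output-on-failure
```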
Note that LSan and MPI implementations typically do not play nicely together, so if you want to use LSan with NEURON, you may need to disable MPI or add a suppression file that is tuned to your MPI implementation. -The GitHub Actions CI for NEURON at the time of writing includes test jobs for -ASan and UBSan using Clang 14, but does not enable LSan. +Similarly, TSan does not work very well with MPI and (especially) OpenMP +implementations that were not compiled with TSan instrumentation (which they +are typically not). -CoreNEURON and NMODL both support the sanitizers in a similar way, but this has -to be enabled explicitly: `-DNRN_SANITIZERS=undefined` will not compile -CoreNEURON code with UBSan enabled, you must additionally pass -`-DCORENRN_SANITIZERS=undefined` to enable instrumentation of CoreNEURON code. -The equivalent variable for NMODL is `NMODL_SANITIZERS`. - -#### ThreadSanitizer (TSAN) -`ThreadSanitizer` is a tool that detects data races. Be aware that a slowdown is incurred by using ThreadSanitizer of about 5x-15x, with typical memory overhead of about 5x-10x. - -Here is how to enable it: -``` -cmake ... -DNRN_ENABLE_TESTS=ON -DCMAKE_C_FLAGS="-O0 -fno-inline -g -fsanitize=thread" -DCMAKE_CXX_FLAGS="-O0 -fno-inline -g -fsanitize=thread" .. -``` -You can then target a specific test (for example `ctest -VV -R test_name` or `bin/nrniv -nogui -nopython test.hoc`) and have a look at the generated output. In case of data races, you would see something similar to: -``` -94: WARNING: ThreadSanitizer: data race (pid=2572) -94: Read of size 8 at 0x7b3c00000bf0 by thread T1: -94: #0 Cvode::at_time(double, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvodeobj.cpp:751 (libnrniv.so+0x38673e) -94: #1 at_time /home/savulesc/Workspace/nrn/src/nrncvode/cvodestb.cpp:133 (libnrniv.so+0x389e27) -94: #2 _nrn_current__IClamp /home/savulesc/Workspace/nrn/src/nrnoc/stim.c:266 (libnrniv.so+0x5b8f02) -94: #3 _nrn_cur__IClamp /home/savulesc/Workspace/nrn/src/nrnoc/stim.c:306 (libnrniv.so+0x5b9236) -94: #4 Cvode::rhs_memb(CvMembList*, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvtrset.cpp:68 (libnrniv.so+0x38a0eb) -94: #5 Cvode::rhs(NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvtrset.cpp:35 (libnrniv.so+0x38a2f6) -94: #6 Cvode::fun_thread_transfer_part2(double*, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/occvode.cpp:671 (libnrniv.so+0x3bbbf1) -94: #7 Cvode::fun_thread(double, double*, double*, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/occvode.cpp:639 (libnrniv.so+0x3bd049) -94: #8 f_thread /home/savulesc/Workspace/nrn/src/nrncvode/cvodeobj.cpp:1532 (libnrniv.so+0x384f45) -94: #9 slave_main /home/savulesc/Workspace/nrn/src/nrnoc/multicore.cpp:337 (libnrniv.so+0x5157ee) -94: -94: Previous write of size 8 at 0x7b3c00000bf0 by main thread: -94: #0 Cvode::at_time(double, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvodeobj.cpp:753 (libnrniv.so+0x386759) -94: #1 at_time /home/savulesc/Workspace/nrn/src/nrncvode/cvodestb.cpp:133 (libnrniv.so+0x389e27) -94: #2 _nrn_current__IClamp /home/savulesc/Workspace/nrn/src/nrnoc/stim.c:266 (libnrniv.so+0x5b8f02) -94: #3 _nrn_cur__IClamp /home/savulesc/Workspace/nrn/src/nrnoc/stim.c:306 (libnrniv.so+0x5b9236) -94: #4 Cvode::rhs_memb(CvMembList*, NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvtrset.cpp:68 (libnrniv.so+0x38a0eb) -94: #5 Cvode::rhs(NrnThread*) /home/savulesc/Workspace/nrn/src/nrncvode/cvtrset.cpp:35 (libnrniv.so+0x38a2f6) -94: #6 Cvode::fun_thread_transfer_part2(double*, NrnThread*) 
/home/savulesc/Workspace/nrn/src/nrncvode/occvode.cpp:671 (libnrniv.so+0x3bbbf1) -.............................................................. -94: SUMMARY: ThreadSanitizer: data race /home/savulesc/Workspace/nrn/src/nrncvode/cvodeobj.cpp:751 in Cvode::at_time(double, NrnThread*) -94: ================== -``` +The GitHub Actions CI for NEURON at the time of writing includes three jobs +using Ubuntu 22.04: ASan (but not LSan) using Clang 14, UBSan using Clang 14, +and TSan using GCC 12. +In addition, there is a macOS-based ASan build using AppleClang, which has the +advantage that it uses `libc++` instead of `libstdc++`. +NMODL supports the sanitizers in a similar way, but this has to be enabled +explicitly: `-DNRN_SANITIZERS=undefined` will not compile NMODL code with UBSan +enabled, you must additionally pass `-DNMODL_SANITIZERS=undefined` to enable +instrumentation of NMODL code. Profiling and performance benchmarking -------------------------------------- diff --git a/docs/install/install_instructions.md b/docs/install/install_instructions.md index 31a3d30086..7c154c20f2 100644 --- a/docs/install/install_instructions.md +++ b/docs/install/install_instructions.md @@ -8,7 +8,7 @@ Windows platforms. #### Mac OS -Since version 7.8.1 we are providing Python wheels and NEURON can be installed using `pip` as: +Since version 7.8.1 we are providing Python wheels and NEURON can be installed using `pip` by opening a Terminal (Press `⌘` + `Space` and type "terminal") and typing: ``` pip3 install neuron @@ -107,7 +107,7 @@ architecture. #### Linux -Like Mac OS, since 7.8.1 release python wheels are provided and you can use `pip` to install NEURON as: +Like Mac OS, since 7.8.1 release python wheels are provided and you can use `pip` to install NEURON by opening a terminal and typing: ``` pip3 install neuron @@ -199,16 +199,23 @@ In order to build NEURON from source, the following packages must be available: - Bison - Flex >= 2.6 -- C/C++ compiler suite supporting C++17 -- CMake 3.15.0 +- C/C++ compiler suite supporting C++17 (e.g. GCC >=9.3.1, Clang >= 11.0.0) + - Note that some C++17 features require a newer compiler version. + - C++17 features must be available without linking extra libraries. This notably excludes some older versions of GCC where `std::filesystem` required `libstdc++fs.so`. +- CMake >= 3.15 (>= 3.18 if ``-DNRN_ENABLE_PYTHON_DYNAMIC=ON``) The following packages are optional (see build options): - Python >=3.8 (for Python interface) -- Cython (for RXD) +- Cython < 3 (for RXD) - MPI (for parallel) - X11 (Linux) or XQuartz (MacOS) (for GUI) +Note that you may have to force Cython version: +```bash +pip install "cython<3" +``` + Depending on platform you can install these dependencies as follows: @@ -296,80 +303,20 @@ We recommend using platform specific instructions provided in [nrn-build-ci](htt Starting with the 7.8.1 release, NEURON can be installed using the [CMake build system](https://cmake.org/). One of the primary advantages of a CMake-based build system is cross-platform support and integration with -other projects like [Interviews](https://github.com/neuronsimulator/iv), [CoreNEURON](https://github.com/BlueBrain/CoreNeuron/), +other projects like [Interviews](https://github.com/neuronsimulator/iv), [CoreNEURON](https://github.com/neuronsimulator/nrn/tree/master/src/coreneuron), [NMODL](https://github.com/BlueBrain/nmodl/) etc. These projects are now integrated into single a CMake-based -build system, and they can be installed together as shown below: - -1. 
Clone the latest version or specific release: - - ``` - git clone https://github.com/neuronsimulator/nrn # latest development branch - git clone https://github.com/neuronsimulator/nrn -b 8.0.0 # specific release version 8.0.0 - cd nrn - ``` - - > :warning: To build NEURON from source you either need to clone the NEURON Git repository or download a - > source code archive that includes Git submodules, such as the `full-src-package-X.Y.Z.tar.gz` file in - > the [NEURON releases](https://github.com/neuronsimulator/nrn/releases) on GitHub. The tarballs like - > `Source code (tar.gz)` or `Source code (zip)` created by GitHub are incomplete. - - -2. Create a build directory: - - ``` - mkdir build - cd build - ``` - -3. Run `cmake` with the appropriate options (see below for a list of common options). A full list of options -can be found in `nrn/CMakeLists.txt` and defaults are shown in `nrn/cmake/BuildOptionDefaults.cmake`. e.g. a bare-bones installation: - - - ``` - cmake .. \ - -DNRN_ENABLE_INTERVIEWS=OFF \ - -DNRN_ENABLE_MPI=OFF \ - -DNRN_ENABLE_RX3D=OFF \ - -DPYTHON_EXECUTABLE=$(which python3) \ - -DCMAKE_INSTALL_PREFIX=/path/to/install/directory - ``` - -4. Build the code: - - ``` - cmake --build . --parallel 8 --target install - ``` - Feel free to set the number of parallel jobs (i.e. 8) according to your system using the `--parallel` option. - -5. Set PATH and PYTHONPATH environmental variables to use the installation: - - ``` - export PATH=/path/to/install/directory/bin:$PATH - export PYTHONPATH=/path/to/install/directory/lib/python:$PYTHONPATH - ``` - -Particularly useful CMake options are (use **ON** to enable and **OFF** to disable feature): - -* **-DNRN\_ENABLE\_INTERVIEWS=OFF** : Disable Interviews (native GUI support) -* **-DNRN\_ENABLE\_PYTHON=OFF** : Disable Python support -* **-DNRN\_ENABLE\_MPI=OFF** : Disable MPI support for parallelization -* **-DNRN\_ENABLE\_RX3D=OFF** : Disable rx3d support -* **-DNRN\_ENABLE\_CORENEURON=ON** : Enable CoreNEURON support -* **-DNRN\_ENABLE\_TESTS=ON** : Enable unit tests -* **-DPYTHON\_EXECUTABLE=/python/binary/path** : Use provided Python binary to build Python interface -* **-DCMAKE_INSTALL_PREFIX=/install/dir/path** : Location for installing -* **-DCORENRN\_ENABLE\_NMODL=ON** : Use [NMODL](https://github.com/BlueBrain/nmodl/) instead of [MOD2C](https://github.com/BlueBrain/mod2c/) for code generation with CoreNEURON +build system. -Please refer to [docs/cmake_doc/options.rst](docs/cmake_doc/options.rst) for more information on -the CMake options. +Please refer to [the CMake build system +options](../cmake_doc/options.rst) for more information on how to +install the project using ``CMake``. #### Optimized CPU and GPU Support using CoreNEURON -NEURON now integrates [CoreNEURON library](https://github.com/BlueBrain/CoreNeuron/) for improved simulation +NEURON now integrates [CoreNEURON library](https://github.com/neuronsimulator/nrn/blob/master/src/coreneuron) for improved simulation performance on modern CPU and GPU architectures. CoreNEURON is designed as a library within the NEURON simulator and can transparently handle all spiking network simulations including gap junction coupling with the fixed time -step method. You can find detailed instructions [here](../coreneuron/index.html) and -[here](https://github.com/BlueBrain/CoreNeuron/#installation). +step method. You can find detailed instructions [here](../coreneuron/index.rst) and [here](../cmake_doc/options.rst). 
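As a rough sketch, enabling the integrated CoreNEURON backend is a single CMake option on top of a normal source build (option name as listed in the CMake options documentation; install prefix illustrative):

```bash
# sketch: source build with the integrated CoreNEURON backend enabled
cmake .. \
  -DNRN_ENABLE_CORENEURON=ON \
  -DCMAKE_INSTALL_PREFIX=/path/to/install/directory
cmake --build . --parallel 8 --target install
```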
#### Run integrated tests @@ -467,10 +414,9 @@ being used by running following command: ```bash $ nrnpyenv.sh ... -export NRN_PYTHONHOME="/python/install/path/python-3.8.3/" export NRN_PYLIB="/python/install/path/python-3.8.3/lib/libpython3.8.so.1.0" ``` -If `NRN_PYTHONHOME` and `NRN_PYLIB` are inappropriate then you can set them explicitly or use `-pyexe` option mentioned above. +If `NRN_PYLIB` is inappropriate then you can set it explicitly or use `-pyexe` option mentioned above. * **How to build NEURON in cluster environment where build node architecture is different than compute node?** diff --git a/docs/install/mac_pkg.md b/docs/install/mac_pkg.md index 0f2b0285c0..8f69c73c64 100644 --- a/docs/install/mac_pkg.md +++ b/docs/install/mac_pkg.md @@ -9,7 +9,7 @@ The build will be universal2 and work on arm64 and x86_64 architectures (if the pythons used are themselves, universal2). Preparing your Mac development environment for correct functioning of the script requires installing a few extra [Dependencies](#Dependencies) beyond the -[normal user source build](./install_instructions.html#Mac-OS-Depend), +[normal user source build](./install_instructions.md#Mac-OS-Depend), obtaining an Apple Developer Program membership, and requesting two signing certificates from Apple. Those actions are described in separate sections below. @@ -17,7 +17,7 @@ On an Apple M1 or x86_64, the script, by default, creates, e.g., ```nrn-8.0a-726-gb9a811a32-macosx-11-universal2-py-38-39-310.pkg``` where the information between nrn and macosx comes from ```git describe```, -the number after macos refers to the ```MACOSX_DEPLOYMENT_TARGET=11``` +the number after macosx refers to the ```MACOSX_DEPLOYMENT_TARGET=11``` the next item(s) before py indicate the architectures on which the program can run (i.e. ```arm64```, ```x86_64```, or ```universal2``` for both) @@ -32,7 +32,7 @@ the same MACOSX_DEPLOYMENT_TARGET. You can check both of these with A space separated list of python executable arguments can be used in place of the internal default lists. ```$NRN_SRC``` is the location of the -repository, default ```$HOME/neuron/nrn```. The script makes sure ```$NRN_SRC/build``` +repository, default is the current working directory (hopefully you launch at the location of this script, e.g. ```$HOME/neuron/nrn```). The script makes sure ```$NRN_SRC/build``` exists and uses that folder to configure and build the software. The software is installed in ```/Applications/NEURON```. @@ -50,9 +50,12 @@ cmake .. -DCMAKE_INSTALL_PREFIX=$NRN_INSTALL \ -DCMAKE_PREFIX_PATH=/usr/X11 \ -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++ ``` +[^1] +[^1]: NRN_RX3D_OPT_LEVEL=2 can build VERY slowly (cython translated cpp file can take a half hour or more). So for testing, we generally copy the script to temp.sh and modify to NRN_RX3D_OPT_LEVEL=0 + The default variables above will be ``` -pythons="python3.8;python3.9;python3.10" +pythons="python3.8;python3.9;python3.10;python3.11" archs_cmake='-DCMAKE_OSX_ARCHITECTURES=arm64;x86_64' ``` @@ -62,7 +65,11 @@ The MPI library does not have to be universal if configuring with ```-DNRN_ENABLE_MPI_DYNAMIC=ON``` as the library is not linked against during the build. -```make -j install``` is used to build and install in ```/Applications/NEURON``` +```make -j install``` [^2] is used to build and install in ```/Applications/NEURON``` + +[^2]: Instead of make, the script uses Ninja. ```ninja install``` and the cmake line begins with ```cmake .. 
-G Ninja ...``` + +Runs some basic tests to verify a successful build ```make macpkg``` ( see ```src/mac/CMakeLists.txt``` ) is used to: @@ -90,42 +97,47 @@ during the build. - request Apple to notarize NEURON.pkg ```src/macnrn_notarize.sh``` + If notarization succeeds it will finish with a few lines of the form + ``` + Current status: Accepted........Processing complete + id: bda82fa9-e0ab-4f86-aad8-3fee1d8c2369 + status: Accepted + ``` + and staple the package. + If notarizaton fails, it is occasionally due to Apple changing the contracts and demanding that "You must first sign the relevant contracts online. (1048)". In that case, go to [appstoreconnect.apple.com](https://appstoreconnect.apple.com) - to accept the legal docs. For other notarization failures, one must consult - the LogFileURL which can be obtained with + to accept the legal docs an try again. For other notarization failures, one must consult the LogFileURL which can be obtained with [^3] + [^3]: altool has been replaced by notarytool for purposes of notarization. See + https://developer.apple.com/documentation/technotes/tn3147-migrating-to-the-latest-notarization-tool + and ```nrn/src/mac/nrn_notarize.sh``` + + ``` + % xcrun notarytool log \ + --apple-id "$apple_id" \ + --team-id "$team_id" \ + --password "$app_specific_password" \ + "$id" ``` - % xcrun altool --notarization-info $RequestIdentifier \ - --username "michael.hines@yale.edu" \ - --password "`cat ~/.ssh/notarization-password`" - No errors getting notarization info. - - Date: 2022-01-02 23:38:12 +0000 - Hash: 7254157952d4f3573c2804879cf6da8d... - LogFileURL: https://osxapps-ssl.itunes.apple.com/itunes-assets... - RequestUUID: 152f0f0e-af58-4d22-b291-6a441825dd20 - Status: invalid - Status Code: 2 - Status Message: Package Invalid + where $id was printed by the notarization request. The apple_id, team_id, and + app_specfic_password are set by the script to the contents of the files ``` - where RequestIdentifer (the RequestUUID) appears in the email sent - back in response to the notarization request. + $HOME/.ssh/apple-id + $HOME/.ssh/apple-team-id + $HOME/.ssh/apple-notarization-password + ``` + The script ends by printing: ``` - Until we figure out how to automatically staple the notarization - the following two commands must be executed manually. - xcrun stapler staple $PACKAGE_FILE_NAME cp $PACKAGE_FILE_NAME $HOME/$PACKAGE_FULL_NAME + Manually upload $HOME/nrn-9.0a-88-gc6d8b9af6-macosx-10.15-universal2-py-39-310-311.pkg to github ``` -where the variables are filled out and can be copy/pasted to your -terminal after Apple sends an email declaring that notarization was successful. -The email from Apple usually takes just a few minutes but can be hours. I've been uploading the ```$PACKAGE_FULL_NAME``` as an artifact for a -Release version from https://github.com/neuronsimulatior/nrn by choosing +Release version from https://github.com/neuronsimulator/nrn by choosing Releases, choosing the proper Release tag (e.g. Release 8.0a), Edit release, and clicking in the "Attach binaries ..." near the bottom of the page. @@ -163,7 +175,7 @@ can be found at [python.org](http://python.org/Downloads/macOS) at least for - Python 3.8 is already installed as /usr/bin/python3 and is universal2. - - The [normal source build](./install_instructions.html#Mac-OS-Depend) + - The [normal source build](./install_instructions.md#Mac-OS-Depend) explains how to install brew and add it to the PATH. 
```bash /bin/bash -c "$(curl -fsSL https://raw.githubusercontent.com/Homebrew/install/HEAD/install.sh)" diff --git a/docs/install/python_wheels.md b/docs/install/python_wheels.md index aea284ae62..7b6d6b1cf8 100644 --- a/docs/install/python_wheels.md +++ b/docs/install/python_wheels.md @@ -9,20 +9,19 @@ Current NEURON Linux image is based on `manylinux2014`. ### Setting up Docker [Docker](https://en.wikipedia.org/wiki/Docker_(software)) is required for building Linux wheels. -You can find instructions on how to setup Docker on Linux [here](https://docs.docker.com/engine/install/). +You can find instructions on how to setup Docker on Linux [here](https://docs.docker.com/engine/install/). ### NEURON Docker Image Workflow -When required (i.e. update packages, add new software), `NEURON maintainers` are in charge of updating the NEURON docker -images published on Docker Hub under: -* [neuronsimulator/neuron_wheel](https://hub.docker.com/r/neuronsimulator/neuron_wheel) -* [neuronsimulator/neuron_wheel_gpu](https://hub.docker.com/r/neuronsimulator/neuron_wheel_gpu) +When required (i.e. update packages, add new software), `NEURON maintainers` are in charge of +updating the NEURON docker images published on Docker Hub under +[neuronsimulator/neuron_wheel](https://hub.docker.com/r/neuronsimulator/neuron_wheel). Azure pipelines pull this image off DockerHub for Linux wheels building. -Updating and publishing the public images are done by a manual process that relies on a `Docker file` -(see [packaging/python/Dockerfile](../../packaging/python/Dockerfile) and [packaging/python/Dockerfile_gpu](../../packaging/python/Dockerfile_gpu)). +Updating and publishing the public images are done by a manual process that relies on a +`Docker file` (see [packaging/python/Dockerfile](../../packaging/python/Dockerfile)). Any official update of these files shall imply a PR reviewed and merged before `DockerHub` publishing. All wheels built on Azure are: @@ -30,25 +29,39 @@ All wheels built on Azure are: * Published to `Pypi.org` as * `neuron-nightly` -> when the pipeline is launched in CRON mode * `neuron-x.y.z` -> when the pipeline is manually triggered for release `x.y.z` - * additionally, for Linux only: `neuron-gpu-nightly` and `neuron-gpu-x.y.z` * Stored as `Azure artifacts` in the Azure pipeline for every run. -Refer to the following image for the NEURON Docker Image workflow: +Refer to the following image for the NEURON Docker Image workflow: ![](images/docker-workflow.png) -### Building the docker image +### Building the docker images automatically +If you run the workflow manually on Gitlab (with the "Run pipeline" button), it will now have the `mac_m1_container_build` and `x86_64_container_build` jobs added to it. These jobs need to be started manually and will not affect the overal workflow status. They don't need to be run every time, just when a refresh of the container images is necessary. +They will build the container images and push to docker hub. If you want to, you can still build manually (see next section), but there shouldn't be a requirement to do so any more. + +A word of warning: podman on OSX uses a virtual machine. The job can take care of starting it, but we generally try to have it running to avoid jobs cleaning up after themselves and killing the machine for other jobs. When starting the machine, set the variables that need to be set during the container build, ie. proxy and `BUILDAH_FORMAT`. + +`BUILDAH_FORMAT` ensures that `ONBUILD` instructions are enabled. 
+ +``` +export http_proxy=http://bbpproxy.epfl.ch:80 +export https_proxy=http://bbpproxy.epfl.ch:80 +export HTTP_PROXY=http://bbpproxy.epfl.ch:80 +export HTTPS_PROXY=http://bbpproxy.epfl.ch:80 +export BUILDAH_FORMAT=docker +``` + +### Building the docker image manually After making updates to any of the docker files, you can build the image with: ``` cd nrn/packaging/python # update Dockerfile -docker build -t neuronsimulator/neuron_wheel[_gpu]: . +docker build -t neuronsimulator/neuron_wheel: . ``` where `` is: -* `latest-x86_64` or `latest-aarch64` for official publishing on respective platforms. For `master`, we are using `latest-gcc9-x86_64` and `latest-gcc9-aarch64` (see [Use GCC9 for building wheels #1971](https://github.com/neuronsimulator/nrn/pull/1971)). -* `nvhpc-X.Y-cuda-A.B` for the GPU wheels where `X.Y` is the NVHPC version and `A.B` is the CUDA one. I.e `nvhpc-22.1-cuda-11.5`. For `master` we are using `nvhpc-22.1-cuda-11.5-gcc9`(see [Use GCC9 for building wheels #1971](https://github.com/neuronsimulator/nrn/pull/1971)). +* `latest-x86_64` or `latest-aarch64` for official publishing on respective platforms. For `master`, we are using `latest-gcc9-x86_64` and `latest-gcc9-aarch64` (see [Use GCC9 for building wheels #1971](https://github.com/neuronsimulator/nrn/pull/1971)). * `feature-name` for updates (for local testing or for PR testing purposes where you can temporarily publish the tag on DockerHub and tweak Azure CI pipelines to use it - refer to - `Job: 'ManyLinuxWheels'` or `Job: 'ManyLinuxGPUWheels'` in [azure-pipelines.yml](../../azure-pipelines.yml) ) + `Job: 'ManyLinuxWheels'` in [azure-pipelines.yml](../../azure-pipelines.yml) ) If you are building an image for AArch64 i.e. with `latest-aarch64` tag then you additionally pass `--build-arg` argument to docker build command in order to use compatible manylinux image for ARM64 platform (e.g. while building on Apple M1 or QEMU emulation): @@ -62,43 +75,37 @@ docker build -t neuronsimulator/neuron_wheel:latest-aarch64 --build-arg MANYLINU In order to push the image and its tag: ``` docker login --username= -docker push neuronsimulator/neuron_wheel[_gpu]: +docker push neuronsimulator/neuron_wheel: ``` ### Using the docker image You can either build the neuron images locally or pull them from DockerHub: ``` -$ docker pull neuronsimulator/neuron_wheel -Using default tag: latest +$ docker pull neuronsimulator/neuron_wheel:latest-x86_64 +Using default tag: latest-x86_64 latest: Pulling from neuronsimulator/neuron_wheel .... Status: Downloaded newer image for neuronsimulator/neuron_wheel:latest -docker.io/neuronsimulator/neuron_wheel:latest +docker.io/neuronsimulator/neuron_wheel:latest-x86_64 ``` We can conveniently mount the local NEURON repository inside docker, by using the `-v` option: ``` -docker run -v $PWD/nrn:/root/nrn -w /root/nrn -it neuronsimulator/neuron_wheel bash +docker run -v $PWD/nrn:/root/nrn -w /root/nrn -it neuronsimulator/neuron_wheel:latest-x86_64 bash ``` where `$PWD/nrn` is a NEURON repository on the host machine that ends up mounted at `/root/nrn`. -This is how you can test your NEURON updates inside the NEURON Docker image. +This is how you can test your NEURON updates inside the NEURON Docker image. Note that `-w` sets the working directory inside the container. 
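Once inside the container, a couple of quick checks confirm that the mounted checkout and the per-version Python installations used by the wheel build script (under `/opt/python`, see below) are visible. This is only an illustrative sanity check:

```bash
# sketch: run inside the container started above
ls /root/nrn      # the host NEURON checkout mounted via -v
ls /opt/python    # manylinux python installations the wheel script loops over
```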
-If you want to build wheels with `GPU support` via CoreNEURON, then you have to use the `neuronsimulator/neuron_wheel_gpu` image: - -``` -docker run -v $PWD/nrn:/root/nrn -w /root/nrn -it neuronsimulator/neuron_wheel_gpu bash -``` - ### MPI support The `neuronsimulator/neuron_wheel` provides out-of-the-box support for `mpich` and `openmpi`. For `HPE-MPT MPI`, since it's not open source, you need to acquire the headers and mount them in the docker image: ``` -docker run -v $PWD/nrn:/root/nrn -w /root/nrn -v $PWD/mpt-headers/2.21/include:/nrnwheel/mpt/include -it neuronsimulator/neuron_wheel bash +docker run -v $PWD/nrn:/root/nrn -w /root/nrn -v $PWD/mpt-headers/2.21/include:/nrnwheel/mpt/include -it neuronsimulator/neuron_wheel:latest-x86_64 bash ``` where `$PWD/mpt-headers` is the path to the HPE-MPT MPI headers on the host machine that end up mounted at `/nrnwheel/mpt/include`. You can download the headers with: @@ -113,44 +120,56 @@ Note that for macOS there is no docker image needed, but all required dependenci In order to have the wheels working on multiple macOS target versions, special consideration must be made for `MACOSX_DEPLOYMENT_TARGET`. -Taking Azure macOS `x86_64` wheels for example, `readline` was built with `MACOSX_DEPLOYMENT_TARGET=10.9` and stored as secure file on Azure. -For `arm64` we need to set `MACOSX_DEPLOYMENT_TARGET=11.0`. The wheels currently need to be built manually, using `universal2` Python installers. +Taking Azure macOS `x86_64` wheels for example, `readline` was built with `MACOSX_DEPLOYMENT_TARGET=10.9` and stored as secure file on Azure (under `Pipelines > Library > Secure files`). +For `arm64` we need to set `MACOSX_DEPLOYMENT_TARGET=11.0`. The wheels currently need to be built manually, using `universal2` Python installers. For upcoming `universal2` wheels (targeting both `x86_64` and `arm64`) we will consider leveling everything to `MACOSX_DEPLOYMENT_TARGET=11.0`. You can use [packaging/python/build_static_readline_osx.bash](../../packaging/python/build_static_readline_osx.bash) to build a static readline library. -You can have a look at the script for requirements and usage. +You can have a look at the script for requirements and usage. + +### Installing macOS prerequisites + +Install the necessary Python versions by downloading the universal2 installers from https://www.python.org/downloads/macos/ +You'll need several other packages installed as well (brew is fine): + +``` +brew install --cask xquartz +brew install flex bison mpich cmake +brew unlink mpich && brew install openmpi +brew uninstall --ignore-dependencies libomp || echo "libomp doesn't exist" +``` + +Bison and flex installed through brew will not be symlinked into /opt/homebrew (installing it next to the version provided by OSX can cause problems). To ensure the installed versions will actually be picked up: + +``` +export BREW_PREFIX=$(brew --prefix) +export PATH=/opt/homebrew/opt/bison/bin:/opt/homebrew/opt/flex/bin:$PATH +``` ## Launch the wheel building ### Linux -Once we've cloned and mounted NEURON inside Docker(c.f. `-v` option described previously), we can proceed with wheels building. +Once we've cloned and mounted NEURON inside Docker(c.f. `-v` option described previously), we can proceed with wheels building. There is a build script which loops over available pythons in the Docker image under `/opt/python`, and then builds and audits the generated wheels. 
Wheels are generated under `/root/nrn/wheelhouse` and also accessible in the mounted NEURON folder from outside the Docker image. ``` # Working directory is /root/nrn -bash packaging/python/build_wheels.bash linux +bash packaging/python/build_wheels.bash linux ls -la wheelhouse ``` -You can build the wheel for a specific python version: +You can build the wheel for a specific python version: ``` bash packaging/python/build_wheels.bash linux 38 # 38 for Python v3.8 ``` -To build wheels with GPU support you have to pass an additional argument: -* `coreneuron` : build wheel with `CoreNEURON` support -* `coreneuron-gpu` : build wheel with `CoreNEURON` and `GPU` support - +To build wheels with CoreNEURON support you have to pass an additional argument: `coreneuron`. ``` -bash packaging/python/build_wheels.bash linux 38 coreneuron-gpu - -# or - bash packaging/python/build_wheels.bash linux 3* coreneuron ``` -In the last example we are passing `3*` to build the wheels with `CoreNEURON` support for all python 3 versions. +Where we are passing `3*` to build the wheels with `CoreNEURON` support for all python 3 versions. ### macOS As mentioned above, for macOS all dependencies have to be available on a system. You have to then clone NEURON repository and execute: @@ -160,6 +179,14 @@ cd nrn bash packaging/python/build_wheels.bash osx ``` +In some cases, setuptools-scm will see extra commits and consider your build as "dirty," resulting in filenames such as `NEURON-9.0a1.dev0+g9a96a3a4d.d20230717-cp310-cp310-macosx_11_0_arm64.whl` (which should have been `NEURON-9.0a0-cp310-cp310-macosx_11_0_arm64.whl`). If this happens, you can set an environment variable to correct this behavior: + +``` +export SETUPTOOLS_SCM_PRETEND_VERSION=9.0a +``` + +Change the pretend version to whatever is relevant for your case. + ## Testing the wheels To test the generated wheels, you can do: @@ -174,7 +201,7 @@ bash packaging/python/test_wheels.sh python3.8 "-i https://test.pypi.org/simple/ ### MacOS considerations -On MacOS, launching `nrniv -python` or `special -python` can fail to load `neuron` module due to security restrictions. +On MacOS, launching `nrniv -python` or `special -python` can fail to load `neuron` module due to security restrictions. For this specific purpose, please `export SKIP_EMBEDED_PYTHON_TEST=true` before launching the tests. ### Testing on BB5 @@ -186,26 +213,6 @@ module load unstable python bash packaging/python/test_wheels.sh python3.8 wheelhouse/NEURON-7.8.0.236-cp38-cp38m-manylinux1_x86_64.whl ``` -The GPU wheels can be also tested in same way on the CPU partition. In this case only pre-compiled binaries -like `nrniv` and `nrniv-core` are tested on the CPU. In order to test full functionality of GPU wheels we need to -do the following: -* Allocate GPU node -* Load NVHPC compiler -* Launch `test_wheels.sh` - -``` -salloc -A proj16 -N 1 --ntasks-per-node=4 -C "volta" --time=1:00:00 -p prod --partition=prod --exclusive -module load unstable python nvhpc - -bash packaging/python/test_wheels.sh python3 NEURON_gpu_nightly-8.0a709-cp38-cp38-manylinux_2_17_x86_64.manylinux2014_x86_64.whl -``` - -The `test_wheels.sh` will check if `nvc/nvc++` compilers are available and run tests for `hpe-mpi`, `intel-mpi` and `mvapich2` MPI modules. -Also, it checks if GPU is available (using `pgaccelinfo -nvidia` command) and then runs a few tests on the GPU as well. 
-
-Similar to BB5, the wheel can be tested on any desktop system provided that NVHPC compiler module is loaded or appropriate PATH environment variable is setup.
-
-
## Publishing the wheels on PyPI via Azure

### Variables that drive PyPI upload
@@ -280,3 +287,17 @@ The reason we are setting `SETUPTOOLS_SCM_PRETEND_VERSION` to a desired version
## Nightly wheels

Nightly wheels get automatically published from `master` in CRON mode.
+
+
+## How to test Azure wheels locally
+
+After retrieving the Azure drop URL (e.g. from the GitHub PR comment, or by going to Azure for a specific build):
+
+```bash
+python3 -m pip wheel neuron-gpu-nightly --wheel-dir tmp --find-links 'https://dev.azure.com/neuronsimulator/aa1fb98d-a914-45c3-a215-5e5ef1bd7687/_apis/build/builds/7600/artifacts?artifactName=drop&api-version=7.0&%24format=zip'
+```
+will download the wheel and its dependencies to `tmp/` and then you can test it with:
+
+```bash
+./packaging/python/test_wheels.sh python3 ./tmp/NEURON_gpu_nightly-...whl true
+```
diff --git a/docs/install/windows.md b/docs/install/windows.md
index 384a54a0c7..daf80e42b2 100644
--- a/docs/install/windows.md
+++ b/docs/install/windows.md
@@ -33,6 +33,43 @@ in order to:
* install MPI
* install MSYS2 (via `Chocolatey`) and then MinGW toolchain and required build packages

+## Setting up Visual Studio Code
+
+It is highly recommended to use Visual Studio Code for development. You can install it from [https://code.visualstudio.com/](https://code.visualstudio.com/).
+
+During the development process, you will be using PowerShell, cmd, and especially the MSYS2 MINGW64 shell. In order to be able to launch any of these in the IDE, add the following to the `.vscode/settings.json` file in the root of the `nrn` repository:
+
+```json
+{
+    "cmake.configureOnOpen": false,
+    "terminal.integrated.profiles.windows": {
+        "PowerShell": {
+            "source": "PowerShell",
+            "icon": "terminal-powershell"
+        },
+        "Command Prompt": {
+            "path": [
+                "${env:windir}\\Sysnative\\cmd.exe",
+                "${env:windir}\\System32\\cmd.exe"
+            ],
+            "args": [],
+            "icon": "terminal-cmd"
+        },
+        "MSYS2": {
+            "path": "C:\\msys64\\usr\\bin\\bash.exe",
+            "args": [
+                "--login",
+                "-i"
+            ],
+            "env": {
+                "MSYSTEM": "MINGW64",
+                "CHERE_INVOKING": "1"
+            }
+        }
+    },
+}
+```
+
## How to build NEURON

For a complete `build/install/create setup.exe`, in a `MinGW64` shell you can run:
@@ -64,3 +101,55 @@ make install
make setup_exe
```

+Note that by default, the install path is `C:\nrn-install`. When building the installer via `setup_exe`, that is the path that will be used.
+
+## Troubleshooting
+
+### My PR is breaking the GitHub Windows CI
+
+You can use the `live-debug-win` PR tag. To enable it, you have to:
+ * add 'live-debug-win' to your PR title
+ * push something to your PR branch (note that just re-running the pipeline disregards the title update)
+
+This will set up an SSH session on `tmate.io` and you will get an MSYS2 shell to debug your feature. Be aware that this is limited to the MSYS2 environment. For more involved debugging, you should set up a local Windows environment.
+
+### The installer is missing some files (includes, libraries, etc.)
+
+We ship a minimal g++ compiler toolchain to allow nrnivmodl (mknrndll) to build nrnmech.dll.
+This is handled by `mingw_files/nrnmingwenv.sh`.
+
+Points of interest:
+* `cp_dlls()` -> ship all needed DLLs by processing the output of `cygcheck`. You can add more files to the caller list if needed.
+* `copyinc()` -> ship all needed include files by processing the output of `g++ -E`.
You can add more files to the caller list if needed. +* `lib` paths for multiple versions of gcc. This is how you would handle it: + ```bash + gcclib=mingw64/lib/gcc/x86_64-w64-mingw32/$gccver # gcc 11.2.0 Rev 1 + if test -f /mingw64/lib/libgcc_s.a ; then # gcc 11.2.0 Rev 10 + gcclib=mingw64/lib + fi + copy $gcclib ' + libgcc_s.a + libstdc++.dll.a + ' + ``` + +### Windows CI has a new build failure without any code change + +GitHub/Azure runners are regularly updated. MSYS2 is already installed on the system with a specfic `pacman` cache at the time the runner images are built. As a consequence, some packages may not play well with the new environment. + +First line of attack is to compare successful and failed builds to see what changed. If the issue is related to a new package, one approach is to update the MSYS2 cache in `ci/win_install_deps.cmd` by uncommenting the following line: +```powershell +:: update pacman cache (sometimes required when new GH/Azure runner images are deployed) +:: %MSYS2_ROOT%\usr\bin\pacman -Syy +``` + +Downsides: +* slower CIs (more time to install new things from cache gradually over time) +* hit issues sooner rather than later (but then we can disable the cache update) + + +### association.hoc test failed + +Unfortunately there is not much we can do about it. This test is very very tricky to handle in the CI and it tests that `.hoc` files can be directly launched with the NEURON installation. We currently have a mitigation in place, we do it in two separate CI steps. Re-running the CI should fix the issue. + + diff --git a/docs/nmodl/python_scripts/utils/cell.py b/docs/nmodl/python_scripts/utils/cell.py index 9a47c2b3ac..1d232d7456 100644 --- a/docs/nmodl/python_scripts/utils/cell.py +++ b/docs/nmodl/python_scripts/utils/cell.py @@ -24,7 +24,6 @@ def simulate(self, tstop, dt, corenrn=False, gpu=False): coreneuron.enable = corenrn coreneuron.gpu = gpu - h.CVode().cache_efficient(1) pc = h.ParallelContext() pc.set_maxstep(10) h.stdinit() diff --git a/docs/notebooks.sh b/docs/notebooks.sh index 6f130d9bcb..e57243ac89 100644 --- a/docs/notebooks.sh +++ b/docs/notebooks.sh @@ -11,7 +11,7 @@ convert_notebooks() { set -e working_dir=$1 echo "Running convert_notebooks in $1" - (cd "$working_dir" && jupyter nbconvert --to notebook --inplace --execute *.ipynb) + (cd "$working_dir" && jupyter nbconvert --debug --to notebook --inplace --execute *.ipynb) } clean_notebooks() { diff --git a/docs/python/envvariables.rst b/docs/python/envvariables.rst index ac07a04c05..aa48b9dbc4 100644 --- a/docs/python/envvariables.rst +++ b/docs/python/envvariables.rst @@ -61,26 +61,3 @@ NEURON_MODULE_OPTIONS os.environ["NEURON_MODULE_OPTIONS"] = nrn_options from neuron import h assert(nrn_options in h.nrnversion(7)) - - - -NRNUNIT_USE_LEGACY ------------------- - When set to 1, legacy unit values for FARADAY, R, and a few other constants - are used. See ``nrn/share/lib/nrnunits.lib.in`` lines which begin with - ``@LegacyY@``, ``nrn/src/oc/hoc_init.c`` in the code section - ``static struct { /* Modern, Legacy units constants */``, and - ``nrn/src/nrnoc/eion.c``. - - When set to 0, (default), values from codata2018 are used. - See ``nrn/share/lib/nrnunits.lib.in`` lines that begin with - ``@LegacyN@`` and ``nrn/src/oc/nrnunits_modern.h``. - - Switching between legacy and modern units can also be done after launch - with the top level HOC function :func:`nrnunit_use_legacy`. 
- - The purpose of allowing legacy unit values is to easily validate - results of old models (double precision identity). - - This environment variable takes precedence over the CMake option - ``NRN_DYNAMIC_UNITS_USE_LEGACY``. diff --git a/docs/python/index.rst b/docs/python/index.rst index 3f6f79b226..1817e3b993 100755 --- a/docs/python/index.rst +++ b/docs/python/index.rst @@ -4,6 +4,11 @@ NEURON Python documentation =========================== (:ref:`Switch to HOC documentation `) +For a basic introduction on how to run Python scripts with NEURON, see +:ref:`running Python scripts with NEURON `, +and you may also like to refer to the `NEURON Python tutorial +<../tutorials/scripting-neuron-basics.html>`_. + Quick Links ----------- - :ref:`genindex` @@ -23,14 +28,14 @@ Quick Links `KSChan `_, `LinearMechanism `_, `MechanismStandard `_, `MechanismType `_, `NetCon `_, `ParallelContext `_, - `ParallelNetManager `_, `PlotShape `_, `Python `_, + `ParallelNetManager `_, `PlotShape `_, `RangeVarPlot `_, `SaveState `_, `SectionBrowser `_, `SectionList `_, `SectionRef `_, `Shape `_, `StateTransitionEvent `_ - :ref:`panel`, :ref:`funfit`, :ref:`geometry`, :ref:`printf_doc`, :ref:`ockeywor`, :ref:`math`, - :ref:`nmodl2`, :ref:`nmodl`, :ref:`mech`, :ref:`predec`, :ref:`standardruntools`, - :ref:`ocsyntax`, :ref:`topology` + :ref:`panel`, :ref:`funfit`, :ref:`geometry`, :ref:`printf_doc`, :ref:`math`, + :ref:`nmodl2`, :ref:`nmodl`, :ref:`mech`, :ref:`standardruntools`, + :ref:`topology` :ref:`neuron_rxd` @@ -40,8 +45,6 @@ Basic Programming .. toctree:: :maxdepth: 1 - NEURON Python tutorial - programming/hoc.rst programming/mathematics.rst programming/strings.rst programming/guidesign.rst @@ -52,6 +55,8 @@ Basic Programming programming/dynamiccode.rst programming/projectmanagement.rst programming/internals.rst + programming/neuron_classes.rst + programming/hoc-from-python.rst Model Specification ------------------- diff --git a/docs/python/modelspec/programmatic/kschan.rst b/docs/python/modelspec/programmatic/kschan.rst index 3620a94394..7da5ede4af 100755 --- a/docs/python/modelspec/programmatic/kschan.rst +++ b/docs/python/modelspec/programmatic/kschan.rst @@ -11,28 +11,35 @@ KSChan Syntax: ``kschan = h.KSChan()`` + + ``kschan = h.KSChan(is_PointProcess)`` Description: - Declare and manage a new density channel type which can - be instantiated in sections with the :ref:`insert ` - statement. After the type comes into existence it - is always a valid type and the conductance style, - ligands, name, gating functions, etc can be changed - at any time. The type cannot be destroyed. + Declare and manage a new density channel or PointProcess type. + Density channels can be instantiated in sections with the :ref:`insert ` + statement. PointProcess channels are instantiated like :class:`IClamp`. After the type comes into existence it + is always a valid type and the conductance style, + ligands, name, gating functions, etc can be changed + at any time. However if an instance of the channel is currently in existance + no change is allowed to the number of states or parameters of the channel. + For example, if an instance of the channel exists, one cannot switch between single + channel mode and continuous mode as the former has an extra range variable called Nsingle. + (But when Nsingle is a parameter, a value of 0 causes the channel to compute in continuous mode). + The type cannot be destroyed. 
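A minimal sketch of the two construction modes described above (the channel and class names here are hypothetical, and ``KSChan.name`` is used only to give each new type a name):

    .. code-block::
        python

        from neuron import h

        # density channel type: can later be inserted with section.insert(...)
        kschan_density = h.KSChan()
        kschan_density.name("ksdens")       # hypothetical mechanism name

        # point process channel type: instantiated per location, like an IClamp
        kschan_pp = h.KSChan(1)             # the argument is the is_PointProcess flag
        kschan_pp.name("KSPointProcess")    # hypothetical class name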
- This is an extension of the KSChan managed by the - Java catacomb channel builder tool - for the past several - years. The primary functional extension is the - ability to define HH-style gates in addition to - kinetic scheme gates. The administrative extensions - allowed a more convenient re-implementation of the - channel builder gui in NEURON --- albeit substantially - similar. The KSChan :meth:`KSChan.setstructure` method - uses a slightly modified vector format so the old - Java channelbuilder tool will not work without - updating the Java implementation. + This is an extension of the KSChan managed by the + Java catacomb channel builder tool + for the past several + years. The primary functional extension is the + ability to define HH-style gates in addition to + kinetic scheme gates. The administrative extensions + allowed a more convenient re-implementation of the + channel builder gui in NEURON --- albeit substantially + similar. The KSChan :meth:`KSChan.setstructure` method + uses a slightly modified vector format so the old + Java channelbuilder tool will not work without + updating the Java implementation. ---- diff --git a/docs/python/modelspec/programmatic/mechanisms/mech.rst b/docs/python/modelspec/programmatic/mechanisms/mech.rst index 6cf2311199..6a2ae358a2 100755 --- a/docs/python/modelspec/programmatic/mechanisms/mech.rst +++ b/docs/python/modelspec/programmatic/mechanisms/mech.rst @@ -668,7 +668,7 @@ General from neuron import h pc = h.ParallelContext() - + #Model cell = h.IntFire1() cell.refrac = 0 # no limit on spike rate @@ -678,13 +678,14 @@ General for i, nc in enumerate(nclist): nc.weight[0] = 2 # anything above 1 causes immediate firing for IntFire1 nc.delay = 1 + 0.1*i # incoming (t, gid) generates output (t + 1 + 0.1*gid, 0) - + # Record all spikes (cell is the only one generating output spikes) - out = [h.Vector() for _ in range(2)] - pc.spike_record(-1, out[0], out[1]) - + spike_ts = h.Vector() + spike_ids = h.Vector() + pc.spike_record(-1, spike_ts, spike_ids) + #PatternStim - tvec = h.Vector(range(10)) + tvec = h.Vector(range(10)) gidvec = h.Vector(range(10)) # only 0,1,2 go to cell ps = h.PatternStim() ps.play(tvec, gidvec) @@ -694,9 +695,10 @@ General pc.set_maxstep(10.) h.finitialize(-65) pc.psolve(7) - - for i, tsp in enumerate(out[0]): - print (tsp, int(out[1][i])) + + for spike_t, spike_cell_id in zip(spike_ts, spike_ids): + print(f"{spike_t} {int(spike_cell_id)}") + Output: Notice that 2.1 is the first output because (0, 0) is discarded by PatternStim @@ -704,6 +706,7 @@ General (1, 1) is the first spike that gets passed into a NetCon (with delay 1.1) so the first output spike is generated at 2.2 and that spike gets recursively regenerated every 1.0 ms. PatternStim spikes with gid > 3 are discarded. + .. code-block:: 2.1 0 @@ -944,7 +947,10 @@ Mechanisms Syntax: ``h.setdata_suffix(section(x))`` - + Deprecated for Python: + In Python one can use the syntax ``section(x).suffix.fname(args)`` to call a FUNCTION + or PROCEDURE regardless of whether the function uses RANGE variables. + Description: If a mechanism function is called that uses RANGE variables, then the appropriate data needed by the function must first be indicated via a setdata call. 
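As a sketch of the two idioms (assuming a hypothetical density mechanism ``mymech`` that defines a FUNCTION ``f`` and has already been compiled and loaded):

    .. code-block::
        python

        from neuron import h

        soma = h.Section(name="soma")
        soma.insert("mymech")              # hypothetical mechanism providing FUNCTION f()

        # preferred Python idiom: no setdata call is needed
        value = soma(0.5).mymech.f()

        # legacy idiom: first point the mechanism at the segment's data
        h.setdata_mymech(soma(0.5))
        value = h.f_mymech()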
diff --git a/docs/python/modelspec/programmatic/mechanisms/nmodl.rst b/docs/python/modelspec/programmatic/mechanisms/nmodl.rst index 404ce6aea1..afc456ab39 100755 --- a/docs/python/modelspec/programmatic/mechanisms/nmodl.rst +++ b/docs/python/modelspec/programmatic/mechanisms/nmodl.rst @@ -114,7 +114,7 @@ this use the command: modlunit file -leaving off the file extension. For more information about units click here. +leaving off the file extension. For more information about units `click here `_. Rationale """"""""" @@ -283,12 +283,7 @@ Description: the UNIX units database. This can increase legibility and convenience, and is helpful both as a reminder to the user and as a means for automating the process of checking for consistency of units. - The UNIX units database taken into account is defined in the `nrnunits.lib file `_. - This file includes two versions of the units due to the updates in the values of their base - units. Currently there are legacy and modern units that contain the changes after the updates - introduced on 2019 to the nist constants. The selection between those two versions can be done - using the ``NRN_DYNAMIC_UNITS_USE_LEGACY`` CMake variable or a call to - ``h.nrnunit_use_legacy(bool)`` during runtime. + The UNIX units database (based on the 2019 updated NIST constants) taken into account is defined in the `nrnunits.lib file `_. New units can be defined in terms of default units and previously defined units by placing definitions in the UNITS block. e.g. @@ -485,7 +480,7 @@ that exists should be set up via the command: .. code-block:: python - h.setpointer(_ref_nrnvar, 'POINTER_name', mechanism_object) + mechanism_object._ref_somepointer = source_obj._ref_varname Here mechanism_object (a point process object or a density mechanism) and the other arguments @@ -511,7 +506,7 @@ Then python syn = h.Syn(section(0.8)) - h.setpointer(axon(1)._ref_v, 'vpre', syn) + syn._ref_vpre = axon(1)._ref_v will allow the ``syn`` object located at ``section(0.8)`` to know the voltage at the distal end of the axon section. As a variation on that example, if one supposed that the synapse @@ -523,7 +518,7 @@ statement would be .. code-block:: python - h.setpointer(rel._ref_ACH_release, 'trpe', syn) + syn._ref_trpe = rel._ref_ACH_release The caveat is that tight coupling between states in different models diff --git a/docs/python/modelspec/programmatic/mechanisms/nmodl2.rst b/docs/python/modelspec/programmatic/mechanisms/nmodl2.rst index dc949f32ec..e5ad80727d 100644 --- a/docs/python/modelspec/programmatic/mechanisms/nmodl2.rst +++ b/docs/python/modelspec/programmatic/mechanisms/nmodl2.rst @@ -44,6 +44,7 @@ Description: POINT_PROCESS ... POINTER pointer1, ... BBCOREPOINTER bbcore1, ... + RANDOM ranvar1, ... EXTERNAL external1, ... THREADSAFE REPRESENTS ontology_id @@ -410,6 +411,55 @@ Description: ``TODO``: Add description (?) and existing example mod file (provided by link) +.. _nmodlrandom: + +RANDOM +###### + +Description: + .. code-block:: + + NEURON { + RANDOM ranvar1, ... + } + + These names refer to random variable streams that are automatically + associated with nrnran123 generators. Such nrnran123 generators are also used, for example to implement + :meth:`Random.Random123` + These names are analogous to range variables in that the streams are distinct for every mechanism instance + of a POINT_PROCESS, ARTIFICIAL_CELL, or instance of a density mechanism in a segment of a cable section. + Each stream exists for the lifetime of the mechanism instance. 
While a stream exists, its properties can + be changed from the interpreter. + + Prior to the introduction of this keyword, random streams required a POINTER variable and + fairly elaborate VERBATIM blocks + to setup the streams and manage the stream properties from HOC or Python so that each stream was + statistically independent of all other streams. + + From the interpreter, the ranvar1 stream properties are assigned and evaluated using standard + range variable syntax where mention of ranvar1 returns a :class:`~NMODLRandom` object that wraps the stream + and provides method calls to get and set the three stream ids and the starting sequence number. + + When a stream is instantiated, its identifier triplet is default initialized to + (1, :meth:`mpiworldrank `, ++internal_id3) + so all streams are statistically independent (at launch time, internal_id3 = 0). + However since the identifier triplet depends on the order of + construction, it is recommended for parallel simulation reproducibility that triplets be algorithmically specified + at the interpreter level. And see :meth:`Random.Random123_globalindex`. + + At present, the list of random_... methods available for use within mod files (outside of VERBATIM blocks) are: + + * random_setseq(ranvar1, uint34_value) + * random_setids(ranvar1, id1_uint32, id2_uint32, id3_uint32) + * x = random_uniform(ranvar1) : uniform 0 to 1 -- minimum value is 2.3283064e-10 and max value is 1-min + * x = random_uniform(ranvar1, min, max) + * x = random_negexp(ranvar1) : mean 1.0 -- min value is 2.3283064e-10, max is 22.18071 + * x = random_negexp(ranvar1, mean) + * x = random_normal(ranvar1) : mean 1.0, std 1.0 + * x = random_normal(ranvar1, mean, std) + * x = random_ipick(ranvar1) : range 0 to 2^32-1 + * x = random_dpick(ranvar1) + EXTERNAL ######## diff --git a/docs/python/modelspec/programmatic/network/parcon.rst b/docs/python/modelspec/programmatic/network/parcon.rst index adca918782..4fd8d22f0e 100755 --- a/docs/python/modelspec/programmatic/network/parcon.rst +++ b/docs/python/modelspec/programmatic/network/parcon.rst @@ -3032,9 +3032,6 @@ Parallel Transfer a single cpu. It does not matter if a one sid subtree is declared short or not; it is solved exactly in any case. - Note: using multisplit automatically sets - ``CVode.cache_efficient(1)`` - .. warning:: Implemented only for fixed step methods. Cannot presently be used with variable step @@ -3209,6 +3206,21 @@ Parallel Transfer ---- +.. method:: ParallelContext.get_partition + + + Syntax: + ``seclist = pc.get_partition(i)`` + + + Description: + Returns a new :func:`SectionList` with references to all the root sections + of the ith thread. + + +---- + + .. method:: ParallelContext.thread_stat @@ -3293,7 +3305,7 @@ Parallel Transfer The high resolution walltime time in seconds the indicated thread used during time step integration. Note that this does not include reduced tree computation time used by thread 0 when :func:`multisplit` is - active. + active. With no arg, sets thread_ctime of all threads to 0. ---- @@ -3415,8 +3427,8 @@ Parallel Transfer teardown is the test_submodel.py in http://github.com/neuronsimulator/ringtest. - This function requires cvode.cache_efficient(1) . Multisplit is not - supported. The model cannot be more complicated than a spike or gap + Multisplit is not supported. + The model cannot be more complicated than a spike or gap junction coupled parallel network model of real and artificial cells. 
Real cells must have gids, Artificial cells without gids connect only to cells in the same thread. No POINTER to data outside of the @@ -3451,7 +3463,6 @@ Parallel Transfer # run model from neuron import coreneuron coreneuron.enable = True - h.CVode().cache_efficient(1) h.stdinit() pc.psolve(h.tstop) diff --git a/docs/python/programming/gui/textmenus.rst b/docs/python/programming/gui/textmenus.rst deleted file mode 100755 index ea01e01b48..0000000000 --- a/docs/python/programming/gui/textmenus.rst +++ /dev/null @@ -1,64 +0,0 @@ -.. _lw_doc: - -Obsolete Text Menus -------------------- - -The functions above have been superseded by the graphical user interface -but are available for use on unix machines and in the DOS version. Using -the arrow keys may or may not work on a modern machine, depending on terminal -configuration. -See :ref:`GUI`. - ----- - - - -.. function:: fmenu - - - Syntax: - - ``h.fmenu(nmenu, -1)`` - allocates space for nmenu number of menus - Menu identifier numbers start at 0,1,...nmenu-1 - - ``h.fmenu(imenu, 0)`` - erase previous menu identified by imenu. - - ``h.fmenu(imenu,1,var list)`` - add variables specified in list - to imenu. The variable names will - be added sequentially in the order - specified. - - ``h.fmenu(imenu,2,"prompt","command")`` - add the executable command - specified by a prompt,command pair - to imenu. - - ``h.fmenu(imenu)`` - executes menu imenu, displays, navigates through imenu. - - Description: - This is an old terminal based menu system that has been superseded by the - :ref:`GUI`. - - ``fmenu`` creates, displays, and allows user to move within a menu to - select and change - a displayed variable value or to execute a command. - - The user can create space for - a series of menus and execute individual menus with each menu consisting of - lists of - variables and commands. Menus can execute commands which call other - menus and in this way a hierarchical menu system can be constructed. - Menus can be navigated by using arrow keys or by typing the first character - of a menu item. To exit a menu, either press the :kbd:`Esc` key, execute the - "Exit" item, or execute a command which has a "stop" statement. - A command item is executed by pressing the Return key. A variable item - is changed by typing the new number followed by a Return. - - See the file :file:`$NEURONHOME/doc/man/oc/menu.tex` for a complete description - of this function. - - diff --git a/docs/python/programming/guidesign.rst b/docs/python/programming/guidesign.rst index 2e9d70165a..8fa2c3d429 100755 --- a/docs/python/programming/guidesign.rst +++ b/docs/python/programming/guidesign.rst @@ -13,7 +13,6 @@ GUI Design gui/vfe.rst gui/pwman.rst gui/nfunc.rst - gui/textmenus.rst gui/dialog.rst gui/misc.rst diff --git a/docs/python/programming/python.rst b/docs/python/programming/hoc-from-python.rst similarity index 80% rename from docs/python/programming/python.rst rename to docs/python/programming/hoc-from-python.rst index 08a2dfe956..24d530f593 100755 --- a/docs/python/programming/python.rst +++ b/docs/python/programming/hoc-from-python.rst @@ -1,46 +1,21 @@ -.. _python: - -.. warning:: - - Some of the idioms on this page are out of date, but they still work. - See the NEURON Python tutorial for modern idioms. - -Python Language ---------------- - -This document describes installation and basic use of NEURON's Python interface. For information on the modules in the ``neuron`` namespace, see: - -.. toctree:: :maxdepth: 1 - - neuronpython.rst - - .. 
_python_accessing_hoc: -Python Accessing HOC -~~~~~~~~~~~~~~~~~~~~ - - +Accessing HOC from Python +------------------------- -Syntax: - ``nrniv -python [file.hoc file.py -c "python_statement"]`` +This section describes how one can interact with HOC features and the HOC +interpreter from Python code. - ``nrngui -python ...`` +In many cases, HOC provides features that are natively supported in Python. +In these cases, it is usually preferable to use the Python version, which will +be familiar to a wider range of people. +Nonetheless, in isolated situations the following section may be useful: +:ref:`hoc_features_you_should_not_use_from_python`. - ``neurondemo -python ...`` - - -Description: - Launches NEURON with Python as the command line interpreter. - File arguments with a .hoc suffix are interpreted using the - Hoc interpreter. File arguments with the .py suffix are interpreted - using the Python interpreter. The -c statement causes python to - execute the statement. - The import statements allow use of the following - - +.. warning:: ----- + Some of the idioms on this page are out of date, but they still work. + See the NEURON Python tutorial for modern idioms. .. note:: @@ -48,7 +23,7 @@ Description: with HOC; for a Python-based introduction to NEURON, see `Scripting NEURON Basics <../../tutorials/scripting-neuron-basics.html>`_ - +.. _python_HocObject_class: .. class:: neuron.hoc.HocObject @@ -680,145 +655,6 @@ Mechanism To iterate over density mechanisms, use: ``for mech in seg: print (mech)`` To get a python list of point processes in a segment: ``pplist = seg.point_processes()`` ----- - -.. _Hoc_accessing_Python: - -HOC accessing Python -~~~~~~~~~~~~~~~~~~~~ - - - Syntax: - ``nrniv [file.py|file.hoc...]`` - - - Description: - The absence of a -python argument causes NEURON to launch with Hoc - as the command line interpreter. Python files (or Hoc files) are run - with the appropriate interpreter before presenting a Hoc user-interface. - From the hoc world any python statement can be executed and anything - in the python world can be assigned or evaluated. - - ----- - - - -.. function:: nrnpython - - - Syntax: - ``nrnpython("any python statement")`` - - - Description: - Executes any python statement. Returns 1 on success; 0 if an exception - was raised or if python support is not available. - - In particular, ``python_available = nrnpython("")`` is 1 (true) if - python support is available and 0 (false) if python support is not - available. - - Example: - - .. code-block:: - python - - nrnpython("import sys") - nrnpython("print(sys.path)") - nrnpython("a = [1,2,3]") - nrnpython("print(a)") - nrnpython("from neuron import h") - nrnpython("h('print PI')") - - - - ----- - - - -.. class:: PythonObject - - - Syntax: - ``p = new PythonObject()`` - - - Description: - Accesses any python object. Almost equivalent to :class:`~neuron.hoc.HocObject` in the - python world but because of some hoc syntax limitations, ie. hoc does not - allow an object to be a callable function, and top level indices have - different semantics, we sometimes need to use a special idiom, ie. the '_' - method. Strings and double numbers move back and forth between Python and - Hoc (but Python integers, etc. become double values in Hoc, and when they - get back to the Python world, they are doubles). - - - .. 
code-block:: - python - - objref p - p = new PythonObject() - nrnpython("ev = lambda arg : eval(arg)") // interprets the string arg as an - //expression and returns the value - objref tup - print p.ev("3 + 4") // prints 7 - print p.ev("'hello' + 'world'") // prints helloworld - tup = p.ev("('xyz',2,3)") // tup is a PythonObject wrapping a Python tuple - print tup // prints PythonObject[1] - print tup._[2] // the 2th tuple element is 3 - print tup._[0] // the 0th tuple element is xyz - - nrnpython("from neuron import h") // back in the Python world - nrnpython("print h.tup") // prints ('xyz', 2, 3) - - Note that one needs the '_' method, equivalent to 'this', because trying to - get at an element through the built-in python method name via - - .. code-block:: - python - - tup.__getitem__(0) - - gives the error "TypeError: tuple indices must be integers" since - the Hoc 0 argument is a double 0.0 when it gets into Python. - It is difficult to pass an integer to a Python function from the hoc world. - The only time Hoc doubles appear as integers in Python, is when they are - the value of an index. If the index is not an integer, e.g. a string, use - the __getitem__ idiom. - - .. code-block:: - python - - objref p - p = new PythonObject() - nrnpython("ev = lambda arg : eval(arg)") - objref d - d = p.ev("{'one':1, 'two':2, 'three':3}") - print d.__getitem__("two") // prints 2 - - objref dg - dg = d.__getitem__ - print dg._("two") // prints 2 - - - To assign a value to a python variable that exists in a module use - - .. code-block:: - python - - nrnpython("a = 10") - p = new PythonObject() - p.a = 25 - p.a = "hello" - p.a = new Vector(4) - nrnpython("b = []") - p.a = p.b - - - - ---- .. method:: neuron.hoc.execute @@ -846,4 +682,19 @@ HOC accessing Python .. seealso:: :func:`nrnpython` - \ No newline at end of file +.. _hoc_features_you_should_not_use_from_python: +Python-specific documentation of discouraged HOC features +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +This section contains versions of the HOC documentation for certain features +that have been updated to be somewhat Python-specific. +You may find them useful, but in general Python-native versions are to be +preferred. + +.. toctree:: + :maxdepth: 1 + + io/file.rst + io/printf.rst + io/read.rst + io/ropen.rst diff --git a/docs/python/programming/hoc.rst b/docs/python/programming/hoc.rst deleted file mode 100755 index 6738217872..0000000000 --- a/docs/python/programming/hoc.rst +++ /dev/null @@ -1,12 +0,0 @@ -HOC Language ------------- - -.. toctree:: - :maxdepth: 3 - - hocsyntax.rst - ockeywor.rst - io.rst - oop.rst - predec.rst - diff --git a/docs/python/programming/hocsyntax.rst b/docs/python/programming/hocsyntax.rst deleted file mode 100755 index bc8097d796..0000000000 --- a/docs/python/programming/hocsyntax.rst +++ /dev/null @@ -1,417 +0,0 @@ -.. index:: syntax (HOC) - -.. _ocsyntax: - -HOC Syntax ----------- - -HOC was the original programming language supported by NEURON. -NEURON may also be programmed in Python. - - -.. index:: comments - -comments -~~~~~~~~ -Syntax: - ``/*...*/`` - - ``//...`` - - - -Description: - Comments are similar in style to C++. Enclose any text in /*...*/ (but - do not nest them). The rest of a line after // is a comment. - - - - -.. index:: expression - -Expressions -~~~~~~~~~~~ - -Description: - An expression has a double precision value. It usually appears as the right - hand side of an assignment statement. 
An expression may be a number, - variable, function call, or combination of simpler expressions. - - -Options: - The ways in which expressions can be combined are listed below - in order of precedence. e stands for any expression, v stands for any variable - and operators are - left associative except for assignment operators which are right associative. - - - ``(e)`` - grouping - - ``e^e`` - exponentiation - - ``-e`` - negation - - ``e*e e/e e%e`` - multiplication, division, modulus - - ``e+e e-e`` - addition, subtraction - - ``e==e e!=e ee e>=e`` - logical equal, unequal, less than, less than or equal, greater than, - greater than or equal. These expressions have the numerical - value 1 (true) or 0 (false). The expression is considered true if it is - within :data:`float_epsilon` of being mathematically exact. - - Special logical expressions of the form objref1 == objref2 (and obj != obj) - are also allowed and return 1 (0) if the object references label the same - object. This makes the former comparison idiom using - :func:`object_id` obsolete. Logical expressions of the strdef1 == strdef2 - cannot be directly compared because of parser consistency reasons. However - obj1.string1 == obj2.string2 will return true if the strings are identical - in the sense of :func:`strcmp` . - - - - ``e&&e`` - Logical and. Both expressions - are always evaluated. A subexpression is considered false if it is within - :data:`float_epsilon` of 0 and true otherwise. If the entire expression is true - its value is 1. - - ``e||e`` - Logical or. Both expressions are always evaluated. - A subexpression is considered false if it is within - :data:`float_epsilon` of 0 and true otherwise. If the entire expression is true - its value is 1. - - ``v=e v+=e v-=e v*=e v/=e`` - assignment. others are equivalent to ``v = (v + e)``, - ``v = (v - e)``, - ``v = (v * e)``, - ``v = (v / e)``, respectively. - - - -.. seealso:: - :data:`float_epsilon` - - - - .. index:: statement - -Statements -~~~~~~~~~~ - -Syntax: - ``stmt`` - - ``{stmt}`` - - ``{stmt stmt ...stmt}`` - - - -Description: - A statement is something executable that does not have a value, eg. - for loops, procedure calls, or a compound statement between braces. - An expression may be used anywhere a statement is required. - - -Example: - - .. code-block:: - none - - i = 0 //initialize i - j = 0 //initialize j - if(vec.x[i] <= 10 && i < vec.size()){ //In the parentheses is an expression: - //if the value of the ith element in vec - //is less than or equal to 10, and - //if i is an index within vec - // - //Between the braces is/are statement(s): - vec1.x[j] = vec.x[i] - i = i+1 //increment i by 1 - j = j+1 //increment j by 1 - } else{ - //Here is also a statement - i = i+1 //simply go to the next element of vec - } - - Statements exist between the braces following the ``if`` and ``else`` commands. - The parentheses after the ``if`` command contain an expression. - - - -.. index:: proc - -.. _proc: - - -proc -~~~~ -Syntax: - :samp:`proc {name}() stmt` - - - -Description: - Introduce the definition of a procedure. A procedure does not return a value. - You should always try to distill your programs into small, manageable - procedures and functions. - - -Example: - - .. code-block:: - none - - proc printsquare() {local x - x = $1 - print x*x - } - printsquare(5) - - prints the square of 5. - - Procedures can also be called within other procedures. 
- The code which produces the interactive examples for the :class:`Random` class contains procedures - for both creating the buttons which allow you to select parameters as well as for creating - the histograms which appear on the screen. - - - -.. index:: func - -.. _func: - -func -~~~~ - - - -Syntax: - :samp:`func {name}() {{stmt1, stmt2, stmt3...}}` - - - -Description: - Introduce the definition of a function. - A function returns a double precision value. - - -Example: - - .. code-block:: - none - - func tan() { - return sin($1)/cos($1) - } - tan(PI/8) - - creates a function ``tan()`` which takes one argument (floating point - or whole number), and contains one - statement. - - - - -.. index:: obfunc - -.. _obfunc: - -obfunc -~~~~~~ - -Syntax: - :samp:`obfunc {name}() {{ statements }}` - - -Description: - Introduce the definition of a function returning an objref - -Example: - - .. code-block:: - none - - obfunc last() { // arg is List - return $o1.object($o1.count - 1) - } - - -.. seealso:: - :ref:`localobj `, :ref:`return ` - - - -.. index:: iterator - -.. _keyword_iterator: - -iterator -~~~~~~~~ - - - -Syntax: - ``iterator name() stmt`` - - - -Description: - Define a looping construct to be used subsequently in looping - over a statement. - - -Example: - - .. code-block:: - none - - iterator case() {local i - for i = 2, numarg() { //must begin at 2 because the first argument is - //in reference to the address - $&1 = $i //what is at the address will be changed - *iterator_statement* //This is where the iterator statement will - //be executed. - } - } - - In this case - - .. code-block:: - none - - x=0 - for case (&x, 1,2,4,7,-25) { - print x //the iterator statement - } - - will print the values 1, 2, 4, 7, -25 - - The body of the ``for name(..) statement`` is executed in the same - context as a normal for statement. The name is executed in the same - context as a normal procedure but should use only variables local to the - iterator. - - - - - -.. index:: arguments - -.. _arguments: - -Arguments -~~~~~~~~~ - -Arguments to functions and procedures are retrieved positionally. -``$1, $2, $3`` refer to the first, second, and third scalar arguments -respectively. - -If "``i``" is declared as a local variable, ``$i`` refers -to the scalar argument in the position given by the value of ``i``. -The value of ``i`` must be in the -range {1...numarg()}. - -The normal idiom is - - ``for i=1, numarg() {print $i}`` - -Scalar arguments use call by value so the variable in the calling -statement cannot be changed. - -If the calling statement has a '&' -prepended to the variable then it is passed by reference and must -be retrieved with the -syntax ``$&1, $&2, ..., $&i``. If the variable passed by reference -is a one dimensional array then ``$&1`` refers to the first (0th) element -and index i is denoted ``$&1[i]``. Warning, NO array bounds checking is -done and the array is treated as being one-dimensional. A scalar or -array reference may be passed to another procedure with -``&$&1``. To save a scalar reference use the :class:`Pointer` class. - -Retrieval of strdef arguments uses the syntax: ``$s1, $s2, ..., $si``. -Retrieval of objref arguments uses the syntax: ``$o1, $o2, ..., $oi``. -Arguments of type :ref:`strdef ` and ``objref`` use call by reference so the calling -value may be changed. - -Example: - - .. code-block:: - none - - func mult(){ - return $1*$2 - } - - defines a function which multiplies two arguments. - Therefore ``mult(4,5)`` will return the value 20. - - .. 
code-block:: - none - - proc pr(){ - print $s3 - print $1*$2 - print $o4 - } - - defines a procedure which first prints the string defined in - position 3, then prints the product of the two numbers in - positions 1 and 2, and finally prints the pointer reference to an - object in position 4. - - For a string '``s``' which is defined as ``s = "hello"``, and an - objref '``r``', ``pr(3,5,s,r)`` will return - - .. code-block:: - none - - hello - 15 - Graph[0] - - assuming ``r`` refers to the first graph. - -.. seealso:: - :ref:`func`, :ref:`proc`, :ref:`objref`, :ref:`strdef `, :class:`Pointer`, :func:`numarg`, :func:`argtype` - ----- - -.. function:: numarg - - Syntax: - ``n = numarg()`` - - Description: - Number of arguments passed to a user written hoc function. - - .. seealso:: - :ref:`arguments`, :func:`argtype` - - ----- - -.. function:: argtype - - Syntax: - ``itype = argtype(iarg)`` - - Description: - The type of the ith arg. The return value is 0 for numbers, 1 for objref, - 2 for strdef, 3 for pointers to numbers, and -1 if the arg does not exist. - - .. seealso:: - :ref:`arguments`, :func:`numarg` - - - - - diff --git a/docs/python/programming/io.rst b/docs/python/programming/io.rst deleted file mode 100755 index acc4f1aca1..0000000000 --- a/docs/python/programming/io.rst +++ /dev/null @@ -1,11 +0,0 @@ -Input and Output ----------------- - -.. toctree:: - :maxdepth: 3 - - io/printf.rst - io/file.rst - io/read.rst - io/ropen.rst - io/plotters.rst diff --git a/docs/python/programming/io/plotters.rst b/docs/python/programming/io/plotters.rst deleted file mode 100755 index 4a4c94d10e..0000000000 --- a/docs/python/programming/io/plotters.rst +++ /dev/null @@ -1,101 +0,0 @@ -Plotter Control (obsolete) --------------------------- - - -.. function:: lw - - - - - Name: - lw - laser writer graphical output (or HP pen plotter) - - - - Syntax: - ``h.lw(file)`` - - ``h.lw(file, device)`` - - ``h.lw()`` - - - - - Description: - ``h.lw(file, device)`` opens a file to keep a copy of subsequent - plots (*file* is a string variable or a name enclosed in double - quotes). All graphs which are generated on the screen are saved in - this file in a format given by the integer value of the *device* argument. - - - - *device* =1 - Hewlett Packard pen plotter style. - - *device* =2 - Fig style (Fig is a public domain graphics program available - on the SUN computer). The filter ``f2ps`` translates fig to postscript. - - *device* =3 - Codraw style. Files in this style can be read into the - PC program, ``CODRAW``. The file should be opened with the extension, - ``.DRA``. - - - Lw keeps copying every plot to the screen until the file is closed with - the command, ``h.lw()``. Note that erasing the screen with ``h.plt(-3)`` or - a :kbd:`Control-e` will throw away whatever is in the file and restart the file at the - beginning. Therefore, ``lw`` keeps an accurate representation of the - current graphic status of the screen. - - After setting the device once, it remains the same unless changed again - by another call with two arguments. The default device is 2. - - - - Example: - Suppose an HP plotter is connected to serial port, ``COM1:``. Then - the following procedure will plot whatever graphics information - happens to be on the screen (not normal text). - - - .. 
code-block:: - none - - from neuron import h, gui - import os - - # function for hp style plotter - def hp(): - h.plt(-1) - h.lw() - os.system("cp temp com1:") - h.lw("temp") - - h.lw("temp", 1) - - - - Notice that the above procedure closes a file, prints it, and then - re-opens :file:`temp`. The initial direct command makes sure the - file is open the first time hp is called. - - - - .. warning:: - It is often necessary to end all the plotting with a ``h.plt(-1)`` - command before closing the file to ensure that the last line drawing - is properly terminated. - - In our hands the the HP plotter works well at 9600 BAUD and - with the line ``\verb+MODE COM1:9600,,,,P+`` in the autoexec.bat file. - - - - .. seealso:: - :func:`plot`, :func:`graph`, :func:`plt` - - - - diff --git a/docs/python/programming/math/constants.rst b/docs/python/programming/math/constants.rst index c73229ece0..f6849671b8 100755 --- a/docs/python/programming/math/constants.rst +++ b/docs/python/programming/math/constants.rst @@ -15,16 +15,13 @@ The following mathematical and physical constants are available through the ``h` h.PHI 1.61803398874989484820 (golden ratio) - h.FARADAY 96484.56 (coulombs/mole) (legacy value) h.FARADAY 96485.3321233100141 (modern value. derived from mole and electron charge) - h.R 8.31441 (molar gas constant, joules/mole/deg-K) (legacy value) h.R 8.3144626181532395 (modern value. derived from boltzmann constant and mole) h.Avogadro_constant 6.02214076e23 (codata2018 value, introduced version 8.0) As of Version 8.0 (circa October, 2020) modern units are the default. -See :func:`nrnunit_use_legacy` .. warning:: Constants are not treated specially by the interpreter and @@ -37,10 +34,4 @@ See :func:`nrnunit_use_legacy` If assignment takes place due to execution of a hoc interpreter statement, the warning occurs only once but cannot be avoided. - - The legacy FARADAY is a bit different than the legacy faraday of the units database. - The legacy faraday in a :file:`.mod` mechanism is 96520. - - - diff --git a/docs/python/programming/math/matrix.rst b/docs/python/programming/math/matrix.rst index b75c777c04..f494cb50b6 100755 --- a/docs/python/programming/math/matrix.rst +++ b/docs/python/programming/math/matrix.rst @@ -53,20 +53,17 @@ Matrix By default, a new Matrix is of type MFULL (= 1) and allocates storage for all nrow*ncol elements. Scaffolding is in place for matrices of storage type MSPARSE (=2) and MBAND (=3) but not many methods have been interfaced - to the meschach library at this time. If a method is called on a matrix type + to the eigen library at this time. If a method is called on a matrix type whose method has not been implemented, an error message will be printed. It is intended that implemented methods will be transparent to the user, eg m*x=b (``x = m.solv(b)`` ) will solve the linear system regardless of the type of m and v1 = m*v2 (``v1 = m.mulv(v2)`` ) will perform the vector multiplication. - Matrix is implemented using the - `meschach c library by David E. Stewart `_ - (discovered at http://www.netlib.org/c/index.html\ ) which contains a large collection - of routines for sparse, banded, and full matrices. Many of the useful - routines have not - been interfaced with the hoc interpreter but can be easily added on request - or you can add it yourself + Matrix is implemented using the `eigen3 library `_ + which contains a large collection of routines for sparse, banded, and full matrices. 
+ Many of the useful routines have not been interfaced with the hoc + interpreter but can be easily added on request or you can add it yourself by analogy with the code in ``nrn/src/ivoc/(matrix.c ocmatrix.[ch])`` At this time the MFULL matrix type is complete enough to do useful work and MSPARSE can be used to multiply a matrix by a vector and solve @@ -500,10 +497,6 @@ Matrix - .. warning:: - Implemented only for full and sparse matrices. - - ---- @@ -660,9 +653,6 @@ Matrix print() m.solv(b,1).printf("%8.3f", 475, 535) - .. warning:: - Implemented only for full and sparse matrices. - ---- @@ -790,9 +780,6 @@ Matrix m.printf() - .. warning:: - Implemented only for full and sparse matrices. - ---- @@ -813,9 +800,6 @@ Matrix Otherwise fill the matrix row with a constant. - .. warning:: - Implemented only for full matrices and sparse. - ---- @@ -836,9 +820,6 @@ Matrix Otherwise fill the matrix column with a constant. - .. warning:: - Implemented only for full matrices. - ---- @@ -883,9 +864,6 @@ Matrix m.printf() - .. warning:: - Implemented only for full and sparse matrices. - ---- @@ -901,9 +879,6 @@ Matrix Description: Fills the matrix with 0. - .. warning:: - Implemented only for full matrices. - ---- @@ -929,10 +904,6 @@ Matrix m.printf() - .. warning:: - Implemented only for full matrices. - - ---- diff --git a/docs/python/programming/math/random.rst b/docs/python/programming/math/random.rst index dfaea09a04..210b43500d 100755 --- a/docs/python/programming/math/random.rst +++ b/docs/python/programming/math/random.rst @@ -768,3 +768,92 @@ Random Class +---- + +NMODLRandom Class +================= + +.. class:: NMODLRandom + + Syntax: + ``r = point_process.ranvar`` + + ``r = section(x).mech.ranvar`` + + ``r = section(x).ranvar_mech`` + + + Description: + Returns an NMODLRandom wrapper for the nrnran123_State associated with the mechanism + :ref:`RANDOM ranvar ` variable. + Note that an attempt to assign a value to ranvar will raise an error. + At present, all mentions of ranvar in the context of a specific mechanism instance return a wrapper for + the same nrnran123_State (though the NMODLRandom instances are different). + +---- + +.. method:: NMODLRandom.get_ids + + Syntax: + ``vector = r.get_ids()`` + + Description: + Returns a HOC Vector of size 3 containing the 32 bit id1, id2, id3 of the nrnran123_State + +---- + +.. method:: NMODLRandom.set_ids + + Syntax: + ``r = r.set_ids(id1, id2, id3)`` + + Description: + Sets the 32 bit id1, id2, id3 of the nrnran123_State and returns the same NModlRandom instance. + + +---- + +.. method:: NMODLRandom.get_seq + + Syntax: + ``x = r.get_seq()`` + + Description: + Returns as a float, the 34 bit sequence position of the nrnran123_State + +---- + +.. method:: NMODLRandom.set_seq + + Syntax: + ``r = r.set_seq(x)`` + + Description: + Sets the 34 bit sequence position of the nrnran123_State. Returns the same NMODLRandom instance. + +---- + +.. method:: NMODLRandom.uniform + + Syntax: + ``x = r.uniform()`` + + Description: + Returns as a float, the uniform random value in the open interval 0 to 1 at the current sequence + position of the nrnran123_State (the current sequence position is then incremented by 1) + This is, for testing purposes, the only distribution exposed to + the interpreter. We don't forsee any practical use of + NMODLRandom within the interpreter in regard to sampling. The purpose + of NMODLRandom is to allow setting of stream properties for a + mod file RANDOM variable. 
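As a sketch of that intended use (assuming a hypothetical density mechanism ``mymech`` that declares ``RANDOM ranvar1``), the stream of one particular instance can be given explicit, reproducible properties like this:

    .. code-block::
        python

        from neuron import h

        soma = h.Section(name="soma")
        soma.insert("mymech")                  # hypothetical mechanism declaring RANDOM ranvar1

        r = soma(0.5).mymech.ranvar1           # NMODLRandom wrapper for this instance's stream
        r.set_ids(1, 2, 3).set_seq(0)          # set_ids/set_seq return the same instance, so calls chain
        print(list(r.get_ids()), r.get_seq())  # the three 32 bit ids and the sequence position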
Indeed, if one explicitly constructs an NMODLRandom
+    from the interpreter (rather than obtaining it from a mechanism instance), then
+
+    .. code-block::
+        python
+
+        from neuron import h
+        r = h.NMODLRandom()
+        print(r.uniform())
+
+        NEURON: NMODLRandom wrapped handle is not valid
+
diff --git a/docs/python/programming/math/vector.rst b/docs/python/programming/math/vector.rst
index eacaf2f2d7..a8d3c9f649 100755
--- a/docs/python/programming/math/vector.rst
+++ b/docs/python/programming/math/vector.rst
@@ -48,8 +48,23 @@ Vector
    The :samp:`{objref}[{index}]` notation can be used to read and set Vector elements (setting requires NEURON 7.7+).
    An older syntax :samp:`{objref}.x[{index}]` works on all Python-supporting versions of NEURON.
-    Vector slices are not directly supported but are replicated with the functionality
-    of Vector.c() (see below).
+
+    Beginning with NEURON 9.0, Vectors support slicing; e.g.:
+
+    .. code-block::
+        python
+
+        vec = h.Vector([0, 1, 2, 3, 4, 5, 6, 7, 8])
+        new_vec = vec[2:6]
+
+    will assign ``new_vec`` as a vector containing the values [2, 3, 4, 5]
+
+    .. code-block::
+        python
+
+        vec[5:7] = [1, 2]
+
+    will update the values at indices 5 and 6, resulting in ``vec = [0, 1, 2, 3, 4, 1, 2, 7, 8]``

    A vector can be created with length *size* and with each element set to the value of *init* or can be created using a Python iterable.
diff --git a/docs/python/programming/neuron_classes.rst b/docs/python/programming/neuron_classes.rst
new file mode 100644
index 0000000000..1a5db25d3e
--- /dev/null
+++ b/docs/python/programming/neuron_classes.rst
@@ -0,0 +1,87 @@
+NEURON Python Classes and Objects
+=================================
+
+NEURON exposes its internal objects and hoc templates as Python objects via an automatic
+conversion layer, effectively making all entities from the HOC stack available to a Python program.
+
+There are two main objects which expose most NEURON entities. The first is `hoc`, which
+exposes a number of established internal classes and functions.
+
+.. code-block::
+    python
+
+    >>> from neuron import hoc
+    >>> hoc.
+    hoc.List(
+    hoc.SectionList(
+    hoc.SectionRef(
+    hoc.Vector(
+    ...
+
+However, for *dynamic* entities NEURON provides the `h` gateway object. It gives access to internal
+classes (templates) and objects, even if they were just created. E.g.:
+
+.. code-block::
+    python
+
+    >>> from neuron import h
+    >>> # Create objects in the hoc stack
+    >>> h("objref vec")
+    >>> h("vec = new Vector(5, 1)")
+    >>> # Access to objects
+    >>> h.vec.as_numpy()
+    array([1., 1., 1., 1., 1.])
+    >>>
+    >>> # Access to exposed types
+    >>> vv = h.Vector(5, 2)
+    >>> vv.as_numpy()
+    array([2., 2., 2., 2., 2.])
+
+This is particularly useful as NEURON can dynamically load libraries with more functions and classes.
+
+Class Hierarchy
+---------------
+
+All NEURON's internal interpreter objects are instances of a global top-level type: `HocObject`.
+Until very recently they were considered direct instances, without any intermediate hierarchy.
+
+With #1858 Hoc classes are now associated with actual Python types, created dynamically. This
+change enables type instances to be properly recognized as such, respecting e.g. `isinstance()`
+predicates and subclassing.
+
+.. code-block::
+    python
+
+    >>> isinstance(hoc.Vector, type)
+    True
+    >>> v = h.Vector()
+    >>> isinstance(v, hoc.HocObject)
+    True
+    >>> isinstance(v, hoc.Vector)
+    True
+    >>> type(v) is hoc.Vector  # exact type
+    True
+    >>> isinstance(v, hoc.Deck)  # not an instance of an unrelated class
+    False
+
+Subclasses are also recognized properly.
For creating them please inherit from `HocBaseObject` +with `hoc_type` given as argument. E.g.: + +.. code-block:: + python + + >>> class MyStim(neuron.HocBaseObject, hoc_type=h.NetStim): + pass + >>> issubclass(MyStim, hoc.HocObject) + True + >>> issubclass(MyStim, neuron.HocBaseObject) + True + >>> MyStim._hoc_type == h.NetStim + True + >>> stim = MyStim() + >>> isinstance(stim, MyStim) + True + >>> isinstance(stim, h.NetStim) + True + >>> isinstance(stim, h.HocObject) + True diff --git a/docs/python/programming/neuronpython.rst b/docs/python/programming/neuronpython.rst deleted file mode 100755 index b207a3e5f3..0000000000 --- a/docs/python/programming/neuronpython.rst +++ /dev/null @@ -1,10 +0,0 @@ -.. automodule:: neuron - :members: - -.. seealso:: - - .. toctree:: - :maxdepth: 2 - - ../modelspec/programmatic/rxd.rst - diff --git a/docs/python/programming/ockeywor.rst b/docs/python/programming/ockeywor.rst deleted file mode 100755 index c5183ed421..0000000000 --- a/docs/python/programming/ockeywor.rst +++ /dev/null @@ -1,920 +0,0 @@ -.. _ockeywor: - -HOC Keywords ------------- - -.. note:: - - This is a page about HOC syntax; it is not directly applicable to Python-based simulations. - A HOC-based version of the NEURON documentation is also available. - -.. index:: help (keyword) - -.. _keyword_help: - -**help** - - - invokes the help system - - - Syntax: - ``help`` - - ``help word`` - - - - Description: - ``Help word`` sends a word to the help system. - The word is looked up in the :file:`nrn/lib/helpdict` file and if found - Netscape is sent the appropriate URL to display - the help text. If the word is not found, the URL for the table - of contents is sent. Netscape must be running for the help system - to work. - - - ----- - - - -.. index:: return (keyword) - -.. _keyword_return: - -**return** - - - Syntax: - ``return`` - - ``return expr`` - - ``return objref`` - - - - Description: - The ``return`` command will immediately exit from a procedure - without returning a value. - - The ``return expr`` command will immediately exit from a function - which must return a value. This command must also be the last executable - statement of a function. It is possible for a function to contain more - than one ``return`` command, for instance in a series of ``if else`` - statements; however, not more than one of the ``return`` commands may - return a value at any given time. - - The ``return objref`` command must be used to return from an - :ref:`obfunc`. - - - Example: - - .. code-block:: - none - - func max(){ - if ($1 > $2){ - return $1 - } else { - return $2 - } - } - - returns the maximum of two arguments which are read into the function. Eg. ``max(3,6)``, where $1 is the - first argument (3) and $2 is the second argument (6). This use of ``max`` would return the value 6. - - .. warning:: - See restriction of the :ref:`break ` statement. - - - ----- - - - -.. index:: break (keyword) - -.. _keyword_break: - -**break** - - - Syntax: - ``break`` - - - - Description: - Immediately exit from a loop. Control transfers to the next statement after - the loop statement. - - - .. warning:: - This statement, as well as "return", "continue", and "stop" - cannot occur within the scope of a statement that - modifies the section stack such as - - section { statement } - - or the stack will not be properly popped. Also it should not be placed on - a line that contains object syntax but should be placed on a line by - itself. eg. - - .. code-block:: - none - - x.p() break - - should be written - - .. 
code-block:: - none - - x.p() - break - - - Example: - - .. code-block:: - none - - while(1) { - x = fscan() - if (x < 0) { - break; - } - print sqrt(x) - } - - - - - ----- - - - -.. index:: continue (keyword) - -.. _keyword_continue: - -**continue** - - - Syntax: - ``continue`` - - - - Description: - Inside a compound statement of a loop, transfers control to the next iteration of the - loop statement. - - - Example: - - .. code-block:: - none - - for i=1,10{ - if(i==6){ - continue - } - print i - } - - prints the numbers: 1,2,3,4,5,7,8,9,10. 6 is left out because when i==6, the control is passed - beyond the print statement to the next iteration of the loop. - - You can accomplish the same thing with the following syntax: - - .. code-block:: - none - - for i=1,10{ - if(i<6 || i>6){ - print i - } - } - - - - .. warning:: - See restriction of the :ref:`break ` statement. - - - ----- - - - -.. index:: stop (keyword) - -.. _keyword_stop: - -**stop** - - - Syntax: - ``stop`` - - - - Description: - Return control to the command level of the interpreter. This is a useful safety device - for stopping the current execution - of your program. Eg. you may wish to stop the program and print out an error message - that lets you know if you have entered unacceptable arguments. - - .. warning:: - See restriction of the :ref:`break ` statement. - - - ----- - - - -.. index:: if (keyword) - -.. _keyword_if: - -**if** - - - Syntax: - ``if (expr) stmt1`` - - ``if (expr) stmt1 else stmt2`` - - - Description: - Conditional statement. When the *expr* evaluates to a nonzero number - (true) stmt1 is executed. With the ``else`` form, if the expression - evaluates to zero (false) stm2 is executed. - - - Example: - - .. code-block:: - none - - i = 0 //initialize i - j = 0 //initialize j - if(vec.x[i] <= 10 && i < vec.size()){ //if the value of the ith element in vec - //is less than or equal to 10, and - //if i is an index within vec - vec1.x[j] = vec.x[i] //set the jth element of vec1 equal to that - //ith element of vec - i = i+1 //increment i by 1 - j = j+1 //increment j by 1 - } else{ //otherwise (This must be on the same line as the closing brace of - //the previous statement in order to indicate that the compound - //statement has not ended.) - i = i+1 //simply go to the next element of vec - } - - - - .. seealso:: - :data:`float_epsilon`, :ref:`ifsec ` - - - ----- - - - -.. index:: else (keyword) - -.. _keyword_else: - -**else** - - - .. seealso:: - :ref:`if ` - - ----- - - - -.. index:: while (keyword) - -.. _keyword_while: - -**while** - - - Syntax: - ``while (expr) stmt`` - - - - Description: - Iteration statement. Repeatedly execute the statement as long as the - *expr* evaluates to true. - - - Example: - - .. code-block:: - none - - numelements = 20 - i = 0 - while (i < numelements){ - print(cos(vec.x[i])) - print(sin(vec.x[i])) - i += 1 - } - - prints the cosines and the sines of the ``vec`` elements up to ``numelements``, which in this case = 20. - - - .. seealso:: - :ref:`for `, :ref:`break `, :ref:`continue ` - - - - ----- - - - -.. index:: for (keyword) - -.. _keyword_for: - -**for** - - - Syntax: - ``for(stmt1; expr2; stmt3) stmt`` - - ``for var=expr1, expr2 stmt`` - - ``for (var) stmt`` - - ``for (var, expr) stmt`` - - ``for iterator (args) stmt`` - - - - Description: - Iteration statement. The ``for`` statement is similar to ``while`` in that it iterates over - a statement. 
However, the ``for`` statement is more compact and contains within its parentheses - the command to advance to the next iteration. Statements 1 and 3 may be - empty. - - This command also has a short form which always increments the iterations by one. - - .. code-block:: - none - - for *var*=*expr1*, *expr2* stmt - - is equivalent to - - .. code-block:: - none - - for(*var*=*expr1*; *var* <= *expr2*; *var*=*var*+1) stmt - - However, *expr1* and *expr2* are evaluated only once at the - beginning of the ``for``. - - ``for (var) stmt`` - - Loops over all segments of the currently accessed section. *var* begins - at 0 and ends at 1. In between *var* is set to the center position of - each segment. Ie. stmt is executed nseg+2 times. - - ``for (var, expr) stmt`` - - If the expression evaluates to a non-zero value, it is exactly equivalent - to - ``for (var) stmt`` - If it evaluates to 0 (within :data:`float_epsilon` ) then the iteration does - not include the 0 or 1 points. Thus ``for(x, 0) { print x }`` - is exactly equivalent to ``for (x) if (x > 0 && x < 1) { print x }`` - - The :ref:`keyword_iterator` form of the for loop executes the statement with a looping - construct defined by the user. - - Example: - - .. code-block:: - none - - for(i=0; i<=9; i=i+1){ - print i*2 - } - - is equivalent to - - .. code-block:: - none - - for i=0, 9 { - print i*2 - } - - - .. code-block:: - none - - create axon - access axon - {nseg = 5 L=1000 diam=50 insert hh } - for (x) print x, L*x - for (x) if (x > 0 && x < 1) { print x, gnabar_hh(x) } - - - .. seealso:: - :ref:`keyword_iterator`, - :ref:`break `, :ref:`continue `, :ref:`while `, :ref:`forall `, :ref:`forsec ` - - - - ----- - - - -.. index:: print (keyword) - -.. _keyword_print: - -**print** - - - Syntax: - ``print expr, string, ...`` - - - - Description: - Any number of expressions and/or strings may be printed. A newline is - printed at the end. - - - Example: - - .. code-block:: - none - - x=2 - y=3 - print x, "hello", "good-bye", y, 7 - - prints - - .. code-block:: - none - - x hello good-bye 3 7 - - and then moves to the next line. - - - - ----- - - - -.. index:: delete (keyword) - -.. _keyword_delete: - -**delete** - - - Syntax: - ``delete varname`` - - - - Description: - Deletes the variable name from the global namespace. Allows the - varname to be declared as another type. It is up to the user to make - sure it is safe to execute this statement since the variable may be used - in an existing function. - - - - ----- - - - -.. index:: read (keyword) - -.. _keyword_read: - -**read** - - - Syntax: - ``read(var)`` - - - - Description: - *var* is assigned the number input by the user, or the next number in the - standard input, or the file opened with ropen. ``read(var)`` - returns 0 on - end of file and 1 otherwise. - - - Example: - - .. code-block:: - none - - for i=1, 5 { - read(x) - print x*x - } - - will await input from the user or from a file, and will print the square of each value typed in - by the user, or read from the file, for the first five values. - - - .. seealso:: - :func:`xred`, :meth:`File.ropen`, :func:`fscan`, :func:`File`, :func:`getstr` - - - - ----- - - - -.. index:: debug (keyword) - -.. _keyword_debug: - -**debug** - - A toggle for parser debugging purposes. Prints the stack machine commands - resulting from parsing a statement. Not useful to the user. - - ----- - - - -.. index:: double (keyword) - -.. 
_keyword_double: - -**double** - - - Syntax: - ``double var1[expr]`` - - ``double var2[expr1][expr2]`` - - ``double varn[expr1][expr2]...[exprn]`` - - - - Description: - Declares a one-dimensional, a two-dimensional or an n-dimensional array of doubles. - This is reminiscent of the command which creates an array in C, however, HOC does not demand - that you specify whether or not numbers are integers. All numbers in all arrays will be - doubles. - - The index for each dimension ranges from 0 to expr-1. Arrays may be - redeclared at any time, including within procedures. Thus arrays may - have different lengths in different objects. - - The :class:`Vector` class for the ivoc interpreter provides convenient and powerful methods for - manipulating arrays. - - - Example: - - .. code-block:: - none - - double vec[40] - - declares an array with 40 elements, whereas - - .. code-block:: - none - - objref vec - vec = new Vector(40) - - creates a vector (which is an array by a different name) with 40 elements which you can - manipulate using the commands of the Vector class. - - - - ----- - - - -.. index:: depvar (keyword) - -.. _keyword_depvar: - -**depvar** - - - Syntax: - ``depvar`` - - - - Description: - Declare a variable to be a dependent variable for the purpose of - solving simultaneous equations. - - - Example: - - .. code-block:: - none - - depvar x, y, z - proc equations() { - eqn x:: x + 2*y + z = 6 - eqn y:: x - y + z = 2 - eqn z:: 2*x + y -z = -3 - } - equations() - solve() - print x,y,z - - prints the values of x, y and z. - - - .. seealso:: - :ref:`eqn `, :func:`eqinit`, :func:`solve`, :func:`Matrix` - - - - ----- - - - -.. index:: eqn (keyword) - -.. _keyword_eqn: - -**eqn** - - - Syntax: - ``eqn var:: expr = expr`` - - ``eqn var: expr =`` - - ``eqn var: = expr`` - - - Description: - Introduce a simultaneous equation. - The single colon forms add the expressions to the indicated sides. This is convenient for breaking - long equations down into more manageable parts which can be added together. - - - Example: - - .. code-block:: - none - - eqinit() - depvar x, y, z - proc equations() { - eqn x:: x + 2*y + z = 6 - eqn y:: x - y + z = 2 - eqn z:: 2*x + y -z = -3 - eqn z: = 5 + 4y - } - equations() - solve() - print x,y,z - - makes the right hand side of the z equation "2 + 4y" and solves for the values x, y, and z. - - - - ----- - - - -.. index:: local (keyword) - -.. _keyword_local: - -**local** - - - Syntax: - ``local var`` - - - - Description: - Declare a list of local variables within a procedure or function - Must be the first statement on the same line as the function declaration. - - - Example: - - .. code-block:: - none - - func count() {local i, x - x = 0 - for i=0,40 { - if (vec.x[i] == 7) { - x = x+1 - } - } - return x - } - - returns the number of elements which have the value of 7 in the first 40 elements of ``vec``. ``i`` - and ``x`` are local variables, and their usage here will not affect variables of the same name in - other functions and procedures of the same program. - - ----- - - - -.. index:: localobj (keyword) - -.. _keyword_localobj: - -**localobj** - - - Syntax: - ``localobj var`` - - - Description: - Declare a list, comma separated, of local objrefs within a proc, func, iterator, or obfunc. - Must be after the :ref:`local ` statement (if that exists) - on the same line as the function declaration - - Example: - - .. 
code-block:: - none - - func sum() { local i, j localobj tobj // sum from $1 to $2 - i = $1 j = $2 - tobj = new Vector() - tobj.indgen(i, j ,1) - return tobj.sum - } - sum(5, 10) == 45 - - - - ----- - - - -.. index:: strdef (keyword) - -.. _keyword_strdef: - -**strdef** - - - Syntax: - ``strdef stringname`` - - - - Description: - Declare a comma separated list of string variables. String - variables cannot be arrays. - - Strings can be passed as arguments to functions. - - - Example: - - .. code-block:: - none - - strdef a, b, c - a = "Hello, " - b = "how are you?" - c = "What is your name?" - print a, b - print c - - will print to the screen: - - .. code-block:: - none - - Hello, how are you? - What is your name? - - - - - ----- - - - -.. index:: setpointer (keyword) - -.. _keyword_setpointer: - -**setpointer** - - - Syntax: - ``setpointer pvar, var`` - - - - Description: - Connects pointer variables in membrane mechanisms to the address of var. - eg. If :file:`$NEURONHOME/examples/nmodl/synpre.mod` is linked into NEURON, then: - - .. code-block:: - none - - soma1 syn1=new synp(.5) - setpointer syn1.vpre, axon2.v(1) - - would enable the synapse in soma1 to observe the axon2 membrane potential. - - - ----- - - - -.. index:: insert (keyword) - -.. _keyword_insert: - -**insert** - - - Syntax: - ``insert mechanism`` - - - - Description: - Insert the density mechanism in the currently accessed section. - Not used for point processes--they are inserted with a different syntax. - - - .. seealso:: - :ref:`hh `, :ref:`pas `, :ref:`fastpas `, :func:`psection`, :ref:`mech` - - - - ----- - - - -.. index:: uninsert (keyword) - -.. _keyword_uninsert: - -**uninsert** - - - Syntax: - ``uninsert mechanism`` - - - - Description: - Delete the indicated mechanism from the currently accessed section. Not for - point processes. - - - diff --git a/docs/python/programming/oop.rst b/docs/python/programming/oop.rst deleted file mode 100755 index 90b4b669f3..0000000000 --- a/docs/python/programming/oop.rst +++ /dev/null @@ -1,334 +0,0 @@ -.. _oop: - -Object Oriented Programming in HOC ----------------------------------- -See `Object Oriented Programming `_ -in the reference manual. - -.. note:: - - Classes defined in HOC may be accessed in Python via ``h.ClassName``. - -.. index:: begintemplate (keyword) - -.. _begintemplate: - -begintemplate -~~~~~~~~~~~~~ - - - Syntax: - ``begintemplate`` - - - - Description: - Declare a new class or data structure. Any HOC code may appear between the - ``begintemplate`` and ``endtemplate`` declarations. Classes are instantiated with - the new statement. - - - Example: - - .. code-block:: - none - - begintemplate String - public s - strdef s - proc init() { - if (numarg()) { - s = $s1 - } - } - endtemplate String - objref s - s = new String("Hello") - print s.s - - will print "Hello" to the screen. - - -.. index:: endtemplate (keyword) - -.. _endtemplate: - -endtemplate -~~~~~~~~~~~ - - Syntax: - ``endtemplate`` - - - Description: - Closes the class declaration - - .. seealso:: - :ref:`begintemplate` - - -.. index:: objectvar (keyword) - -.. _objectvar: - -objectvar -~~~~~~~~~ - - Syntax: - ``objectvar`` - - - Description: - Synonym for :ref:`objref`. - - - -.. index:: objref (keyword) - -.. _objref: - -objref -~~~~~~ - - Syntax: - ``objref`` - - - - Description: - A comma separated list declarations of object variables. Object - variables are labels (pointers, references) to the actual objects. Thus ``o1 = o2`` - merely states that o1 and o2 are labels for the same object. 
Objects are - created with the ``new`` statement. When there are no labels for an object - the object is deleted. The keywords ``objectvar`` and ``objref`` are synonyms. - - An object has a unique name that can be determined with the ``print obj`` statement - and consists of the template name followed by an index number in brackets. - This name can be used in place of an objref. - - - Example: - - .. code-block:: - none - - objref vec, g - vec = new Vector(20) - g = new Graph() - - creates a vector object and a graph object with pointers named vec and g, respectively. - - - .. seealso:: - :ref:`new`, :ref:`begintemplate`, :class:`List`, :ref:`mech`, :class:`SectionList` - - -.. index:: public (keyword) - -.. _keyword_public: - -public -~~~~~~ - - Syntax: - ``public`` - - - - Description: - A comma separated list of all the names in a class that are available - outside the class. - - - .. seealso:: - :ref:`begintemplate` - - - -.. index:: external (keyword) - -.. _external: - -external -~~~~~~~~ - Syntax: - ``external`` - - - - Description: - A comma separated list of functions, procedures, iterators, objects, - strings, or variables defined at the top - level that can be executed within this class. This statement is - optional but if it exists must follow the begintemplate or public line. - This allows an object to get information from the outside and can - be used as information shared by all instances. External iterators - can only use local variables and arguments. - - Example: - - .. code-block:: - none - - global_ra = 100 - func ra_value() {return global_ra} - begintemplate Cell - external ra_value - create axon - proc init() { - forall Ra = ra_value() /* just the axon */ - } - endtemplate Cell - - - :func:`execute1` can be used to obtain external information as well. - - -.. index:: new (keyword) - -.. _new: - -new -~~~ - - Syntax: - ``objectvariable = new Object(args)`` - - - - Description: - Creates a new object/instance of type/class Object and makes - objectvariable label/point to it. - When the object no longer is pointed to, it no longer exists. - - - Example: - - .. code-block:: - none - - objref vec - vec = new Vector(30) - - creates a vector of size 30 with its pointer named ``vec``. - - - - ----- - - - -.. function:: init - - - Syntax: - ``proc init() { ... }`` - - - Description: - If an init procedure is defined in a template, then it is called whenever - an instance of the template is created. - - .. seealso:: - :ref:`new` - - - ----- - - - -.. function:: unref - - - Syntax: - ``proc unref() { print this, " refcount=", $1 }`` - - - Description: - If an unref procedure is defined in a template, then it is called whenever - the reference count of an object of that type is decremented. The reference - count is passed as the argument. When the count is 0, the object will be - destroyed on return from unref. This is useful in properly managing - objects which mutually reference each other. Note that unref may be - called recursively. - - - - ----- - - - -.. index:: NULLobject - -.. _nil: - -NULLobject -~~~~~~~~~~ - - Syntax: - ``objref nil`` - - - Description: - When an object variable is first declared, it refers to NULLobject - until it has been associated with an instance of some object class - by a :ref:`new` statement. - A NULLobject object variable can - be useful as an argument to certain class methods. - - Example: - - .. code-block:: - none - - objref nil - print nil // prints NULLobject - - - - ----- - - - -.. 
data:: this - - - Syntax: - ``objref this`` - - - Description: - Declared inside a template - (see :ref:`begintemplate`). - Allows the object to call a procedure - with itself as one of the arguments. - - Example: - - .. code-block:: - none - - begintemplate Demothis - public printname - objref this - - proc init() { - printname() - } - - proc printname() { - print "I am ", this - } - endtemplate Demothis - - objref foo[3] - print "at creation" - for i=0,2 foo[i]=new Demothis() - print "check existing" - for i=0,2 foo[i].printname() - - diff --git a/docs/python/programming/pointers/ptrvector.rst b/docs/python/programming/pointers/ptrvector.rst index 62e5e35f29..29b06cd189 100755 --- a/docs/python/programming/pointers/ptrvector.rst +++ b/docs/python/programming/pointers/ptrvector.rst @@ -16,8 +16,8 @@ PtrVector all pointers point to an internal dummy variable. So it is possible to scatter from a larger Vector into a smaller Vector. - If :meth:`CVode.cache_efficient` is used, a callback should be registered - with the :meth:`PtrVector.ptr_update_callback` method in order to prevent + A callback should be registered with the + :meth:`PtrVector.ptr_update_callback` method in order to prevent memory segfaults when internal memory is reallocated. Example: @@ -128,10 +128,10 @@ PtrVector Description: The statement or pythoncallback is executed whenever range variables - are re-allocated in order to establish cache efficiency. - (see :meth:`CVode.cache_efficient`) Within the callback, the - :meth:`PtrVector.resize` method may be called but the PtrVector should - not be destroyed. The return value is 0. + are re-allocated. + Within the callback, the :meth:`PtrVector.resize` method may be called but + the PtrVector should not be destroyed. + The return value is 0. ---- diff --git a/docs/python/programming/predec.rst b/docs/python/programming/predec.rst deleted file mode 100755 index 683c05a093..0000000000 --- a/docs/python/programming/predec.rst +++ /dev/null @@ -1,77 +0,0 @@ -.. _predec: - -.. _predeclared-variables: - -Predeclared Variables ---------------------- - - - - - -.. data:: hoc_ac_ - - Syntax: - ``h.hoc_ac_`` - - A variable used by the graphical interface to communicate with the - interpreter. It is very volatile. It sometimes holds a value on a - function call. If this value is needed by the user it should be - copied to another variable prior to any other function call. - ----- - - - -.. data:: hoc_obj_ - - - Syntax: - ``h.hoc_obj_[0]`` - - ``h.hoc_obj_[1]`` - - - Description: - When a line on a :class:`Graph` is picked with the :ref:`gui_pickvector` tool - two new :class:`Vector`\ 's are created containing the y and x coordinates of the - line. The y vector is referenced by hoc_obj_[0] and the x vector is - referenced by hoc_obj_[1]. - - ----- - - - -.. data:: hoc_cross_x_ - - - Syntax: - ``h.hoc_cross_x_`` - - - Description: - X coordinate value of the last :ref:`graph_crosshair` manipulation. - - .. seealso:: - :ref:`graph_crosshair` - - ----- - - - -.. data:: hoc_cross_y_ - - Syntax: - ``h.hoc_cross_y_`` - - Description: - Y coordinate value of the last :ref:`graph_crosshair` manipulation. - - .. 
seealso:: - :ref:`graph_crosshair` - - - - diff --git a/docs/python/simctrl/bbsavestate.rst b/docs/python/simctrl/bbsavestate.rst index 8d4b627f82..cb22f24995 100644 --- a/docs/python/simctrl/bbsavestate.rst +++ b/docs/python/simctrl/bbsavestate.rst @@ -30,18 +30,24 @@ BBSaveState h.stdinit() bbss = h.BBSaveState() if restore: - bbss.restore_test() + bbss.restore("temp.dat") print(f'after restore t={h.t}') else: pc.psolve(tstop/2) - bbss.save_test() + bbss.save("temp.dat") pc.psolve(tstop) - Note that files are saved in a subdirectory called "out" and restored - from a subdirectory called "in". An empty "out" folder should be created by - the user prior to calling save_test(). A script filter + In this case, the entire model state for this MPI rank is in the filename for save and restore. + + If multisplit is involved, or it is desired to reassemble the model cells on a different set of MPI ranks, + one instead must use :meth:`BBSaveState.save_test` + and :meth:`BBSaveState.restore_test`. This allows reassembly of the multisplit subtrees back into + their complete cells to allow different multisplitting and different cell distribution on different ranks. + In this case + files are saved in a subdirectory called "bbss_out" and restored + from a subdirectory called "bbss_in". A script filter (see :meth:`BBSaveState.save_test`) is needed to copy and sometimes - concatenate files from the out to the in subfolders. These files have + concatenate files from the bbss_out to the bbss_in subfolders. These files have an ascii format. BBSaveState has a c++ API that allows one to replace the file reader and @@ -59,7 +65,7 @@ BBSaveState Because a restore clears the event queue and because one cannot call finitialize from hoc without vitiating the restore, :meth:`Vector.play` will not work unless one calls :meth:`BBSaveState.vector_play_init` after a - restore (similarly :func:`frecord` must be called for :meth:`Vector.record` to work. + restore (similarly :func:`frecord_init` must be called for :meth:`Vector.record` to work. Note that it is necessary that Vector.play use a tvec argument with a first element greater than or equal to the restore time. @@ -72,9 +78,10 @@ BBSaveState with a base gid. 4. NetCon.event in Hoc can be used only with NetCon's with a None source. - - To allow extra state, such as Random sequence, to be saved for - POINT_PROCESS or SUFFIX density nmodl mechanisms, + RANDOM variables declared in a mod file NEURON block have their sequence value saved + automatically. + + To allow extra state to be saved, eg. when POINTER and BBCOREPOINTER are used to manage objects, declare FUNCTION bbsavestate() within the mechanism. That function is called when the mechanism instance is saved and restored. @@ -119,16 +126,16 @@ BBSaveState Description: - State of the model is saved in files within the subdirectory, `out`. - The file `out/tmp` contains the value of t. Other files have the + State of the model is saved in files within the subdirectory, `bbss_out`. + The file `bbss_out/tmp` contains the value of t. Other files have the filename format tmp.. . Only in the case of multisplit is it possible to have the same gid in more than one filename. Note that the out folder needs to be created by the user prior to a call to save_test(). To prepare for a restore, the tmp.. files should be copied - from the `out` subfolder to a subfolder called `in`, with the filename - in/tmp. . 
Each file should begin with a first line that specifies + from the `bbss_out` subfolder to a subfolder called `bbss_in`, with the filename + bbss_in/tmp. . Each file should begin with a first line that specifies the number of files in the `out` folder that had the same gid. The following out2in.sh script shows how to do this (not particularly @@ -138,16 +145,16 @@ BBSaveState bash #!/usr/bin/env bash - rm -f in/* - cat out/tmp > in/tmp - for f in out/tmp.*.* ; do + rm -f bbss_in/* + cat bbss_out/tmp > bbss_in/tmp + for f in bbss_out/tmp.*.* ; do echo $f i=`echo "$f" | sed 's/.*tmp\.\([0-9]*\)\..*/\1/'` echo $i - if test ! -f in/tmp.$i ; then - cnt=`ls out/tmp.$i.* | wc -l` - echo $cnt > in/tmp.$i - cat out/tmp.$i.* >> in/tmp.$i + if test ! -f bbss_in/tmp.$i ; then + cnt=`ls bbss_out/tmp.$i.* | wc -l` + echo $cnt > bbss_in/tmp.$i + cat bbss_out/tmp.$i.* >> bbss_in/tmp.$i fi done @@ -166,16 +173,40 @@ BBSaveState Description: State of the model is restored from files within the - subdirectory, "in". The file "in/tmp" supplies the value of t. - Other files have the filename format tmp. and are read when + subdirectory, "bbss_in". The file "bbss_in/tmp" supplies the value of t. + Other files have the filename format tmp. and are read when that gid is restored. Note that in a multisplit context, the same - "in/tmp." file will be read by multiple ranks, but only the state + "bbss_in/tmp." file will be read by multiple ranks, but only the state assocated with sections that exist on a rank will be restored. ---- +.. method:: BBSaveState.save + + Syntax: + ``.save("filename")`` + Description: + Saves the state of the entire model (on this rank). This is simpler to use than the ``save_test``, ``restore_test`` + pattern but does not work if one has multisplit cells or desires a different distribution of cells on a different + number of ranks. + + +---- + + +.. method:: BBSaveState.restore + + Syntax: + ``.restore("filename")`` + + Description: + Restores the state of the entire model (on this rank). This is simpler to use than the ``save_test``, ``restore_test`` + pattern but does not work if one has multisplit cells or desires a different distribution of cells on a different + number of ranks. + +---- .. method:: BBSaveState.ignore diff --git a/docs/python/simctrl/cvode.rst b/docs/python/simctrl/cvode.rst index 03b3fa1c03..5898ecd1f2 100755 --- a/docs/python/simctrl/cvode.rst +++ b/docs/python/simctrl/cvode.rst @@ -1544,15 +1544,15 @@ CVode Description: - When set, G*v = R matrix and vectors are reallocated in tree order so that - all the elements of each type are contiguous in memory. Pointers to these - elements used by the GUI, Vector, Pointer, etc. are updated. - - Much of the implementation was contributed by Hubert Eichner + Deprecated method. + This used to cause the G*v = R matrix and vectors to be reallocated in + tree order so that all the elements of each type are contiguous in + memory. + This is no longer required because this scheme is now used all the time + and cannot be disabled. + Pointers to these elements used by the GUI, Vector, Pointer, etc. are updated. - :meth:`ParallelContext.multisplit` automatically sets h.CVode().cache_efficient(True) - - 0 or 1 can be used instead of ``False`` or ``True``, respectively. + 0 or 1 could be used instead of ``False`` or ``True``, respectively. 
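Returning to the BBSaveState additions above: the new whole-model ``save``/``restore`` pair can be exercised with a minimal sketch along the following lines (a serial run is assumed; the toy single-section model, the file name ``state.dat``, and the 10 ms stop time are arbitrary illustrations, not part of the documented API):

.. code-block::
    python

    from neuron import h

    h.load_file("stdrun.hoc")
    pc = h.ParallelContext()

    # toy single-compartment model, purely for illustration
    soma = h.Section(name="soma")
    soma.insert("hh")

    tstop = 10.0     # ms
    restore = False  # set to True in a second run to resume from the saved file

    h.stdinit()
    bbss = h.BBSaveState()
    if restore:
        bbss.restore("state.dat")   # entire model state for this rank
        print(f"after restore t={h.t}")
    else:
        pc.psolve(tstop / 2)
        bbss.save("state.dat")
    pc.psolve(tstop)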
diff --git a/docs/python/simctrl/programmatic.rst b/docs/python/simctrl/programmatic.rst index 0a6a5e8605..ce082d8498 100755 --- a/docs/python/simctrl/programmatic.rst +++ b/docs/python/simctrl/programmatic.rst @@ -8,6 +8,7 @@ See also: cvode.rst batch.rst savstate.rst + bbsavestate.rst sessionsave.rst Functions @@ -213,22 +214,6 @@ Functions ---- -.. function:: nrnunit_use_legacy - - Syntax: - ``bool = h.nrnunit_use_legacy(bool)`` - - Description: - | Return current units usage as 0 or 1. - | An argument is not required. - | Arg, False uses modern codata2018 units for FARADAY, R, etc. (default as of version 8.0) - | Arg, True uses legacy units (default prior to October, 2020) - - .. seealso:: - :ref:`NRNUNIT_USE_LEGACY` :ref:`CONSTANTS` - ----- - .. data:: secondorder diff --git a/docs/python/visualization/plotshapeclass.rst b/docs/python/visualization/plotshapeclass.rst index d296126d0e..dd1fd6200c 100755 --- a/docs/python/visualization/plotshapeclass.rst +++ b/docs/python/visualization/plotshapeclass.rst @@ -22,6 +22,7 @@ PlotShape ``ps.plot(graphics_object)`` Description: + In NEURON 7.7+, PlotShape.plot works both with and without Interviews support. Variables, sectionlists, and scale are supported. Clicking on a segment displays the value and the segment id. @@ -58,6 +59,7 @@ PlotShape or use plotly instead. Example: + You can also pass in a SectionList argument to only plot specific sections @@ -75,7 +77,89 @@ PlotShape ps.variable('v') ax = ps.plot(pyplot, cmap=cm.jet) pyplot.show() - + + Example: + + Line width across the neuron morphology is able to be altered depending on different modes. ``ps.show(0)`` allows for visualizing diameters for each segment across the cell. Additionally, when ``mode = 1`` or ``mode = 2`` , line_width argument can be passed in to specify fixed width across cell. + + For plotting on matplotlib: + + .. code-block:: + python + + from neuron import h, gui + from neuron.units import mV, ms + from matplotlib.pyplot import cm + from matplotlib import pyplot + + h.load_file("c91662.ses") + + for sec in h.allsec(): + sec.nseg = int(1 + 2 * (sec.L // 40)) + sec.insert(h.hh) + + ic = h.IClamp(h.soma(0.5)) + ic.delay = 1 * ms + ic.dur = 1 * ms + ic.amp = 10 + + h.finitialize(-65 * mV) + h.continuerun(2 * ms) + + ps = h.PlotShape(False) + ps.variable("v") + ps.show(1) + ps.plot(pyplot, cmap=cm.magma, line_width=10, color="red") + pyplot.show() + + For plotting on plotly: + + .. code-block:: + python + + import plotly + import matplotlib + from neuron import h + from neuron.units import mV, ms + + h.load_file("c91662.ses") + for sec in h.allsec(): + sec.nseg = int(1 + 2 * (sec.L // 40)) + sec.insert(h.hh) + + ic = h.IClamp(h.soma(0.5)) + ic.delay = 1 * ms + ic.dur = 1 * ms + ic.amp = 10 + + h.finitialize(-65 * mV) + h.continuerun(2 * ms) + + ps = h.PlotShape(False) + ps.variable("v") + ps.show(1) + ps.plot(plotly, width=7, cmap=matplotlib.colormaps["viridis"]).show() + + + Example: + Color argument can also be passed in when consistent color across cell is preferred. When not specified, the morphology will be plotted in color gradient passed as ``cmap`` in accordance with voltage values of each segment after simulation is initiated. To specifiy cmap, + + .. 
code-block:: + python + + from neuron import h + from matplotlib import pyplot, cm + + h.load_file("c91662.ses") + sl = h.SectionList([sec for sec in h.allsec() if "apic" in str(sec)]) + for sec in sl: + sec.v = 0 + ps = h.PlotShape(False) + ps.scale(-80, 40) + ps.variable("v") + ax = ps.plot(pyplot, line_width=3, color="red") + pyplot.show() + ---- .. method:: PlotShape.scale @@ -172,6 +256,44 @@ PlotShape Description: Range variable (v, m_hh, etc.) to be used for time, space, and shape plots. + + Additionally, the variable can also be identified by species or specific region to show the corresponding voltage across. + + Example: + + .. code-block:: + python + + from neuron import h, rxd + from neuron.units import mM, µm, ms, mV + import plotly + h.load_file("stdrun.hoc") + + dend1 = h.Section('dend1') + dend2 = h.Section('dend2') + dend2.connect(dend1(1)) + + dend1.nseg = dend1.L = dend2.nseg = dend2.L = 11 + dend1.diam = dend2.diam = 2 * µm + + cyt = rxd.Region(dend1.wholetree(), nrn_region="i") + cyt2 = rxd.Region(dend2.wholetree(), nrn_region="i") + + ca = rxd.Species([cyt,cyt2], name="ca", charge=2, initial=0 * mM, d=1 * µm ** 2 / ms) + + ca.nodes(dend1(0.5))[0].include_flux(1e-13, units="mmol/ms") + + h.finitialize(-65 * mV) + h.continuerun(50 * ms) + + ps = h.PlotShape(False) + + ps.variable(ca[cyt]) + + ps.plot(plotly).show() + + + ---- diff --git a/docs/python/visualization/shape.rst b/docs/python/visualization/shape.rst index 300dbbb71b..6274e527c3 100755 --- a/docs/python/visualization/shape.rst +++ b/docs/python/visualization/shape.rst @@ -78,7 +78,8 @@ Shape Description: - + Mode for ``shape.show()`` can be adjusted for different way to display the cell, and can be adjusted as the following example (available from NEURON 9.0: + mode = 0 displays diameters @@ -88,7 +89,36 @@ Shape mode = 2 displays schematic. ie line through 1st and last 2d points of each section. + .. code-block:: + python + + import plotly + from neuron import h, gui + from neuron.units import mV, ms + import matplotlib + + h.load_file("c91662.ses") + for sec in h.allsec(): + sec.nseg = int(1 + 2 * (sec.L // 40)) + sec.insert(h.hh) + + ic = h.IClamp(h.soma(0.5)) + ic.delay = 1 * ms + ic.dur = 1 * ms + ic.amp = 10 + + h.finitialize(-65 * mV) + h.continuerun(2 * ms) + + ps = h.PlotShape(False) + ps.variable("v") + print(ps.show()) # prints the current mode + ps.show(0) # alters the mode to 0 that displays diameters for each segment + print(ps.show()) # should print 0 as the mode set + ps.plot(plotly, width=7, cmap=matplotlib.colormaps["viridis"]).show() + + ---- diff --git a/docs/removed_features.rst b/docs/removed_features.rst index af82040835..ba322d39bb 100644 --- a/docs/removed_features.rst +++ b/docs/removed_features.rst @@ -52,3 +52,7 @@ To that end, the following table's columns constitute: - NEURON Java support. - `#1937 `_ - 5a67957 + * - GPU wheels + - Experimental support for ``pip install neuron-gpu`` proved user-unfriendly and hard to maintain. + - `#2378 `_ + - fb17127 diff --git a/docs/rst_substitutions.txt b/docs/rst_substitutions.txt index 714f38ff6f..3ccdc57f3d 100644 --- a/docs/rst_substitutions.txt +++ b/docs/rst_substitutions.txt @@ -1 +1,2 @@ -.. |neuron_with_cpp_mechanisms| replace:: NEURON 9.0[.dev] \ No newline at end of file +.. |neuron_with_cpp_mechanisms| replace:: NEURON 9.0[.dev] +.. 
|neuron_with_soa_data| replace:: NEURON 9.0[.dev] \ No newline at end of file diff --git a/docs/rxd-tutorials/extracellular.ipynb b/docs/rxd-tutorials/extracellular.ipynb index af7658be57..6637a1584c 100644 --- a/docs/rxd-tutorials/extracellular.ipynb +++ b/docs/rxd-tutorials/extracellular.ipynb @@ -104,14 +104,14 @@ "metadata": {}, "outputs": [], "source": [ - "from neuron import h, crxd as rxd" + "from neuron import h, rxd" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "For reaction diffusion in the extracellular space (ECS) you have to import the crxd from neuron. The first example we will place two single compartment neurons in a closed box of extracellular space." + "For reaction diffusion in the extracellular space (ECS) you have to import the rxd from neuron. The first example we will place two single compartment neurons in a closed box of extracellular space." ] }, { @@ -380,7 +380,7 @@ "metadata": {}, "outputs": [], "source": [ - "from neuron.crxd import rxdmath\n", + "from neuron.rxd import rxdmath\n", "\n", "kb = 0.0008\n", "kth = 15.0\n", diff --git a/docs/rxd-tutorials/ip3-demo.ipynb b/docs/rxd-tutorials/ip3-demo.ipynb index 6708eef1e7..7c72325829 100644 --- a/docs/rxd-tutorials/ip3-demo.ipynb +++ b/docs/rxd-tutorials/ip3-demo.ipynb @@ -13,7 +13,7 @@ "metadata": {}, "outputs": [], "source": [ - "from neuron import h, crxd as rxd\n", + "from neuron import h, rxd\n", "\n", "h.load_file(\"stdrun.hoc\")" ] diff --git a/docs/scripting.rst b/docs/scripting.rst new file mode 100644 index 0000000000..eb7dcc03c2 --- /dev/null +++ b/docs/scripting.rst @@ -0,0 +1,144 @@ +.. _launching_hoc_and_python_scripts: + +Running Python and HOC scripts +============================== + +This section describes the various ways that you can **run** Python and HOC +scripts. +You may also want to refer to the :ref:`Python ` and +:ref:`HOC ` reference sections for documentation of how to write +scripts using the two languages. + +HOC scripts are always run using one of the NEURON executables, such as +``nrniv``, ``special``, ``neurondemo`` or ``neurongui``. +Python scripts can also be run using these executables, but may additionally be +run using the standard ``python`` and ``ipython`` commands. + +Files passed to the NEURON executables are identified based on their file +extensions, so: + +.. code-block:: shell + + nrniv script.hoc + nrniv script.py + nrniv -python script.hoc + nrniv -python script.py + python script.py + +Will all work as expected. +It is also possible to execute code from the commandline, for example: + +.. code-block:: shell + + nrniv -c "hoc_statement" + nrniv -python -c "python_statement" + python -c "python_statement" + +In this case, it is necessary to tell NEURON whether or not the code being +passed to ``-c`` is HOC or Python code, using the ``-python`` option. + +When using Python, it is recommended that you organise your scripts so that +there is no need to pass more than one ``.py`` file or ``-c`` argument to +NEURON. +This is consistent with the ``python`` command, which only accepts a single +script file or ``-c`` option, and respecting this rule minimises the number of +possible differences between ``python script.py`` and ``nrniv script.py``. + +In addition, Python scripts can execute HOC code internally using +:ref:`h `, and HOC scripts can execute Python code +internally using :ref:`nrnpython `. + +Advanced usage +~~~~~~~~~~~~~~ + +This section describes some of the caveats and differences between different +ways of running HOC and Python scripts. 
+In most cases, these details will not be important. + +Use of custom MOD files: + If you :ref:`use custom MOD files ` to extend + NEURON, then some additional details may apply. + + In this case, you will have run the :ref:`nrnivmodl + ` command, which will have produced a new + executable called ``special`` in a subdirectory named according to your + computer architecture (*e.g.* ``x86_64/special``). + + ``special`` accepts the same commandline arguments as ``nrniv``, and it can + be convenient or necessary (*e.g.* :ref:`when using GPU support + `) to launch scripts using it. + +Multiple script files and commands: + In Python mode (``-python``) the NEURON executables will only process one + input file or ``-c`` command, whereas in HOC mode it will execute several. + + This means that ``nrniv a.py b.py`` will execute both scripts, but + ``nrniv -python a.py b.py`` will only execute ``a.py`` and pass ``b.py`` as + an argument in ``sys.argv``. + + Similarly, ``nrniv -c "hoc_code_1" -c "hoc_code_2"`` will execute both + fragments of HOC code, but ``nrniv -python -c "pycode1" -c "pycode2"`` will + only execute the first expression, ``pycode1``. + + It is best to organise your Python scripts to have a single entry point and + to not rely on executing multiple files. + This is consistent with the regular ``python`` command. + +``sys.path``: + NEURON aims to provide the same Python environment with ``nrniv -python`` + as you would obtain with ``python`` directly. + This includes the behaviour for the first entry in ``sys.path``, which is + an empty string when ``-c`` as used, and the script directory after + resolving symlinks if a script is passed. + See also: `the corresponding section of the Python documentation + `_. + If you try and execute multiple Python scripts, the ``sys.path`` behaviour + may be surprising. + + One intentional difference is that if the path to the ``neuron`` module + does not exist in ``sys.path`` then ``nrniv -python`` will automatically + append it, while if you were to run ``python`` then an attempt to ``import + neuron`` would simply fail. + + +``-pyexe`` and ``NRN_PYTHONEXE``: + The NEURON executables also accept a ``-pyexe`` argument, which governs + which Python interpreter NEURON will try and launch. + The ``NRN_PYTHONEXE`` environment variable has the same effect, but if both + are used then ``-pyexe`` takes precedence. + + This is typically only relevant in a build of NEURON that uses dynamic + Python support (:ref:`NRN_ENABLE_PYTHON_DYNAMIC + `), which typically means the macOS and + Windows binary installers. + + In this situation, ``nrniv -python`` searches for a Python installation in + the following order: + + * The argument to ``-pyexe``. + * The ``NRN_PYTHONEXE`` environment variable. + * ``python``, ``python3``, ``pythonA.B`` ... ``pythonX.Y`` in ``$PATH``, + where the set of ``pythonX.Y`` names corresponds to all the Python + versions supported by the NEURON installation. + The search order matches the :ref:`NRN_PYTHON_DYNAMIC + ` setting that was used at build time. + * On Windows, some other heuristics are applied as a last resort. + + NEURON will exit with an error if you try to force it to use an unsupported + Python version using ``-pyexe`` or ``NRN_PYTHONEXE``. + If these are not passed, it will accept the first Python that is supported + by the installation. + + On a system with multiple Python versions, this can lead to differences + between ``python`` and ``nrniv -python``: + + .. 
code-block:: shell + + python -c "import neuron" # fails, NEURON not installed + python3.10 -c "import neuron" # succeeds, NEURON installed for 3.10 + nrniv -python -c "import neuron" # succeeds, search ignores `python` + # and continues to find `python3.10` + + Installations using Python wheels (``pip install neuron``) explicitly set + the ``NRN_PYTHONEXE`` variable, so this section is unlikely to be relevant + for those installations. diff --git a/docs/tutorials/ball-and-stick-3.ipynb b/docs/tutorials/ball-and-stick-3.ipynb index 0c175bc9b6..a1971e10f9 100644 --- a/docs/tutorials/ball-and-stick-3.ipynb +++ b/docs/tutorials/ball-and-stick-3.ipynb @@ -322,7 +322,7 @@ "source": [ "import matplotlib.pyplot as plt\n", "\n", - "plt.plot(t, ring.cells[0].soma_v)\n", + "plt.plot(t, list(ring.cells[0].soma_v))\n", "plt.show()" ] }, diff --git a/docs/tutorials/ball-and-stick-4.ipynb b/docs/tutorials/ball-and-stick-4.ipynb index 1ce4409b9a..93d0db6069 100644 --- a/docs/tutorials/ball-and-stick-4.ipynb +++ b/docs/tutorials/ball-and-stick-4.ipynb @@ -18,7 +18,9 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Note: If you followed the Quick Start instructions on the NEURON website, you should have everything you need to run parallel simulations. If not, if you do not already have an MPI installation, go to that link and follow the instructions in \"Step 4: Install MPI to enable parallel simulations.\" If you compiled NEURON yourself instead of using an installer (this is almost-never necessary), this part of the tutorial requires you to have used the `--with-paranrn` flag at configure time." + "**Note:** If you followed the [installation instructions](../install/install_instructions.html) on the NEURON website, you should have everything you need to run parallel simulations.\n", + "If not, if you do not already have an MPI installation, go to that link and follow the instructions related to MPI.\n", + "If you compiled NEURON yourself instead of using an installer (this is rarely necessary), this part of the tutorial requires you to have used the `-DNRN_ENABLE_MPI=ON` flag at configure time." ] }, { @@ -32,9 +34,12 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Parallel communication takes place via logical events in network connection objects known as NetCon. NetCon sources are threshold detectors. They monitor some variable, say the membrane potential of a cell, and when the variable reaches some threshold, it triggers an event sent to the targets. Targets are frequently synapses on other cells. When they receive the event, they activate.\n", + "Parallel communication takes place via logical events in network connection objects known as [NetCon](../python/modelspec/programmatic/network/netcon.html).\n", + "NetCon sources are threshold detectors. They monitor some variable, say the membrane potential of a cell, and when the variable reaches some threshold, it triggers an event sent to the targets. Targets are frequently synapses on other cells. When they receive the event, they activate.\n", "\n", - "In a parallel context across several machines, communication between hosts can be computationally inefficient when the frequency of events is high and when the message being sent is large. NEURON uses an efficient priority queueing mechanism to deliver events to targets after the delay specified by the NetCon. The message passed is succinct. It is an integer, the unique global identifier (gid) of the source. The following two figures illustrate these ideas and come from Hines M.L. 
and Carnevale N.T, Translating network models to parallel hardware in NEURON, Journal of Neuroscience Methods 169 (2008) 425–455. Users should also consult the ParallelContext reference." + "In a parallel context across several machines, communication between hosts can be computationally inefficient when the frequency of events is high and when the message being sent is large. NEURON uses an efficient priority queueing mechanism to deliver events to targets after the delay specified by the NetCon. The message passed is succinct. It is an integer, the unique global identifier (gid) of the source.\n", + "The following two figures illustrate these ideas and come from [Hines M.L. and Carnevale N.T, Translating network models to parallel hardware in NEURON, Journal of Neuroscience Methods 169 (2008) 425–455](https://www.ncbi.nlm.nih.gov/pmc/articles/PMC2430920).\n", + "Users should also consult the [ParallelContext](../python/modelspec/programmatic/network/parcon.html) reference." ] }, { @@ -76,33 +81,66 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Create a file called `testmpi.py`:" + "To do this, first we will write out a small test script.\n", + "Executing the following cell will create a file called `testmpi.py`:" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "%%writefile testmpi.py\n", "from neuron import h\n", "h.nrnmpi_init() # initialize MPI\n", "pc = h.ParallelContext()\n", "print('I am {} of {}'.format(pc.id(), pc.nhost()))\n", - "h.quit() # necessary to avoid a warning message on parallel exit on some systems\n", - "```" + "h.quit() # necessary to avoid a warning message on parallel exit on some systems" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Run the script in parallel via e.g. `mpiexec -n 4 python testmpi.py` from the command line in a terminal. You should see output resembling:" + "And we will test running this script using `mpiexec`.\n", + "Normally we can just write `python` in place of `$python_exe`, but using `sys.executable` in this way can be necessary on systems with multiple Python versions." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os, sys\n", + "\n", + "os.environ[\"python_exe\"] = sys.executable" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ + "Now we can run the script in parallel with `mpiexec -n 4 python testmpi.py`.\n", + "\n", + "You can also run this directly from the command line in a terminal; most likely this is what you will want to do when running larger simulations or using a shared compute cluster." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!mpiexec -n 4 $python_exe testmpi.py" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "You should see something similar to:\n", "```bash\n", "numprocs=4\n", "I am 1 of 4\n", @@ -116,7 +154,11 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "These could appear in any order since in theory they are running simultaneously but must print out in some order. If instead you see four processes claiming to be 0 of 1, then your copy of NEURON was not compiled with support for parallel simulation. Reconfigure with the `--with-paranrn` flag, recompile, and try again. 
If you get an error saying that `mpiexec` is an unknown command, then MPI is either not installed or not on your PATH; correct your MPI setup and try again." + "These could appear in any order since in theory they are running simultaneously but must print out in some order.\n", + "If instead you see four processes claiming to be 0 of 1, then your copy of NEURON was not compiled with support for parallel simulation.\n", + "Reconfigure with the [-DNRN_ENABLE_MPI=ON flag](../cmake_doc/options.html#mpi-options), recompile, and try again.\n", + "\n", + "If you get an error saying that `mpiexec` is an unknown command, then MPI is either not installed or not on your PATH; correct your MPI setup and try again." ] }, { @@ -130,7 +172,8 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Begin by downloading ballandstick.py into your working directory. This is equivalent to the classes we created in the previous part of the tutorial." + "This notebook requires that `ballandstick.py` is in your working directory.\n", + "This is equivalent to the classes we created in the previous part of the tutorial." ] }, { @@ -141,10 +184,12 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "%%writefile ring.py\n", "from neuron import h\n", "from ballandstick import BallAndStick\n", "\n", @@ -206,15 +251,14 @@ " nc = pc.gid_connect(source_gid, target.syn)\n", " nc.weight[0] = self._syn_w\n", " nc.delay = self._syn_delay\n", - " target._ncs.append(nc)\n", - "```" + " target._ncs.append(nc)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "The call to `h.nrnmpi_init()` must happen before any use of the ParallelContext class -- which forms a key part of any NEURON parallel simulation.\n", + "The call to `h.nrnmpi_init()` must happen before any use of the [ParallelContext](../python/modelspec/programmatic/network/parcon.html) class -- which forms a key part of any NEURON parallel simulation.\n", "\n", "The only conceptually new method here is the `set_gids` method where each process specifies which cells it will simulate. Here we use what is known as a round-robin approach, where the `pc.id()`th process starts at `pc.id()` and skips by however many processes are running (`pc.nhost`)." ] @@ -241,10 +285,12 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "%%writefile test_ring1.py\n", "from neuron import h\n", "from neuron.units import ms, mV\n", "import matplotlib.pyplot as plt\n", @@ -263,13 +309,15 @@ "\n", "if pc.gid_exists(cell_to_plot):\n", " plt.figure()\n", + " plt.title(\"Cell {}\".format(cell_to_plot))\n", " plt.plot(t, pc.gid2cell(cell_to_plot).soma_v)\n", - " plt.show()\n", + " plt.xlabel(\"Simulation time [ms]\")\n", + " plt.ylabel(\"Soma voltage [mV]\")\n", + " plt.savefig(\"test_ring1_{}ranks.svgz\".format(pc.nhost()))\n", "\n", "pc.barrier()\n", "pc.done()\n", - "h.quit()\n", - "```" + "h.quit()" ] }, { @@ -284,13 +332,12 @@ "metadata": {}, "source": [ "The conceptually new pieces are:\n", - "
    \n", - "
  • pc.set_maxstep(10 * ms) -- sets an upper bound on how far MPI can simulate without communicating, here a simulated 10 ms. This must be called before attempting a parallel simulation.
  • \n", - "
  • pc.psolve(100 * ms) -- a parallel version of h.continuerun, but does not support updating NEURON graphics during the simulation.
  • \n", - "
  • pc.gid_exists -- only the process that owns the specified cell should make the plot.
  • \n", - "
  • pc.gid2cell -- lookup a cell by gid
  • \n", - "
  • pc.barrier() -- wait until all processes reach this point; used to make sure processes don't shut down before the graph is closed
  • \n", - "
" + "\n", + "* [pc.set_maxstep(10 * ms)](../python/modelspec/programmatic/network/parcon.html#ParallelContext.set_maxstep) -- sets an upper bound on how far MPI can simulate without communicating, here a simulated 10 ms. This *must* be called before attempting a parallel simulation.\n", + "* [pc.psolve(100 * ms)](../python/modelspec/programmatic/network/parcon.html#ParallelContext.psolve) -- a parallel version of [h.continuerun](../python/simctrl/stdrun.html), but does not support updating NEURON graphics during the simulation.\n", + "* [pc.gid_exists](../python/modelspec/programmatic/network/parcon.html#ParallelContext.gid_exists) -- only the process that owns the specified cell should make the plot.\n", + "* [pc.gid2cell](../python/modelspec/programmatic/network/parcon.html#ParallelContext.gid2cell) -- lookup a cell by gid.\n", + "* [pc.barrier()](../python/modelspec/programmatic/network/parcon.html#ParallelContext.barrier) -- wait until all processes reach this point; used to make sure processes don't shut down before the graph is closed." ] }, { @@ -301,23 +348,38 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "`python test_ring1.py`" + "!$python_exe test_ring1.py\n", + "\n", + "\n", + "def SVGZ(name):\n", + " from gzip import GzipFile\n", + " from IPython.display import SVG\n", + "\n", + " return SVG(data=GzipFile(name + \".svgz\").read())\n", + "\n", + "\n", + "SVGZ(\"test_ring1_1ranks\")" ] }, { - "attachments": { - "test_ring1.png": { - "image/png": "iVBORw0KGgoAAAANSUhEUgAAAXkAAAD4CAYAAAAJmJb0AAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAAnRUlEQVR42u1daZQc1XV+Vd3TPT0jtM5onRGS0AJiEeCxzGYwSwBhAsRbROwY22DZCXbwFoJM/uSccIIdH2OIHfsQsA2OYsCADXHAGDDGxoBghNm0wSAhRkLLCO2jmZ6Z7s69r1711PRUdbek6XpVb77vnEtVV7eQqu573/vuffe9sgqFggAAAADMhI1HAAAAAJIHAAAAQPIAAAAASB4AAAAAyQMAAABHhmSU/jFNTU2FWbNmwSsAAACHgFWrVu0sFArNkSd5Jvj29nZ4DAAA4BBgWdamoO+QrgEAADAYIHkAAACQfFXhQoLsz2S/Vp9nk60k6yC7lyyFxw0AABBfJX8d2VrP52+R3VIoFObScTfZ1XjcAAAAMSR5UuktdPgw2R3qs0WH88juVz+5i+wKPG4AAIB4KvnvkV1PllefJ5HtIRU/oD5vJpuBxw0AABAzkifRfikddhChrzrMP7+MrJ2tq6sLHgEAAIiYkj+T7DIi6bfpeI9w0jS3ko2na24dPqdztvj9YRocbidrY2tubo71w9zd3Sfue7FT5PPYvhkAAENInsh5OVkL2Sz6uJTsd3T+STo+RfYx9bOryB4y/WHe8cwGcf0Dr4qVG3ehZQEAYIySD8I/kX2NSyiFk6O/0/SHuaGrWx67DmTRsgAAMI/kScH/nuxSdb6BbDGXUJJ9nMx45rNtSx4P9A6gZWnGy517xJp39+FBaEZ2ICd+9PRbYu/BfjwMA5X8qIOljnt70KB144of/Elcctsf8SA049HXtombH10nfvLsRjwMkHz8UZewQfIA4MGAKkJwU5kASD7WyKuXooPk9QIvp48Ocnln6QynbQCQvAEk7xz394LkdSI7kC+e93nOgfDR258fcgRA8kYo+Z4+qJaokDyiKt0kn4OSB8mblyY4CJLXS/L9OZA8lDwAkh955PIuyaOEMgrEwujOwhdafaEUfG8/hA9I3gC4OXkoeb3oyw0+f/hCL/pV6gx+AMkbAaRrohJRCQ/JQ8lHQ/jADyB5NGhgxPxQgJKPmC/gB5C8UQ26Gw1as5IvQMlHkOSxOytI3hhy4drsgRyqCXTBuxYKCjI6UVUvyihB8kaRC6oJIkEsIHndvsCAC5I3lFywICoqJI90jV7h4/FFFn0CJG8QuaA+G0oeIF94K5360SdA8gaFpj1I10TDDyB5DLggeaAWoSmWcetUj56ICsSiFTmka0DyRjVoIpdU0nmkWSj5iCh5pAj0Ch/hUfLwBUjeAHJpSCUcJY9ysUikCJA2gy9A8sAIqpaCaKhLKAWJdI1uYklTVIW0mX7hMxjdwhcgeQMadMZV8lAtWv3A4KgKftA/4CK6Bckb1qCTaNC6/aBYnn0Bko9OdAtfgORjD554LaoWhKZaB9tBJQ8/6O4TGfQJkLw5qkWIxnQSqkU7yYsiyeO1c/p9UZewRdK20CdA8mYoSJ7ssyyUUEZByWeg5CMgfAoiQQRfXwdfgOQNIRebG3QygXIxnX5QUr4xlYQfIqDkbYtJ3sY8FUjeBNXiadBQLVqJxVXynBPux7bPeoWPxeWspOSx+jieJG9ZVivZU2RryFaTXaeuTyR7nOxNdZxg+sPMqQbthKZo0DqJhdGActZIDLjU953UGZR8bJU8r1X+eqFQWEjH08iuJafy+Q1kT9L1eXxUn0eBalH5xwGoR/0k706Cwxf6oltX+CC6jS3JkxO3kr2kzvfTYS3ZDLLLye5SP+PjFcaTS14MkjzUI5Q8INNlsk8k0SfirOSLIAU/iw6nkK0km8IDgPpqG38O+DPLyNrZurq6DFItaNA6B1svyaOMM
iLRLfpEvEmeSHoMHR4g+wqR3b4S8mNpVQggxtvJ2tiam5tj3qAFVEuklDzSNZHoEzbSNbEneSL4OkXwK4isH1SXt9P1aep7Pu4wPjSVJZRo0NEheaRrohHdWiJdh4nX2JK8xVPnQtxJtpYc+l3PVw+TXaXO+fjQaGjQFkLTSKhHSfJpKPkoRbfYhVIPkiPw/ziT7G/JXiOCe1ld+ybZzWT30bWr6biJ7BOjoUEnitU1IHntSh4bY0XCFxbmqeJN8qRen2FBH/D1+aOtQaNcLEJKHlvcRkfJI7rVBqx4HckGnfeka7C6T6sfJMkjXRMJX7Dwyai1I973IAMg+XirFqhHrRHVECUPBanVF84GZbbaYgIkD5KPeYOW6ZpkQjbmXB4NWtdgy8ggJx8JX7jRrfQFxA9I3hTVAnLRhwKUfKR8ITcow4ALkjdStaBBa4EbQbEfuLIDOXnd0S2XUOJl3iB5g1SLq+Sxl7m+wZaRsLH6OAq+sCF8QPLmNmioFl3qkVGsz0YeWKsvLLX9NvoESN6INIE9pEGDXLQQi5LyPOBm8No57b5wX6Qj+wQGXJB8XOFO9tn2oJLH7of6IiqX5LEIR78vOG2GSieQvFnEkrQRmmpOETi+cKo64Ae9vkC6BiRvHLG4DboHq161RVVMLJZKEyCi0ukLMTRdAyUPko87ybvvs5QNGuSiBTlVtsdAdY3+fuG+yBskD5KPd2NWUai7rSpCU53E4uwGKkkem8VFgOS9K17hC5B8zJV8whYITSPgC8XxklywXkFjVJUfTJvJPoEUJkg+7iQvVQuW02uFmwd2SR5+0OkLlBWD5A1KETCsIekaNGg96tEhlkGSR4pAp/jhEso6CnGTdMQ8FUg+1orFUfKCGrQlGzbSBPqIxbYHc/JZ+EGr+BkaVWHABcnHWD06JG8pNY8JP30D7iCxZJCT1z7gYn4EJG+MYpEP1EYuOAoDrjddM0CfB3IYcHUPuHjPK0g+5o15MF0D1aJfPXqJhYHSPZ2+EBA+IHmDlLzlzQWDWHT5wo2oMlh9HIGoatAXSGGC5GOtWLxKnle9Qsnri6pcP+CNRLp94VScucIHfgDJx1qxMCwsp4+UesSOoPrFT0KxDFKYIPnYKxZGAotwIkAsQ6trGD19SBPoInmUUILkjWnM8oEOUS1o0HoG3ILHD3hZhe4B1/KQPNYsgOTjT/IWFuFETT06Sh6+0DHYOn1CDbhJG+kaE0meRvGLydaTdZDdYLJiUffrUfJo0Lp8UZquQepMX58o+iKFFKZxJE+Exz3sB2RLyBaSXUnXFpqt5EWRXNCg9flicJUl6uR1YXAVuCgKH+TkzVPyi8k6KGzbQNZH5/eQXW42yWMfc/1pgkE/FF9WgXSNvj7hrgJX6Ro3jQOYQfIzyDo9nzera161v4ysna2rqyu+DTpfEpqqdA0atB5y8a5XcJQ8SF7HYDtE+KTcclaIH5NIvoqGULidrI2tubnZACXvfE7XoUHrTBOUTrwidaa/T2ALbjNJfgtZq+dzi7pmvmoBuWgkF48fkk4TR528TpIv7RPwhUkk/yLZPMuyZpOl6Hwp2cNGqseSOvkMGrTO6LDoh2TClvv7I12jZ7BlWMXqGrwWUweSNe5sA+TgL9HpY2TMej+ma6tNVi1W6e6HaNBafOGqRzdNAD/oGWwdJT/oBxlVwRfmkLxy9CN0eGS0NOhE6XJ6NGgtCtLykjzqs/VEt/mgdA18ESaw4nWEQ1M06Kgo+cHPKGfV3Cds5ORB8iY06HxpdY0NJa8tqhLD0jXY1kBfdGuXLkxDnwDJx1m1WCXpGrw4RL+Sl8vpMfGK6BYkDxwpsQxVLWjQusC5YAsTr5HrE8WKMwy4IPk4N+iEjYlX/WmCwQlwBqfOsO2zvj5hDdsRFL4Aycc4NLWw8CMS5GJ7WnYG+5jr8UPJVh/IyYPkjQpN6zHxqpfkveka7AgakT6B6BYkH+sUAWqCoxRVeXPyGeztr5Xk3RRmOmnLLaARVYHkY4lcSWiKBq13wEWdfDQGWy944JWT4Ni0DyQf69DUHtqgoSD1kAvSNdFT8u6AizULIPnYqkevkoeC1EsuQ5V8Qm75nM9jb38twgcDLkjepNC0tEFDyYePYXXy2NtfT5/ID+8T8rWY8ANIPt6qRQxt0CB5DVHV0Dp5lO5Fp0/wy3SQrgHJx1Y9Mqwhi3Dw4mJd5FJaJ89AVKWH5Etz8lmseAXJx1U9ljbojMzJo0HrIBe/dA18oUf4DEvXwA8geVNCU0wy6RtwbZ90DZR82H1CkYyNeSqQvEkNurSSAKGplgG3dLB1lDxSZ/qFDyrOQPIxb9BWycQrJpn0DLi2X3UNFGS4fsgPfVtaUcmjT4Dk49yg7ZLdD6Fa9Ay4pYMtA2mCcJErDC9GcNYswA8g+Rg36KETr8jJ6xpwbd+JVwy4YcKvGMHZ2x9+AMkbouQx8apLyaNOPhLCJ++zdiRlI6ICyce7QZfWBPPqPnfLAyAskkedfFT8IEmmRMlzX+nPQc2D5ONG8m5oWlIT7DRokHzYSr50URqUvEaSx5oFkLwJKJTsQjmkQWOiKXRflJbtMbB3Tcgknx8ufOpTiKpA8nFV8vkyqgUlY6ErSK8fUglbkj5K98KOboeXFdcn1YCLyVeQfFwbdMIeTvJQLWGT/NDBVu7tj0lwbdEt+kSMSZ46z7+TrSN7leyXZOM93y0n6yBbT3aR+aGp/z4dUslDtYSu5L3q0SUXEEvY0a0o0yfgi7go+cfJTqAR+yQ6vkG2XBH8QjosJTue7GKy/6RridHQoIfUyadQuqdrwLVLWD6DHUG1DLZOnxDDlDx8EROSJ3L/LdmA+vg8WYs6v5zsHvouS7aRzjvIFhtN8gEblCE01UEuQwdbRho7gmojeQubxcVayXvxObJH1fkMsk7Pd5vVtWGgBrCMrJ2tq6srtg/Srejw2+IWDTp8cilN12D1sUYljxJKrUhW+gGR1hN0mOrz1Y1EbA+p39xIB1b0Kw6DHG+nA5toa2uLbUF5LiBFIBs0qjpCHnCFf7oGpawh9wmlJL05+RRIPnIkTyR8QYVB4DN0uJTs/MLg0s4tZK2en7Woa+Y2aLnK0p/koeTDV5C2z8Trwb4BPBwNSt5v9TFIPjwcaXUNT6peT3YZ8ftBz1cPky2l79Nks+l8HtkLRjdoUvKJUvWIhR8aSd4aRvI9mOwLvU+UKvliChPRbXSUfAV8nyxN9rjKRT9PZP9FstX0+T76vEY4aZxr6ZrRXuXQtHSyD5UEukh+6NyIO+BiP/nw/cAo3ZnVET7oE7EgeSLuuWW+u4kON43mFAFCU53qceh1XmmJiCpk4eOz4jWdRFlx2MCK1xFT8oVhSr4uYWE5fcgYUCRfl7CHKXmQfLgo+FTX8LwVEz1IHiQfSyVfSvKcMshgpWXog62j5P0WQ8EPkfAFBlyQfFxJvjQPjAatQ8k7ud7ksMVQzopXN50DhNEnBtV76YCL6BYkH0vVkvAhebkxFhp0eMTis72ESywMbDccpi8C5kfkmgX4ASQfO5IfTizFNAEW
4YSv5BOlJI/l9DqiW78Btx5KHiQf1wZt+zxNma5Bgw41ogoiFgby8iH6ohA0P4KJV5B8TMnFN12TRE4+XCU/vKLDHWyh5EP2hXrtZdJnwAXJg+RjqVpsn3RNfQorLaOk5BFVaRhw/SZeQfIg+bih4LOUvhiaglhCJ/nSnHx9ceIVvgjPF3mp4kurzupRcQaSjyu5JCz/iVc0aB3q0R7mB0fJI6oKzRe5gm8xAqcw8Y5XkHwMSV74pms4F4z8owYljx1BIzHglq48dvoEtpgAyceyQeflNgbDVAuUfOh+cJR8aboGe6ZoiW4DyooxNwKSj2VomrQDFkOBWEJX8gkrYOIVvggN/bm8b59w144Mvn4CAMnHpUH7habUoPtpAOjPIQcZKskn/EsoMeCG64ukT3TLW0wwv2P1MUg+ZmmCgm+6BtsN6yH5oJw8/BBun0jaNvoESN6QBi1D0+GPsx6LcEInFqnkA+vkoR7D7BOJgGIEh+ThC5B8jMApmbJKHuQSspIf2rSZbFIJVHWEruR9ixGwjxBIPpYNOl8+NMUinJCVvPAlF6QIwlTyhcCJVyeqgi9A8nFr0OVUCxp0KMgHLIZyfIFKp7AH3CA/QPiA5GOHflknH6zkEZqG5AdVxZTAC1y0I1dm7YgkeQgfkLwJoSkmXsMmeUfJp5L+Ay6UfNhK3oLwAcmbQy7JMkoeqiUc9OVygSSfrsOOoGELnzq/eSoIH5B8XNME5apr0KBDInm1wMZfyWNH0DARtK0Bb1AmhQ8GXJB8vFRLQHUNaoL1kHxAVIXBNsyoKu9fjJBCCSVIPo5KPmDFq6ta0KDDQbaMkkd1Tfi+cCdZ/aJbRFUg+fgp+TKqBeQSnnpkpJNQ8vpJPhc42KJPxIzkLcv6OlmBrEl9ZtxG1kH2KtmpphM8l2enEsNVC6cNOC2JOvmQSF4peb9y1nrs7R+6L/wGW/YNV6JhwI0JyROBt9LhQrJ3PJeXkM1TtozshyY/xF5FLJmU7fd8oCBDRL/aLyVowg9zI2EqeSb5hO936BPxUvK3kF1P5t0c+nKyuwsOnqfz8UR204wledVY/fKPDvmjQYepHv0mXd1BmP2AfcxDInl61n5KfjCqwoAbeZIn4mYy30Kd5pWSr2aQdXo+b1bXjISbigkieUz4hUzyAcTC6pHL+twFU0AISr4ugOSxj1BoSFZB5E/QYarPVzeSfVM4qZojGSg4ncMmZs6cGdPGDJKPDMnngkneu2dK0G+AkQFHS+yLdCJ4wMU8VURInpx1QQA5n0iH2WSvWM4+IS1kL9H5Ylb3ZK2en7eoa37//9vpwCba2tpiKbHcsLM+iQYdBfUYlK7x7pkytr4OD6uG4GiJs2LpuuCcPDYoCweHLWeInF8jm0w2i004KZlT6XwbHR8m+7SqsjmNzvfS9a2mPsSeSjl5TDKFp+QDKjpcP3j9BdQ+ug3yRRrCJzpK/jDxCNklZB1kB8k+a/JDdFMx7urWYQqSru/t6UdrC4nkA3PyWH0cakRVjuR5wN1zsA8PKk4kr9S8e85pl2tHy0McTNcEKXlbbN8L1RJWVBU42OKNRKEOtg7JB0e32zDYhgLMPo0QsXhJxK9BI/8YDrqzA6IxlQwgebyRKOzoNngSHK9iBMnHsEGXq64BsYRF8jnRkApWj9JfGHBrjoOqvTem/QdcrB0ByccK2WpIHg06HJLvGxBj0uWVPDbGqj329w4oki9TVgw/gOTjo+TzxRA0SLWgTj48BdmQLq/kMeCGEVE5JF9uwEVEBZKPDaopoeS6Yff9o0DtcKBMTh7VNeFGVI6ST6JPgORNUPI5uateXaJ8fTbUfG3BhMFVHQ1BE6/Y2z80uOmao8qQPPoESD5GJJ8PVPGSXKAgQ8HgZF/QegXs7R+aks+WV/JuahN9AiQfC7AyLEvySZBLOCRfnliwt3+4JM+7nTSkgide0SdA8rFBVpJ88KPE2+nDwb6e8pN9vMcSNosLB/uJ5MekkkLta4U+AZKPN7hKoJySz2ARTih470BWHieNSZX1BYglBJLvJZKvT1bsExhwQfKxAJN3WSWP0r1wSL7b2QulaUw68DdYsxAOdpEvyg22WH0Mko+Xku/PF4nct0EjNA1XyTeWIxdbZDHZF4ovJjWmK5M8+gRIPhYkX2W6JosGXXP1yCng8Q1l0jVYTh8Kdh7oq5g2cwUSAJKPPDjkDNptz9ugQS41JhYi+YlE8H4v8S4qyCT2Eao1eBPa97qzFdJmqDgDyccIvHd20Pa2Q0LTPqiWmqcIyqhH1xdZLKevKXi9Aiv0cmkz941R8AVIPhZgNVKfxMSrbnC6ZmJjJZK3kSKo+WDrTIBPKqfkk1gMBZKPESouhsJKy9DIpRyxSAWZhJKvNXZ2Vy5lxWIokHzslHy5dA1WWoZELgeyoqmCkk9DyYem5JvKVNe4rwV0XxMIgOQjC55kknvXlEnX8Kq/DFZa1hS8Mdm+3oEqlTyIpbYkX1nJJ0n48KZ+6BMg+cij+MLiMukaBkr3aovd6qXQ1eTkUcpaY5Lvrs4XrOYx4ILkIw9XiWQqkHwapXs1xU6lHpuqqq4BsdTaF7zFcH2FPoF9hEDyMSH5fLHBVlLyeBNO7bCru3JFh6se+3J5kcsX8NBqpeQrLITCgAuSj6WSL7d3jfs9JvxqSyzVpAjqUZ9de190ZysOtu6ACyUPko88Kr36r6jk65CuqXWKgFGuosMlFknyGHBrq+QbKyv5NJQ8SD5OSj5TTf4R6rFm4HQNV2uMzSSrUvLwRS0H3D4oeZC8SSTvVtdUStdAyddcPY5JBb6kYtAPUPK1RD5foAE3W3EC3PUF/ACSj42Sr6aSAKFpDUmeiGViYzXqEUq+ltjT0y94TruqdA1WH8eD5Ek5fZlsHdlqsm97ri8n6yBbT3aR6SSfqZiTt6Hka0ryfVWrRyj5WkZU7kKodFW+QDFC7ZE8QoI/lw6Xky0qFApZ+jxZXV9Ih6Vkx5NNJ3uCrs2n3xjHcq4irEdOXjO59ImZExuqV/LIBdcEO93NyRqrLaGEH6Ku5P+O7GYmeP5Axx3qOhP/PXydbCOdd5AtNvEBHlTqvCFVmeSh5GsD3lqia39WNFepHp3BGQqyFuhSSr75qGonXuGHqJP8fLIPkkpfSfY02fvV9RlknZ7fbVbX/KKBZWTtbF1dXfEj+Wz1JM85+TwW4Yw4umnw5FLWyWOrz8lja4PaYMe+3qpJHko+HFRM1xD5PkGHqT5f3aj+/ESy08iY4O+j3885RBV2Ox3YRFtbW+wYsLtvQJF8pdK9wV33MhUGBKCWxAIlX2slz7uujsvUQcnHheSJhC8oMwBwuubBAsfLQrxAn9ljTWRbyFo9P21R14wDp2uYOMq9co6R8eyfDZIfYZLf76QIJh9VDyWvm+T3ZeVgW6mU1VXyPE/F9FHN74HDw5Gma35Fdq4ifE7d8GzLTrKHyZbStTTZbDqfx4OAmSQ/IBpTyaoaNAM7UdaAWPYfQh4YSr7mSr4aP7h9guVhfw4pTK1KvgJ+zEZE/jodeVr9KqXquZzyPjq
uIeN8xrUmVtZIks9Wp8wzeBNOCEq+ujwwlHwNfUFKfuakhqp+624xwWo+lcSSnUiSPBE3E/unAr67iQ43mf4Au6tW8ninZe1IvrfqPHC9m66Bkq+ZL9pmTaiO5IsDLvmiHs+uVsDweaRKvi8nGtKVlTzSNbXD5t09Ytr4+qryunUJS/DPEFGNPA5kB8Tug/1i+vjMoSl5+CK6Sj4q2NvTTx39oMzvMfJ0wucFJ6IoHisjmCSC+GPr3l7RMqFyox4taQJ+zrxXO+dZed/2ATI+71d7uOeL/pC/lse89JXyWdB5mb/zTx07xaKW8VX9+3ggqDfwFYDF55znZ15wPucLxev87L3PVT194e0Wfter7ztCrH53nzzObmqs6veD2z7njWr/TrsvSHP9we1/QPkjR7+hy8PaOc9lTBuXGfF/kxEk/8c3u8SX/ufP2v7+0+dMqvibTIyUPDfIbTR4ca6bt/Dl1aS8XJ23DuDX7HWTYmPV1p3NFc85ouH3rHID14FFLeOq/q3zMu/o+oH/be/sOiiFS5f0QZ/0Ax93kw+cZ+88c04X8nlUJi9ZDJ0wvTpf1EdcyTNh8+6mm8gXW/f0qr6QFV3KHywuufCC5+UOePxROExXfPGcY8QNS44Fyfuh7eiJ4kefep9sYDb9x1KNTZqw1LklygXzhQrOLodTZlbOQRa3uI1ITp4VBTfeN7btF29sPyA6iVA6JbH0UHTSI/zWbI2tT4oJjSk5BzEmnZR7xRw9qUGe8zoBnjxLJSxRxy9pJuPUCF9L2s45l5lK/1iDqtr2+qjoN9eHw33pB5v+J4tnTaz63qWSj4AfmNzW0/Nfs3UfqeC95IsD5JNusX1fdthv3efNL0U5ivwwbVy9fOZj0gnRQN+xiKhTz5y3XE4Wz9kXztG2vFGpVTy3PFGO5SHrop+EJUSVFY68n3/VE68ReoELD6bsA/bFGopINnR1y4GWyXtIW7OcF9M0jUmLsZk6Wbbb2JSkPpGQ/mgkf3Bf9/OBe+70g6HtnD8fPamxJvdmBMlPpQZ/8bipkf436lTyrLDXUuN9uXOPeIVsHRFLR9cBed3FlLFp0TqhQSyePVGmn9imjK2XjXmSIhe3xjzukBtjaSAWVn3tb+8Wz214TzxP9trmvTKEZ/A7URdMPUqcNbdZDpxsrRMbZMUQ+6DS3kix9IPGF7hs2dMjnnvL8QMf+bMLbvvzJo+RfYH3Q2KbQdfYD9wPKq2JiRqSAginQaecBt3TN1Dzv4tDem687Zt2S2J/bcveIqFz3m/htLHirHlNYv6Uo8QCsrnUoEfTAi0erMJKEXC4/8Sa7eKx1dvEHzt2Sj+wylvUOl4sO3uOOKllHPljHBF6ZtQtCEqH+AIXjsbXbt0v/cDGQocxoaFOnDZnkvjsmbPE8dPJF9PHVlWlBZIHfFIdTsPhPN5Ig0PKFzfuEs++tZPsPRlycoaJqxdOnDFOXHX60eLk1gnilJnjZZg/2lcXypdV1HCyj1Nhv1/fJe55sVM8tX6HnPScMT4jPvmBmeLcBZNliWFDCl0vjG2fOXf+y5e2iHvbO0XHjgMyBdV29ARx4yXHSaHDIse2ze4PaGmhNeiETNnsOXjkJM9q8KV3dsuqEiZ1TsFw2M+14qcePV589YL54sy5k4jgx2ORSYhKniOou5/bJFas3CQnrTm8v+as2eIvF00nlTgWS/eHpWtqp+Q5gv2vP2yQqp37xqkkcG76qxPEhQunVr0iFyQPHDI4NNx9mCTPOcOnSR3+npQhEzurd84Ncrj/hXPmiDOOaRLvI4ViYu525NMEttjfO3JpM56wvvOZjeJeUu4853Lugmbxr4tJtR87WU6GAsF+kCQ/QkqeUzJPv9ElfvT0W+L5DbvkBPVVZ8wSS9/fKuaRYh+tAMmHiPENKbG3p6+q3/KWxKtIrT/2+jbZcN+kUJPBYf9lJ08XH5rfLE4/ZhI15Do82EMEd35e33Ck4Hz7bU++Kf77+U0yDXD5yTPEF86eM6oJ5VDAFUOMAyMw4K7atEvc9H9rKcLdI6aOrRf//OHjxFIaaN2/YzQDTyBMJd9YJ4mhHLGv3LhLPPLaVvEbCjO5rItTMDzL/9ekRj5ECvGY5jEI+0dgsN1zsO+w/zyneu569m3x/ac6ZI06k8mXz5tbk4UsppM8T0LvOgJfbHqvW3zrN+uoz2yTlUg3f+RE8ZFTW5CmBMnrAZco/nbN9mHXeXLoF+2bxT0vvkON9qCckOIJuiUnThPnUcgPNTLCJJ9x0maHusUt//5/X90qvk2kwusJ2DfLlxwL5X6Y4Gd/uAMu/5n/+F2HuPu5t2VKjOehPn/2bExog+T1gsmAKy5YofM7MDm3/vMX3iHi3yZXLLJi58Z64fFT0FhrGVERsXDFy77egarL5V6gCOumR9bKSW4uQV1xzUnizLlNeJhH7AsacLurn6fihVM/e26TTJPxvNQn2lrF1/5ivpg8FjucgeQjAHdV5t+vWCW27esVnbt6SMnUiU+fPktcSSE/16sDISj5hrqiGqxE8ht3doubH10rHlu9XeZ6v/PxReIjp8wwvuwuzAF3dxVKnqMoTslwaoZXop4zv1ksv+RYcezUsXiIIPno4ESuhDl7jnjo5XfF/KlHiW9cuEBcdPxUVMSEDHeXxC27ewKXknO0xWqRIy1eb/CNC+eLq8+ag7d6jTB4tTqXA5fDyg3viX97dJ0sizyW+s3dn1ssziaSB0DykcTyS46TBujDvClOxLR++35xRknKZe/BfvGTZzfKGmt+e9SVi1vFdefPH3W11WGBt3J4+JV3ZeqldO7p9S17xS2PvyGeXLdDRlHf/uhJ4qPva4ndtgIgeQAIGc1j0nLl7xNrt4vPnDFLEco+8cBLm8V97Z1yN8ElJ0wV/3jRAjGnGSm0mka3M5wdK58kX3AJag89e14LsmLlO+KZjp1yT5/rL14gPnvGbERRhwmrUIjO+xXb2toK7e3t8ApQc7BS54lU3tmRVxDzJCyX8122aLr4/NlzxHHTkOsNAzwBvuTWP8gtBziNxltc8wpV3jDvM0Tsf/OBmcbtJVMTIresVcTlbVDyAKBwzQdny901ucKJS1ZPahkvLlw4RZb0AeGBUy8rrjlN/PTZjXKOZBoR/Vlzm8QHZk+UW/UCUPIAAAAg8jJKHkMlAACAwQDJAwAAgOQBAAAAkDwAAAAAkgcAAABA8gAAAABIHgAAAADJAwAAjEJEajGUZVlddNh0mH+cd5raOcr8h3vGPeOecc+Mo4nLmyNP8kc4QLQHrfgydoTGPeOecc+45wpAugYAAMBggOQBAABA8rHA7aPQf7hn3DPuGfdcFsbk5AEAAACzlTwAAAAAkgcAAADJxwqWZV1Mtp6sg+wGEx1F99VK9hTZGrLVZNep6xPJHid7Ux0nGHbfCbI/k/1afZ5NtlL5+l6ylGH3O57sfrJ1ZGvJTh8FPv6qatOvk/2crN40P9O//8dkO/gePdd8/Wo5uE3d+6tkp45qkmcSoMMPyJaQLSS7kq4tNJDnB8i+XigU+N5OI7tW3ScPak
/S9Xl8VJ9NAg9maz2fv0V2C93vXDruJrvasPu9lew3dH/H0nGRundjfUxteAYd/oGsje7vBDpyf15qoJ9/SnZxybUgvzKXzVO2jOyHo13JLybroAe1gayPzu8hu9w0hqd720r2kjrfrzr/DHWvd6mf8fEKgwighQ4fJrvDVTh0OI/sfkPvdxwdzia7U/m5j2yPyT5W4HdNZ+j++dhAttU0P5Mf/0CHXSWXg/zK1+8uOHiezjm6mzaaSZ6JrtPzebO6ZizI4bPocArZSrIpPACor7bxZ4Nu9Xtk15Pl1edJZHvofgcM9fVsMt7a4ycqRXUHWaPJPqb72kKH75C9o8h9L9kqw/3sIsivI8ppmHiNH8GPocMDZF+hBrKvpMNwPWzBkPu8lA476JZWjSL3spLl/OsP6b55EO8uTc2Y5GPl5wlKufIAN52s0SetYTxq6VcTSJ6VQKvnc4u6ZiLB1ymCX0Ft4kF1ebsbyqnjDkNu90yyy+ie3hZOCo7D91tV6Jo01Nes2DaTb1eqz/cr0jfVx4wLyDbSPXeR9dP5g8r3JvtZVOi7I8ppJpD8i2Tz1Gw8z8DzpM3DBhI856M5V7uWOsN3PV/xvV6lzvn4kCHKZjlZC9ks5dPf0fkn6fgU2cdMu191zxyyd5KrF6hL55OtMdXHCpymOY3uuUG1cfeejfVzFX2Xr39aVdlwkcVeT1rnsBpW7I1wCdkbZG+R3WjCPfnc41kqnHuV7GVlfN+cp+aZ+TfJniCbaOC9f4js1+p8DtkLZB1kvyBLG3avJ5O1Kz//imyC6T4m/AvZOjIuL/wZ+9Q0PxN+Lpw5h34VsV0d5FfWdMKpGGQ+e004lUeH/XdjWwMAAACDgYlXAAAAkDwAAAAAkgcAAABA8gAAAABIHgAAAADJAwAAACB5AACAUYj/B2203EpvKInuAAAAAElFTkSuQmCC" - } - }, "cell_type": "markdown", "metadata": {}, "source": [ - "You should see:\n", - "![test_ring1.png](attachment:test_ring1.png)" + "This should look similar to the following reference image:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "SVGZ(\"test_ring1_ref\")" ] }, { @@ -328,10 +390,13 @@ ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "`mpiexec -n 2 python test_ring1.py`" + "!mpiexec -n 2 $python_exe test_ring1.py\n", + "SVGZ(\"test_ring1_2ranks\")" ] }, { @@ -345,21 +410,25 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Our above test runs the simulation successfully, but in the end, no single process knows when all the spikes occurred. There are a number of ways to deal with this: one solution is to have each process write its data to a file. Instead, we will use pc.py_alltoall to send all the data to node 0, at which point node 0 can plot the raster, save data, or whatever." + "Our above test runs the simulation successfully, but in the end, no single process knows when all the spikes occurred.\n", + "There are a number of ways to deal with this: one solution is to have each process write its data to a file.\n", + "Instead, we will use [pc.py_alltoall](../python/modelspec/programmatic/network/parcon.html#ParallelContext.py_alltoall) to send all the data to MPI rank 0 (`pc.id() == 0`), at which point it can plot the raster, save data, or whatever." 
] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Store this in `test_ring2.py`:" + "The following code does this and saves itself to a file called `test_ring2.py`:" ] }, { - "cell_type": "markdown", + "cell_type": "code", + "execution_count": null, "metadata": {}, + "outputs": [], "source": [ - "```python\n", + "%%writefile test_ring2.py\n", "from neuron import h\n", "from neuron.units import ms, mV\n", "import matplotlib.pyplot as plt\n", @@ -374,7 +443,7 @@ "h.finitialize(-65 * mV)\n", "pc.psolve(100 * ms)\n", "\n", - "# send all spike time data to node 0\n", + "# send all spike time data to rank 0\n", "local_data = {cell._gid: list(cell.spike_times) for cell in ring.cells}\n", "all_data = pc.py_alltoall([local_data] + [None] * (pc.nhost() - 1))\n", "\n", @@ -385,34 +454,42 @@ " data.update(process_data)\n", " # plot it\n", " plt.figure()\n", + " plt.title(\"Spike raster\")\n", + " plt.xlabel(\"Simulation time [ms]\")\n", + " plt.ylabel(\"Cell\")\n", " for i, spike_times in data.items():\n", " plt.vlines(spike_times, i + 0.5, i + 1.5)\n", - " plt.show()\n", + " plt.savefig(\"test_ring2.svgz\")\n", "\n", "pc.barrier()\n", "pc.done()\n", - "h.quit()\n", - "```" + "h.quit()" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Running this via e.g. `mpiexec -n 2 python test_ring2.py` displays the familiar raster plot. If you are wondering why node 0 was the one chosen to make the plot, it is because that is the only node that is guaranteed to exist (nothing else exists if there is only one process being used for the simulation)." + "We can run this using `mpiexec` as before, here with two processes:" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "!mpiexec -n 2 $python_exe test_ring2.py\n", + "SVGZ(\"test_ring2\")" ] }, { - "attachments": { - "test_ring2.png": { - "image/png": 
"iVBORw0KGgoAAAANSUhEUgAAAWoAAAD4CAMAAADy31wtAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjMuMywgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/Il7ecAAAACXBIWXMAAAsTAAALEwEAmpwYAAABwlBMVEX///8AAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAAATExMeHh4zMzNWmMb///93o2O9AAAAkHRSTlMAAQMEBQYHCAkKCwwNDg8QERITFBUWGBkbHB0gISInKiwuMDIzNDc4OTw9PkBCRUdISkxSVVhcXmBjZGZpam1vcXJ0eHt+gIGDh4mKi5CRlpeYnJ2eoKGio6SlpqepqqussLG0tbm7vL2+v8PGycrMzc7P0NHS09TV1tfY2drb3N/g4uPk5+jq6+zv8PT2+/yLGktKAAAD/ElEQVR42u3da1cVZRiA4WcjnkAD0gIlO6iRJVphdqIyJDUPZXawUlN0h5EmGzm0KU3AyCi0NPX/9oH61FoVzevrbq/r/gHPPFxr9jDzYdZESJIk1Ua3bitdc39Hffuu0lVBjRq1UKMWatSoVTfUd+7cj78+61FRo0aNGjVq1KhRo0aNGjVq1KhRo0aNGjXqmqSe+XqsgjoP9RpnNep6o56+ONIbERG9lcpvqO8ldUc8NL7VWZ3pDmTvbtQZqJtWRdNXz6HOQL1hfHzyLf8WPcKgRo0aNWrUqFGjRo0aNWrUqFGjRo0adb1T3xfTmjgoatSoUaNGjRo1atSoUaNGjRo1atSoUaNeFHXDaBl1Huq+46jzUK8b2oY6D/XA5m7UWah7Dscf1PfovUXUf7Z/dubajU+d1Xlu9lxAUHuEQY0aNWrUqFGjRo0aNWrUqFGjRo0aNWrUTAsfFDVq1KhRo0aNGjVq1KhRo0aNGjVq1KhRo65F6uXD45P7UOegLjVH44WnUOe5gKy82IU6B3XD2PUDSV6mQ/3PZ3XLuced1ZnuQPbsRp2Bem1LrDjfgzoD9ROjE9U9bvY8wqBGjRo1atSoUaNGjRo1atSoUaNGjRr1/44a6b8MNWrUqFGjRo0aNWrUqFGjRo0aNWrUqFEvgnr9uW8md6HOQd2+KVZ99xjqTBeQM9tR56HuvLoadRbq5pEdi3pvEfV/pW4c7HMHkoW69PE7bvbyUD99c2Js7HnUHmFQo0aNGjVq1KhRo0aNGjVq1KhRo0aNumapmd5FjRo1atSoUaNGjRo1atSoUaNGjRo1atSo6476wx+rqPNQb92EOtcFpBN1duq/vEyH2lmNGjXqGqE+8cMvs6+g9giDGjVq1KhRo0aNGjVq1KhRo0aNGjVq1LVLLdSohRo1aqFGLdSoUUdEzFUW10wlTfU5Zy4SVjEnV6hR1x91rzmSVMMtfBGz7eyVs60FJzWMluORC1OfLS04p2Xg0rdbEiz05mT1xPIUC6Vq4YuYB/uj/0DBSX3Hy3FyZxx5reCco6/G0pbiC3VMr4iTL6dYKGVntl9uj/bLxYasG9pWLv20JLYMFpvzwHQpIoov1PF925LyswkWSlnn1dXzEaX5YlMGNneX10xFrK8Wm7Nx+KPR95sSLLTr+tyxFAslrHlkR8xHxM+FpvQcjjTUT/7aFe++XXyh1i/XNp5+saaoGwf7Uvxe98/OXLtxLMHv9eGZiGc+L77QCx9EvPReLV1AFr6Ieag/+g8WHdVdjlM748jrBcecfzT2Hiq+UNfkytLRN1IslKqFL2I+OHTli7YE1BuGp04tKzhmY2XidGuChfZdqn6yLMVCkiRJktL1O4ST+770b6LYAAAAAElFTkSuQmCC" - } - }, "cell_type": "markdown", "metadata": {}, "source": [ - "You should see:\n", - "![test_ring2.png](attachment:test_ring2.png)" + "This should display the familiar raster plot.\n", + "If you are wondering why node 0 was the one chosen to make the plot, it is because that is the only node that is guaranteed to exist (nothing else exists if there is only one process being used for the simulation).\n", + "A reference version of the raster plot is shown below; the two plots should look the same:" ] }, { @@ -420,7 +497,9 @@ "execution_count": null, "metadata": {}, "outputs": [], - "source": [] + "source": [ + "SVGZ(\"test_ring2_ref\")" + ] } ], "metadata": { @@ -429,5 +508,5 @@ } }, "nbformat": 4, - "nbformat_minor": 2 + "nbformat_minor": 4 } diff --git a/docs/tutorials/ring.py b/docs/tutorials/ring.py deleted file mode 100644 index dbf761dbcc..0000000000 --- a/docs/tutorials/ring.py +++ /dev/null @@ -1,70 +0,0 @@ -from neuron import h -from ballandstick import BallAndStick - -### MPI must be initialized before we create a ParallelContext object -h.nrnmpi_init() -pc = h.ParallelContext() - - -class Ring: - """A network of *N* ball-and-stick cells where 
cell n makes an - excitatory synapse onto cell n + 1 and the last, Nth cell in the - network projects to the first cell. - """ - - def __init__( - self, N=5, stim_w=0.04, stim_t=9, stim_delay=1, syn_w=0.01, syn_delay=5, r=50 - ): - """ - :param N: Number of cells. - :param stim_w: Weight of the stimulus - :param stim_t: time of the stimulus (in ms) - :param stim_delay: delay of the stimulus (in ms) - :param syn_w: Synaptic weight - :param syn_delay: Delay of the synapse - :param r: radius of the network - """ - self._N = N - self.set_gids() ### assign gids to processors - self._syn_w = syn_w - self._syn_delay = syn_delay - self._create_cells(r) ### changed to use self._N instead of passing in N - self._connect_cells() - ### the 0th cell only exists on one process... that's the only one that gets a netstim - if pc.gid_exists(0): - self._netstim = h.NetStim() - self._netstim.number = 1 - self._netstim.start = stim_t - self._nc = h.NetCon( - self._netstim, pc.gid2cell(0).syn - ) ### grab cell with gid==0 wherever it exists - self._nc.delay = stim_delay - self._nc.weight[0] = stim_w - - def set_gids(self): - """Set the gidlist on this host.""" - #### Round-robin counting. - #### Each host has an id from 0 to pc.nhost() - 1. - self.gidlist = list(range(pc.id(), self._N, pc.nhost())) - for gid in self.gidlist: - pc.set_gid2node(gid, pc.id()) - - def _create_cells(self, r): - self.cells = [] - for i in self.gidlist: ### only create the cells that exist on this host - theta = i * 2 * h.PI / self._N - self.cells.append( - BallAndStick(i, h.cos(theta) * r, h.sin(theta) * r, 0, theta) - ) - ### associate the cell with this host and gid - for cell in self.cells: - pc.cell(cell._gid, cell._spike_detector) - - def _connect_cells(self): - ### this method is different because we now must use ids instead of objects - for target in self.cells: - source_gid = (target._gid - 1 + self._N) % self._N - nc = pc.gid_connect(source_gid, target.syn) - nc.weight[0] = self._syn_w - nc.delay = self._syn_delay - target._ncs.append(nc) diff --git a/docs/tutorials/test_ring1.py b/docs/tutorials/test_ring1.py deleted file mode 100644 index a45561bf83..0000000000 --- a/docs/tutorials/test_ring1.py +++ /dev/null @@ -1,24 +0,0 @@ -from neuron import h -from neuron.units import ms, mV -import matplotlib.pyplot as plt -from ring import Ring - -cell_to_plot = 0 - -ring = Ring() - -pc = h.ParallelContext() -pc.set_maxstep(10 * ms) - -t = h.Vector().record(h._ref_t) -h.finitialize(-65 * mV) -pc.psolve(100 * ms) - -if pc.gid_exists(cell_to_plot): - plt.figure() - plt.plot(t, pc.gid2cell(cell_to_plot).soma_v) - plt.show() - -pc.barrier() -pc.done() -h.quit() diff --git a/docs/tutorials/test_ring1_ref.svgz b/docs/tutorials/test_ring1_ref.svgz new file mode 100644 index 0000000000..22ed44dac1 Binary files /dev/null and b/docs/tutorials/test_ring1_ref.svgz differ diff --git a/docs/tutorials/test_ring2.py b/docs/tutorials/test_ring2.py deleted file mode 100644 index 139b5b6cfd..0000000000 --- a/docs/tutorials/test_ring2.py +++ /dev/null @@ -1,32 +0,0 @@ -from neuron import h -from neuron.units import ms, mV -import matplotlib.pyplot as plt -from ring import Ring - -ring = Ring() - -pc = h.ParallelContext() -pc.set_maxstep(10 * ms) - -t = h.Vector().record(h._ref_t) -h.finitialize(-65 * mV) -pc.psolve(100 * ms) - -# send all spike time data to node 0 -local_data = {cell._gid: list(cell.spike_times) for cell in ring.cells} -all_data = pc.py_alltoall([local_data] + [None] * (pc.nhost() - 1)) - -if pc.id() == 0: - # combine the data from 
the various processes - data = {} - for process_data in all_data: - data.update(process_data) - # plot it - plt.figure() - for i, spike_times in data.items(): - plt.vlines(spike_times, i + 0.5, i + 1.5) - plt.show() - -pc.barrier() -pc.done() -h.quit() diff --git a/docs/tutorials/test_ring2_ref.svgz b/docs/tutorials/test_ring2_ref.svgz new file mode 100644 index 0000000000..8047fb98e3 Binary files /dev/null and b/docs/tutorials/test_ring2_ref.svgz differ diff --git a/docs/tutorials/testmpi.py b/docs/tutorials/testmpi.py deleted file mode 100644 index d64de5f0a2..0000000000 --- a/docs/tutorials/testmpi.py +++ /dev/null @@ -1,6 +0,0 @@ -from neuron import h - -h.nrnmpi_init() # initialize MPI -pc = h.ParallelContext() -print("I am {} of {}".format(pc.id(), pc.nhost())) -h.quit() diff --git a/external/coding-conventions b/external/coding-conventions index f8f8d69a66..80a2c90134 160000 --- a/external/coding-conventions +++ b/external/coding-conventions @@ -1 +1 @@ -Subproject commit f8f8d69a66c23978d1c9c5dce62de79466f26e5d +Subproject commit 80a2c9013463b89b5c426e18403e9a2f87c59a00 diff --git a/external/eigen b/external/eigen new file mode 160000 index 0000000000..3147391d94 --- /dev/null +++ b/external/eigen @@ -0,0 +1 @@ +Subproject commit 3147391d946bb4b6c68edd901f2add6ac1f31f8c diff --git a/external/iv b/external/iv index b86fc14d21..2b01af6a55 160000 --- a/external/iv +++ b/external/iv @@ -1 +1 @@ -Subproject commit b86fc14d2167effef0f538c8899ba9737e658a40 +Subproject commit 2b01af6a55a3935575f136c3cd16e6e1f873a0a7 diff --git a/external/mod2c b/external/mod2c deleted file mode 160000 index 284bd611a3..0000000000 --- a/external/mod2c +++ /dev/null @@ -1 +0,0 @@ -Subproject commit 284bd611a349b563a6ed8fb35a42e63e32860c0c diff --git a/external/nmodl b/external/nmodl index f4f722f72a..8f7eb99fd3 160000 --- a/external/nmodl +++ b/external/nmodl @@ -1 +1 @@ -Subproject commit f4f722f72a24708f0a3f69951688302e38c191ff +Subproject commit 8f7eb99fd36ab886eac5c1ab050272fd2c46fa04 diff --git a/nrn_requirements.txt b/nrn_requirements.txt index 83e51d38cf..77ccb32ddc 100644 --- a/nrn_requirements.txt +++ b/nrn_requirements.txt @@ -6,9 +6,10 @@ matplotlib # bokeh 3 seems to break docs notebooks bokeh<3 ipython -cython +cython<3 packaging pytest pytest-cov mpi4py numpy +find_libpython diff --git a/packaging/python/Dockerfile b/packaging/python/Dockerfile index e5da8e9a0d..36c7d0ad4f 100644 --- a/packaging/python/Dockerfile +++ b/packaging/python/Dockerfile @@ -3,10 +3,6 @@ ARG MANYLINUX_IMAGE=manylinux2014_x86_64 FROM quay.io/pypa/$MANYLINUX_IMAGE LABEL authors="Pramod Kumbhar, Fernando Pereira, Alexandru Savulescu" -# Enable devtoolset-9 in order to get GCC 9 -RUN yum install -y centos-release-scl && yum install -y devtoolset-9 -SHELL [ "/usr/bin/scl", "enable", "devtoolset-9" ] - RUN gcc --version && python --version # install basic packages @@ -21,14 +17,14 @@ RUN yum -y install \ autoconf \ automake \ openssh-server \ - libtool + libtool && yum -y clean all && rm -rf /var/cache # required for rpmbuild RUN yum -y install \ gettext \ gcc-c++ \ help2man \ - rpm-build + rpm-build && yum -y clean all && rm -rf /var/cache WORKDIR /root @@ -37,29 +33,34 @@ RUN rpmbuild --rebuild https://vault.centos.org/8-stream/AppStream/Source/SPacka && yum -y install rpmbuild/RPMS/*/flex-2.6.1-9.el7.*.rpm \ && rm -rf rpmbuild -RUN wget http://ftpmirror.gnu.org/ncurses/ncurses-6.2.tar.gz \ - && tar -xvzf ncurses-6.2.tar.gz \ - && cd ncurses-6.2 \ - && ./configure --prefix=/nrnwheel/ncurses --without-shared CFLAGS="-fPIC" 
\ - && make -j install +RUN wget http://ftpmirror.gnu.org/ncurses/ncurses-6.4.tar.gz \ + && tar -xvzf ncurses-6.4.tar.gz \ + && cd ncurses-6.4 \ + && ./configure --prefix=/nrnwheel/ncurses --without-shared --without-debug CFLAGS="-fPIC" \ + && make -j install \ + && cd .. && rm -rf ncurses-6.4 ncurses-6.4.tar.gz RUN curl -L -o mpich-3.3.2.tar.gz http://www.mpich.org/static/downloads/3.3.2/mpich-3.3.2.tar.gz \ && tar -xvzf mpich-3.3.2.tar.gz \ && cd mpich-3.3.2 \ && ./configure --disable-fortran --prefix=/nrnwheel/mpich \ - && make -j install + && make -j install \ + && cd .. && rm -rf mpich-3.3.2 mpich-3.3.2.tar.gz \ + && rm -rf /nrnwheel/mpich/share/doc /nrnwheel/mpich/share/man RUN curl -L -o openmpi-4.0.3.tar.gz https://download.open-mpi.org/release/open-mpi/v4.0/openmpi-4.0.3.tar.gz \ && tar -xvzf openmpi-4.0.3.tar.gz \ && cd openmpi-4.0.3 \ && ./configure --prefix=/nrnwheel/openmpi \ - && make -j install + && make -j install \ + && cd .. && rm -rf openmpi-4.0.3 openmpi-4.0.3.tar.gz RUN curl -L -o readline-7.0.tar.gz https://ftp.gnu.org/gnu/readline/readline-7.0.tar.gz \ && tar -xvzf readline-7.0.tar.gz \ && cd readline-7.0 \ && ./configure --prefix=/nrnwheel/readline --disable-shared CFLAGS="-fPIC" \ - && make -j install + && make -j install \ + && cd .. && rm -rf readline-7.0 readline-7.0.tar.gz # create readline with ncurses RUN cd /nrnwheel/readline/lib \ @@ -68,8 +69,22 @@ RUN cd /nrnwheel/readline/lib \ && ar cq libreadline.a *.o \ && rm *.o +# NOTE: NMODL transpiler is used in during build step but it requires +# sympy+python available in order to translate MOD files to C++. But under +# manylinux container, Python (libpython) is not available and hence we can't +# complete the build step. In order to enable building wheels with the existing +# implementation of NMODL, for now, we are making libpython available inside +# the manylinux container which is just used during the build step (nothing is +# linked to libraries or binaries of distribution). +RUN curl -L -o Python-3.10.0.tar.gz https://www.python.org/ftp/python/3.10.0/Python-3.10.0.tgz \ + && tar -xvzf Python-3.10.0.tar.gz \ + && cd Python-3.10.0 \ + && ./configure --prefix=/nrnwheel/python --enable-shared --with-static-libpython=no \ + && make -j altinstall \ + && cd .. && rm -rf Python-3.10.0 Python-3.10.0.tar.gz + ENV PATH /nrnwheel/openmpi/bin:$PATH -RUN yum -y install epel-release libX11-devel libXcomposite-devel vim-enhanced +RUN yum -y install epel-release libX11-devel libXcomposite-devel vim-enhanced && yum -y clean all && rm -rf /var/cache RUN yum -y remove ncurses-devel # Copy Dockerfile for reference @@ -78,6 +93,4 @@ COPY Dockerfile . # build wheels from there WORKDIR /root -ENV BASH_ENV=/opt/rh/devtoolset-9/enable \ - ENV=/opt/rh/devtoolset-9/enable \ - PROMPT_COMMAND=". /opt/rh/devtoolset-9/enable " +ENV NMODL_PYLIB=/nrnwheel/python/lib/libpython3.10.so.1.0 diff --git a/packaging/python/Dockerfile_gpu b/packaging/python/Dockerfile_gpu deleted file mode 100644 index 2433171d1e..0000000000 --- a/packaging/python/Dockerfile_gpu +++ /dev/null @@ -1,15 +0,0 @@ -FROM neuronsimulator/neuron_wheel:latest-gcc9-x86_64 -LABEL authors="Pramod Kumbhar, Olli Lupton, Fernando Pereira, Alexandru Savulescu" - -WORKDIR /root - -# Install NVHPC from NVIDIA's repository. 
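As a side note on the NMODL_PYLIB setting introduced in the wheel-building Dockerfile above: the freshly built shared libpython is only consumed through that environment variable during the NMODL build step, so a quick sanity check inside the container can look like the sketch below. This is illustrative only and not part of the image; the fallback path is simply the one set by the ENV line above.

    import ctypes
    import os

    # Load the shared libpython that the NMODL build step will use; an OSError here
    # means the path from the ENV NMODL_PYLIB line is missing or not loadable.
    libpython = os.environ.get("NMODL_PYLIB", "/nrnwheel/python/lib/libpython3.10.so.1.0")
    ctypes.CDLL(libpython)
    print("NMODL_PYLIB OK:", libpython)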
-RUN yum-config-manager --add-repo https://developer.download.nvidia.com/hpc-sdk/rhel/nvhpc.repo \ - && yum install -y nvhpc-22-1 nvhpc-2022-22.1 \ - && yum clean all - -# setup nvhpc environment for building wheel and interactive usage -RUN yum install -y environment-modules && yum clean all \ - && echo "module use /opt/nvidia/hpc_sdk/modulefiles" >> ~/.bashrc \ - && /opt/nvidia/hpc_sdk/Linux_x86_64/22.1/compilers/bin/makelocalrc -x \ - -gcc `which gcc` -gpp `which g++` -g77 `which gfortran` -cuda 11.5 diff --git a/packaging/python/build_requirements.txt b/packaging/python/build_requirements.txt index ba9dcfb0e8..dcbd639f9c 100644 --- a/packaging/python/build_requirements.txt +++ b/packaging/python/build_requirements.txt @@ -1,2 +1,2 @@ -cython +cython<3 packaging diff --git a/packaging/python/build_static_readline_osx.bash b/packaging/python/build_static_readline_osx.bash index 025dbb66c6..efe0bf09d5 100644 --- a/packaging/python/build_static_readline_osx.bash +++ b/packaging/python/build_static_readline_osx.bash @@ -29,9 +29,9 @@ else export MACOSX_DEPLOYMENT_TARGET=10.9 # for x86_64 fi -(wget http://ftpmirror.gnu.org/ncurses/ncurses-6.2.tar.gz \ - && tar -xvzf ncurses-6.2.tar.gz \ - && cd ncurses-6.2 \ +(wget http://ftpmirror.gnu.org/ncurses/ncurses-6.4.tar.gz \ + && tar -xvzf ncurses-6.4.tar.gz \ + && cd ncurses-6.4 \ && ./configure --prefix=/opt/nrnwheel/ncurses --without-shared CFLAGS="-fPIC" \ && make -j install) diff --git a/packaging/python/build_wheels.bash b/packaging/python/build_wheels.bash index e3ef18927f..007fa581ce 100755 --- a/packaging/python/build_wheels.bash +++ b/packaging/python/build_wheels.bash @@ -25,6 +25,13 @@ fi py_ver="" +clone_install_nmodl_requirements() { + git config --global --add safe.directory /root/nrn + git submodule update --init --recursive --force --depth 1 -- external/nmodl + pip install -r external/nmodl/requirements.txt +} + + setup_venv() { local py_bin="$1" py_ver=$("$py_bin" -c "import sys; print('%d%d' % tuple(sys.version_info)[:2])") @@ -55,11 +62,12 @@ pip_numpy_install() { 39) numpy_ver="numpy==1.19.3" ;; 310) numpy_ver="numpy==1.21.3" ;; 311) numpy_ver="numpy==1.23.5" ;; + 312) numpy_ver="numpy==1.26.0" ;; *) echo "Error: numpy version not specified for this python!" && exit 1;; esac # older version for apple m1 as building from source fails - if [[ `uname -m` == 'arm64' ]]; then + if [[ `uname -m` == 'arm64' && "$py_ver" == "39" ]]; then numpy_ver="numpy==1.21.3" fi @@ -83,23 +91,13 @@ build_wheel_linux() { CMAKE_DEFS="NRN_MPI_DYNAMIC=$3" if [ "$USE_STATIC_READLINE" == "1" ]; then - CMAKE_DEFS="$CMAKE_DEFS,NRN_WHEEL_BUILD=ON,NRN_WHEEL_STATIC_READLINE=ON" + CMAKE_DEFS="$CMAKE_DEFS,NRN_BINARY_DIST_BUILD=ON,NRN_WHEEL_STATIC_READLINE=ON" fi if [ "$2" == "coreneuron" ]; then setup_args="--enable-coreneuron" - elif [ "$2" == "coreneuron-gpu" ]; then - setup_args="--enable-coreneuron --enable-gpu" - # nvhpc is required for GPU support but make sure - # CC and CXX are unset for building python extensions - source ~/.bashrc - module load nvhpc - unset CC CXX - # make the NVIDIA compilers default to targeting haswell CPUs - # the default is currently 70;80, partly because NVHPC does not - # support OpenMP target offload with 60. Wheels use mod2c and - # OpenACC for now, so we can be a little more generic. 
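As an aside on setup_venv and pip_numpy_install above: the two-digit tag that drives these case statements is just the major and minor version concatenated, so the selection logic can be pictured in Python roughly as follows. The pins are the ones listed above; treat this as an illustration, not a replacement for the shell code.

    import sys

    # "312" for CPython 3.12, "311" for 3.11, and so on -- mirrors the python -c
    # one-liner used in setup_venv.
    py_ver = "%d%d" % tuple(sys.version_info)[:2]

    # numpy pins from pip_numpy_install; versions not shown here fall back to the
    # script's other entries.
    numpy_pins = {
        "39": "numpy==1.19.3",
        "310": "numpy==1.21.3",
        "311": "numpy==1.23.5",
        "312": "numpy==1.26.0",
    }
    print(py_ver, "->", numpy_pins.get(py_ver, "see pip_numpy_install"))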
- CMAKE_DEFS="${CMAKE_DEFS},CMAKE_CUDA_ARCHITECTURES=60;70;80,CMAKE_C_FLAGS=-tp=haswell,CMAKE_CXX_FLAGS=-tp=haswell" + clone_install_nmodl_requirements + CMAKE_DEFS="${CMAKE_DEFS},LINK_AGAINST_PYTHON=OFF" fi # Workaround for https://github.com/pypa/manylinux/issues/1309 @@ -145,14 +143,12 @@ build_wheel_osx() { if [ "$2" == "coreneuron" ]; then setup_args="--enable-coreneuron" - elif [ "$2" == "coreneuron-gpu" ]; then - echo "Error: GPU support on MacOS is not available!" - exit 1 + clone_install_nmodl_requirements fi CMAKE_DEFS="NRN_MPI_DYNAMIC=$3" if [ "$USE_STATIC_READLINE" == "1" ]; then - CMAKE_DEFS="$CMAKE_DEFS,NRN_WHEEL_BUILD=ON,NRN_WHEEL_STATIC_READLINE=ON" + CMAKE_DEFS="$CMAKE_DEFS,NRN_BINARY_DIST_BUILD=ON,NRN_WHEEL_STATIC_READLINE=ON" fi # We need to "fix" the platform tag if the Python installer is universal2 @@ -168,6 +164,9 @@ build_wheel_osx() { echo " - Python installation is universal2 and we are on arm64, setting _PYTHON_HOST_PLATFORM to: ${_PYTHON_HOST_PLATFORM}" export ARCHFLAGS="-arch arm64" echo " - Setting ARCHFLAGS to: ${ARCHFLAGS}" + # This is a shortcut to have a successful delocate-wheel. See: + # https://github.com/matthew-brett/delocate/issues/153 + python -c "import os,delocate; print(os.path.join(os.path.dirname(delocate.__file__), 'tools.py'));quit()" | xargs -I{} sed -i."" "s/first, /input.pop('i386',None); first, /g" {} else export _PYTHON_HOST_PLATFORM="${py_platform/universal2/x86_64}" echo " - Python installation is universal2 and we are on x84_64, setting _PYTHON_HOST_PLATFORM to: ${_PYTHON_HOST_PLATFORM}" @@ -196,7 +195,7 @@ if [ ! -z "$2" ]; then python_wheel_version=$2 fi -# enable coreneuron support: "coreneuron" or "coreneuron-gpu" +# enable coreneuron support: "coreneuron" # this should be removed/improved once wheel is stable coreneuron=$3 @@ -240,7 +239,7 @@ case "$1" in ;; *) - echo "Usage: $(basename $0) < linux | osx > [python version 36|37|38|39|3*] [coreneuron | coreneuron-gpu]" + echo "Usage: $(basename $0) < linux | osx > [python version 36|37|38|39|3*] [coreneuron]" exit 1 ;; diff --git a/packaging/python/fix_target_processor_in_makefiles.sh b/packaging/python/fix_target_processor_in_makefiles.sh deleted file mode 100755 index b369952655..0000000000 --- a/packaging/python/fix_target_processor_in_makefiles.sh +++ /dev/null @@ -1,14 +0,0 @@ -#!/usr/bin/env bash -set -ex - -instdir="$1" - -# We build the GPU wheels with -tp=haswell for portability, but we don't want to -# embed this in the Makefiles in the wheels themselves to (hopefully) give -# better performance when users run nrnivmodl themselves. -for makefile in bin/nrnmech_makefile share/coreneuron/nrnivmodl_core_makefile -do - sed -i.old -e 's#-tp=haswell##g' "${instdir}/${makefile}" - ! 
diff -u "${instdir}/${makefile}.old" "${instdir}/${makefile}" - rm "${instdir}/${makefile}.old" -done \ No newline at end of file diff --git a/packaging/python/test_wheels.sh b/packaging/python/test_wheels.sh index 17e791b4a2..3e458d1f2a 100755 --- a/packaging/python/test_wheels.sh +++ b/packaging/python/test_wheels.sh @@ -22,20 +22,7 @@ python_exe=$1 # python to be used for testing python_wheel=$2 # python wheel to be tested use_venv=$3 # if $3 is not "false" then use virtual environment -# There are some considerations to test coreneuron with gpu support: -# - if coreneuron support exist then we can always run all tests on cpu -# - if coreneuron gpu support exist then we can run always binaries like -# nrniv and nrniv-core without existence of NVC/NVC++ compilers -# - if coreneuron gpu support exist and nvc compiler available then we -# can compile mod files and run tests via special binary -# - Note that the tests that use coreneuron can not be launched using -# python or nrniv -python because in gpu build coreneuron is created -# as a static library and linked to special. Hence only special can -# be used to launch GPU tests has_coreneuron=false # true if coreneuron support is available -has_gpu_support=false # true if coreneuron gpu support is available -has_dev_env=true # true if c/c++ dev environment exist to compile mod files -run_gpu_test=false # true if test should be run on the gpu # python version being used python_ver=$("$python_exe" -c "import sys; print('%d%d' % tuple(sys.version_info)[:2])") @@ -74,13 +61,6 @@ run_mpi_test () { rm -rf *.dat fi - # rest of the tests we need development environment. For GPU wheel - # make sure we have necessary compiler. - if [[ "$has_dev_env" == "false" ]]; then - echo "WARNING: Development environment missing, skipping rest of the MPI tests!" - return - fi - # build new special rm -rf $ARCH_DIR nrnivmodl tmp_mod @@ -96,18 +76,12 @@ run_mpi_test () { rm -rf $ARCH_DIR nrnivmodl -coreneuron "test/coreneuron/mod files/" - # python as a launcher can be used only with non-gpi build - if [[ "$has_gpu_support" == "false" ]]; then - $mpi_launcher -n 1 $python_exe test/coreneuron/test_direct.py - fi + $mpi_launcher -n 1 $python_exe test/coreneuron/test_direct.py # using -python doesn't work on Azure CI if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then - if [[ "$has_gpu_support" == "false" ]]; then - $mpi_launcher -n 2 nrniv -python -mpi test/coreneuron/test_direct.py - fi - run_on_gpu=$([ "$run_gpu_test" == "true" ] && echo "1" || echo "0") - NVCOMPILER_ACC_TIME=1 CORENRN_ENABLE_GPU=$run_on_gpu $mpi_launcher -n 2 ./$ARCH_DIR/special -python -mpi test/coreneuron/test_direct.py + $mpi_launcher -n 2 nrniv -python -mpi test/coreneuron/test_direct.py + NVCOMPILER_ACC_TIME=1 CORENRN_ENABLE_GPU=0 $mpi_launcher -n 2 ./$ARCH_DIR/special -python -mpi test/coreneuron/test_direct.py fi fi @@ -135,12 +109,6 @@ run_serial_test () { rm -rf *.dat fi - # rest of the tests we need development environment - if [[ "$has_dev_env" == "false" ]]; then - echo "WARNING: Development environment missing, skipping rest of the serial tests!" 
- return - fi - # Test 4: execute nrnivmodl rm -rf $ARCH_DIR nrnivmodl tmp_mod @@ -168,30 +136,17 @@ run_serial_test () { rm -rf $ARCH_DIR # first test vanialla coreneuron support, without nrnivmodl - if [[ "$has_gpu_support" == "false" ]]; then - $python_exe test/coreneuron/test_psolve.py - fi + $python_exe test/coreneuron/test_psolve.py nrnivmodl -coreneuron "test/coreneuron/mod files/" # coreneuron+gpu can be used via python but special only - if [[ "$has_gpu_support" == "false" ]]; then - $python_exe test/coreneuron/test_direct.py - fi + $python_exe test/coreneuron/test_direct.py # using -python doesn't work on Azure CI if [[ "$SKIP_EMBEDED_PYTHON_TEST" != "true" ]]; then - # we can run special with or without gpu wheel ./$ARCH_DIR/special -python test/coreneuron/test_direct.py - - # python and nrniv can be used only for non-gpu wheel - if [[ "$has_gpu_support" == "false" ]]; then - nrniv -python test/coreneuron/test_direct.py - fi - - if [[ "$run_gpu_test" == "true" ]]; then - NVCOMPILER_ACC_TIME=1 CORENRN_ENABLE_GPU=1 ./$ARCH_DIR/special -python test/coreneuron/test_direct.py - fi + nrniv -python test/coreneuron/test_direct.py fi rm -rf $ARCH_DIR @@ -210,14 +165,16 @@ run_parallel_test() { # this is for MacOS system if [[ "$OSTYPE" == "darwin"* ]]; then # assume both MPIs are installed via brew. + BREW_PREFIX=$(brew --prefix) brew unlink openmpi brew link mpich - BREW_PREFIX=$(brew --prefix) + export DYLD_LIBRARY_PATH=${BREW_PREFIX}/opt/mpich/lib:$DYLD_LIBRARY_PATH run_mpi_test "${BREW_PREFIX}/opt/mpich/bin/mpirun" "MPICH" "" brew unlink mpich brew link openmpi + export DYLD_LIBRARY_PATH=${BREW_PREFIX}/opt/open-mpi/lib:$DYLD_LIBRARY_PATH run_mpi_test "${BREW_PREFIX}/opt/open-mpi/bin/mpirun" "OpenMPI" "" # CI Linux or Azure Linux @@ -299,12 +256,11 @@ $python_exe -m pip install --upgrade pip # install numpy, pytest and neuron -$python_exe -m pip install numpy pytest +# we install setuptools because since python 3.12 it is no more installed +# by default +$python_exe -m pip install numpy pytest setuptools $python_exe -m pip install $python_wheel -$python_exe -m pip show neuron \ - || $python_exe -m pip show neuron-nightly \ - || $python_exe -m pip show neuron-gpu \ - || $python_exe -m pip show neuron-gpu-nightly +$python_exe -m pip show neuron || $python_exe -m pip show neuron-nightly # check the existence of coreneuron support @@ -313,27 +269,8 @@ if echo $compile_options | grep "NRN_ENABLE_CORENEURON=ON" > /dev/null ; then has_coreneuron=true fi -# check if the gpu support is enabled -if echo $compile_options | grep "CORENRN_ENABLE_GPU=ON" > /dev/null ; then - has_gpu_support=true -fi - -# in case of gpu support, nvc/nvc++ compiler must exist to compile mod files -if [[ "$has_gpu_support" == "true" ]]; then - if ! command -v nvc &> /dev/null; then - has_dev_env=false - fi - - # check if nvidia gpu exist (todo: not a robust check) - if pgaccelinfo -nvidia | grep -q "Device Name"; then - run_gpu_test=true - fi -fi - - # run tests -test_wheel $(which python) - +test_wheel "${python_exe}" # cleanup if [[ "$use_venv" != "false" ]]; then diff --git a/repository_mirror b/repository_mirror deleted file mode 100755 index a686d5203e..0000000000 --- a/repository_mirror +++ /dev/null @@ -1,168 +0,0 @@ -#!/usr/bin/env bash - -#github master branch and bitbucket trunk branch mirror each other -# keep neuron.yale.edu up to date with respect to bitbucket - -#list of repositories to mirror. 
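Regarding the setuptools note added to test_wheels.sh above (Python 3.12 no longer ships setuptools or distutils into fresh environments by default), a quick way to see what a given interpreter actually provides is a check along these lines; it is purely illustrative and not part of the test script.

    import importlib.util
    import sys

    # On CPython 3.12 a clean virtual environment typically reports both as missing
    # until setuptools is installed explicitly, which is why test_wheels.sh now adds it.
    for mod in ("setuptools", "distutils"):
        found = importlib.util.find_spec(mod) is not None
        print("Python %d.%d:" % sys.version_info[:2], mod, "available" if found else "missing")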
-repositories="iv nrn" - -#crontab -e -#0 */3 * * * /home/hines/neuron/repository_mirror/repository_mirror - -# bitbucket and github settings have ssh keys to allow push without -# password. - -# https://github.com/felipec/git-remote-hg software is used to make -# a git clone of the bitbucket hg repository using 'git clone hg::ssh:...' -# and track the remote origin/branches/trunk with the local branches/trunk. -# Also, the git repository on github has its remote git/master branch -# tracked by the local master branch. - -# It is asserted that only one remote repository can be ahead of the local -# repository. If both are ahead, the merges must be done manually. -# If bitbucket is ahead of github, then the branches/trunk is (fast forward) -# merged to master and the latter is pushed to github. -# If github is ahead of bitbucket, then master is (fast forward) merged to -# branches/trunk and the latter is pushed to bitbucket. - -# original bootstrapping requires a bitbucket hg repository and a -# github git repository with the same name (manually -# created from an earlier version of the hg repository). If the git -# repository is empty, this script will create a master branch and push - -# an example of how to re-establish consistency when both hg and git -# repositories are different is -if false ; then -git clone http://github.com/nrnhines/nrn nrngit -hg clone http://bitbucket.org/nrnhines/nrn nrnhg -git clone hg::nrnhg temp - -cd temp - -git fetch -git checkout -b branches/trunk origin/branches/trunk -git config core.notesRef refs/notes/hg - -git remote add nrngit /home/hines/neuron/hggit/nrngit -git fetch nrngit - -git checkout branches/trunk -git merge nrngit/master #and fix conflicts -git commit - -git push origin -git checkout master -git merge branches/trunk -cd ../nrngit -git pull ../temp - -fi - -local=$HOME/neuron/hggit -hgmastersite=ssh://hg@bitbucket.org/nrnhines -gitmastersite=ssh://git@github.com/nrnhines -hgcopysite=ssh://hines@neuron.yale.edu//home/hg/neuron - -mkdir -p $local -cd $local - -for rep in $repositories ; do - echo -e "\n\n\n${rep}\n" - cd $local - if test ! -d ${rep}hggit ; then - git clone hg::$hgmastersite/${rep} ${rep}hggit - cd ${rep}hggit - git fetch - git checkout -b branches/trunk origin/branches/trunk - git config core.notesRef refs/notes/hg - - # is the github just initialized with no commits or can - # its master be checked out - git remote add git $gitmastersite/$rep - if ! git fetch git ; then - echo "$gitmastersite/$rep does not exist. Initialize it on github" - exit 1 - fi - if ! git checkout -b master git/master ; then - echo "github empty so create master branch and push" - git branch master - git checkout master - git push -u git master - elif test "`git status -b --porcelain`" != "## master...git/master" ; then - echo "not on branch master...git/master" - echo "$gitmastersite/$rep has no commits" - exit 1 - fi - echo "on branch master...git/master" - cd .. - fi - - cd ${rep}hggit - - # verify validity - git branch --list -vv - - oldhg="`git log branches/trunk --format=\"%h\" -n 1`" - oldgit="`git log master --format=\"%h\" -n 1`" - - git checkout branches/trunk - git pull - git checkout master - git pull - - newhg="`git log branches/trunk --format=\"%h\" -n 1`" - newgit="`git log master --format=\"%h\" -n 1`" - - echo oldhg $oldhg - echo oldgit $oldgit - echo newhg $newhg - echo newgit $newgit - - if test "$newgit" != "$newhg" ; then - # they are the different. - # pull from the one ahead and push to the one behind. 
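The shell lines that follow implement this ahead/behind test with two git log range queries; the same idea, sketched in Python for readability (a hypothetical helper, not used by the script itself):

    import subprocess

    def commits_only_in(a, b, repo="."):
        """Commits reachable from b but not from a; empty means a is ahead of (or equal to) b."""
        out = subprocess.run(
            ["git", "log", "--format=%h", "%s..%s" % (a, b)],
            cwd=repo, capture_output=True, text=True, check=True,
        )
        return out.stdout.split()

    # master ahead of branches/trunk  <=>  commits_only_in("master", "branches/trunk") == []
    # branches/trunk ahead of master  <=>  commits_only_in("branches/trunk", "master") == []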
- m2t=`git log -n 1 --format="%h" master..branches/trunk` - t2m=`git log -n 1 --format="%h" branches/trunk..master` - if test "$m2t" = "" ; then # empty means master ahead of trunk - echo "merge $rep master to trunk and push to bitbucket" - git checkout branches/trunk - git merge master branches/trunk - git push origin - elif test "$t2m" = "" ; then - echo "merge $rep trunk to master and push to github" - git checkout master - git merge branches/trunk master - git push git - else - echo "Both github and bitbucket for $rep have changes. Resolve manually." - continue - fi - fi - - cd $local - # update neuron.yale.edu if necessary - - hgcopyid=`hg identify -i $hgcopysite/$rep` - err=$? - if test "$err" != 0 ; then # if the repository does not exist - echo "$hgcopysite/$rep does not exist" - continue - fi - - hgmasterid=`hg identify -i $hgmastersite/$rep` - if test "$hgmasterid" = "$hgcopyid" ; then - continue - fi - - if test ! -d "$local/${rep}hg" ; then - echo "cloning bitbucket repository into the local ${rep}hg" - hg clone $hgmastersite/$rep ${rep}hg - fi - - echo $repname $hgmasterid $hgcopyid - - cd $local/${rep}hg - hg pull -u - hg push $hgcopysite/$rep - -done diff --git a/setup.py b/setup.py index c673311de2..bccf1bed53 100644 --- a/setup.py +++ b/setup.py @@ -1,5 +1,4 @@ import os -import re import shutil import subprocess import sys @@ -21,7 +20,6 @@ class Components: MPI = True MUSIC = False # still early support CORENRN = False # still early support - GPU = False # still early support # Check if we've got --cmake-build-dir path that will be used to build extensions only @@ -94,10 +92,6 @@ class Components: Components.CORENRN = True sys.argv.remove("--enable-coreneuron") -if "--enable-gpu" in sys.argv: - Components.GPU = True - sys.argv.remove("--enable-gpu") - if "--enable-music" in sys.argv: Components.MUSIC = True sys.argv.remove("--enable-music") @@ -242,7 +236,7 @@ def _run_cmake(self, ext): "-DPYTHON_EXECUTABLE=" + sys.executable, "-DCMAKE_BUILD_TYPE=" + cfg, ] + ext.cmake_flags - # RTD neds quick config + # RTD needs quick config if self.docs and os.environ.get("READTHEDOCS"): cmake_args = ["-DNRN_ENABLE_MPI=OFF", "-DNRN_ENABLE_INTERVIEWS=OFF"] if self.docs: @@ -321,16 +315,6 @@ def _run_cmake(self, ext): cwd=self.build_temp, env=env, ) - if Components.GPU: - subprocess.check_call( - [ - ext.sourcedir - + "/packaging/python/fix_target_processor_in_makefiles.sh", - ext.cmake_install_prefix, - ], - cwd=self.build_temp, - env=env, - ) except subprocess.CalledProcessError as exc: logging.error("Status : FAIL. 
Logging.\n%s", exc.output) @@ -370,7 +354,7 @@ def setup_package(): NRN_COLLECT_DIRS = ["bin", "lib", "include", "share"] docs_require = [] # sphinx, themes, etc - maybe_rxd_reqs = ["numpy", "Cython"] if Components.RX3D else [] + maybe_rxd_reqs = ["numpy", "Cython<3"] if Components.RX3D else [] maybe_docs = docs_require if "docs" in sys.argv else [] maybe_test_runner = ["pytest-runner"] if "test" in sys.argv else [] @@ -388,11 +372,7 @@ def setup_package(): ext_common_libraries = ["nrniv"] if not without_nrnpython: - nrn_python_lib = "nrnpython{}".format( - sys.version_info[0] - if sys.platform != "win32" - else str(sys.version_info[0]) + str(sys.version_info[1]) - ) + nrn_python_lib = "nrnpython{}.{}".format(*sys.version_info[:2]) ext_common_libraries.append(nrn_python_lib) extension_common_params = defaultdict( @@ -423,18 +403,14 @@ def setup_package(): "-DNRN_ENABLE_PYTHON_DYNAMIC=ON", "-DNRN_ENABLE_MODULE_INSTALL=OFF", "-DNRN_ENABLE_REL_RPATH=ON", - "-DLINK_AGAINST_PYTHON=OFF", "-DCMAKE_VERBOSE_MAKEFILE=OFF", - "-DCORENRN_ENABLE_OPENMP=ON", # TODO: manylinux portability questions ] + ( [ - "-DCORENRN_ENABLE_GPU=ON", - "-DCMAKE_C_COMPILER=nvc", # use nvc and nvc++ for GPU support - "-DCMAKE_CXX_COMPILER=nvc++", - "-DCMAKE_CUDA_COMPILER=nvcc", + "-DCORENRN_ENABLE_OPENMP=ON", # TODO: manylinux portability questions + "-DNMODL_ENABLE_PYTHON_BINDINGS=ON", ] - if Components.GPU + if Components.CORENRN else [] ), include_dirs=[ @@ -511,15 +487,11 @@ def setup_package(): logging.info("RX3D is %s", "ENABLED" if Components.RX3D else "DISABLED") # package name - package_name = "NEURON-gpu" if Components.GPU else "NEURON" + package_name = "NEURON" # For CI, we want to build separate wheel with "-nightly" suffix package_name += os.environ.get("NEURON_NIGHTLY_TAG", "-nightly") - # GPU wheels use patchelf to avoid duplicating NVIDIA runtime libraries when - # using nrnivmodl. 
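For the nrnpython library renaming in setup.py above: under the new scheme the helper library name always carries the full major.minor version on every platform, matching the libnrnpythonX.Y naming used elsewhere in this change, e.g.:

    import sys

    # New naming from setup.py above: both digits are always included.
    nrn_python_lib = "nrnpython{}.{}".format(*sys.version_info[:2])
    print(nrn_python_lib)  # e.g. "nrnpython3.11" on CPython 3.11, "nrnpython3.12" on 3.12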
- maybe_patchelf = ["patchelf"] if Components.GPU else [] - setup( name=package_name, package_dir={"": NRN_PY_ROOT}, @@ -537,7 +509,7 @@ def setup_package(): else "node-and-date" }, cmdclass=dict(build_ext=CMakeAugmentedBuilder, docs=Docs), - install_requires=["numpy>=1.9.3", "packaging"] + maybe_patchelf, + install_requires=["numpy>=1.9.3", "packaging", "find_libpython", "setuptools"], tests_require=["flake8", "pytest"], setup_requires=["wheel", "setuptools_scm"] + maybe_docs diff --git a/share/examples/nrniv/nmodl/fit1.hoc b/share/examples/nrniv/nmodl/fit1.hoc index 8a1eb8545a..f6248d3421 100755 --- a/share/examples/nrniv/nmodl/fit1.hoc +++ b/share/examples/nrniv/nmodl/fit1.hoc @@ -9,4 +9,3 @@ attr_praxis(1e-5, .1, 1) fit_praxis(2, "err_fit1", &parm[0]) printf("Final value a=%g k=%g\n", parm[0], parm[1]) - \ No newline at end of file diff --git a/share/lib/helpdict b/share/lib/helpdict index 15a534580e..fbfaf1a699 100755 --- a/share/lib/helpdict +++ b/share/lib/helpdict @@ -252,7 +252,6 @@ flush Graph classes general neuron.exe Reference 46 neuron/general/classes/graph flush PlotShape classes neuron neuron.exe Reference 301 neuron/neuron/classes/pltshape.html#flush flush Shape classes neuron neuron.exe Reference 323 neuron/neuron/classes/shape.html#flush fmatrix functions neuron neuron.exe Reference 369 neuron/neuron/nrnoc.html#fmatrix -fmenu obsolete functions general neuron.exe Reference 190 neuron/general/function/obsolete/lw.html#fmenu for keywords general neuron.exe Reference 227 neuron/general/keywords/ockeywor.html#for forall CurrentlyAccessedSection Section neuron neuron.exe Reference 381 neuron/neuron/secspec.html#forall forsec CurrentlyAccessedSection Section neuron neuron.exe Reference 383 neuron/neuron/secspec.html#forsec diff --git a/share/lib/hoc/corenrn.hoc b/share/lib/hoc/corenrn.hoc index 90215e35b5..d189064042 100644 --- a/share/lib/hoc/corenrn.hoc +++ b/share/lib/hoc/corenrn.hoc @@ -42,7 +42,6 @@ proc save() { proc run() { sprint(tstr, "-e %g", tstop) - cvode.cache_efficient(1) coreneuronrunning_ = 1 stdinit() coreneuronrunning_ = 0 diff --git a/share/lib/hoc/impedanc.hoc b/share/lib/hoc/impedanc.hoc index 90c772e0c1..53822da8b1 100644 --- a/share/lib/hoc/impedanc.hoc +++ b/share/lib/hoc/impedanc.hoc @@ -18,7 +18,7 @@ usage: // each time inject or measure is moved imp.extended = 1 // if you want to perform the extended impedance // calculation described in - // http://neuron.yale.edu/neuron/docs/help/neuron/neuron/classes/impedanc.html + // https://nrn.readthedocs.io/en/latest/python/analysis/programmatic/impedance.html Notes: Impedance objects are expensive in terms of storage but efficiently calculate input impedances everywhere and diff --git a/share/lib/hoc/import3d/import3d_gui.hoc b/share/lib/hoc/import3d/import3d_gui.hoc index 67b9173a97..205d24a1a8 100755 --- a/share/lib/hoc/import3d/import3d_gui.hoc +++ b/share/lib/hoc/import3d/import3d_gui.hoc @@ -1227,14 +1227,24 @@ proc contour2centroid() {local i, j, imax, imin, ok localobj mean, pts, d, max, tobj = m.symmeig(m) // major axis is the one with largest eigenvalue major = m.getcol(tobj.max_ind) + // For plotting and round-off error consistency + // between meschach and eigen use the heuristic that + // major axis points in positive direction. + // i.e. if greatest value is negative, reverse orientation. 
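The heuristic spelled out in the new comment, restated with numpy for clarity (a small sketch; the hoc lines that follow apply it to the major eigenvector of the contour points):

    import numpy as np

    def orient_positive(v):
        """Flip v so that its largest-magnitude component is positive."""
        if v[np.argmax(np.abs(v))] < 0:
            return -v
        return v

    # Example: the dominant component is negative, so the vector is reversed.
    print(orient_positive(np.array([0.1, -0.9, 0.2])))  # -> [-0.1  0.9 -0.2]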
+ if (major.x[major.c.abs().max_ind()] < 0) { + major.mul(-1) + } // minor is normal and in xy plane minor = m.getcol(3-tobj.min_ind-tobj.max_ind) minor.x[2] = 0 + if (minor.mag / (major.mag + 1e-100) < 1e-6) { + execerror("Failed to compute soma centroid from contour.") + } minor.div(minor.mag) -if (g != nil) { -g.beginline(4, 3) g.line(mean.x[0], mean.x[1]) -g.line(mean.x[0] + 20*major.x[0], mean.x[1] + 20*major.x[1]) g.flush -} + if (g != nil) { + g.beginline(4, 3) g.line(mean.x[0], mean.x[1]) + g.line(mean.x[0] + 20*major.x[0], mean.x[1] + 20*major.x[1]) g.flush + } d = new Vector(pts.ncol) rad = new Vector(pts.ncol) for i=0, pts.ncol-1 { diff --git a/share/lib/hoc/import3d/read_nlcda3.hoc b/share/lib/hoc/import3d/read_nlcda3.hoc index 142d78cd34..f1a2602ab5 100755 --- a/share/lib/hoc/import3d/read_nlcda3.hoc +++ b/share/lib/hoc/import3d/read_nlcda3.hoc @@ -545,7 +545,7 @@ proc b2soption_split() {local i, n, id, ip localobj p, newsec, tobj } proc remove_trailspace() { // yuck - hoc_sf_.head(line, " *$", line) + hoc_sf_.rtrim(line, line) sprint(line, "%s\n", line) } diff --git a/share/lib/hoc/loadbal.hoc b/share/lib/hoc/loadbal.hoc index 061abe3b95..049fce8eee 100644 --- a/share/lib/hoc/loadbal.hoc +++ b/share/lib/hoc/loadbal.hoc @@ -44,7 +44,7 @@ proc init() {local i, j localobj ms ms = new MechanismStandard(mname, 3) m_complex_[j].x[i] = 1 + ms.count // printf("complexity %d for %s\n", m_complex_[j].x[i], mname) - if (j == 0 && hoc_sf_.substr(mname, "_ion") != -1) { + if (j == 0 && mt[j].is_ion()) { m_complex_[j].x[i] = 0 ion_complex_.x[i] = 1 } @@ -388,7 +388,7 @@ proc ExperimentalMechComplex() {local i, j, k, b, ts, ns, baseindex, irun, par \ for i=0, mt[0].count-1 { mt[0].select(i) mt[0].selected(s.s) - if (hoc_sf_.substr(s.s, "_ion") != -1) { + if(mt[0].is_ion()) { ionindices.x[i] = j j += 1 } @@ -404,7 +404,6 @@ proc ExperimentalMechComplex() {local i, j, k, b, ts, ns, baseindex, irun, par \ ninstance = new Matrix(j, j, 2) // fixed step with cache efficiency cvode.active(0) - cvode.cache_efficient(1) cmd = new String() ts = 100 @@ -652,7 +651,6 @@ obfunc makesec() {localobj s, sr execute(s.s) sr = hoc_obj_[1] cvode.use_mxb(0) // extracellular would turn this on - cvode.cache_efficient(1) // extracellular would turn this off return sr } diff --git a/share/lib/hoc/mview/mview1.hoc b/share/lib/hoc/mview/mview1.hoc index bdd025484b..1960b83947 100755 --- a/share/lib/hoc/mview/mview1.hoc +++ b/share/lib/hoc/mview/mview1.hoc @@ -127,13 +127,14 @@ proc init() {local i, n, icdis, ntop display.top.append(pdis) - if (nrnpython("import neuron.rxd")) { - pyobj.neuron.rxd._model_view(display.top) + if (pyobj.neuron._userrxd) { + if (nrnpython("import neuron.rxd")) { + pyobj.neuron.rxd._model_view(display.top) + } } - appv = new ModelViewAllPP(this) appv.mkdisplay(pdis) diff --git a/share/lib/hoc/parcom.hoc b/share/lib/hoc/parcom.hoc index cf8b8e48ce..8b0a4e5d3e 100644 --- a/share/lib/hoc/parcom.hoc +++ b/share/lib/hoc/parcom.hoc @@ -34,7 +34,6 @@ proc init() { lb = new LoadBalance(pc) nthread_ = pc.nthread() pct = this - cacheeffic_ = cvode.cache_efficient() busywait_ = 0 nprocstr = "?? 
useful processors" @@ -55,7 +54,6 @@ proc build() { xvarlabel(ldbalstr) xpvalue("# threads", &nthread_, 1, "change_nthread(nthread_, ispar_)") xcheckbox("Thread Parallel", &ispar_, "change_nthread(nthread_, ispar_)") - xcheckbox("Cache Efficient", &cacheeffic_, "cacheeffic(cacheeffic_)") xcheckbox("Use busy waiting", &busywait_, "busywait(busywait_)") xcheckbox("Multisplit", &multisplit_, "multisplit(multisplit_)") xbutton("Refresh", "howmany() totalcx() ldbal()") @@ -79,7 +77,6 @@ proc save() { sprint(tstr, "{change_nthread(%d, %d)}", nthread_, ispar_) box.save(tstr) if (multisplit_) {box.save("{multisplit(1)}")} - if (multisplit_ == 0 && cacheeffic_) {box.save("{cacheeffic(1)}")} if (busywait_) {box.save("{busywait(1)}")} box.save("{object_pop()}") box.save("{\n") @@ -118,12 +115,6 @@ proc ldbal() { sprint(npiecestr, "%d pieces", lb.npiece_) sprint(ldbalstr, "Load imbalance: %.1f%%", (ldbal_ - 1)*100) } -proc cacheeffic() { - if (!multisplit_) { - cvode.cache_efficient($1) - } - cacheeffic_ = cvode.cache_efficient() -} proc busywait() { if (!ispar_) {$1 = 0} @@ -149,13 +140,6 @@ func pthread() { return ispar_ } -func cache() { - if (numarg()) { - cache_effic($1) - } - return cacheeffic_ -} - proc change_nthread() {local ms ms = multisplit_ if (ms) { @@ -202,7 +186,6 @@ proc use_thread() {local cx, cellx localobj b, ms, vs, cb, nc, nil, roots cxtotal_ = lb.cpu_complexity() ldbal() pc.multisplit() - cacheeffic_ = cvode.cache_efficient() multisplit_ = 1 } @@ -302,7 +285,6 @@ proc unsplit() {local i, j localobj si, sil } } } - cacheeffic_ = cvode.cache_efficient(0) multisplit_ = 0 } diff --git a/share/lib/nrnunits.lib.in b/share/lib/nrnunits.lib similarity index 95% rename from share/lib/nrnunits.lib.in rename to share/lib/nrnunits.lib index 66481db370..b0839ca173 100755 --- a/share/lib/nrnunits.lib.in +++ b/share/lib/nrnunits.lib @@ -68,10 +68,8 @@ pi 3.14159265358979323846 c 2.99792458+8 m/sec fuzz g 9.80665 m/sec2 au 1.49597871+11 m fuzz -@LegacyY@mole 6.022169+23 fuzz -@LegacyN@mole 6.02214076+23 fuzz -@LegacyY@e 1.6021917-19 coul fuzz -@LegacyN@e 1.602176634-19 coul fuzz +mole 6.02214076+23 fuzz +e 1.602176634-19 coul fuzz energy c2 force g mercury 1.33322+5 kg/m2-sec2 @@ -394,8 +392,7 @@ ev e-volt / faraday 9.652000+04 coul / faraday from host: physics.nist.gov / path: /PhysRefData/fundconst/html/keywords.html -@LegacyY@faraday 9.6485309+4 coul -@LegacyN@faraday e-mole +faraday e-mole fathom 6 ft fermi 1-15 m fifth 4|5 qt @@ -431,8 +428,7 @@ hyl gm force sec2/m hz /sec imaginarycubicfoot 1.4 ft3 jeroboam 4|5 gal -@LegacyY@boltzmann 1.38064852-23 joule/K -@LegacyN@boltzmann 1.380649-23 joule/K +boltzmann 1.380649-23 joule/K k boltzmann karat 1|24 kcal kilocal @@ -501,8 +497,7 @@ quarter 9 in quartersection 1|4 mi2 quintal 100 kg quire 25 -@LegacyY@gasconstant 8.3144598 joule/K -@LegacyN@gasconstant k-mole +gasconstant k-mole R gasconstant rad 100 erg/gm ream 500 @@ -571,10 +566,8 @@ tex .001 gram / m englishell 45 inch scottishell 37.2 inch flemishell 27 inch -@LegacyY@planck 6.626-34 joule-sec -@LegacyN@planck 6.62607015-34 joule-sec -@LegacyY@hbar 1.055-34 joule-sec -@LegacyN@hbar planck/two-pi +planck 6.62607015-34 joule-sec +hbar planck/two-pi electronmass 9.1095-31 kg protonmass 1.6726-27 kg neutronmass 1.6606-27 kg diff --git a/share/lib/python/neuron/__init__.py b/share/lib/python/neuron/__init__.py index 1c4614e81a..c48fdd56ef 100644 --- a/share/lib/python/neuron/__init__.py +++ b/share/lib/python/neuron/__init__.py @@ -6,7 +6,7 @@ For empirically-based simulations of neurons 
and networks of neurons in Python. This is the top-level module of the official python interface to -the NEURON simulation environment (http://neuron.yale.edu/neuron/). +the NEURON simulation environment (https://nrn.readthedocs.io). Documentation is available in the docstrings. @@ -106,6 +106,19 @@ embedded = True if "hoc" in sys.modules else False +# First, check that the compiled extension (neuron.hoc) was built for this version of +# Python. If not, fail early and helpfully. +from ._config_params import supported_python_versions + +current_version = "{}.{}".format(*sys.version_info[:2]) +if current_version not in supported_python_versions: + message = ( + "Python {} is not supported by this NEURON installation (supported: {}). Either re-build " + "NEURON with support for this version, use a supported version of Python, or try using " + "nrniv -python so that NEURON can suggest a compatible version for you." + ).format(current_version, " ".join(supported_python_versions)) + raise ImportError(message) + try: # needed since python 3.8 on windows if python launched # do this here as NEURONHOME may be changed below nrnbindir = os.path.abspath(os.environ["NEURONHOME"] + "/bin") @@ -131,17 +144,21 @@ except: pass -try: - from . import hoc -except: - import neuron.hoc +# Import the compiled HOC extension. We already checked above that it exists for the +# current Python version. +from . import hoc +# These are strange beasts that are defined inside the compiled `hoc` extension, all +# efforts to make them relative imports (because they are internal) have failed. It's +# not clear if the import of _neuron_section is needed, and this could probably be +# handled more idiomatically. import nrn import _neuron_section h = hoc.HocObject() version = h.nrnversion(5) __version__ = version +_userrxd = False # Initialise neuron.config.arguments from neuron import config @@ -219,6 +236,8 @@ def _check_for_intel_openmp(): setattr(hoc, "__file__", hoc_path) else: _original_hoc_file = hoc.__file__ + + # As a workaround to importing doc at neuron import time # (which leads to chicken and egg issues on some platforms) # define a dummy help function which imports doc, @@ -489,7 +508,7 @@ def psection(section): See: - https://www.neuron.yale.edu/neuron/static/py_doc/modelspec/programmatic/topology.html?#psection + https://nrn.readthedocs.io/en/latest/python/modelspec/programmatic/topology.html#psection """ warnings.warn( "neuron.psection() is deprecated; use print(sec.psection()) instead", @@ -521,7 +540,7 @@ def init(): from neuron.units import mV h.finitialize(-65 * mV) - https://www.neuron.yale.edu/neuron/static/py_doc/simctrl/programmatic.html?#finitialize + https://nrn.readthedocs.io/en/latest/python/simctrl/programmatic.html#finitialize """ warnings.warn( @@ -656,12 +675,11 @@ def nrn_dll_sym_nt(name, type): if len(nt_dlls) == 0: b = "bin" - if h.nrnversion(8).find("i686") == 0: - b = "bin" path = os.path.join(h.neuronhome().replace("/", "\\"), b) - fac = 10 if sys.version_info[1] < 10 else 100 # 3.9 is 39 ; 3.10 is 310 - p = sys.version_info[0] * fac + sys.version_info[1] - for dllname in ["libnrniv.dll", "libnrnpython%d.dll" % p]: + for dllname in [ + "libnrniv.dll", + "libnrnpython{}.{}.dll".format(*sys.version_info[:2]), + ]: p = os.path.join(path, dllname) try: nt_dlls.append(ctypes.cdll[p]) @@ -994,7 +1012,8 @@ class _PlotShapePlot(_WrapperPlot): ps.plot(pyplot) pyplot.show() - Limitations: many. Currently only supports plotting a full cell colored based on a variable.""" + Limitations: many. 
Currently only supports plotting a full cell colored based on a variable. + """ # TODO: handle pointmark, specified sections, color def __call__(self, graph, *args, **kwargs): @@ -1002,6 +1021,7 @@ def __call__(self, graph, *args, **kwargs): def _get_pyplot_axis3d(fig): """requires matplotlib""" + from . import rxd from matplotlib.pyplot import cm import matplotlib.pyplot as plt from mpl_toolkits.mplot3d import Axes3D @@ -1036,7 +1056,15 @@ def mark(self, segment, marker="or", **kwargs): return self def _do_plot( - self, val_min, val_max, sections, variable, cmap=cm.cool, **kwargs + self, + val_min, + val_max, + sections, + variable, + mode, + line_width=2, + cmap=cm.cool, + **kwargs, ): """ Plots a 3D shapeplot @@ -1056,17 +1084,25 @@ def _do_plot( h.define_shape() - # default color is black - kwargs.setdefault("color", "black") - # Plot each segement as a line lines = {} lines_list = [] vals = [] + + if isinstance(variable, rxd.species.Species): + if len(variable.regions) > 1: + raise Exception("Please specify region for the species.") + for sec in sections: all_seg_pts = _segment_3d_pts(sec) for seg, (xs, ys, zs, _, _) in zip(sec, all_seg_pts): - (line,) = self.plot(xs, ys, zs, "-", **kwargs) + if mode == 0: + width = seg.diam + else: + width = line_width + (line,) = self.plot( + xs, ys, zs, "-", linewidth=width, **kwargs + ) if variable is not None: val = _get_variable_seg(seg, variable) vals.append(val) @@ -1083,18 +1119,25 @@ def _do_plot( for sec in sections: for line, val in zip(lines_list, vals): if val is not None: - col = _get_color( - variable, - val, - cmap, - val_min, - val_max, - val_range, - ) - line.set_color(col) + if "color" not in kwargs: + col = _get_color( + variable, + val, + cmap, + val_min, + val_max, + val_range, + ) + else: + col = kwargs["color"] + else: + col = kwargs.get("color", "black") + line.set_color(col) return lines - return Axis3DWithNEURON(fig) + ax = Axis3DWithNEURON(fig) + fig.add_axes(ax) + return ax def _get_variable_seg(seg, variable): if isinstance(variable, str): @@ -1132,7 +1175,7 @@ def _get_3d_pt(segment): z = np.interp(seg_l, arc3d, z3d) return x, y, z - def _do_plot_on_matplotlib_figure(fig): + def _do_plot_on_matplotlib_figure(fig, *args, **kwargs): import ctypes get_plotshape_data = nrn_dll_sym("get_plotshape_data") @@ -1144,7 +1187,9 @@ def _do_plot_on_matplotlib_figure(fig): variable = varobj kwargs.setdefault("picker", 2) result = _get_pyplot_axis3d(fig) - _lines = result._do_plot(lo, hi, secs, variable, *args, **kwargs) + ps = self._data + mode = ps.show() + _lines = result._do_plot(lo, hi, secs, variable, mode, *args, **kwargs) result._mouseover_text = "" def _onpick(event): @@ -1172,7 +1217,7 @@ def _get_color(variable, val, cmap, lo, hi, val_range): elif val > hi: col = color_to_hex(cmap(255)) else: - val = color_to_hex(128) + col = color_to_hex(cmap(128)) else: col = color_to_hex( cmap(int(255 * (min(max(val, lo), hi) - lo) / (val_range))) @@ -1185,15 +1230,17 @@ def color_to_hex(col): [item if len(item) == 2 else "0" + item for item in items] ) - def _do_plot_on_plotly(): + def _do_plot_on_plotly(width=2, color=None, cmap=None): """requires matplotlib for colormaps if not specified explicitly""" import ctypes + from . 
import rxd import plotly.graph_objects as go class FigureWidgetWithNEURON(go.FigureWidget): def mark(self, segment, marker="or", **kwargs): """plot a marker on a segment + Args: segment = the segment to mark **kwargs = passed to go.Scatter3D plot @@ -1221,12 +1268,16 @@ def mark(self, segment, marker="or", **kwargs): variable, varobj, lo, hi, secs = get_plotshape_data( ctypes.py_object(self._data) ) + + ps = self._data + mode = ps.show() + if varobj is not None: variable = varobj if secs is None: secs = list(h.allsec()) - if variable is None: + if variable is None and varobj is None: kwargs.setdefault("color", "black") data = [] @@ -1242,7 +1293,7 @@ def mark(self, segment, marker="or", **kwargs): name="", hovertemplate=str(sec), mode="lines", - line=go.scatter3d.Line(color=kwargs["color"], width=2), + line=go.scatter3d.Line(color=kwargs["color"], width=width), ) ) return FigureWidgetWithNEURON(data=data, layout={"showlegend": False}) @@ -1255,13 +1306,19 @@ def mark(self, segment, marker="or", **kwargs): kwargs["cmap"] = cm.cool cmap = kwargs["cmap"] - show_diam = False + + # show_diam = False # calculate bounds val_range = hi - lo data = [] + + if isinstance(variable, rxd.species.Species): + if len(variable.regions) > 1: + raise Exception("Please specify region for the species.") + for sec in secs: all_seg_pts = _segment_3d_pts(sec) for seg, (xs, ys, zs, _, _) in zip(sec, all_seg_pts): @@ -1269,11 +1326,15 @@ def mark(self, segment, marker="or", **kwargs): hover_template = str(seg) if val is not None: hover_template += "
" + ("%.3f" % val) - col = _get_color(variable, val, cmap, lo, hi, val_range) - if show_diam: + if color is None: + col = _get_color(variable, val, cmap, lo, hi, val_range) + else: + col = color + if mode == 0: diam = seg.diam else: - diam = 2 + diam = width + data.append( go.Scatter3d( x=xs, @@ -1291,9 +1352,9 @@ def mark(self, segment, marker="or", **kwargs): if hasattr(graph, "__name__"): if graph.__name__ == "matplotlib.pyplot": fig = graph.figure() - return _do_plot_on_matplotlib_figure(fig) + return _do_plot_on_matplotlib_figure(fig, *args, **kwargs) elif graph.__name__ == "plotly": - return _do_plot_on_plotly() + return _do_plot_on_plotly(*args, **kwargs) elif str(type(graph)) == "": return _do_plot_on_matplotlib_figure(graph) raise NotImplementedError @@ -1614,25 +1675,26 @@ def nrnpy_pr(stdoe, s): return 0 +# nrnpy_pr callback in place of hoc printf +# ensures consistent with python stdout even with jupyter notebook. +# nrnpy_pass callback used by h.doNotify() in MINGW when not called from +# gui thread in order to allow the gui thread to run. +# When this was introduced in ef4da5dbf293580ee1bf86b3a94d3d2f80226f62 it was wrapped in a +# try .. except .. pass block for reasons that are not obvious to olupton, who removed it. if not embedded: - try: - # nrnpy_pr callback in place of hoc printf - # ensures consistent with python stdout even with jupyter notebook. - # nrnpy_pass callback used by h.doNotify() in MINGW when not called from - # gui thread in order to allow the gui thread to run. + # Unconditionally redirecting NEURON printing via Python seemed to cause re-ordering + # of NEURON output in the ModelDB CI. This might be because the redirection is only + # triggered by `import neuron`, and an arbitrary amount of NEURON code may have been + # executed before that point. 
+ nrnpy_set_pr_etal = nrn_dll_sym("nrnpy_set_pr_etal") - nrnpy_set_pr_etal = nrn_dll_sym("nrnpy_set_pr_etal") + nrnpy_pr_proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p) + nrnpy_pass_proto = ctypes.CFUNCTYPE(ctypes.c_int) + nrnpy_set_pr_etal.argtypes = [nrnpy_pr_proto, nrnpy_pass_proto] - nrnpy_pr_proto = ctypes.CFUNCTYPE(ctypes.c_int, ctypes.c_int, ctypes.c_char_p) - nrnpy_pass_proto = ctypes.CFUNCTYPE(ctypes.c_int) - nrnpy_set_pr_etal.argtypes = [nrnpy_pr_proto, nrnpy_pass_proto] - - nrnpy_pr_callback = nrnpy_pr_proto(nrnpy_pr) - nrnpy_pass_callback = nrnpy_pass_proto(nrnpy_pass) - nrnpy_set_pr_etal(nrnpy_pr_callback, nrnpy_pass_callback) - except: - print("Failed to setup nrnpy_pr") - pass + nrnpy_pr_callback = nrnpy_pr_proto(nrnpy_pr) + nrnpy_pass_callback = nrnpy_pass_proto(nrnpy_pass) + nrnpy_set_pr_etal(nrnpy_pr_callback, nrnpy_pass_callback) def nrnpy_vec_math(op, flag, arg1, arg2=None): @@ -1672,6 +1734,10 @@ def _nrnpy_rvp_pyobj_callback(f): if f_type not in ( "", "", + "", + "", + "", + "", ): return f @@ -1682,6 +1748,8 @@ def _nrnpy_rvp_pyobj_callback(f): fref = weakref.ref(f) def result(x): + if x == 0 or x == 1: + raise Exception("Concentration is only defined for interior.") sp = fref() if sp: try: diff --git a/share/lib/python/neuron/coreneuron.py b/share/lib/python/neuron/coreneuron.py index 00bcb0f81e..b384614a23 100644 --- a/share/lib/python/neuron/coreneuron.py +++ b/share/lib/python/neuron/coreneuron.py @@ -1,6 +1,32 @@ import sys +class CoreNEURONContextHelper(object): + def __init__(self, coreneuron, new_values): + self._coreneuron = coreneuron + self._new_values = new_values + self._old_values = None + + def __enter__(self): + assert self._new_values is not None + assert self._old_values is None + self._old_values = {} + for k, v in self._new_values.items(): + self._old_values[k] = getattr(self._coreneuron, k) + setattr(self._coreneuron, k, v) + + def __exit__(self, exc_type, exc_val, exc_tb): + assert self._new_values is not None + assert self._old_values is not None + assert self._new_values.keys() == self._old_values.keys() + # Make sure we restore values in reverse order to how we set them. + # This is important for pairs like gpu and cell_permute that interact. + for k in reversed(self._new_values.keys()): + assert getattr(self._coreneuron, k) == self._new_values[k] + setattr(self._coreneuron, k, self._old_values[k]) + return False + + class coreneuron(object): """ CoreNEURON configuration values. @@ -9,15 +35,23 @@ class coreneuron(object): a class instead of a module allows getter/setter methods to be used, which lets us ensure its properties have consistent types and values. + This can also be used as a context manager to change CoreNEURON settings + only inside a particular scope. + Attributes ---------- cell_permute + model_path enable file_mode gpu prcellstate + sim_config verbose warp_balance + save_path + restore_path + skip_write_model_to_disk Examples -------- @@ -25,6 +59,12 @@ class coreneuron(object): >>> coreneuron.enable = True >>> coreneuron.enable True + + >>> coreneuron.enable = False + >>> with coreneuron(enable=True): + ... assert coreneuron.enable + ... 
coreneuron.enable + False """ def __init__(self): @@ -37,6 +77,25 @@ def __init__(self): self._verbose = 2 # INFO self._prcellstate = -1 self._model_stats = False + self._sim_config = None + self._model_path = None + self._save_path = None + self._restore_path = None + self._skip_write_model_to_disk = False + + def __call__(self, **kwargs): + """ + Yields a context manager helper that can be used in a with statement. + + This allows the syntax + with coreneuron(foo=bar): + assert coreneuron.foo == bar + assert coreneuron.foo == old_value + + Discarding the return value, or using it in any way other than in a + with statement, will have no effect. + """ + return CoreNEURONContextHelper(self, kwargs) def _default_cell_permute(self): return 1 if self._gpu else 0 @@ -162,6 +221,57 @@ def model_stats(self): def model_stats(self, value): self._model_stats = bool(value) + @property + def sim_config(self): + """Simulation config file.""" + return self._sim_config + + @sim_config.setter + def sim_config(self, value): + self._sim_config = str(value) + + @property + def model_path(self): + """Data path of the model.""" + return self._model_path + + @sim_config.setter + def model_path(self, value): + self._model_path = str(value) + + @property + def save_path(self): + """Data path for save.""" + return self._save_path + + @sim_config.setter + def save_path(self, value): + self._save_path = str(value) + + @property + def restore_path(self): + """Data path for restore.""" + return self._restore_path + + @sim_config.setter + def restore_path(self, value): + self._restore_path = str(value) + + @property + def skip_write_model_to_disk(self): + """Set internal flag to only simulate the model with CoreNEURON. + Avoids writing the coreneuron input data to the data_path in + CoreNEURON embedded mode when launched thourgh the NEURON Python + API. The coreneuron input data should already be there by calling + prior to pc.psolve() pc.nrncore_write() and CoreNEURON uses them + for launching the simulation. + """ + return self._skip_write_model_to_disk + + @sim_config.setter + def skip_write_model_to_disk(self, value): + self._skip_write_model_to_disk = value + def nrncore_arg(self, tstop): """ Return str that can be used for pc.nrncore_run(str) @@ -185,7 +295,12 @@ def nrncore_arg(self, tstop): if self._num_gpus: arg += " --num-gpus %d" % self._num_gpus if self._file_mode: - arg += " --datpath %s" % CORENRN_DATA_DIR + if self._model_path is not None: + arg += " --datpath %s" % self._model_path + else: + arg += " --datpath %s" % CORENRN_DATA_DIR + if self._skip_write_model_to_disk: + arg += " --skip-write-model-to-disk" arg += " --tstop %g" % tstop arg += " --cell-permute %d" % self.cell_permute if self._warp_balance > 0: @@ -195,6 +310,12 @@ def nrncore_arg(self, tstop): arg += " --verbose %d" % self.verbose if self._model_stats: arg += " --model-stats" + if self._save_path: + arg += " --checkpoint %s" % self._save_path + if self._restore_path: + arg += " --restore %s" % self._restore_path + if self._sim_config: + arg += " --read-config %s" % self._sim_config # args derived from current NEURON settings. 
pc = h.ParallelContext() diff --git a/share/lib/python/neuron/expect_hocerr.py b/share/lib/python/neuron/expect_hocerr.py index ead8bdf04f..1b60d89bae 100644 --- a/share/lib/python/neuron/expect_hocerr.py +++ b/share/lib/python/neuron/expect_hocerr.py @@ -41,7 +41,7 @@ def expect_hocerr(callable, args, sec=None): original_stderr = sys.stderr sys.stderr = my_stderr = StringIO() - err = 0 + err = False pyerrmes = False try: if sec: @@ -50,7 +50,7 @@ def expect_hocerr(callable, args, sec=None): callable(*args) printerr("expect_hocerr: no err for %s%s" % (str(callable), str(args))) except Exception as e: - err = 1 + err = True errmes = my_stderr.getvalue() if errmes: errmes = errmes.splitlines()[0] @@ -70,12 +70,12 @@ def expect_err(stmt): """ here = inspect.currentframe() caller = here.f_back - err = 0 + err = False checking(stmt) try: exec(stmt, caller.f_globals, caller.f_locals) printerr("expect_err: no err for-- " + stmt) except Exception as e: - err = 1 + err = True printerr(e) assert err diff --git a/share/lib/python/neuron/hclass3.py b/share/lib/python/neuron/hclass3.py index 2ee17f293f..63c3c74389 100644 --- a/share/lib/python/neuron/hclass3.py +++ b/share/lib/python/neuron/hclass3.py @@ -12,6 +12,12 @@ import sys +def _is_hoc_pytype(hoc_type): + return hoc_type is nrn.Section or ( + isinstance(hoc_type, type) and issubclass(hoc_type, hoc.HocObject) + ) + + def assert_not_hoc_composite(cls): """ Asserts that a class is not directly composed of multiple HOC types. @@ -58,8 +64,8 @@ class MyVector(myClassTemplate): omitted the name of the HOC type is used. :deprecated: Inherit from :class:`~neuron.HocBaseObject` instead. """ - if hoc_type == h.Section: - return nrn.Section + if _is_hoc_pytype(hoc_type): + return hoc_type if module_name is None: module_name = __name__ if name is None: @@ -90,23 +96,29 @@ class MyVector(neuron.HocBaseObject, hoc_type=neuron.h.Vector): """ def __init_subclass__(cls, hoc_type=None, **kwargs): - if hoc_type is not None: - if not isinstance(hoc_type, hoc.HocObject): + if _is_hoc_pytype(hoc_type): + cls_name = cls.__name__ + raise TypeError( + f"Using HocBaseObject with {cls_name} is deprecated." + f" Inherit directly from {cls_name} instead." + ) + if hoc_type is None: + if not hasattr(cls, "_hoc_type"): raise TypeError( - f"Class's `hoc_type` {hoc_type} is not a valid HOC type." + "Class keyword argument `hoc_type` is required for HocBaseObjects." ) + elif not isinstance(hoc_type, hoc.HocObject): + raise TypeError(f"Class's `hoc_type` {hoc_type} is not a valid HOC type.") + else: cls._hoc_type = hoc_type - elif not hasattr(cls, "_hoc_type"): - raise TypeError( - "Class keyword argument `hoc_type` is required for HocBaseObjects." - ) + # HOC type classes may not be composed of multiple hoc types assert_not_hoc_composite(cls) - hobj = hoc.HocObject - hbase = HocBaseObject - if _overrides(cls, hobj, "__init__") and not _overrides(cls, hbase, "__new__"): - # Subclasses that override `__init__` must also implement `__new__` to deal - # with the arguments that have to be passed into `HocObject.__new__`. - # See https://github.com/neuronsimulator/nrn/issues/1129 + # Subclasses that override `__init__` must also implement `__new__` to deal + # with the arguments that have to be passed into `HocObject.__new__`. + # See https://github.com/neuronsimulator/nrn/issues/1129 + if _overrides(cls, hoc.HocObject, "__init__") and not _overrides( + cls, HocBaseObject, "__new__" + ): raise TypeError( f"`{cls.__qualname__}` implements `__init__` but misses `__new__`. 
" + "Class must implement `__new__`" @@ -119,7 +131,7 @@ def __new__(cls, *args, **kwds): # To construct HOC objects within NEURON from the Python interface, we use the # C-extension module `hoc`. `hoc.HocObject.__new__` both creates an internal # representation of the object in NEURON, and hands us back a Python object that - # is linked to that internal representation. The `__new__` functions takes the + # is linked to that internal representation. The `__new__` function takes the # arguments that HOC objects of that type would take, and uses the `hocbase` # keyword argument to determine which type of HOC object to create. The `sec` # keyword argument can be passed along in case the construction of a HOC object diff --git a/share/lib/python/neuron/rxd/__init__.py b/share/lib/python/neuron/rxd/__init__.py index 6685bd28fb..5b2eb0d8c9 100644 --- a/share/lib/python/neuron/rxd/__init__.py +++ b/share/lib/python/neuron/rxd/__init__.py @@ -1,9 +1,5 @@ from .rxdException import RxDException -# import sys -# if 'neuron.rxd' in sys.modules: -# raise RxDException('NEURON CRxD module cannot be used with NEURON RxD module.') - from . import rxd, constants from .species import Species, Parameter, State from .region import Region, Extracellular @@ -31,6 +27,9 @@ MultipleGeometry, ) from .plugins import set_solver +import neuron + +neuron._userrxd = True # deprecated: # from geometry import ConstantArea, ConstantVolume diff --git a/share/lib/python/neuron/rxd/constants.py b/share/lib/python/neuron/rxd/constants.py index a39d6c3299..a318a5dc6d 100644 --- a/share/lib/python/neuron/rxd/constants.py +++ b/share/lib/python/neuron/rxd/constants.py @@ -1,5 +1,3 @@ -# Avogadro's number (approximation before 2019 redefinition) -NA_legacy = 6.02214129e23 NA_modern = 6.02214076e23 @@ -7,10 +5,7 @@ def NA(): try: from neuron import h - # val = NA_legacy if h.nrnunit_use_legacy() else NA_modern val = h.Avogadro_constant - # Note: h.Avogadro_constant is consistent with the above NA legacy and - # modern values. except: val = NA_modern return val @@ -18,6 +13,6 @@ def NA(): def molecules_per_mM_um3(): # converting from mM um^3 to molecules - # = 6.02214129e23 * 1000. / 1.e18 / 1000 + # = 6.02214076e23 * 1000. / 1.e18 / 1000 # = avogadro * (L / m^3) * (m^3 / um^3) * (mM / M) return NA() / 1e18 diff --git a/share/lib/python/neuron/rxd/geometry3d/FullJoinMorph.py b/share/lib/python/neuron/rxd/geometry3d/FullJoinMorph.py index 34553b1fa2..5f4d24478a 100644 --- a/share/lib/python/neuron/rxd/geometry3d/FullJoinMorph.py +++ b/share/lib/python/neuron/rxd/geometry3d/FullJoinMorph.py @@ -22,34 +22,18 @@ def find_parent_seg(join, sdict, objects): - - if not join: - return None - elif join[0] not in objects: - pseg = sdict[ - ( - join[0]._x0, - join[0]._y0, - join[0]._z0, - join[0]._x1, - join[0]._y1, - join[0]._z1, - ) - ] - # better be all in same cell; so just set root once - h.distance(0, h.SectionRef(sec=pseg.sec).root(0)) - closest = h.distance(pseg) - - # any other possible instance? 
- + root = None + pseg = None + closest = float("inf") for item in join: if item not in objects: s = sdict[(item._x0, item._y0, item._z0, item._x1, item._y1, item._z1)] - d = h.distance(s) + if root is None: + root = h.SectionRef(sec=s.sec).root(0) + d = h.distance(root, s) if d < closest: pseg = s closest = d - return pseg @@ -69,9 +53,6 @@ def fullmorph(source, dx, soma_step=100, mesh_grid=None, relevant_pts=None): """Input: object source; arguments to pass to ctng Output: all voxels with SA and volume associated, categorized by segment""" source = list(source) - morphology = constructive_neuronal_geometry( - source, soma_step, dx, relevant_pts=relevant_pts - ) ( join_objects, cones, @@ -79,7 +60,7 @@ def fullmorph(source, dx, soma_step=100, mesh_grid=None, relevant_pts=None): join_groups, object_pts, soma_objects, - ) = morphology + ) = constructive_neuronal_geometry(source, soma_step, dx, relevant_pts=relevant_pts) # grid setup if mesh_grid: diff --git a/share/lib/python/neuron/rxd/geometry3d/surfaces.pyx b/share/lib/python/neuron/rxd/geometry3d/surfaces.pyx index a62316da97..141adf7899 100644 --- a/share/lib/python/neuron/rxd/geometry3d/surfaces.pyx +++ b/share/lib/python/neuron/rxd/geometry3d/surfaces.pyx @@ -56,7 +56,7 @@ def contains_surface(i, j, k, objdist, xs, ys, zs, dx, r_inner, r_outer, reject_ if d <= r_inner: return True if d >= r_outer and reject_if_outside: if print_reject_reason: - print 'would normally reject because at (%g, %g, %g): d = %g, r_outer = %g (dx = %g)' % (xbar, ybar, zbar, d, r_outer, dx) + print('would normally reject because at (%g, %g, %g): d = %g, r_outer = %g (dx = %g)' % (xbar, ybar, zbar, d, r_outer, dx)) else: return False @@ -70,7 +70,7 @@ def contains_surface(i, j, k, objdist, xs, ys, zs, dx, r_inner, r_outer, reject_ for z in zs[k : k + 2]: d = objdist(x, y, z) if print_reject_reason: - print 'at (%g, %g, %g): d = %g' % (x, y, z, d) + print('at (%g, %g, %g): d = %g' % (x, y, z, d)) if d <= 0: has_neg = True if d >= 0: @@ -103,13 +103,13 @@ def process_cell(int i, int j, int k, list objects, numpy.ndarray[numpy.float_t, cdef double value0, value1, value2, value3, value4, value5, value6, value7 value0, value1, value2, value3, value4, value5, value6, value7 = [min([objdist(*p) for objdist in objects]) for p in position] if print_values: - print '(x, y, z) = (%7f, %7f, %7f); (x1, y1, z1) = (%7f, %7f, %7f)' % (x, y, z, x1, y1, z1) - print '%7f %7f %7f %7f %7f %7f %7f %7f' % (value0, value1, value2, value3, value4, value5, value6, value7) - print 'last obj distance to position[4]: ', objects[len(objects)-1](*position[4]) - print 'distance to position[4] with everything but the last object:', min([objdist(*position[4]) for objdist in objects[:len(objects)-1]]) - print 'distance to position[4] with everything:', min([objdist(*position[4]) for objdist in objects[:]]) - print 'last object:', objects[len(objects) - 1] - print 'position[4]:', position[4] + print('(x, y, z) = (%7f, %7f, %7f); (x1, y1, z1) = (%7f, %7f, %7f)' % (x, y, z, x1, y1, z1)) + print('%7f %7f %7f %7f %7f %7f %7f %7f' % (value0, value1, value2, value3, value4, value5, value6, value7)) + print('last obj distance to position[4]: ', objects[len(objects)-1](*position[4])) + print('distance to position[4] with everything but the last object:', min([objdist(*position[4]) for objdist in objects[:len(objects)-1]])) + print('distance to position[4] with everything:', min([objdist(*position[4]) for objdist in objects[:]])) + print('last object:', objects[len(objects) - 1]) + print('position[4]:', 
position[4]) new_index = start + 9 * find_triangles(value0, value1, value2, value3, value4, value5, value6, value7, x, x1, y, y1, z, z1, &tridata[start]) if store_areas: @@ -137,7 +137,7 @@ def volume_inside_cell(int i, int j, int k, list objects, numpy.ndarray[numpy.fl cdef double x, y, z cdef int i1, j1, k1 if not objects: - print 'grr... it thinks there is surface when no nearby objects.' + print('grr... it thinks there is surface when no nearby objects.') for i, x in zip([1, 2], [x0, x1]): for j, y in zip([1, 2], [y0, y1]): for k, z in zip([1, 2], [z0, z1]): @@ -388,18 +388,18 @@ cpdef _triangulate_surface_given_chunks(list objects, xs, ys, zs, internal_membr # this an the two-above commented-out lines are for debugging to detect discrepancies between the # chunked partitioning and using all the nodes if starti - last_starti != len(tri_data) or any(tri_data != triangles[last_starti : starti]): - print 'discrepancy in grid (%d, %d, %d) -- chunk_size = %d' % (i, j, k, chunk_size) + print('discrepancy in grid (%d, %d, %d) -- chunk_size = %d' % (i, j, k, chunk_size)) # identify which object(s)... grr... """ for m in xrange(len(objects)): tri_data2 = list(triangles[last_starti : starti]) starti = process_cell(i, j, k, objs + objects_distances[: m], xs, ys, zs, triangles, last_starti) #was objs if starti - last_starti != len(tri_data2) or any(tri_data2 != triangles[last_starti : starti]): - print ' missed object %d: %r' % (m, objects[m - 1]) + print()' missed object %d: %r' % (m, objects[m - 1])) if starti - last_starti == len(tri_data) and all(tri_data == triangles[last_starti : starti]): break else: - print ' *** should never get here ***'""" + print(' *** should never get here ***'""") ''' # some regions of the surface may yet be missing @@ -468,10 +468,10 @@ cpdef _triangulate_surface_given_chunks(list objects, xs, ys, zs, internal_membr if local_objs: for m, objdist in enumerate(local_objs): if contains_surface(i, j, k, objdist, xs, ys, zs, dx, r_inner, r_outer, False): - print 'item %d in grid(%d, %d, %d) contains previously undetected surface' % (m, i, j, k) + print('item %d in grid(%d, %d, %d) contains previously undetected surface' % (m, i, j, k)) for n, obj in enumerate(objects): if obj.distance == objdist: - print ' (i.e. global item %d: %r)' % (n, obj) + print(' (i.e. global item %d: %r)' % (n, obj)) break #starti = process_cell(i, j, k, local_objs, xs, ys, zs, triangles, starti) # was objects_distances starti = process_cell(i, j, k, local_objs, xs, ys, zs, triangles, starti, store_areas=store_areas, areas=areas) @@ -500,7 +500,7 @@ cpdef double _tri_area(numpy.ndarray[numpy.float_t, ndim=1] triangles, int lo, i local_area = llgramarea(&triangles[i], &triangles[3 + i], &triangles[6 + i]) doublearea += local_area if numpy.isnan(local_area): - print 'tri_area exception: ', ', '.join([str(v) for v in triangles[i : i + 9]]) + print('tri_area exception: ', ', '.join([str(v) for v in triangles[i : i + 9]])) return doublearea * 0.5 @cython.boundscheck(False) @@ -512,8 +512,8 @@ cpdef double tri_volume(numpy.ndarray[numpy.float_t, ndim=1] triangles): local_vol = llpipedfromoriginvolume(&triangles[i], &triangles[3 + i], &triangles[6 + i]) sixtimesvolume += local_vol if numpy.isnan(local_vol): - print 'tri_volume exception:', + print('tri_volume exception:',) for j in range(i, i + 9): - print triangles[j], - print + print(triangles[j]), + return abs(sixtimesvolume / 6.) 
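The rewritten `find_parent_seg` above drops the special-casing of `join[0]` and instead measures each candidate's path distance from the cell root explicitly, keeping the closest one. A minimal sketch of that pattern, assuming `candidates` is an iterable of segments from a single cell (the helper name is illustrative, not part of the NEURON API):

```python
from neuron import h

def closest_to_root(candidates):
    """Return the candidate segment with the smallest path distance to the cell root."""
    root = None
    best, best_dist = None, float("inf")
    for seg in candidates:
        if root is None:
            # root segment (x = 0) of the tree the first candidate belongs to
            root = h.SectionRef(sec=seg.sec).root(0)
        d = h.distance(root, seg)  # path distance between the two segments
        if d < best_dist:
            best, best_dist = seg, d
    return best
```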
diff --git a/share/lib/python/neuron/rxd/initializer.py b/share/lib/python/neuron/rxd/initializer.py index 1db3a2faab..40f7c1eb86 100644 --- a/share/lib/python/neuron/rxd/initializer.py +++ b/share/lib/python/neuron/rxd/initializer.py @@ -14,16 +14,17 @@ def _do_ion_register(): _init_lock = RLock() has_initialized = False +is_initializing = False def _do_init(): - global has_initialized, _init_lock + global has_initialized, is_initializing, _init_lock with _init_lock: - if not has_initialized: + if not has_initialized and not is_initializing: from . import species, region, rxd if len(species._all_species) > 0: - has_initialized = True + is_initializing = True # TODO: clean this up so not repetitive; can't do it super cleanly because of the multiple phases of species for obj in region._all_regions: obj = obj() @@ -55,6 +56,8 @@ def _do_init(): if obj is not None: obj._do_init() rxd._init() + has_initialized = True + is_initializing = False def is_initialized(): diff --git a/share/lib/python/neuron/rxd/node.py b/share/lib/python/neuron/rxd/node.py index 4f274541e5..0a67c60caa 100644 --- a/share/lib/python/neuron/rxd/node.py +++ b/share/lib/python/neuron/rxd/node.py @@ -71,10 +71,14 @@ def _remove(start, stop): # remove _node_flux newflux = {"index": [], "type": [], "source": [], "scale": [], "region": []} - for (i, idx) in enumerate(_node_fluxes["index"]): - if idx not in dels: + for i, (idx, typ) in enumerate(zip(_node_fluxes["index"], _node_fluxes["type"])): + if typ != -1 or idx not in dels: for key in _node_fluxes: newflux[key].append(_node_fluxes[key][i]) + newflux["index"] = [ + idx - (stop - start) if typ == -1 and idx > start else idx + for idx, typ in zip(newflux["index"], newflux["type"]) + ] _node_fluxes = newflux _has_node_fluxes = _node_fluxes["index"] != [] @@ -108,8 +112,8 @@ def _replace(old_offset, old_nseg, new_offset, new_nseg): _states = numpy.delete(_states, list(range(start, stop))) # update _node_flux index - for (i, idx) in enumerate(_node_fluxes["index"]): - if idx in dels: + for i, (idx, typ) in enumerate(zip(_node_fluxes["index"], _node_fluxes["type"])): + if typ == -1 and idx in dels: j = int(((idx + 0.5) / new_nseg) * old_nseg) _node_fluxes["index"][i] = j @@ -625,17 +629,20 @@ def satisfies(self, condition): and int((z - mesh["zlo"]) / mesh["dz"]) == self._k ) # check for a position condition so as to provide a more useful error + checked_for_normalized_position = False try: if 0 <= condition <= 1: # TODO: the trouble here is that you can't do this super-directly based on x # the way to do this is to find the minimum and maximum x values contained in the grid # the extra difficulty with that is to handle boundary cases correctly # (to test, consider a section 1 node wide by 15 discretized pieces long, access at 1./15, 2./15, etc...) 
- raise RxDException( - "selecting nodes by normalized position not yet supported for 3D nodes; see comments in source about how to fix this" - ) + checked_for_normalized_position = True except: pass + if checked_for_normalized_position: + raise RxDException( + "selecting nodes by normalized position not yet supported for 3D nodes; see comments in source about how to fix this" + ) raise RxDException("unrecognized node condition: %r" % condition) @property diff --git a/share/lib/python/neuron/rxd/nodelist.py b/share/lib/python/neuron/rxd/nodelist.py index 88f0c9510f..8a463e6858 100644 --- a/share/lib/python/neuron/rxd/nodelist.py +++ b/share/lib/python/neuron/rxd/nodelist.py @@ -1,10 +1,19 @@ from .rxdException import RxDException +from .node import Node +import types +from collections import abc class NodeList(list): def __init__(self, items): """Constructs a NodeList from items, a python iterable containing Node objects.""" - list.__init__(self, items) + if isinstance(items, abc.Generator) or isinstance(items, abc.Iterator): + items = list(items) + + if items == [] or all(isinstance(item, Node) for item in items): + list.__init__(self, items) + else: + raise TypeError("Items must be nodes.") def __call__(self, restriction): """returns a sub-NodeList consisting of nodes satisfying restriction""" @@ -16,6 +25,30 @@ def __getitem__(self, key): else: return list.__getitem__(self, key) + def __setitem__(self, index, value): + if not isinstance(value, Node): + raise TypeError("Only assign a node to the list") + super().__setitem__(index, value) + + def append(self, items): + if not isinstance(items, Node): + raise TypeError("The append item must be a Node.") + super().append(items) + + def extend(self, items): + if isinstance(items, abc.Generator) or isinstance(items, abc.Iterator): + items = list(items) + + for item in items: + if not isinstance(item, Node): + raise TypeError("The extended items must all be Nodes.") + super().extend(items) + + def insert(self, position, items): + if not isinstance(items, Node): + raise TypeError("The item inserted must be a Node.") + super().insert(position, items) + @property def value(self): # TODO: change this when not everything is a concentration @@ -88,7 +121,8 @@ def diff(self, value): def include_flux(self, *args, **kwargs): for node in self: - node.include_flux(args, kwargs) + # Unpack arguments for each individual call + node.include_flux(*args, **kwargs) def value_to_grid(self): """Returns a regular grid with the values of the 3d nodes in the list. 
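The `NodeList` changes above turn it into a list that only accepts rxd `Node` objects: the constructor, item assignment, `append`, `extend` and `insert` all raise `TypeError` for anything else, and `include_flux` now unpacks its arguments when forwarding them to each node. A minimal usage sketch, assuming a trivial rxd model (the section and species names are illustrative):

```python
from neuron import h, rxd

soma = h.Section(name="soma")
cyt = rxd.Region([soma], nrn_region="i")
ca = rxd.Species(cyt, name="ca", charge=2, initial=1e-4)

nodes = ca.nodes              # a NodeList of rxd Node objects
nodes.extend(ca.nodes[0:1])   # fine: the extended items are Nodes

try:
    nodes.append(3.14)        # rejected: not a Node
except TypeError as err:
    print(err)                # "The append item must be a Node."
```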
diff --git a/share/lib/python/neuron/rxd/rangevar.py b/share/lib/python/neuron/rxd/rangevar.py index 6e978611ed..e29d48c0af 100644 --- a/share/lib/python/neuron/rxd/rangevar.py +++ b/share/lib/python/neuron/rxd/rangevar.py @@ -6,10 +6,6 @@ _h_ptrvector = h.PtrVector -def _donothing(): - pass - - class RangeVar: def __init__(self, name): self._name = name @@ -31,7 +27,6 @@ def _init_ptr_vectors(self, nodes): locs_append(node._index) self._locs = numpy.array(locs) pv = _h_ptrvector(len(ptrs)) - pv.ptr_update_callback(_donothing) pv_pset = pv.pset for i, ptr in enumerate(ptrs): pv_pset(i, ptr) diff --git a/share/lib/python/neuron/rxd/rxd.py b/share/lib/python/neuron/rxd/rxd.py index d9eecb06f1..e5f62e8a51 100644 --- a/share/lib/python/neuron/rxd/rxd.py +++ b/share/lib/python/neuron/rxd/rxd.py @@ -254,8 +254,6 @@ def byeworld(): last_diam_change_cnt = None last_structure_change_cnt = None -last_nrn_legacy_units = h.nrnunit_use_legacy() - _all_reactions = [] @@ -393,7 +391,6 @@ def _setup_memb_currents(): num = len(curr_ptrs) if num: curr_ptr_vector = _h_ptrvector(num) - curr_ptr_vector.ptr_update_callback(_donothing) for i, ptr in enumerate(curr_ptrs): curr_ptr_vector.pset(i, ptr) curr_ptr_storage_nrn = _h_vector(num) @@ -570,15 +567,9 @@ def _cxx_compile(formula): _diam_change_count = nrn_dll_sym("diam_change_cnt", _ctypes_c_int) -def _donothing(): - pass - - def _setup_units(force=False): - global last_nrn_legacy_units if initializer.is_initialized(): - if force or last_nrn_legacy_units != h.nrnunit_use_legacy(): - last_nrn_legacy_units = h.nrnunit_use_legacy() + if force: clear_rates() _setup_memb_currents() _compile_reactions() @@ -622,8 +613,10 @@ def _update_node_data(force=False, newspecies=False): # TODO: separate compiling reactions -- so the indices can be updated without recompiling _include_flux(True) _setup_units(force=True) - - # end#if + else: + # don't call _setup_memb_currents if nsegs changed -- because + # it is called by change units. 
+ _setup_memb_currents() def _matrix_to_rxd_sparse(m): diff --git a/share/lib/python/neuron/rxd/section1d.py b/share/lib/python/neuron/rxd/section1d.py index 52e31546ba..8ff325c277 100644 --- a/share/lib/python/neuron/rxd/section1d.py +++ b/share/lib/python/neuron/rxd/section1d.py @@ -12,10 +12,6 @@ _last_c_ptr_length = None -def _donothing(): - pass - - class _SectionLookup: class Lookup: def __init__(self): @@ -118,7 +114,6 @@ def _transfer_to_legacy(): if _last_c_ptr_length != size: if size: _c_ptr_vector = h.PtrVector(size) - _c_ptr_vector.ptr_update_callback(_donothing) for i, ptr in enumerate(_all_cptrs): _c_ptr_vector.pset(i, ptr) _c_ptr_vector_storage_nrn = h.Vector(size) diff --git a/share/lib/python/neuron/rxdtests/do_test.py b/share/lib/python/neuron/rxdtests/do_test.py index 70dbafb052..aeadbf7c70 100644 --- a/share/lib/python/neuron/rxdtests/do_test.py +++ b/share/lib/python/neuron/rxdtests/do_test.py @@ -23,7 +23,6 @@ def do_test(test_to_run, results_location, num_record=10): import itertools - h.nrnunit_use_legacy(True) data = {"record_count": 0, "data": []} do_test.data = data record_count = 0 @@ -60,9 +59,14 @@ def collect_data(): data["data"] = [] data["record_count"] = 1 # remove previous record if h.t is the same - if data["record_count"] > 1 and h.t == data["data"][-len(local_data)]: - data["record_count"] -= 1 - del data["data"][-len(local_data) :] + if data["record_count"] > 1: + if len(local_data) > len(data["data"]): + # model changed -- reset data collection + data["data"] = [] + data["record_count"] = 1 + elif h.t == data["data"][-len(local_data)]: + data["record_count"] -= 1 + del data["data"][-len(local_data) :] # add new data record data["data"].extend(local_data) # print correct record length diff --git a/share/lib/python/neuron/rxdtests/readme.txt b/share/lib/python/neuron/rxdtests/readme.txt index 7fd0377090..25eb9e6247 100644 --- a/share/lib/python/neuron/rxdtests/readme.txt +++ b/share/lib/python/neuron/rxdtests/readme.txt @@ -27,4 +27,4 @@ Notes: guarantees are made about the suitability of these files for self-learning. 
For more traditional Reaction-Diffusion tutorials see - http://neuron.yale.edu/neuron/static/docs/rxd/index.html + https://nrn.readthedocs.io/en/latest/rxd-tutorials/index.html diff --git a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions.py b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions.py index a657ef684c..5034a10f18 100644 --- a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions.py +++ b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions.py @@ -1,5 +1,5 @@ # Example copied from the RxD tutorial -# http://www.neuron.yale.edu/neuron/static/docs/rxd/index.html +# https://nrn.readthedocs.io/en/latest/rxd-tutorials/index.html from neuron import rxd, h, gui import numpy diff --git a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_del.py b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_del.py index a62d5c630c..e710a2e8ad 100644 --- a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_del.py +++ b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_del.py @@ -1,6 +1,6 @@ # Example copied from the RxD tutorial -# http://www.neuron.yale.edu/neuron/static/docs/rxd/index.html -from neuron import crxd as rxd, h, gui +# https://nrn.readthedocs.io/en/latest/rxd-tutorials/index.html +from neuron import rxd, h, gui import numpy sec = h.Section() diff --git a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_with_v.py b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_with_v.py index 6d6df43931..1a30c81d21 100644 --- a/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_with_v.py +++ b/share/lib/python/neuron/rxdtests/tests/multicompartment_reactions_with_v.py @@ -1,5 +1,5 @@ # Example copied from the RxD tutorial -# http://www.neuron.yale.edu/neuron/static/docs/rxd/index.html +# https://nrn.readthedocs.io/en/latest/rxd-tutorials/index.html from neuron import crxd as rxd, h, gui from neuron.crxd import v import numpy diff --git a/share/lib/python/neuron/tests/test_vector.py b/share/lib/python/neuron/tests/test_vector.py index efd5c672af..ee92f0bb27 100644 --- a/share/lib/python/neuron/tests/test_vector.py +++ b/share/lib/python/neuron/tests/test_vector.py @@ -101,9 +101,61 @@ def testNumpyInteraction(self): except: pass + def testNegativeIndex(self): + l = [i for i in range(10)] + v = h.Vector(l) + assert v[-3] == l[-3], "v[-3] Failed" + v[-3] = 42 + l[-3] = 42 + assert v[-3] == l[-3], "Setting v[-3] Failed" + + def testSlicing(self): + l = [i for i in range(10)] + v = h.Vector(l) + assert list(v[2:6]) == l[2:6], "v[2:6] Failed" + assert list(v[-3:-1]) == l[-3:-1], "v[-3:-1] Failed" + assert list(v[::2]) == l[::2], "v[::2] Failed" + assert list(v[::-2]) == l[::-2], "v[::-2] Failed" + assert list(v[3:6:2]) == l[3:6:2], "v[3:6] Failed" + assert list(v[7:-9:-2]) == l[7:-9:-2], "v[7:-9:-2] Failed" + assert list(v[-1::-3]) == l[-1::-3], "v[-1::-3] Failed" + assert list(v[-2::-6]) == l[-2::-6], "v[-2::-6] Failed" + assert list(v[7:-1:-2]) == l[7:-1:-2], "v[-2::-6] Failed" + assert list(v[3:2:-2]) == l[3:2:-2], "v[3:2:-2] Failed" + + def testAssignmentSlicing(self): + l = [i for i in range(10)] + v = h.Vector(l) + v[2:4] = [12, 13] + l[2:4] = [12, 13] + assert list(v[2:4]) == l[2:4], "v[2:4] Failed" + v[-3:-1] = [-9, -9] + l[-3:-1] = [-9, -9] + assert list(v[-3:-1]) == l[-3:-1], "v[-3:-1] Failed" + v[::2] = [_ for _ in range(277, 282)] + l[::2] = [_ for _ in range(277, 282)] + assert list(v[::2]) == l[::2], "v[::2] Failed" + v[::-2] 
= [_ for _ in range(-377, -372)] + l[::-2] = [_ for _ in range(-377, -372)] + assert list(v[::-2]) == l[::-2], "v[::-2] Failed" + v[3:6:2] = [-123, -456] + l[3:6:2] = [-123, -456] + assert list(v[3:6:2]) == l[3:6:2], "v[3:6] Failed" + v2 = h.Vector(x for x in range(10, 20)) + v[3:8] = v2[3:8] + assert list(v[3:8]) == list(v2[3:8]), "v[3:8] = v2[3:8] Failed" + + def testErrorHandling(self): + l = [i for i in range(10)] + v = h.Vector(l) + # Input that is too short or long should raise an IndexError + with self.assertRaises(IndexError): + v[0:3] = [55] + with self.assertRaises(IndexError): + v[3:7] = (x for x in range(100, 120)) -def suite(): +def suite(): suite = unittest.makeSuite(VectorTestCase, "test") return suite diff --git a/share/lib/python/neuron/tests/utils/__init__.py b/share/lib/python/neuron/tests/utils/__init__.py index 69340f4001..1dd7903446 100644 --- a/share/lib/python/neuron/tests/utils/__init__.py +++ b/share/lib/python/neuron/tests/utils/__init__.py @@ -86,6 +86,19 @@ def cvode_use_long_double(cv, enabled): cv.use_long_double(old_setting) +@contextmanager +def fast_imem(enabled): + from neuron import h + + cvode = h.CVode() + old_setting = cvode.use_fast_imem() + cvode.use_fast_imem(enabled) + try: + yield None + finally: + cvode.use_fast_imem(old_setting) + + @contextmanager def hh_table_disabled(): """ diff --git a/share/lib/python/neuron/tests/utils/capture_stdout.py b/share/lib/python/neuron/tests/utils/capture_stdout.py new file mode 100644 index 0000000000..17a60bf5ea --- /dev/null +++ b/share/lib/python/neuron/tests/utils/capture_stdout.py @@ -0,0 +1,25 @@ +import sys, io, inspect, hashlib + + +def capture_stdout(stmt, md5=False): + """ + Some hoc functions print to stdout. + This provides a way to capture in a string. E.g. for test comparisons. + e.g. 
+ s = capture_stdout("h.topology()") + md5hash = capture_stdout("h.topology()", True) + + unfortunately, does not work when nrniv is launched + """ + + oldstdout = sys.stdout + sys.stdout = mystdout = io.StringIO() + here = inspect.currentframe() + caller = here.f_back + try: + exec(stmt, caller.f_globals, caller.f_locals) + finally: + sys.stdout = oldstdout + if md5: + return hashlib.md5(mystdout.getvalue().encode("utf-8")).hexdigest() + return mystdout.getvalue() diff --git a/share/lib/python/neuron/tests/utils/checkresult.py b/share/lib/python/neuron/tests/utils/checkresult.py index 9c742c2a3e..4a39f434a7 100644 --- a/share/lib/python/neuron/tests/utils/checkresult.py +++ b/share/lib/python/neuron/tests/utils/checkresult.py @@ -1,6 +1,6 @@ import json import math -from neuron import h +from neuron import h, hoc class Chk: @@ -39,7 +39,7 @@ def __call__(self, key, value, tol=0.0): """ if key in self.d: - if type(value) == type(h.Vector): # actually hoc.HocObject + if isinstance(value, hoc.Vector): # Convert to list to keep the `equal` method below simple value = list(value) # Hand-rolled comparison that uses `tol` for arithmetic values @@ -75,7 +75,7 @@ def equal(a, b): assert match else: print("{} added {}".format(self, key)) - if type(value) == type(h.Vector): # actually hoc.HocObject + if isinstance(value, hoc.Vector): self.d[key] = value.to_python() else: self.d[key] = value diff --git a/share/lib/python/scripts/_binwrapper.py b/share/lib/python/scripts/_binwrapper.py index dd256311cd..5fd1ca16d1 100755 --- a/share/lib/python/scripts/_binwrapper.py +++ b/share/lib/python/scripts/_binwrapper.py @@ -11,10 +11,11 @@ from setuptools.command.build_ext import new_compiler from packaging.version import Version from sysconfig import get_config_vars, get_config_var +from find_libpython import find_libpython def _customize_compiler(compiler): - """Do platform-sepcific customizations of compilers on unix platforms.""" + """Do platform-specific customizations of compilers on unix platforms.""" if compiler.compiler_type == "unix": (cc, cxx, cflags) = get_config_vars("CC", "CXX", "CFLAGS") if "CC" in os.environ: @@ -66,13 +67,7 @@ def _config_exe(exe_name): package_name = "neuron" # determine package to find the install location - if "neuron-gpu-nightly" in working_set.by_key: - print("INFO : Using neuron-gpu-nightly Package (Alpha Developer Version)") - package_name = "neuron-gpu-nightly" - elif "neuron-gpu" in working_set.by_key: - print("INFO : Using neuron-gpu Package (Alpha Version)") - package_name = "neuron-gpu" - elif "neuron-nightly" in working_set.by_key: + if "neuron-nightly" in working_set.by_key: print("INFO : Using neuron-nightly Package (Developer Version)") package_name = "neuron-nightly" elif "neuron" in working_set.by_key: @@ -91,6 +86,11 @@ def _config_exe(exe_name): os.environ["CORENRN_PERLEXE"] = shutil.which("perl") os.environ["NRNBIN"] = os.path.dirname(__file__) + if "NMODLHOME" not in os.environ: + os.environ["NMODLHOME"] = NRN_PREFIX + if "NMODL_PYLIB" not in os.environ: + os.environ["NMODL_PYLIB"] = find_libpython() + _set_default_compiler() return os.path.join(NRN_PREFIX, "bin", exe_name) diff --git a/share/lib/python/scripts/nmodl b/share/lib/python/scripts/nmodl new file mode 120000 index 0000000000..cf9ff4fba4 --- /dev/null +++ b/share/lib/python/scripts/nmodl @@ -0,0 +1 @@ +_binwrapper.py \ No newline at end of file diff --git a/src/coreneuron/CMakeLists.txt b/src/coreneuron/CMakeLists.txt index de2c63d222..561b9896e3 100644 --- a/src/coreneuron/CMakeLists.txt +++ 
b/src/coreneuron/CMakeLists.txt @@ -33,19 +33,15 @@ option(CORENRN_ENABLE_OPENMP "Build the CORE NEURON with OpenMP implementation" option(CORENRN_ENABLE_OPENMP_OFFLOAD "Prefer OpenMP target offload to OpenACC" ON) option(CORENRN_ENABLE_TIMEOUT "Enable nrn_timeout implementation" ON) option(CORENRN_ENABLE_REPORTING "Enable use of libsonata for soma reports" OFF) -option(CORENRN_ENABLE_MPI "Enable MPI-based execution" ON) -option(CORENRN_ENABLE_MPI_DYNAMIC "Enable dynamic MPI support" OFF) option(CORENRN_ENABLE_HOC_EXP "Enable wrapping exp with hoc_exp()" OFF) option(CORENRN_ENABLE_SPLAYTREE_QUEUING "Enable use of Splay tree for spike queuing" ON) option(CORENRN_ENABLE_NET_RECEIVE_BUFFER "Enable event buffering in net_receive function" ON) -option(CORENRN_ENABLE_NMODL "Enable external nmodl source-to-source compiler" OFF) option(CORENRN_ENABLE_CALIPER_PROFILING "Enable Caliper instrumentation" OFF) option(CORENRN_ENABLE_LIKWID_PROFILING "Enable LIKWID instrumentation" OFF) option(CORENRN_ENABLE_CUDA_UNIFIED_MEMORY "Enable CUDA unified memory support" OFF) option(CORENRN_ENABLE_UNIT_TESTS "Enable unit tests execution" ON) option(CORENRN_ENABLE_GPU "Enable GPU support using OpenACC or OpenMP" OFF) option(CORENRN_ENABLE_SHARED "Enable shared library build" ON) -option(CORENRN_ENABLE_LEGACY_UNITS "Enable legacy FARADAY, R, etc" OFF) option(CORENRN_ENABLE_PRCELLSTATE "Enable NRN_PRCELLSTATE debug feature" OFF) set(CORENRN_NMODL_DIR @@ -129,15 +125,10 @@ if(CORENRN_ENABLE_GPU) "${CMAKE_CUDA_FLAGS} --expt-relaxed-constexpr -Xcudafe --diag_suppress=3057,--diag_suppress=3085" ) - if(CORENRN_ENABLE_NMODL) - # NMODL supports both OpenACC and OpenMP target offload - if(CORENRN_ENABLE_OPENMP AND CORENRN_ENABLE_OPENMP_OFFLOAD) - set(CORENRN_ACCELERATOR_OFFLOAD "OpenMP") - else() - set(CORENRN_ACCELERATOR_OFFLOAD "OpenACC") - endif() + # NMODL supports both OpenACC and OpenMP target offload + if(CORENRN_ENABLE_OPENMP AND CORENRN_ENABLE_OPENMP_OFFLOAD) + set(CORENRN_ACCELERATOR_OFFLOAD "OpenMP") else() - # MOD2C only supports OpenACC offload set(CORENRN_ACCELERATOR_OFFLOAD "OpenACC") endif() endif() @@ -175,11 +166,9 @@ endif() # ============================================================================= # Build option specific compiler flags # ============================================================================= -if(CORENRN_ENABLE_NMODL) - # We use Eigen for "small" matrices with thread-level parallelism handled at a higher level; tell - # Eigen not to try to multithread internally - list(APPEND CORENRN_COMPILE_DEFS EIGEN_DONT_PARALLELIZE) -endif() +# We use Eigen for "small" matrices with thread-level parallelism handled at a higher level; tell +# Eigen not to try to multithread internally +list(APPEND CORENRN_COMPILE_DEFS EIGEN_DONT_PARALLELIZE) if(CORENRN_HAVE_NVHPC_COMPILER) # PGI with llvm code generation doesn't have necessary assembly intrinsic headers list(APPEND CORENRN_COMPILE_DEFS EIGEN_DONT_VECTORIZE=1) @@ -191,11 +180,6 @@ if(CORENRN_HAVE_NVHPC_COMPILER) # problem. If GPU support is disabled, we define R123_USE_INTRIN_H=0 to avoid the problem. list(APPEND CORENRN_COMPILE_DEFS R123_USE_INTRIN_H=0) endif() - # CMake versions <3.19 used to add -A when using NVHPC/PGI, which makes the compiler excessively - # pedantic. See https://gitlab.kitware.com/cmake/cmake/-/issues/20997. 
- if(CMAKE_VERSION VERSION_LESS 3.19) - list(REMOVE_ITEM CMAKE_CXX17_STANDARD_COMPILE_OPTION -A) - endif() endif() if(CORENRN_ENABLE_SHARED) @@ -204,17 +188,15 @@ else() set(COMPILE_LIBRARY_TYPE "STATIC") endif() -if(CORENRN_ENABLE_MPI) - find_package(MPI REQUIRED) +if(NRN_ENABLE_MPI) list(APPEND CORENRN_COMPILE_DEFS NRNMPI=1) - # avoid linking to C++ bindings - list(APPEND CORENRN_COMPILE_DEFS MPI_NO_CPPBIND=1) - list(APPEND CORENRN_COMPILE_DEFS OMPI_SKIP_MPICXX=1) - list(APPEND CORENRN_COMPILE_DEFS MPICH_SKIP_MPICXX=1) else() list(APPEND CORENRN_COMPILE_DEFS NRNMPI=0) list(APPEND CORENRN_COMPILE_DEFS NRN_MULTISEND=0) endif() +if(NRN_ENABLE_MPI_DYNAMIC) + list(APPEND CORENRN_COMPILE_DEFS NRNMPI_DYNAMICLOAD=1) +endif() if(CORENRN_ENABLE_OPENMP) find_package(OpenMP QUIET) @@ -256,27 +238,6 @@ if(CORENRN_ENABLE_REPORTING) include_directories(${sonatareport_INCLUDE_DIR}) endif() -if(CORENRN_ENABLE_LEGACY_UNITS) - set(CORENRN_USE_LEGACY_UNITS 1) -else() - set(CORENRN_USE_LEGACY_UNITS 0) -endif() -list(APPEND CORENRN_COMPILE_DEFS CORENEURON_USE_LEGACY_UNITS=${CORENRN_USE_LEGACY_UNITS}) -# Propagate Legacy Units flag to backends. -set(MOD2C_ENABLE_LEGACY_UNITS - ${CORENRN_ENABLE_LEGACY_UNITS} - CACHE BOOL "" FORCE) -set(NMODL_ENABLE_LEGACY_UNITS - ${CORENRN_ENABLE_LEGACY_UNITS} - CACHE BOOL "" FORCE) - -if(CORENRN_ENABLE_MPI_DYNAMIC) - if(NOT CORENRN_ENABLE_MPI) - message(FATAL_ERROR "Cannot enable dynamic mpi without mpi") - endif() - list(APPEND CORENRN_COMPILE_DEFS CORENEURON_ENABLE_MPI_DYNAMIC) -endif() - if(CORENRN_ENABLE_PRCELLSTATE) set(CORENRN_NRN_PRCELLSTATE 1) else() @@ -289,55 +250,61 @@ endif() # ============================================================================= # NMODL specific options # ============================================================================= -if(CORENRN_ENABLE_NMODL) - find_package(nmodl) - if(NOT "${CORENRN_NMODL_DIR}" STREQUAL "" AND NOT nmodl_FOUND) - message(FATAL_ERROR "Cannot find NMODL in ${CORENRN_NMODL_DIR}") - endif() - if(nmodl_FOUND) - set(CORENRN_MOD2CPP_BINARY ${nmodl_BINARY}) - set(CORENRN_MOD2CPP_INCLUDE ${nmodl_INCLUDE}) - # path to python interface - set(ENV{PYTHONPATH} "${nmodl_PYTHONPATH}:$ENV{PYTHONPATH}") - set(CORENRN_NMODL_PYTHONPATH $ENV{PYTHONPATH}) - else() - set(NMODL_ENABLE_PYTHON_BINDINGS - OFF - CACHE BOOL "Disable NMODL python bindings") - include(AddNmodlSubmodule) - set(CORENRN_MOD2CPP_BINARY ${CMAKE_BINARY_DIR}/bin/nmodl${CMAKE_EXECUTABLE_SUFFIX}) - set(CORENRN_MOD2CPP_INCLUDE ${CMAKE_BINARY_DIR}/include) - set(ENV{PYTHONPATH} "$ENV{PYTHONPATH}") - set(nmodl_PYTHONPATH "${CMAKE_BINARY_DIR}/lib") - set(CORENRN_NMODL_PYTHONPATH "${nmodl_PYTHONPATH}:$ENV{PYTHONPATH}") - set(NMODL_TARGET_TO_DEPEND nmodl) - endif() - - # set correct arguments for nmodl for cpu/gpu target - set(CORENRN_NMODL_FLAGS - "" - CACHE STRING "Extra NMODL options such as passes") -else() - include(AddMod2cSubmodule) - set(NMODL_TARGET_TO_DEPEND mod2c_core) - set(CORENRN_MOD2CPP_BINARY ${CMAKE_BINARY_DIR}/bin/mod2c_core${CMAKE_EXECUTABLE_SUFFIX}) - set(CORENRN_MOD2CPP_INCLUDE ${CMAKE_BINARY_DIR}/include) +find_package(nmodl) +if(NOT "${CORENRN_NMODL_DIR}" STREQUAL "" AND NOT nmodl_FOUND) + message(FATAL_ERROR "Cannot find NMODL in ${CORENRN_NMODL_DIR}") endif() +if(nmodl_FOUND) + set(CORENRN_NMODL_BINARY ${nmodl_BINARY}) + set(CORENRN_NMODL_INCLUDE ${nmodl_INCLUDE}) + # path to python interface + set(ENV{PYTHONPATH} "${nmodl_PYTHONPATH}:$ENV{PYTHONPATH}") + set(CORENRN_NMODL_PYTHONPATH $ENV{PYTHONPATH}) +else() + set(NMODL_ENABLE_PYTHON_BINDINGS + 
ON + CACHE BOOL "Disable NMODL python bindings") + nrn_add_external_project(nmodl OFF) + add_subdirectory(${PROJECT_SOURCE_DIR}/external/nmodl ${CMAKE_BINARY_DIR}/external/nmodl) + set(CORENRN_NMODL_BINARY ${CMAKE_BINARY_DIR}/bin/nmodl${CMAKE_EXECUTABLE_SUFFIX}) + set(CORENRN_NMODL_INCLUDE ${CMAKE_BINARY_DIR}/include) + set(ENV{PYTHONPATH} "$ENV{PYTHONPATH}") + set(nmodl_PYTHONPATH "${CMAKE_BINARY_DIR}/lib") + set(CORENRN_NMODL_PYTHONPATH "${nmodl_PYTHONPATH}:$ENV{PYTHONPATH}") + set(NMODL_TARGET_TO_DEPEND nmodl) + set(NMODL_PROJECT_BINARY_DIR ${CMAKE_BINARY_DIR}/external/nmodl) + # install nrnunits.lib and libpywrapper.so from external/nmodl + install( + FILES ${NMODL_PROJECT_BINARY_DIR}/lib/libpywrapper${CMAKE_SHARED_LIBRARY_SUFFIX} + DESTINATION lib + COMPONENT pywrapper + OPTIONAL) + install( + FILES ${NMODL_PROJECT_BINARY_DIR}/share/nmodl/nrnunits.lib + DESTINATION share/nmodl + COMPONENT nrnunits) +endif() +set_property(GLOBAL PROPERTY CORENRN_NMODL_BINARY "${CORENRN_NMODL_BINARY}") + +# set correct arguments for nmodl for cpu/gpu target +set(CORENRN_NMODL_FLAGS + "" + CACHE STRING "Extra NMODL options such as passes") # ============================================================================= # Profiler/Instrumentation Options # ============================================================================= if(CORENRN_ENABLE_CALIPER_PROFILING) find_package(caliper REQUIRED) - list(APPEND CORENRN_COMPILE_DEFS CORENEURON_CALIPER) + list(APPEND CORENRN_COMPILE_DEFS NRN_CALIPER) set(CORENRN_CALIPER_LIB caliper) endif() if(CORENRN_ENABLE_LIKWID_PROFILING) - find_package(likwid REQUIRED) + include(GetLIKWID) + list(APPEND CORENRN_COMPILE_DEFS LIKWID_PERFMON) - # TODO: avoid this part, probably by using some likwid CMake target - include_directories(${likwid_INCLUDE_DIRS}) + set(CORENRN_LIKWID_LIB nrn_likwid) endif() # enable debugging code with extra logs to stdout @@ -351,7 +318,7 @@ endif() # Do not set this when building wheels. The nrnivmodl workflow means that we do not know what # compiler will be invoked with these flags, so we have to use flags that are as generic as # possible. -if(NOT DEFINED NRN_WHEEL_BUILD OR NOT NRN_WHEEL_BUILD) +if(NOT DEFINED NRN_BINARY_DIST_BUILD OR NOT NRN_BINARY_DIST_BUILD) list(APPEND CORENRN_EXTRA_CXX_FLAGS "${IGNORE_UNKNOWN_PRAGMA_FLAGS}") endif() @@ -384,9 +351,9 @@ file( "utils/*/*.cpp") set(MPI_LIB_FILES "mpi/lib/mpispike.cpp" "mpi/lib/nrnmpi.cpp") -if(CORENRN_ENABLE_MPI) +if(NRN_ENABLE_MPI) # Building these requires -ldl, which is only added if MPI is enabled. 
- list(APPEND CORENEURON_CODE_FILES "mpi/core/resolve.cpp" "mpi/core/nrnmpidec.cpp") + list(APPEND CORENEURON_CODE_FILES "mpi/core/resolve.cpp") endif() file(COPY ${PROJECT_SOURCE_DIR}/external/Random123/include/Random123 @@ -439,22 +406,6 @@ set(CORENEURON_BUILTIN_MODFILES # coreneuron GPU library # ============================================================================= if(CORENRN_ENABLE_GPU) - # ~~~ - # artificial cells and some other cpp files (using Random123) should be compiled - # without OpenACC to avoid use of GPU Random123 streams - # OL210813: this shouldn't be needed anymore, but it may have a small performance benefit - # ~~~ - set(OPENACC_EXCLUDED_FILES - ${CMAKE_CURRENT_BINARY_DIR}/netstim.cpp - ${CMAKE_CURRENT_BINARY_DIR}/netstim_inhpoisson.cpp - ${CMAKE_CURRENT_BINARY_DIR}/pattern.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/io/nrn_setup.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/io/setup_fornetcon.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/io/corenrn_data_return.cpp - ${CMAKE_CURRENT_SOURCE_DIR}/io/global_vars.cpp) - - set_source_files_properties(${OPENACC_EXCLUDED_FILES} PROPERTIES COMPILE_FLAGS - "-DDISABLE_OPENACC") # Only compile the explicit CUDA implementation of the Hines solver in GPU builds. Because of # https://forums.developer.nvidia.com/t/cannot-dynamically-load-a-shared-library-containing-both-openacc-and-cuda-code/210972 # this cannot be included in the same shared library as the rest of the OpenACC code. @@ -467,14 +418,14 @@ if(CORENRN_ENABLE_GPU) # different matrix sizes in partial_piv_lu.cpp (with CUDA attributes but without OpenACC or OpenMP # annotations) and dispatch to these from a wrapper in partial_piv_lu.h that does have # OpenACC/OpenMP annotations. - if(CORENRN_ENABLE_NMODL AND EXISTS ${CORENRN_MOD2CPP_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp) - list(APPEND CORENEURON_CODE_FILES ${CORENRN_MOD2CPP_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp) + if(EXISTS ${CORENRN_NMODL_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp) + list(APPEND CORENEURON_CODE_FILES ${CORENRN_NMODL_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp) if(CORENRN_ENABLE_GPU AND CORENRN_HAVE_NVHPC_COMPILER AND CMAKE_BUILD_TYPE STREQUAL "Debug") # In this case OpenAccHelper.cmake passes -gpu=debug, which makes these Eigen functions # extremely slow. Downgrade that to -gpu=lineinfo for this file. 
- set_source_files_properties(${CORENRN_MOD2CPP_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp + set_source_files_properties(${CORENRN_NMODL_INCLUDE}/partial_piv_lu/partial_piv_lu.cpp PROPERTIES COMPILE_FLAGS "-gpu=lineinfo,nodebug -O1") endif() endif() @@ -490,12 +441,12 @@ set(CORENRN_MPI_LIB_NAME CACHE INTERNAL "") # for non-dynamic mpi mode just build object files -if(CORENRN_ENABLE_MPI AND NOT CORENRN_ENABLE_MPI_DYNAMIC) +if(NRN_ENABLE_MPI AND NOT NRN_ENABLE_MPI_DYNAMIC) add_library(${CORENRN_MPI_LIB_NAME} OBJECT ${MPI_LIB_FILES}) target_include_directories( - ${CORENRN_MPI_LIB_NAME} PRIVATE ${MPI_INCLUDE_PATH} ${PROJECT_SOURCE_DIR}/src + ${CORENRN_MPI_LIB_NAME} PRIVATE ${MPI_C_INCLUDE_DIRS} ${PROJECT_SOURCE_DIR}/src ${CMAKE_BINARY_DIR}/generated) - target_link_libraries(${CORENRN_MPI_LIB_NAME} ${CORENRN_CALIPER_LIB}) + target_link_libraries(${CORENRN_MPI_LIB_NAME} ${CORENRN_CALIPER_LIB} ${CORENRN_LIKWID_LIB}) set_property(TARGET ${CORENRN_MPI_LIB_NAME} PROPERTY POSITION_INDEPENDENT_CODE ON) set(CORENRN_MPI_OBJ $) endif() @@ -529,7 +480,7 @@ foreach(target coreneuron-core ${coreneuron_cuda_target}) endforeach() # we can link to MPI libraries in non-dynamic-mpi build -if(CORENRN_ENABLE_MPI AND NOT CORENRN_ENABLE_MPI_DYNAMIC) +if(NRN_ENABLE_MPI AND NOT NRN_ENABLE_MPI_DYNAMIC) target_link_libraries(coreneuron-core PUBLIC ${MPI_CXX_LIBRARIES}) endif() @@ -543,7 +494,7 @@ endif() target_link_libraries(coreneuron-core PUBLIC ${CMAKE_DL_LIBS}) # this is where we handle dynamic mpi library build -if(CORENRN_ENABLE_MPI AND CORENRN_ENABLE_MPI_DYNAMIC) +if(NRN_ENABLE_MPI AND NRN_ENABLE_MPI_DYNAMIC) # store mpi library targets that will be built list(APPEND corenrn_mpi_targets "") @@ -569,7 +520,7 @@ if(CORENRN_ENABLE_MPI AND CORENRN_ENABLE_MPI_DYNAMIC) list(GET NRN_MPI_LIBNAME_LIST ${val} libname) add_library(core${libname}_lib SHARED ${MPI_LIB_FILES}) - target_link_libraries(core${libname}_lib ${CORENRN_CALIPER_LIB}) + target_link_libraries(core${libname}_lib ${CORENRN_CALIPER_LIB} ${CORENRN_LIKWID_LIB}) target_include_directories( core${libname}_lib PUBLIC ${include} @@ -598,7 +549,7 @@ endif() # Suppress some compiler warnings. target_compile_options(coreneuron-core PRIVATE ${CORENEURON_CXX_WARNING_SUPPRESSIONS}) target_link_libraries(coreneuron-core PUBLIC ${sonatareport_LIBRARY} ${CORENRN_CALIPER_LIB} - ${likwid_LIBRARIES}) + ${CORENRN_LIKWID_LIB}) # TODO: fix adding a dependency of coreneuron-core on CLI11::CLI11 when CLI11 is a submodule. Right # now this doesn't work because the CLI11 targets are not exported/installed but coreneuron-core is. @@ -607,11 +558,6 @@ target_include_directories( coreneuron-core SYSTEM PRIVATE ${CLI11_HEADER_DIRECTORY} ${PROJECT_SOURCE_DIR}/external/Random123/include) -# See: https://en.cppreference.com/w/cpp/filesystem#Notes -if(CMAKE_CXX_COMPILER_IS_GCC AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.1) - target_link_libraries(coreneuron-core PUBLIC stdc++fs) -endif() - if(CORENRN_ENABLE_GPU) # nrnran123.cpp uses Boost.Pool in GPU builds if it's available. find_package(Boost QUIET) @@ -643,8 +589,10 @@ set(output_binaries "${nrniv_core_prefix}/special-core" "${corenrn_mech_library} add_custom_command( OUTPUT ${output_binaries} DEPENDS coreneuron-core ${NMODL_TARGET_TO_DEPEND} ${CORENEURON_BUILTIN_MODFILES} - COMMAND ${CMAKE_BINARY_DIR}/bin/nrnivmodl-core -b ${COMPILE_LIBRARY_TYPE} -m - ${CORENRN_MOD2CPP_BINARY} -n "internal" -p 4 "." 
+ COMMAND + ${CMAKE_COMMAND} -E env NMODLHOME=${NMODL_PROJECT_BINARY_DIR} + ${CMAKE_BINARY_DIR}/bin/nrnivmodl-core -b ${COMPILE_LIBRARY_TYPE} -m ${CORENRN_NMODL_BINARY} -n + "internal" -p 4 "." WORKING_DIRECTORY ${CMAKE_BINARY_DIR}/bin COMMENT "Running nrnivmodl-core with internal mod files") add_custom_target(nrniv-core ALL DEPENDS ${output_binaries}) @@ -688,17 +636,19 @@ file( RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}" *.h *.hpp) -configure_file("${CMAKE_BINARY_DIR}/generated/coreneuron/config/neuron_version.hpp" - "${CMAKE_BINARY_DIR}/include/coreneuron/config/neuron_version.hpp" COPYONLY) +configure_file(${CMAKE_BINARY_DIR}/generated/coreneuron/config/neuron_version.hpp + ${CMAKE_BINARY_DIR}/include/coreneuron/config/neuron_version.hpp COPYONLY) foreach(header ${main_headers}) - configure_file("${header}" "${CMAKE_BINARY_DIR}/include/coreneuron/${header}" COPYONLY) + configure_file(${CMAKE_CURRENT_SOURCE_DIR}/${header} + ${CMAKE_BINARY_DIR}/include/coreneuron/${header} COPYONLY) endforeach() -configure_file("utils/profile/profiler_interface.h" +configure_file(${CMAKE_CURRENT_SOURCE_DIR}/utils/profile/profiler_interface.h ${CMAKE_BINARY_DIR}/include/coreneuron/nrniv/profiler_interface.h COPYONLY) # main program required for building special-core -file(COPY apps/coreneuron.cpp DESTINATION ${CMAKE_BINARY_DIR}/share/coreneuron) +cpp_cc_build_time_copy(INPUT ${CMAKE_CURRENT_SOURCE_DIR}/apps/coreneuron.cpp + OUTPUT ${CMAKE_BINARY_DIR}/share/coreneuron/coreneuron.cpp) # Extract the various compiler option strings to use inside nrnivmodl-core. Sets the global property # CORENRN_LIB_LINK_FLAGS, which contains the arguments that must be added to the link line for @@ -751,12 +701,8 @@ install( PATTERN "*.ipp") install(FILES ${MODFUNC_PERL_SCRIPT} ${ENGINEMECH_CODE_FILE} DESTINATION share/coreneuron) -# copy mod2c/nmodl for nrnivmodl-core -install(PROGRAMS ${CORENRN_MOD2CPP_BINARY} DESTINATION bin) - -if(NOT CORENRN_ENABLE_NMODL) - install(FILES ${NMODL_UNITS_FILE} DESTINATION share/mod2c) -endif() +# copy nmodl for nrnivmodl-core +install(PROGRAMS ${CORENRN_NMODL_BINARY} DESTINATION bin) # install nrniv-core app install( @@ -800,10 +746,10 @@ message(STATUS "--------------------+------------------------------------------- message(STATUS "CXX COMPILER | ${CMAKE_CXX_COMPILER}") message(STATUS "COMPILE FLAGS | ${CORENRN_CXX_FLAGS}") message(STATUS "Build Type | ${COMPILE_LIBRARY_TYPE}") -message(STATUS "MPI | ${CORENRN_ENABLE_MPI}") -if(CORENRN_ENABLE_MPI) - message(STATUS " DYNAMIC | ${CORENRN_ENABLE_MPI_DYNAMIC}") - if(CORENRN_ENABLE_MPI_DYNAMIC AND NRN_MPI_LIBNAME_LIST) +message(STATUS "MPI | ${NRN_ENABLE_MPI}") +if(NRN_ENABLE_MPI) + message(STATUS " DYNAMIC | ${NRN_ENABLE_MPI_DYNAMIC}") + if(NRN_ENABLE_MPI_DYNAMIC AND NRN_MPI_LIBNAME_LIST) # ~~~ # for dynamic mpi, rely on neuron for list of libraries to build # this is to avoid cmake code duplication on the coreneuron side @@ -817,16 +763,12 @@ if(CORENRN_ENABLE_MPI) message(STATUS " INC | ${include}") endforeach(val) else() - message(STATUS " INC | ${MPI_CXX_INCLUDE_PATH}") + message(STATUS " INC | ${MPI_CXX_INCLUDE_DIRS}") endif() endif() message(STATUS "OpenMP | ${CORENRN_ENABLE_OPENMP}") -message(STATUS "Use legacy units | ${CORENRN_ENABLE_LEGACY_UNITS}") -message(STATUS "NMODL | ${CORENRN_ENABLE_NMODL}") -if(CORENRN_ENABLE_NMODL) - message(STATUS " FLAGS | ${CORENRN_NMODL_FLAGS}") -endif() -message(STATUS "MOD2CPP PATH | ${CORENRN_MOD2CPP_BINARY}") +message(STATUS "NMODL PATH | ${CORENRN_NMODL_BINARY}") +message(STATUS "NMODL FLAGS | 
${CORENRN_NMODL_FLAGS}") message(STATUS "GPU Support | ${CORENRN_ENABLE_GPU}") if(CORENRN_ENABLE_GPU) message(STATUS " CUDA | ${CUDAToolkit_LIBRARY_DIR}") @@ -845,5 +787,5 @@ if(CORENRN_ENABLE_REPORTING) message(STATUS " sonatareport_INC | ${sonatareport_INCLUDE_DIR}") message(STATUS " sonatareport_LIB | ${sonatareport_LIBRARY}") endif() -message(STATUS "--------------+--------------------------------------------------------------") +message(STATUS "--------------------+--------------------------------------------------------") message(STATUS "") diff --git a/src/coreneuron/README.md b/src/coreneuron/README.md index 7bb5f4eaeb..b5d1bee5a3 100644 --- a/src/coreneuron/README.md +++ b/src/coreneuron/README.md @@ -1,11 +1,9 @@ -![CoreNEURON CI](https://github.com/BlueBrain/CoreNeuron/workflows/CoreNEURON%20CI/badge.svg) [![codecov](https://codecov.io/gh/BlueBrain/CoreNeuron/branch/master/graph/badge.svg?token=mguTdBx93p)](https://codecov.io/gh/BlueBrain/CoreNeuron) - -![CoreNEURON](docs/_static/bluebrain_coreneuron.jpg) +![CoreNEURON](docs/coreneuron/_static/bluebrain_coreneuron.jpg) # CoreNEURON > Optimised simulator engine for [NEURON](https://github.com/neuronsimulator/nrn) -CoreNEURON is a compute engine for the [NEURON](https://www.neuron.yale.edu/neuron/) simulator optimised for both memory usage and computational speed. Its goal is to simulate large cell networks with small memory footprint and optimal performance. +CoreNEURON is a compute engine for the [NEURON](https://nrn.readthedocs.io/) simulator optimised for both memory usage and computational speed. Its goal is to simulate large cell networks with small memory footprint and optimal performance. ## NEURON Models Compatibility @@ -118,7 +116,7 @@ In order to enable CoreNEURON support, you must set the `-coreneuron` flag. Mak nrnivmodl -coreneuron mod_directory ``` -If you see any compilation error then one of the mod files might be incompatible with CoreNEURON. Please [open an issue](https://github.com/BlueBrain/CoreNeuron/issues) with an example and we can help to fix it. +If you see any compilation error then one of the mod files might be incompatible with CoreNEURON. Please [open an issue](https://github.com/neuronsimulator/nrn/issues) with an example and we can help to fix it. ## Running Simulations @@ -230,7 +228,7 @@ You can find [HOC example](https://github.com/neuronsimulator/nrn/blob/master/te #### What results are returned by CoreNEURON? -At the end of the simulation CoreNEURON transfers by default : spikes, voltages, state variables, NetCon weights, all Vector.record, and most GUI trajectories to NEURON. These variables can be recorded using regular NEURON API (e.g. [Vector.record](https://www.neuron.yale.edu/neuron/static/py_doc/programming/math/vector.html#Vector.record) or [spike_record](https://www.neuron.yale.edu/neuron/static/new_doc/modelspec/programmatic/network/parcon.html#ParallelContext.spike_record)). +At the end of the simulation CoreNEURON transfers by default : spikes, voltages, state variables, NetCon weights, all Vector.record, and most GUI trajectories to NEURON. These variables can be recorded using regular NEURON API (e.g. [Vector.record](https://nrn.readthedocs.io/en/latest/python/programming/math/vector.html#Vector.record) or [spike_record](https://nrn.readthedocs.io/en/latest/python/modelspec/programmatic/network/parcon.html#ParallelContext.spike_record)). #### How can I pass additional flags to build? @@ -244,7 +242,7 @@ cmake .. 
-DCMAKE_CXX_FLAGS="-O3 -g" \ By default, OpenMP threading is enabled. You can disable it with `-DCORENRN_ENABLE_OPENMP=OFF` -For other errors, please [open an issue](https://github.com/BlueBrain/CoreNeuron/issues). +For other errors, please [open an issue](https://github.com/neuronsimulator/nrn/issues). ## Developer Build @@ -274,25 +272,6 @@ cmake --build . --parallel 8 ctest # use --parallel for speed, -R to run specific tests ``` -#### Building standalone CoreNEURON without NEURON - -If you want to build the standalone CoreNEURON version, first download the repository as: - -``` -git clone https://github.com/BlueBrain/CoreNeuron.git - -``` - -Once the appropriate modules for compiler, MPI, CMake are loaded, you can build CoreNEURON with: - -```bash -mkdir CoreNeuron/build && cd CoreNeuron/build -cmake .. -DCMAKE_INSTALL_PREFIX=$HOME/install -cmake --build . --parallel 8 --target install -``` - -If you don't have MPI, you can disable the MPI dependency using the CMake option `-DCORENRN_ENABLE_MPI=OFF`. - #### Compiling MOD files In order to compile mod files, one can use **nrnivmodl-core** as: @@ -344,25 +323,6 @@ cmake .. -CORENRN_ENABLE_UNIT_TESTS=OFF To see all CLI options for CoreNEURON, see `./bin/nrniv-core -h`. -#### Formatting CMake and C++ Code - -Format code with `black`, `cmake-format`, and `clang-format` tools, before creating a PR. -It should suffice within the build folder to ... -``` -make format-pr -``` - -## Run CI - -CoreNeuron run several CI: - -- Github Action: defined [here](https://github.com/BlueBrain/CoreNeuron/tree/master/.github/workflows) and is self contained - -- Gitlab CI: defined [here](https://github.com/BlueBrain/CoreNeuron/blob/master/.gitlab-ci.yml) and depends on [gitlab pipelines](https://bbpgitlab.epfl.ch/hpc/gitlab-pipelines) and [blueconfigs](https://bbpgitlab.epfl.ch/hpc/sim/blueconfigs) - -### Configure gitlab CI - -See the [README](https://bbpgitlab.epfl.ch/hpc/gitlab-pipelines/-/blob/main/README.md) of `gitlab pipelines` to configure build. ## Citation @@ -373,9 +333,7 @@ If you would like to know more about CoreNEURON or would like to cite it, then u ## Support / Contribuition -If you see any issue, feel free to [raise a ticket](https://github.com/BlueBrain/CoreNeuron/issues/new). If you would like to improve this library, see [open issues](https://github.com/BlueBrain/CoreNeuron/issues). - -You can see current [contributors here](https://github.com/BlueBrain/CoreNeuron/graphs/contributors). +If you see any issue, feel free to [raise a ticket](https://github.com/neuronsimulator/nrn/issues/new). If you would like to improve this library, see [open issues](https://github.com/neuronsimulator/nrn/issues). ## License diff --git a/src/coreneuron/apps/coreneuron.cpp b/src/coreneuron/apps/coreneuron.cpp index 8907bdd8b0..821aa743be 100644 --- a/src/coreneuron/apps/coreneuron.cpp +++ b/src/coreneuron/apps/coreneuron.cpp @@ -13,5 +13,6 @@ int main(int argc, char** argv) { coreneuron::Instrumentor::init_profile(); auto solve_core_result = solve_core(argc, argv); coreneuron::Instrumentor::finalize_profile(); + return solve_core_result; } diff --git a/src/coreneuron/apps/corenrn_parameters.hpp b/src/coreneuron/apps/corenrn_parameters.hpp index 8db8ce06c9..e599ecf7c2 100644 --- a/src/coreneuron/apps/corenrn_parameters.hpp +++ b/src/coreneuron/apps/corenrn_parameters.hpp @@ -6,6 +6,7 @@ # =============================================================================. 
*/ #pragma once +#include // std::uint32_t #include #include #include diff --git a/src/coreneuron/apps/main1.cpp b/src/coreneuron/apps/main1.cpp index aa594c736b..c5d5b3d2db 100644 --- a/src/coreneuron/apps/main1.cpp +++ b/src/coreneuron/apps/main1.cpp @@ -15,6 +15,7 @@ #include #include #include +#include #include #include @@ -40,21 +41,17 @@ #include "coreneuron/network/partrans.hpp" #include "coreneuron/network/multisend.hpp" #include "coreneuron/io/nrn_setup.hpp" -#include "coreneuron/io/file_utils.hpp" #include "coreneuron/io/nrn2core_direct.h" #include "coreneuron/io/core2nrn_data_return.hpp" #include "coreneuron/utils/utils.hpp" +namespace fs = std::filesystem; + extern "C" { const char* corenrn_version() { return coreneuron::bbcore_write_version; } -// the CORENEURON_USE_LEGACY_UNITS determined by CORENRN_ENABLE_LEGACY_UNITS -bool corenrn_units_use_legacy() { - return CORENEURON_USE_LEGACY_UNITS; -} - void (*nrn2core_part2_clean_)(); /** @@ -77,23 +74,7 @@ void set_openmp_threads(int nthread) { * Convert char* containing arguments from neuron to char* argv[] for * coreneuron command line argument parser. */ -char* prepare_args(int& argc, char**& argv, int use_mpi, const char* mpi_lib, const char* arg) { - // first construct all arguments as string - std::string args(arg); - args.insert(0, " coreneuron "); - args.append(" --skip-mpi-finalize "); - if (use_mpi) { - args.append(" --mpi "); - } - - // if neuron has passed name of MPI library then add it to CLI - std::string corenrn_mpi_lib{mpi_lib}; - if (!corenrn_mpi_lib.empty()) { - args.append(" --mpi-lib "); - corenrn_mpi_lib += " "; - args.append(corenrn_mpi_lib); - } - +char* prepare_args(int& argc, char**& argv, std::string& args) { // we can't modify string with strtok, make copy char* first = strdup(args.c_str()); const char* sep = " "; @@ -108,7 +89,7 @@ char* prepare_args(int& argc, char**& argv, int use_mpi, const char* mpi_lib, co free(first); // now build char*argv - argv = new char*[argc]; + argv = new char*[argc + 1]; first = strdup(args.c_str()); token = strtok(first, sep); for (int i = 0; token; i++) { @@ -116,6 +97,9 @@ char* prepare_args(int& argc, char**& argv, int use_mpi, const char* mpi_lib, co token = strtok(nullptr, sep); } + // make sure argv is terminated by NULL! + argv[argc] = nullptr; + // return actual data to be freed return first; } @@ -436,7 +420,7 @@ std::unique_ptr create_report_handler(const ReportConfiguration& using namespace coreneuron; -#if NRNMPI && defined(CORENEURON_ENABLE_MPI_DYNAMIC) +#if NRNMPI && defined(NRNMPI_DYNAMICLOAD) static void* load_dynamic_mpi(const std::string& libname) { dlerror(); void* handle = dlopen(libname.c_str(), RTLD_NOW | RTLD_GLOBAL); @@ -458,7 +442,7 @@ extern "C" void mk_mech_init(int argc, char** argv) { #if NRNMPI if (corenrn_param.mpi_enable) { -#ifdef CORENEURON_ENABLE_MPI_DYNAMIC +#ifdef NRNMPI_DYNAMICLOAD // coreneuron rely on neuron to detect mpi library distribution and // the name of the library itself. Make sure the library name is specified // via CLI option. 
@@ -517,7 +501,7 @@ extern "C" int run_solve_core(int argc, char** argv) { // Create outpath if it does not exist if (nrnmpi_myid == 0) { - mkdir_p(corenrn_param.outpath.c_str()); + fs::create_directories(corenrn_param.outpath); } if (!corenrn_param.reportfilepath.empty()) { @@ -538,7 +522,7 @@ extern "C" int run_solve_core(int argc, char** argv) { std::string output_dir = corenrn_param.outpath; if (nrnmpi_myid == 0) { - mkdir_p(output_dir.c_str()); + fs::create_directories(output_dir); } #if NRNMPI if (corenrn_param.mpi_enable) { @@ -637,7 +621,7 @@ extern "C" int run_solve_core(int argc, char** argv) { } // copy weights back to NEURON NetCon - if (nrn2core_all_weights_return_) { + if (nrn2core_all_weights_return_ && corenrn_embedded) { // first update weights from gpu update_weights_from_gpu(nrn_threads, nrn_nthread); @@ -651,7 +635,9 @@ extern "C" int run_solve_core(int argc, char** argv) { (*nrn2core_all_weights_return_)(weights); } - core2nrn_data_return(); + if (corenrn_embedded) { + core2nrn_data_return(); + } { Instrumentor::phase p("checkpoint"); diff --git a/src/coreneuron/io/core2nrn_data_return.cpp b/src/coreneuron/io/core2nrn_data_return.cpp index 4d6f192c4f..5b13d4880d 100644 --- a/src/coreneuron/io/core2nrn_data_return.cpp +++ b/src/coreneuron/io/core2nrn_data_return.cpp @@ -7,6 +7,7 @@ */ #include +#include #include "coreneuron/coreneuron.hpp" #include "coreneuron/io/nrn2core_direct.h" @@ -26,7 +27,7 @@ * Return is size of either the returned data pointer or the number * of pointers in mdata. tid is the thread index. */ -size_t (*nrn2core_type_return_)(int type, int tid, double*& data, double**& mdata); +size_t (*nrn2core_type_return_)(int type, int tid, double*& data, std::vector& mdata); /** @brief, Call NEURON mechanism bbcore_read. * Inverse of bbcore_write for transfer from NEURON to CoreNEURON. @@ -40,6 +41,11 @@ int (*core2nrn_corepointer_mech_)(int tid, int dcnt, int* iArray, double* dArray); + +int (*core2nrn_nmodlrandom_)(int tid, + int type, + const std::vector& indices, + const std::vector& nmodlrandom); } namespace coreneuron { @@ -67,14 +73,13 @@ static void soa2aos_inverse_permute_copy(size_t n, int sz, int stride, double* src, - double** dest, + std::vector& dest, int* permute) { // src is soa and permuted. dest is n pointers to sz doubles (aos). for (size_t instance = 0; instance < n; ++instance) { - double* d = dest[instance]; double* s = src + permute[instance]; for (int i = 0; i < sz; ++i) { - d[i] = s[i * stride]; + dest[i][instance] = s[i * stride]; } } } @@ -86,13 +91,16 @@ static void soa2aos_inverse_permute_copy(size_t n, * Each of the sz segments of src have the same order as the n pointers * of dest. */ -static void soa2aos_unpermuted_copy(size_t n, int sz, int stride, double* src, double** dest) { +static void soa2aos_unpermuted_copy(size_t n, + int sz, + int stride, + double* src, + std::vector& dest) { // src is soa and permuted. dest is n pointers to sz doubles (aos). for (size_t instance = 0; instance < n; ++instance) { - double* d = dest[instance]; double* s = src + instance; for (int i = 0; i < sz; ++i) { - d[i] = s[i * stride]; + dest[i][instance] = s[i * stride]; } } } @@ -101,11 +109,12 @@ static void soa2aos_unpermuted_copy(size_t n, int sz, int stride, double* src, d * dest is an array of n pointers to the beginning of each sz length array. * src is a contiguous array of n segments of size sz. 
*/ -static void aos2aos_copy(size_t n, int sz, double* src, double** dest) { +static void aos2aos_copy(size_t n, int sz, double* src, std::vector& dest) { for (size_t instance = 0; instance < n; ++instance) { - double* d = dest[instance]; double* s = src + (instance * sz); - std::copy(s, s + sz, d); + for (auto i = 0; i < sz; ++i) { + dest[i][instance] = s[i]; + } } } @@ -176,6 +185,64 @@ static void core2nrn_corepointer(int tid, NrnThreadMembList* tml) { (*core2nrn_corepointer_mech_)(tid, type, icnt, dcnt, iArray.get(), dArray.get()); } +// based on code from nrncore_callbacks.cpp +std::vector& nrn_mech_random_indices(int type) { + static std::unordered_map> mech_random_indices{}; + static std::mutex mx; + std::unique_lock lock(mx); + if (mech_random_indices.count(type) == 0) { + // if no element, create empty one and search dparam_semantics to fill + auto& mri = mech_random_indices[type]; + int* semantics = corenrn.get_memb_func(type).dparam_semantics; + int dparam_size = corenrn.get_prop_dparam_size()[type]; + for (int i = 0; i < dparam_size; ++i) { + if (semantics[i] == -11) { + mri.push_back(i); + } + } + } + lock.unlock(); + return mech_random_indices[type]; +} + +/** @brief Copy back NMODL RANDOM sequence to NEURON + */ +static void c2n_nmodlrandom(int tid, NrnThreadMembList* tml) { + // Started out as a copy of corenrn_corepointer above. + // overall algorithm for nmodlrandom is similar to nrnthread_dat2_mech. + int type = tml->index; + auto& indices = nrn_mech_random_indices(type); + if (indices.size() == 0) { + return; + } + NrnThread& nt = nrn_threads[tid]; + Memb_list* ml = tml->ml; + int layout = corenrn.get_mech_data_layout()[type]; + int pdsz = corenrn.get_prop_dparam_size()[type]; + int aln_cntml = nrn_soa_padded_size(ml->nodecount, layout); + int n = ml->nodecount; + + // will send back vector of 34 bit uints (aka double) + std::vector nmodlrandom{}; + nmodlrandom.reserve(n * indices.size()); + for (int ix: indices) { + for (int j = 0; j < n; ++j) { + int jp = j; + if (ml->_permute) { + jp = ml->_permute[j]; + } + int pv = ml->pdata[nrn_i_layout(jp, n, ix, pdsz, layout)]; + nrnran123_State* state = (nrnran123_State*) nt._vdata[pv]; + uint32_t seq; + char which; + nrnran123_getseq(state, &seq, &which); + nmodlrandom.push_back(double(seq) * 4 + which); + } + } + + (*core2nrn_nmodlrandom_)(tid, type, indices, nmodlrandom); +} + /** @brief Copy event queue and related state back to NEURON. */ static void core2nrn_tqueue(NrnThread&); @@ -224,9 +291,8 @@ void core2nrn_data_return() { for (int tid = 0; tid < nrn_nthread; ++tid) { size_t n = 0; double* data = nullptr; - double** mdata = nullptr; NrnThread& nt = nrn_threads[tid]; - + std::vector mdata{}; n = (*nrn2core_type_return_)(0, tid, data, mdata); // 0 means time if (n) { // not the empty thread data[0] = nt._t; @@ -248,7 +314,7 @@ void core2nrn_data_return() { int mtype = tml->index; Memb_list* ml = tml->ml; n = (*nrn2core_type_return_)(mtype, tid, data, mdata); - assert(n == size_t(ml->nodecount) && mdata); + assert(n == size_t(ml->nodecount) && !mdata.empty()); if (n == 0) { continue; } @@ -273,6 +339,7 @@ void core2nrn_data_return() { } core2nrn_corepointer(tid, tml); + c2n_nmodlrandom(tid, tml); } // Copy the event queue and related state. 
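A note on the `double(seq) * 4 + which` packing used in `c2n_nmodlrandom` above: the Random123 sequence counter `seq` (a 32-bit value) and the stream selector `which` (0–3) are combined into one double per mechanism instance before being handed back to NEURON, since the combined value needs 34 bits and a double's 53-bit mantissa holds it exactly. The sketch below illustrates the round trip under those assumptions; `pack_seq`/`unpack_seq` are illustrative names only, not part of the CoreNEURON API, and the unpacking side is a hypothetical mirror of what the NEURON callback would do.

```cpp
#include <cassert>
#include <cstdint>

// Pack a Random123 (seq, which) pair into one double, mirroring the
// expression used in c2n_nmodlrandom: seq in the high bits, which in
// the low two bits.
double pack_seq(uint32_t seq, char which) {
    return double(seq) * 4 + which;
}

// Hypothetical inverse as the receiving (NEURON) side would apply it.
void unpack_seq(double packed, uint32_t& seq, char& which) {
    auto v = static_cast<std::uint64_t>(packed);
    seq = static_cast<uint32_t>(v / 4);
    which = static_cast<char>(v % 4);
}

int main() {
    uint32_t seq{};
    char which{};
    unpack_seq(pack_seq(123456789u, 3), seq, which);
    assert(seq == 123456789u && which == 3);
}
```

Packing into a double rather than two separate arrays keeps the transfer a single flat `std::vector<double>` per mechanism type, matching the layout the rest of the data-return path already uses.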
diff --git a/src/coreneuron/io/file_utils.cpp b/src/coreneuron/io/file_utils.cpp deleted file mode 100644 index 96ee47d2f3..0000000000 --- a/src/coreneuron/io/file_utils.cpp +++ /dev/null @@ -1,53 +0,0 @@ -/* -# ============================================================================= -# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL -# -# See top-level LICENSE file for details. -# ============================================================================= -*/ - -#include -#include -#include -#include -#include - -#if defined(MINGW) -#define mkdir(dir_name, permission) _mkdir(dir_name) -#endif - -/* adapted from : gist@jonathonreinhart/mkdir_p.c */ -int mkdir_p(const char* path) { - const int path_len = strlen(path); - if (path_len == 0) { - printf("Warning: Empty path for creating directory"); - return -1; - } - - char* dirpath = new char[path_len + 1]; - strcpy(dirpath, path); - errno = 0; - - /* iterate from outer upto inner dir */ - for (char* p = dirpath + 1; *p; p++) { - if (*p == '/') { - /* temporarily truncate to sub-dir */ - *p = '\0'; - - if (mkdir(dirpath, S_IRWXU) != 0) { - if (errno != EEXIST) - return -1; - } - *p = '/'; - } - } - - if (mkdir(dirpath, S_IRWXU) != 0) { - if (errno != EEXIST) { - return -1; - } - } - - delete[] dirpath; - return 0; -} diff --git a/src/coreneuron/io/file_utils.hpp b/src/coreneuron/io/file_utils.hpp deleted file mode 100644 index 1ce2eab838..0000000000 --- a/src/coreneuron/io/file_utils.hpp +++ /dev/null @@ -1,21 +0,0 @@ -/* -# ============================================================================= -# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL -# -# See top-level LICENSE file for details. -# ============================================================================= -*/ - -/** - * @file file_utils.h - * @brief Utility functions for file/directory management - * - */ - -#pragma once - -/** @brief Creates directory if doesn't exisit (similar to mkdir -p) - * @param Directory path - * @return Status - */ -int mkdir_p(const char* path); diff --git a/src/coreneuron/io/global_vars.cpp b/src/coreneuron/io/global_vars.cpp index 815423ea92..1dc5a5effc 100644 --- a/src/coreneuron/io/global_vars.cpp +++ b/src/coreneuron/io/global_vars.cpp @@ -141,14 +141,6 @@ void set_globals(const char* path, bool cli_global_seed, int cli_global_seed_val secondorder = n; } else if (strcmp(name, "Random123_globalindex") == 0) { nrnran123_set_globalindex((uint32_t) n); - } else if (strcmp(name, "_nrnunit_use_legacy_") == 0) { - if (n != CORENEURON_USE_LEGACY_UNITS) { - hoc_execerror( - "CORENRN_ENABLE_LEGACY_UNITS not" - " consistent with NEURON value of" - " nrnunit_use_legacy()", - nullptr); - } } } } diff --git a/src/coreneuron/io/lfp.hpp b/src/coreneuron/io/lfp.hpp index 105d0163b0..caf849c501 100644 --- a/src/coreneuron/io/lfp.hpp +++ b/src/coreneuron/io/lfp.hpp @@ -106,20 +106,20 @@ struct LFPCalculator { }; template <> -double LFPCalculator::getFactor(const lfputils::Point3D& e_pos, - const lfputils::Point3D& seg_0, - const lfputils::Point3D& seg_1, - const double radius, - const double f) const { +inline double LFPCalculator::getFactor(const lfputils::Point3D& e_pos, + const lfputils::Point3D& seg_0, + const lfputils::Point3D& seg_1, + const double radius, + const double f) const { return lfputils::line_source_lfp_factor(e_pos, seg_0, seg_1, radius, f); } template <> -double LFPCalculator::getFactor(const lfputils::Point3D& e_pos, - const lfputils::Point3D& seg_0, - const lfputils::Point3D& seg_1, - const double radius, - const double f) 
const { +inline double LFPCalculator::getFactor(const lfputils::Point3D& e_pos, + const lfputils::Point3D& seg_0, + const lfputils::Point3D& seg_1, + const double radius, + const double f) const { return lfputils::point_source_lfp_factor(e_pos, lfputils::barycenter(seg_0, seg_1), radius, f); } diff --git a/src/coreneuron/io/nrn2core_direct.h b/src/coreneuron/io/nrn2core_direct.h index 3359c847b1..5790f2197d 100644 --- a/src/coreneuron/io/nrn2core_direct.h +++ b/src/coreneuron/io/nrn2core_direct.h @@ -28,7 +28,7 @@ extern int (*nrn2core_get_global_int_item_)(const char* name); extern int (*nrn2core_get_dat1_)(int tid, int& n_presyn, int& n_netcon, - int*& output_gid, + std::vector& output_gid, int*& netcon_srcgid, std::vector& netcon_negsrcgid_tid); @@ -59,6 +59,7 @@ extern int (*nrn2core_get_dat2_mech_)(int tid, int*& nodeindices, double*& data, int*& pdata, + std::vector& nmodlrandom, std::vector& pointer2type); extern int (*nrn2core_get_dat2_3_)(int tid, @@ -121,5 +122,8 @@ extern int (*nrn2core_all_spike_vectors_return_)(std::vector& spikevec, extern void (*nrn2core_all_weights_return_)(std::vector& weights); /* get data array pointer from NEURON to copy into. */ -extern size_t (*nrn2core_type_return_)(int type, int tid, double*& data, double**& mdata); +extern size_t (*nrn2core_type_return_)(int type, + int tid, + double*& data, + std::vector& mdata); } // extern "C" diff --git a/src/coreneuron/io/nrn_checkpoint.cpp b/src/coreneuron/io/nrn_checkpoint.cpp index 29e9deb777..ed91d7de81 100644 --- a/src/coreneuron/io/nrn_checkpoint.cpp +++ b/src/coreneuron/io/nrn_checkpoint.cpp @@ -5,6 +5,7 @@ # See top-level LICENSE file for details. # =============================================================================. */ +#include #include #include #include @@ -19,13 +20,14 @@ #include "coreneuron/network/netpar.hpp" #include "coreneuron/utils/vrecitem.h" #include "coreneuron/mechanism/mech/mod2c_core_thread.hpp" -#include "coreneuron/io/file_utils.hpp" #include "coreneuron/permute/data_layout.hpp" #include "coreneuron/permute/node_permute.h" #include "coreneuron/coreneuron.hpp" #include "coreneuron/utils/nrnoc_aux.hpp" #include "coreneuron/apps/corenrn_parameters.hpp" +namespace fs = std::filesystem; + namespace coreneuron { // Those functions comes from mod file directly extern int checkpoint_save_patternstim(_threadargsproto_); @@ -37,7 +39,7 @@ CheckPoints::CheckPoints(const std::string& save, const std::string& restore) , restored(false) { if (!save.empty()) { if (nrnmpi_myid == 0) { - mkdir_p(save.c_str()); + fs::create_directories(save); } } } @@ -71,6 +73,8 @@ void CheckPoints::write_checkpoint(NrnThread* nt, int nb_threads) const { /** * if openmp threading needed: * #pragma omp parallel for private(i) shared(nt, nb_threads) schedule(runtime) + * but note that nrn_mech_random_indices(type) is not threadsafe on first + * call for each type. */ for (int i = 0; i < nb_threads; i++) { if (nt[i].ncell || nt[i].tml) { @@ -305,12 +309,41 @@ void CheckPoints::write_phase2(NrnThread& nt) const { } } fh.write_array(d, cnt * sz); - delete[] d; size_t s = pointer2type.size(); fh << s << " npointer\n"; if (s) { fh.write_array(pointer2type.data(), s); } + + // nmodlrandom + auto& indices = nrn_mech_random_indices(type); + s = indices.size() ? 
(1 + indices.size() + 5 * cnt * indices.size()) : 0; + fh << s << " nmodlrandom\n"; + if (s) { + std::vector nmodlrandom{}; + nmodlrandom.reserve(s); + nmodlrandom.push_back((uint32_t) indices.size()); + for (auto ix: indices) { + nmodlrandom.push_back((uint32_t) ix); + } + for (auto ix: indices) { + uint32_t data[5]; + char which; + for (int i = 0; i < cnt; ++i) { + void* v = nt._vdata[d[i * sz + ix]]; + nrnran123_State* r = (nrnran123_State*) v; + nrnran123_getids3(r, &data[0], &data[1], &data[2]); + nrnran123_getseq(r, &data[3], &which); + data[4] = uint32_t(which); + for (auto j: data) { + nmodlrandom.push_back(j); + } + } + } + fh.write_array(nmodlrandom.data(), nmodlrandom.size()); + } + + delete[] d; } } diff --git a/src/coreneuron/io/nrn_setup.cpp b/src/coreneuron/io/nrn_setup.cpp index 19d5b52974..8697a9015d 100644 --- a/src/coreneuron/io/nrn_setup.cpp +++ b/src/coreneuron/io/nrn_setup.cpp @@ -478,8 +478,7 @@ void nrn_setup(const char* filesdat, } else { nrn_multithread_job([](NrnThread* n) { Phase1 p1{n->id}; - NrnThread& nt = *n; - p1.populate(nt, mut); + p1.populate(*n, mut); }); } @@ -739,6 +738,16 @@ void nrn_cleanup() { (*s)(nt, ml, tml->index); } + // Moved from below as priv_dtor is now deleting the RANDOM streams, + // and at this moment need an undeleted pdata. + // Destroy the global variables struct allocated in nrn_init + if (auto* const priv_dtor = corenrn.get_memb_func(tml->index).private_destructor) { + (*priv_dtor)(nt, ml, tml->index); + assert(!ml->instance); + assert(!ml->global_variables); + assert(ml->global_variables_size == 0); + } + ml->data = nullptr; // this was pointing into memory owned by nt free_memory(ml->pdata); ml->pdata = nullptr; @@ -754,14 +763,6 @@ void nrn_cleanup() { ml->_thread = nullptr; } - // Destroy the global variables struct allocated in nrn_init - if (auto* const priv_dtor = corenrn.get_memb_func(tml->index).private_destructor) { - (*priv_dtor)(nt, ml, tml->index); - assert(!ml->instance); - assert(!ml->global_variables); - assert(ml->global_variables_size == 0); - } - NetReceiveBuffer_t* nrb = ml->_net_receive_buffer; if (nrb) { if (nrb->_size) { @@ -817,15 +818,8 @@ void nrn_cleanup() { nt->presyns_helper = nullptr; } - if (nt->pntprocs) { - free_memory(nt->pntprocs); - nt->pntprocs = nullptr; - } - - if (nt->presyns) { - delete[] nt->presyns; - nt->presyns = nullptr; - } + delete[] std::exchange(nt->pntprocs, nullptr); + delete[] std::exchange(nt->presyns, nullptr); if (nt->pnt2presyn_ix) { for (size_t i = 0; i < corenrn.get_has_net_event().size(); ++i) { diff --git a/src/coreneuron/io/output_spikes.cpp b/src/coreneuron/io/output_spikes.cpp index 6898f22561..6a17a3345f 100644 --- a/src/coreneuron/io/output_spikes.cpp +++ b/src/coreneuron/io/output_spikes.cpp @@ -73,14 +73,11 @@ static void local_spikevec_sort(std::vector& isvect, // first build a permutation vector std::vector perm(isvect.size()); std::iota(perm.begin(), perm.end(), 0); - // sort by gid (second predicate first) - std::stable_sort(perm.begin(), perm.end(), [&](std::size_t i, std::size_t j) { - return isvecg[i] < isvecg[j]; - }); - // then sort by time - std::stable_sort(perm.begin(), perm.end(), [&](std::size_t i, std::size_t j) { - return isvect[i] < isvect[j]; + // sort by time then gid + std::sort(perm.begin(), perm.end(), [&](std::size_t i, std::size_t j) { + return isvect[i] < isvect[j] || (isvect[i] == isvect[j] && isvecg[i] < isvecg[j]); }); + // now apply permutation to time and gid output vectors std::transform(perm.begin(), perm.end(), osvect.begin(), 
[&](std::size_t i) { return isvect[i]; diff --git a/src/coreneuron/io/phase1.cpp b/src/coreneuron/io/phase1.cpp index 54dd97b60d..7dc0aeecb6 100644 --- a/src/coreneuron/io/phase1.cpp +++ b/src/coreneuron/io/phase1.cpp @@ -17,7 +17,7 @@ int (*nrn2core_get_dat1_)(int tid, int& n_presyn, int& n_netcon, - int*& output_gid, + std::vector& output_gid, int*& netcon_srcgid, std::vector& netcon_negsrcgid_tid); @@ -36,7 +36,6 @@ Phase1::Phase1(FileHandler& F) { } Phase1::Phase1(int thread_id) { - int* output_gids; int* netcon_srcgid; int n_presyn; int n_netcon; @@ -47,9 +46,7 @@ Phase1::Phase1(int thread_id) { if (!valid) { return; } - - this->output_gids = std::vector(output_gids, output_gids + n_presyn); - delete[] output_gids; + assert(output_gids.size() == n_presyn); this->netcon_srcgids = std::vector(netcon_srcgid, netcon_srcgid + n_netcon); delete[] netcon_srcgid; } diff --git a/src/coreneuron/io/phase2.cpp b/src/coreneuron/io/phase2.cpp index 0dc334dd3b..0502129068 100644 --- a/src/coreneuron/io/phase2.cpp +++ b/src/coreneuron/io/phase2.cpp @@ -50,6 +50,7 @@ int (*nrn2core_get_dat2_mech_)(int tid, int*& nodeindices, double*& data, int*& pdata, + std::vector& nmodlrandom, std::vector& pointer2type); int (*nrn2core_get_dat2_3_)(int tid, @@ -182,6 +183,13 @@ void Phase2::read_file(FileHandler& F, const NrnThread& nt) { auto& p2t = tmls.back().pointer2type; p2t = F.read_vector(sz); } + + // nmodlrandom + sz = F.read_int(); + if (sz) { + auto& nmodlrandom = tmls.back().nmodlrandom; + nmodlrandom = F.read_vector(sz); + } } } output_vindex = F.read_vector(nt.n_presyn); @@ -325,13 +333,24 @@ void Phase2::read_direct(int thread_id, const NrnThread& nt) { int* nodeindices_ = nullptr; double* data_ = _data + offset; int* pdata_ = const_cast(tml.pdata.data()); + + // nmodlrandom, receives: + // number of random variables + // dparam index (neuron side) of each random variable + // 5 uint32 for each var of each instance + // id1, id2, id3, seq, uint32_t(which) + // all instances of ranvar1 first, then all instances of ranvar2, etc. + auto& nmodlrandom = tml.nmodlrandom; + (*nrn2core_get_dat2_mech_)(thread_id, i, dparam_sizes[type] > 0 ? dsz_inst : 0, nodeindices_, data_, pdata_, + nmodlrandom, tml.pointer2type); + if (dparam_sizes[type] > 0) dsz_inst++; offset += nrn_soa_padded_size(nodecounts[i], layout) * param_sizes[type]; @@ -1036,9 +1055,7 @@ void Phase2::populate(NrnThread& nt, const UserParams& userParams) { num_point_process += n; } } - nt.pntprocs = (Point_process*) ecalloc_align(num_point_process, - sizeof(Point_process)); // includes acell with and - // without gid + nt.pntprocs = new Point_process[num_point_process]{}; // includes acell with and without gid nt.n_pntproc = num_point_process; nt._ndata = offset; @@ -1113,6 +1130,31 @@ void Phase2::populate(NrnThread& nt, const UserParams& userParams) { pp->_tid = nt.id; } } + + auto& r = tmls[itml].nmodlrandom; + if (r.size()) { + size_t ix{}; + uint32_t n_randomvar = r[ix++]; + assert(r.size() == 1 + n_randomvar + 5 * n_randomvar * n); + std::vector indices(n_randomvar); + for (uint32_t i = 0; i < n_randomvar; ++i) { + indices[i] = r[ix++]; + } + int cnt = ml->nodecount; + for (auto index: indices) { + // should we also verify that index on corenrn side same as on nrn side? + // sonarcloud thinks ml_pdata can be nullptr, so ... 
+ assert(index >= 0 && index < szdp); + for (int i = 0; i < n; ++i) { + nrnran123_State* state = nrnran123_newstream3(r[ix], r[ix + 1], r[ix + 2]); + nrnran123_setseq(state, r[ix + 3], char(r[ix + 4])); + ix += 5; + int ipd = ml->pdata[nrn_i_layout(i, cnt, index, szdp, layout)]; + assert(ipd >= 0 && ipd < n_vdata + extra_nv); + nt._vdata[ipd] = state; + } + } + } } // pnt_offset needed for SelfEvent transfer from NEURON. Not needed on GPU. diff --git a/src/coreneuron/io/phase2.hpp b/src/coreneuron/io/phase2.hpp index 8b84305e43..9071e7ee38 100644 --- a/src/coreneuron/io/phase2.hpp +++ b/src/coreneuron/io/phase2.hpp @@ -115,6 +115,7 @@ class Phase2 { std::vector iArray; std::vector dArray; std::vector pointer2type; + std::vector nmodlrandom{}; }; std::vector tmls; std::vector output_vindex; diff --git a/src/coreneuron/io/reports/report_handler.cpp b/src/coreneuron/io/reports/report_handler.cpp index bf870f3266..0fd48d7a0a 100644 --- a/src/coreneuron/io/reports/report_handler.cpp +++ b/src/coreneuron/io/reports/report_handler.cpp @@ -297,7 +297,8 @@ VarsToReport ReportHandler::get_summation_vars_to_report( if (report.section_type == SectionType::All) { double* variable = report_variable + segment_id; to_report.emplace_back(VarWithMapping(section_id, variable)); - } else if (report.section_type == SectionType::Cell) { + } else if (report.section_type == SectionType::Cell || + report.section_type == SectionType::Soma) { summation_report.gid_segments_[gid].push_back(segment_id); } } @@ -353,6 +354,24 @@ VarsToReport ReportHandler::get_synapse_vars_to_report( return vars_to_report; } +void add_clamp_current(const char* clamp, + const NrnThread& nt, + std::unordered_map>>& currents, + int gid, + const std::vector& nodes_to_gids) { + auto mech_id = nrn_get_mechtype(clamp); + Memb_list* ml = nt._ml_list[mech_id]; + if (ml) { + for (int i = 0; i < ml->nodecount; i++) { + auto segment_id = ml->nodeindices[i]; + if ((nodes_to_gids[segment_id] == gid)) { + double* var_value = get_var_location_from_var_name(mech_id, "i", ml, i); + currents[segment_id].push_back(std::make_pair(var_value, -1)); + } + } + } +} + VarsToReport ReportHandler::get_lfp_vars_to_report(const NrnThread& nt, const std::vector& gids_to_report, ReportConfiguration& report, @@ -368,18 +387,9 @@ VarsToReport ReportHandler::get_lfp_vars_to_report(const NrnThread& nt, VarsToReport vars_to_report; off_t offset_lfp = 0; for (const auto& gid: gids_to_report) { - // IClamp is needed for the LFP calculation - auto mech_id = nrn_get_mechtype("IClamp"); - Memb_list* ml = nt._ml_list[mech_id]; - if (ml) { - for (int j = 0; j < ml->nodecount; j++) { - auto segment_id = ml->nodeindices[j]; - if ((nodes_to_gids[segment_id] == gid)) { - double* var_value = get_var_location_from_var_name(mech_id, "i", ml, j); - summation_report.currents_[segment_id].push_back(std::make_pair(var_value, -1)); - } - } - } + // IClamp & SEClamp are needed for the LFP calculation + add_clamp_current("IClamp", nt, summation_report.currents_, gid, nodes_to_gids); + add_clamp_current("SEClamp", nt, summation_report.currents_, gid, nodes_to_gids); const auto& cell_mapping = mapinfo->get_cell_mapping(gid); if (cell_mapping == nullptr) { std::cerr << "[LFP] Error : Compartment mapping information is missing for gid " << gid diff --git a/src/coreneuron/mechanism/capac.cpp b/src/coreneuron/mechanism/capac.cpp index f47a4ebd77..4342803ccb 100644 --- a/src/coreneuron/mechanism/capac.cpp +++ b/src/coreneuron/mechanism/capac.cpp @@ -1,6 +1,3 @@ -/*** - THIS FILE IS AUTO GENERATED 
DONT MODIFY IT. - ***/ /* # ============================================================================= # Copyright (c) 2016 - 2021 Blue Brain Project/EPFL @@ -15,7 +12,7 @@ #define _PRAGMA_FOR_INIT_ACC_LOOP_ \ nrn_pragma_acc(parallel loop present(vdata [0:_cntml_padded * nparm]) if (_nt->compute_gpu)) \ nrn_pragma_omp(target teams distribute parallel for simd if(_nt->compute_gpu)) -#define _STRIDE _cntml_padded + _iml +#define CNRN_FLAT_INDEX_IML_ROW(i) ((i) * (_cntml_padded) + (_iml)) namespace coreneuron { @@ -45,8 +42,8 @@ void capacitance_reg(void) { hoc_register_prop_size(mechtype, nparm, 0); } -#define cm vdata[0 * _STRIDE] -#define i_cap vdata[1 * _STRIDE] +#define cm vdata[CNRN_FLAT_INDEX_IML_ROW(0)] +#define i_cap vdata[CNRN_FLAT_INDEX_IML_ROW(1)] /* cj is analogous to 1/dt for cvode and daspk diff --git a/src/coreneuron/mechanism/eion.cpp b/src/coreneuron/mechanism/eion.cpp index fb95ae21bb..e468ee3e01 100644 --- a/src/coreneuron/mechanism/eion.cpp +++ b/src/coreneuron/mechanism/eion.cpp @@ -6,8 +6,6 @@ # =============================================================================. */ -/// THIS FILE IS AUTO GENERATED DONT MODIFY IT. - #include #include @@ -18,7 +16,7 @@ #include "coreneuron/permute/data_layout.hpp" #include "coreneuron/utils/nrnoc_aux.hpp" -#define _STRIDE _cntml_padded + _iml +#define CNRN_FLAT_INDEX_IML_ROW(i) ((i) * (_cntml_padded) + (_iml)) namespace coreneuron { @@ -142,11 +140,11 @@ the USEION statement of any model using this ion\n", } #if VECTORIZE -#define erev pd[0 * _STRIDE] /* From Eion */ -#define conci pd[1 * _STRIDE] -#define conco pd[2 * _STRIDE] -#define cur pd[3 * _STRIDE] -#define dcurdv pd[4 * _STRIDE] +#define erev pd[CNRN_FLAT_INDEX_IML_ROW(0)] /* From Eion */ +#define conci pd[CNRN_FLAT_INDEX_IML_ROW(1)] +#define conco pd[CNRN_FLAT_INDEX_IML_ROW(2)] +#define cur pd[CNRN_FLAT_INDEX_IML_ROW(3)] +#define dcurdv pd[CNRN_FLAT_INDEX_IML_ROW(4)] /* handle erev, conci, conc0 "in the right way" according to ion_style diff --git a/src/coreneuron/mechanism/mech/enginemech.cpp b/src/coreneuron/mechanism/mech/enginemech.cpp index 2c20d1293e..19549228ba 100644 --- a/src/coreneuron/mechanism/mech/enginemech.cpp +++ b/src/coreneuron/mechanism/mech/enginemech.cpp @@ -17,6 +17,8 @@ #include #include +#include +#include namespace coreneuron { @@ -39,18 +41,6 @@ extern bool nrn_use_fast_imem; extern void nrn_cleanup_ion_map(); } // namespace coreneuron -/** Initialize mechanisms and run simulation using CoreNEURON - * - * This is mainly used to build nrniv-core executable - */ -int solve_core(int argc, char** argv) { - mk_mech_init(argc, argv); - coreneuron::modl_reg(); - int ret = run_solve_core(argc, argv); - coreneuron::nrn_cleanup_ion_map(); - return ret; -} - extern "C" { /// global variables from coreneuron library @@ -58,7 +48,7 @@ extern bool corenrn_embedded; extern int corenrn_embedded_nthread; /// parse arguments from neuron and prepare new one for coreneuron -char* prepare_args(int& argc, char**& argv, int use_mpi, const char* mpi_lib, const char* nrn_arg); +char* prepare_args(int& argc, char**& argv, std::string& args); /// initialize standard mechanisms from coreneuron void mk_mech_init(int argc, char** argv); @@ -66,6 +56,42 @@ void mk_mech_init(int argc, char** argv); /// set openmp threads equal to neuron's pthread void set_openmp_threads(int nthread); +/** + * Add MPI library loading CLI argument for CoreNEURON + * + * CoreNEURON requires `--mpi-lib` CLI argument with the + * path of library. 
In case of `solve_core()` call from MOD + * file, such CLI argument may not present. In this case, we + * additionally check `NRN_CORENRN_MPI_LIB` env variable set + * by NEURON. + * + * @param mpi_lib path of coreneuron MPI library to load + * @param args char* argv[] in std::string form + */ +void add_mpi_library_arg(const char* mpi_lib, std::string& args) { + std::string corenrn_mpi_lib; + + // check if user or neuron has provided one + if (mpi_lib != nullptr) { + corenrn_mpi_lib = std::string(mpi_lib); + } + + // if mpi library is not provided / empty then try to use what + // neuron might have detected and set via env var `NRN_CORENRN_MPI_LIB` + if (corenrn_mpi_lib.empty()) { + char* lib = getenv("NRN_CORENRN_MPI_LIB"); + if (lib != nullptr) { + corenrn_mpi_lib = std::string(lib); + } + } + + // add mpi library argument if found + if (!corenrn_mpi_lib.empty()) { + args.append(" --mpi-lib "); + args.append(corenrn_mpi_lib); + } +} + /** Run CoreNEURON in embedded mode with NEURON * * @param nthread Number of Pthreads on NEURON side @@ -82,8 +108,21 @@ int corenrn_embedded_run(int nthread, int use_fast_imem, const char* mpi_lib, const char* nrn_arg) { + bool corenrn_skip_write_model_to_disk = false; + const std::string corenrn_skip_write_model_to_disk_arg{"--skip-write-model-to-disk"}; + // If "only_simulate_str" exists in "nrn_arg" then avoid transferring any data between NEURON + // and CoreNEURON Instead run the CoreNEURON simulation only with the coredat files provided + // "only_simulate_str" is an internal string and shouldn't be made public to the CoreNEURON CLI + // options so it is removed from "nrn_arg" first construct all arguments as string + std::string filtered_nrn_arg{nrn_arg}; + const auto ind = + static_cast(filtered_nrn_arg).find(corenrn_skip_write_model_to_disk_arg); + if (ind != std::string::npos) { + corenrn_skip_write_model_to_disk = true; + filtered_nrn_arg.erase(ind, corenrn_skip_write_model_to_disk_arg.size()); + } // set coreneuron's internal variable based on neuron arguments - corenrn_embedded = true; + corenrn_embedded = !corenrn_skip_write_model_to_disk; corenrn_embedded_nthread = nthread; coreneuron::nrn_have_gaps = have_gaps != 0; coreneuron::nrn_use_fast_imem = use_fast_imem != 0; @@ -91,10 +130,20 @@ int corenrn_embedded_run(int nthread, // set number of openmp threads set_openmp_threads(nthread); + filtered_nrn_arg.insert(0, " coreneuron "); + filtered_nrn_arg.append(" --skip-mpi-finalize "); + + if (use_mpi) { + filtered_nrn_arg.append(" --mpi "); + } + + add_mpi_library_arg(mpi_lib, filtered_nrn_arg); + // pre-process argumnets from neuron and prepare new for coreneuron int argc; char** argv; - char* new_arg = prepare_args(argc, argv, use_mpi, mpi_lib, nrn_arg); + + char* new_arg = prepare_args(argc, argv, filtered_nrn_arg); // initialize internal arguments mk_mech_init(argc, argv); @@ -117,3 +166,36 @@ int corenrn_embedded_run(int nthread, return corenrn_embedded ? 1 : 0; } } + +/** Initialize mechanisms and run simulation using CoreNEURON + * + * NOTE: this type of usage via MOD file exist in neurodamus + * where we delete entire model and call coreneuron via file + * mode. 
+ */ +int solve_core(int argc, char** argv) { + // first construct argument as string + std::string args; + for (int i = 0; i < argc; i++) { + args.append(argv[i]); + args.append(" "); + } + + // add mpi library argument + add_mpi_library_arg("", args); + + // pre-process arguments and prepare it fore coreneuron + int new_argc; + char** new_argv; + char* arg_to_free = prepare_args(new_argc, new_argv, args); + + mk_mech_init(new_argc, new_argv); + coreneuron::modl_reg(); + int ret = run_solve_core(new_argc, new_argv); + + coreneuron::nrn_cleanup_ion_map(); + free(arg_to_free); + delete[] new_argv; + + return ret; +} diff --git a/src/coreneuron/mechanism/mech/mod2c_core_thread.hpp b/src/coreneuron/mechanism/mech/mod2c_core_thread.hpp index d18160f3a7..1615d20bd2 100644 --- a/src/coreneuron/mechanism/mech/mod2c_core_thread.hpp +++ b/src/coreneuron/mechanism/mech/mod2c_core_thread.hpp @@ -14,7 +14,7 @@ namespace coreneuron { -#define _STRIDE _cntml_padded + _iml +#define CNRN_FLAT_INDEX_IML_ROW(i) ((i) * (_cntml_padded) + (_iml)) #define _threadargscomma_ _iml, _cntml_padded, _p, _ppvar, _thread, _nt, _ml, _v, #define _threadargsprotocomma_ \ @@ -77,7 +77,7 @@ int euler_thread(int neqn, int* var, int* der, F fun, _threadargsproto_) { fun(_threadargs_); // std::invoke in C++17 /* update dependent variables */ for (int i = 0; i < neqn; i++) { - _p[var[i] * _STRIDE] += dt * (_p[der[i] * _STRIDE]); + _p[CNRN_FLAT_INDEX_IML_ROW(var[i])] += dt * (_p[CNRN_FLAT_INDEX_IML_ROW(der[i])]); } return 0; } diff --git a/src/coreneuron/mechanism/mech/modfile/netstim.mod b/src/coreneuron/mechanism/mech/modfile/netstim.mod index 1ec52940ac..86bb7562fe 100644 --- a/src/coreneuron/mechanism/mech/modfile/netstim.mod +++ b/src/coreneuron/mechanism/mech/modfile/netstim.mod @@ -1,377 +1,134 @@ : $Id: netstim.mod 2212 2008-09-08 14:32:26Z hines $ : comments at end -: the Random idiom has been extended to support CoreNEURON. - -: For backward compatibility, noiseFromRandom(hocRandom) can still be used -: as well as the default low-quality scop_exprand generator. -: However, CoreNEURON will not accept usage of the low-quality generator, -: and, if noiseFromRandom is used to specify the random stream, that stream -: must be using the Random123 generator. - -: The recommended idiom for specfication of the random stream is to use -: noiseFromRandom123(id1, id2[, id3]) - -: If any instance uses noiseFromRandom123, then no instance can use noiseFromRandom -: and vice versa. - NEURON { - ARTIFICIAL_CELL NetStim - RANGE interval, number, start - RANGE noise - THREADSAFE : only true if every instance has its own distinct Random - BBCOREPOINTER donotuse + ARTIFICIAL_CELL NetStim + THREADSAFE + RANGE interval, number, start + RANGE noise + RANDOM ranvar } PARAMETER { - interval = 10 (ms) <1e-9,1e9>: time between spikes (msec) - number = 10 <0,1e9> : number of spikes (independent of noise) - start = 50 (ms) : start of first spike - noise = 0 <0,1> : amount of randomness (0.0 - 1.0) + interval = 10 (ms) <1e-9,1e9> : time between spikes (msec) + number = 10 <0,1e9> : number of spikes (independent of noise) + start = 50 (ms) : start of first spike + noise = 0 <0,1> : amount of randomness (0.0 - 1.0) } ASSIGNED { - event (ms) - on - ispike - donotuse -} - -VERBATIM -#if NRNBBCORE /* running in CoreNEURON */ - -#define IFNEWSTYLE(arg) arg - -#else /* running in NEURON */ - -/* - 1 means noiseFromRandom was called when _ran_compat was previously 0 . - 2 means noiseFromRandom123 was called when _ran_compat was previously 0. 
-*/ -static int _ran_compat; /* specifies the noise style for all instances */ -#define IFNEWSTYLE(arg) if(_ran_compat == 2) { arg } - -#endif /* running in NEURON */ -ENDVERBATIM - -:backward compatibility -PROCEDURE seed(x) { -VERBATIM -#if !NRNBBCORE -ENDVERBATIM - set_seed(x) -VERBATIM -#endif -ENDVERBATIM + event (ms) + on + ispike } INITIAL { - - VERBATIM - if (_p_donotuse) { - /* only this style initializes the stream on finitialize */ - IFNEWSTYLE(nrnran123_setseq((nrnran123_State*)_p_donotuse, 0, 0);) - } - ENDVERBATIM - - on = 0 : off - ispike = 0 - if (noise < 0) { - noise = 0 - } - if (noise > 1) { - noise = 1 - } - if (start >= 0 && number > 0) { - on = 1 - : randomize the first spike so on average it occurs at - : start + noise*interval - event = start + invl(interval) - interval*(1. - noise) - : but not earlier than 0 - if (event < 0) { - event = 0 - } - net_send(event, 3) - } + seed(0) + on = 0 : off + ispike = 0 + if (noise < 0) { + noise = 0 + } + if (noise > 1) { + noise = 1 + } + if (start >= 0 && number > 0) { + on = 1 + : randomize the first spike so on average it occurs at + : start + noise*interval + event = start + invl(interval) - interval*(1. - noise) + : but not earlier than 0 + if (event < 0) { + event = 0 + } + net_send(event, 3) + } } PROCEDURE init_sequence(t(ms)) { - if (number > 0) { - on = 1 - event = 0 - ispike = 0 - } + if (number > 0) { + on = 1 + event = 0 + ispike = 0 + } } FUNCTION invl(mean (ms)) (ms) { - if (mean <= 0.) { - mean = .01 (ms) : I would worry if it were 0. - } - if (noise == 0) { - invl = mean - }else{ - invl = (1. - noise)*mean + noise*mean*erand() - } + if (mean <= 0.) { + mean = .01 (ms) : I would worry if it were 0. + } + if (noise == 0) { + invl = mean + } else { + invl = (1. - noise)*mean + noise*mean*erand() + } } -VERBATIM -#include "nrnran123.h" - -#if !NRNBBCORE -/* backward compatibility */ -double nrn_random_pick(void* r); -void* nrn_random_arg(int argpos); -int nrn_random_isran123(void* r, uint32_t* id1, uint32_t* id2, uint32_t* id3); -int nrn_random123_setseq(void* r, uint32_t seq, char which); -int nrn_random123_getseq(void* r, uint32_t* seq, char* which); -#endif -ENDVERBATIM FUNCTION erand() { -VERBATIM - if (_p_donotuse) { - /* - :Supports separate independent but reproducible streams for - : each instance. However, the corresponding hoc Random - : distribution MUST be set to Random.negexp(1) - */ -#if !NRNBBCORE - if (_ran_compat == 2) { - _lerand = nrnran123_negexp((nrnran123_State*)_p_donotuse); - }else{ - _lerand = nrn_random_pick(_p_donotuse); - } -#else - _lerand = nrnran123_negexp((nrnran123_State*)_p_donotuse); -#endif - return _lerand; - }else{ -#if NRNBBCORE - assert(0); -#else - /* - : the old standby. 
Cannot use if reproducible parallel sim - : independent of nhost or which host this instance is on - : is desired, since each instance on this cpu draws from - : the same stream - */ -#endif - } -#if !NRNBBCORE -ENDVERBATIM - erand = exprand(1) -VERBATIM -#endif -ENDVERBATIM -} - -PROCEDURE noiseFromRandom() { -VERBATIM -#if !NRNBBCORE - { - void** pv = (void**)(&_p_donotuse); - if (_ran_compat == 2) { - fprintf(stderr, "NetStim.noiseFromRandom123 was previously called\n"); - assert(0); - } - _ran_compat = 1; - if (ifarg(1)) { - *pv = nrn_random_arg(1); - }else{ - *pv = (void*)0; - } - } -#endif -ENDVERBATIM + erand = random_negexp(ranvar) } - -PROCEDURE noiseFromRandom123() { -VERBATIM -#if !NRNBBCORE - { - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - if (_ran_compat == 1) { - fprintf(stderr, "NetStim.noiseFromRandom was previously called\n"); - assert(0); - } - _ran_compat = 2; - if (*pv) { - nrnran123_deletestream(*pv); - *pv = (nrnran123_State*)0; - } - if (ifarg(3)) { - *pv = nrnran123_newstream3((uint32_t)*getarg(1), (uint32_t)*getarg(2), (uint32_t)*getarg(3)); - }else if (ifarg(2)) { - *pv = nrnran123_newstream((uint32_t)*getarg(1), (uint32_t)*getarg(2)); - } - } -#endif -ENDVERBATIM -} - -DESTRUCTOR { -VERBATIM - if (!noise) { return; } - if (_p_donotuse) { -#if NRNBBCORE - { /* but note that mod2c does not translate DESTRUCTOR */ -#else - if (_ran_compat == 2) { -#endif - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - nrnran123_deletestream(*pv); - *pv = (nrnran123_State*)0; - } - } -ENDVERBATIM +PROCEDURE next_invl() { + if (number > 0) { + event = invl(interval) + } + if (ispike >= number) { + on = 0 + } } -VERBATIM -static void bbcore_write(double* x, int* d, int* xx, int *offset, _threadargsproto_) { - if (!noise) { return; } - /* error if using the legacy scop_exprand */ - if (!_p_donotuse) { - fprintf(stderr, "NetStim: cannot use the legacy scop_negexp generator for the random stream.\n"); - assert(0); - } - if (d) { - char which; - uint32_t* di = ((uint32_t*)d) + *offset; -#if !NRNBBCORE - if (_ran_compat == 1) { - void** pv = (void**)(&_p_donotuse); - /* error if not using Random123 generator */ - if (!nrn_random_isran123(*pv, di, di+1, di+2)) { - fprintf(stderr, "NetStim: Random123 generator is required\n"); - assert(0); - } - nrn_random123_getseq(*pv, di+3, &which); - di[4] = (int)which; - }else{ -#else - { -#endif - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - nrnran123_getids3(*pv, di, di+1, di+2); - nrnran123_getseq(*pv, di+3, &which); - di[4] = (int)which; -#if NRNBBCORE - /* CORENeuron does not call DESTRUCTOR so... */ - nrnran123_deletestream(*pv); - *pv = (nrnran123_State*)0; -#endif - } - /*printf("Netstim bbcore_write %d %d %d\n", di[0], di[1], di[3]);*/ - } - *offset += 5; +NET_RECEIVE (w) { + if (flag == 0) { : external event + if (w > 0 && on == 0) { : turn on spike sequence + : but not if a netsend is on the queue + init_sequence(t) + : randomize the first spike so on average it occurs at + : noise*interval (most likely interval is always 0) + next_invl() + event = event - interval*(1. 
- noise) + net_send(event, 1) + }else if (w < 0) { : turn off spiking definitively + on = 0 + } + } + if (flag == 3) { : from INITIAL + if (on == 1) { : but ignore if turned off by external event + init_sequence(t) + net_send(0, 1) + } + } + if (flag == 1 && on == 1) { + ispike = ispike + 1 + net_event(t) + next_invl() + if (on == 1) { + net_send(event, 1) + } + } } -static void bbcore_read(double* x, int* d, int* xx, int* offset, _threadargsproto_) { - if (!noise) { return; } - /* Generally, CoreNEURON, in the context of psolve, begins with - an empty model so this call takes place in the context of a freshly - created instance and _p_donotuse is not NULL. - However, this function - is also now called from NEURON at the end of coreneuron psolve - in order to transfer back the nrnran123 sequence state. That - allows continuation with a subsequent psolve within NEURON or - properly transfer back to CoreNEURON if we continue the psolve - there. So now, extra logic is needed for this call to work in - a NEURON context. - */ +:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +: Legacy API +: +: Difference: seed(x) merely sets ranvar sequence to ((uint32_t)x, 0) +: noiseFromRandom HOC Random object must use Random123 +: generator. The ids and sequence are merely copied +: into ranvar. +:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: - uint32_t* di = ((uint32_t*)d) + *offset; -#if NRNBBCORE - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - assert(!_p_donotuse); - *pv = nrnran123_newstream3(di[0], di[1], di[2]); - nrnran123_setseq(*pv, di[3], (char)di[4]); -#else - uint32_t id1, id2, id3; - assert(_p_donotuse); - if (_ran_compat == 1) { /* Hoc Random.Random123 */ - void** pv = (void**)(&_p_donotuse); - int b = nrn_random_isran123(*pv, &id1, &id2, &id3); - assert(b); - nrn_random123_setseq(*pv, di[3], (char)di[4]); - }else{ - assert(_ran_compat == 2); - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - nrnran123_getids3(*pv, &id1, &id2, &id3); - nrnran123_setseq(*pv, di[3], (char)di[4]); - } - /* Random123 on NEURON side has same ids as on CoreNEURON side */ - assert(di[0] == id1 && di[1] == id2 && di[2] == id3); -#endif - *offset += 5; -} -ENDVERBATIM +: the Random idiom has been extended to support CoreNEURON. -PROCEDURE next_invl() { - if (number > 0) { - event = invl(interval) - } - if (ispike >= number) { - on = 0 - } -} +: For backward compatibility, noiseFromRandom(hocRandom) can still be used +: as well as the default low-quality scop_exprand generator. +: However, CoreNEURON will not accept usage of the low-quality generator, +: and, if noiseFromRandom is used to specify the random stream, that stream +: must be using the Random123 generator. -NET_RECEIVE (w) { - if (flag == 0) { : external event - if (w > 0 && on == 0) { : turn on spike sequence - : but not if a netsend is on the queue - init_sequence(t) - : randomize the first spike so on average it occurs at - : noise*interval (most likely interval is always 0) - next_invl() - event = event - interval*(1. 
- noise) - net_send(event, 1) - }else if (w < 0) { : turn off spiking definitively - on = 0 - } - } - if (flag == 3) { : from INITIAL - if (on == 1) { : but ignore if turned off by external event - init_sequence(t) - net_send(0, 1) - } - } - if (flag == 1 && on == 1) { - ispike = ispike + 1 - net_event(t) - next_invl() - if (on == 1) { - net_send(event, 1) - } - } -} +: The recommended idiom for specfication of the random stream is to use +: noiseFromRandom123(id1, id2[, id3]) -FUNCTION bbsavestate() { - bbsavestate = 0 - : limited to noiseFromRandom123 -VERBATIM -#if !NRNBBCORE - if (_ran_compat == 2) { - nrnran123_State** pv = (nrnran123_State**)(&_p_donotuse); - if (!*pv) { return 0.0; } - char which; - uint32_t seq; - double *xdir, *xval; - xdir = hoc_pgetarg(1); - if (*xdir == -1.) { *xdir = 2; return 0.0; } - xval = hoc_pgetarg(2); - if (*xdir == 0.) { - nrnran123_getseq(*pv, &seq, &which); - xval[0] = (double)seq; - xval[1] = (double)which; - } - if (*xdir == 1) { - nrnran123_setseq(*pv, (uint32_t)xval[0], (char)xval[1]); - } - } /* else do nothing */ -#endif -ENDVERBATIM -} +: If any instance uses noiseFromRandom123, then no instance can use noiseFromRandom +: and vice versa. COMMENT @@ -409,3 +166,39 @@ its sequence. ENDCOMMENT +PROCEDURE seed(x) { + random_setseq(ranvar, x) +} + +PROCEDURE noiseFromRandom() { +VERBATIM +#if !NRNBBCORE + { + if (ifarg(1)) { + Rand* r = nrn_random_arg(1); + uint32_t id[3]; + if (!nrn_random_isran123(r, &id[0], &id[1], &id[2])) { + hoc_execerr_ext("NetStim: Random.Random123 generator is required."); + } + nrnran123_setids(ranvar, id[0], id[1], id[2]); + char which; + nrn_random123_getseq(r, &id[0], &which); + nrnran123_setseq(ranvar, id[0], which); + } + } +#endif +ENDVERBATIM +} + +PROCEDURE noiseFromRandom123() { +VERBATIM +#if !NRNBBCORE + if (ifarg(3)) { + nrnran123_setids(ranvar, static_cast(*getarg(1)), static_cast(*getarg(2)), static_cast(*getarg(3))); + } else if (ifarg(2)) { + nrnran123_setids(ranvar, static_cast(*getarg(1)), static_cast(*getarg(2)), 0); + } + nrnran123_setseq(ranvar, 0, 0); +#endif +ENDVERBATIM +} diff --git a/src/coreneuron/mechanism/register_mech.cpp b/src/coreneuron/mechanism/register_mech.cpp index 6e3741043d..9853c1243a 100644 --- a/src/coreneuron/mechanism/register_mech.cpp +++ b/src/coreneuron/mechanism/register_mech.cpp @@ -234,6 +234,8 @@ void hoc_register_dparam_semantics(int type, int ix, const char* name) { memb_func[type].dparam_semantics[ix] = -9; } else if (strcmp(name, "fornetcon") == 0) { memb_func[type].dparam_semantics[ix] = -10; + } else if (strcmp(name, "random") == 0) { + memb_func[type].dparam_semantics[ix] = -11; } else { int i = name[0] == '#' ? 1 : 0; int etype = nrn_get_mechtype(name + i); diff --git a/src/coreneuron/mpi/core/nrnmpidec.cpp b/src/coreneuron/mpi/core/nrnmpidec.cpp deleted file mode 100644 index 25764a47a5..0000000000 --- a/src/coreneuron/mpi/core/nrnmpidec.cpp +++ /dev/null @@ -1,69 +0,0 @@ -/* -# ============================================================================= -# Copyright (c) 2016 - 2021 Blue Brain Project/EPFL -# -# See top-level LICENSE file for details. -# =============================================================================. 
-*/ - -#include "../nrnmpi.h" - -namespace coreneuron { - - -/* from nrnmpi.cpp */ -mpi_function nrnmpi_init{"nrnmpi_init_impl"}; -mpi_function nrnmpi_finalize{ - "nrnmpi_finalize_impl"}; -mpi_function - nrnmpi_check_threading_support{"nrnmpi_check_threading_support_impl"}; -mpi_function nrnmpi_write_file{ - "nrnmpi_write_file_impl"}; - -/* from mpispike.c */ -mpi_function nrnmpi_spike_exchange{ - "nrnmpi_spike_exchange_impl"}; -mpi_function - nrnmpi_spike_exchange_compressed{"nrnmpi_spike_exchange_compressed_impl"}; -mpi_function nrnmpi_int_allmax{ - "nrnmpi_int_allmax_impl"}; -mpi_function nrnmpi_int_allgather{ - "nrnmpi_int_allgather_impl"}; -mpi_function nrnmpi_int_alltoall{ - "nrnmpi_int_alltoall_impl"}; -mpi_function nrnmpi_int_alltoallv{ - "nrnmpi_int_alltoallv_impl"}; -mpi_function nrnmpi_dbl_alltoallv{ - "nrnmpi_dbl_alltoallv_impl"}; -mpi_function nrnmpi_dbl_allmin{ - "nrnmpi_dbl_allmin_impl"}; -mpi_function nrnmpi_dbl_allmax{ - "nrnmpi_dbl_allmax_impl"}; -mpi_function nrnmpi_barrier{ - "nrnmpi_barrier_impl"}; -mpi_function nrnmpi_dbl_allreduce{ - "nrnmpi_dbl_allreduce_impl"}; -mpi_function nrnmpi_dbl_allreduce_vec{ - "nrnmpi_dbl_allreduce_vec_impl"}; -mpi_function - nrnmpi_long_allreduce_vec{"nrnmpi_long_allreduce_vec_impl"}; -mpi_function nrnmpi_initialized{ - "nrnmpi_initialized_impl"}; -mpi_function nrnmpi_abort{"nrnmpi_abort_impl"}; -mpi_function nrnmpi_wtime{"nrnmpi_wtime_impl"}; -mpi_function nrnmpi_local_rank{ - "nrnmpi_local_rank_impl"}; -mpi_function nrnmpi_local_size{ - "nrnmpi_local_size_impl"}; -#if NRN_MULTISEND -mpi_function nrnmpi_multisend_comm{ - "nrnmpi_multisend_comm_impl"}; -mpi_function nrnmpi_multisend{ - "nrnmpi_multisend_impl"}; -mpi_function - nrnmpi_multisend_single_advance{"nrnmpi_multisend_single_advance_impl"}; -mpi_function - nrnmpi_multisend_conserve{"nrnmpi_multisend_conserve_impl"}; -#endif // NRN_MULTISEND - -} // namespace coreneuron diff --git a/src/coreneuron/mpi/core/resolve.cpp b/src/coreneuron/mpi/core/resolve.cpp index 877cd98ced..1c4d5874eb 100644 --- a/src/coreneuron/mpi/core/resolve.cpp +++ b/src/coreneuron/mpi/core/resolve.cpp @@ -4,21 +4,23 @@ namespace coreneuron { // Those functions are part of a mechanism to dynamically load mpi or not -void mpi_manager_t::resolve_symbols(void* handle) { - for (auto* ptr: m_function_ptrs) { +void mpi_manager_t::resolve_symbols(void* lib_handle) { + for (size_t i = 0; i < m_num_function_ptrs; ++i) { + auto* ptr = m_function_ptrs[i]; assert(!(*ptr)); - ptr->resolve(handle); + ptr->resolve(lib_handle); assert(*ptr); } } -void mpi_function_base::resolve(void* handle) { +void mpi_function_base::resolve(void* lib_handle) { dlerror(); - void* ptr = dlsym(handle, m_name); + void* ptr = dlsym(lib_handle, m_name); const char* error = dlerror(); if (error) { std::ostringstream oss; - oss << "Could not get symbol " << m_name << " from handle " << handle << ": " << error; + oss << "Could not get symbol '" << m_name << "' from handle '" << lib_handle + << "': " << error; throw std::runtime_error(oss.str()); } assert(ptr); diff --git a/src/coreneuron/mpi/lib/mpispike.cpp b/src/coreneuron/mpi/lib/mpispike.cpp index bbe81ac6c2..b7c1bcf1bf 100644 --- a/src/coreneuron/mpi/lib/mpispike.cpp +++ b/src/coreneuron/mpi/lib/mpispike.cpp @@ -289,46 +289,30 @@ void nrnmpi_barrier_impl() { MPI_Barrier(nrnmpi_comm); } -double nrnmpi_dbl_allreduce_impl(double x, int type) { - double result; - MPI_Op tt; +static MPI_Op type2OP(int type) { if (type == 1) { - tt = MPI_SUM; + return MPI_SUM; } else if (type == 2) { - tt = MPI_MAX; + 
return MPI_MAX; } else { - tt = MPI_MIN; + return MPI_MIN; } - MPI_Allreduce(&x, &result, 1, MPI_DOUBLE, tt, nrnmpi_comm); +} + +double nrnmpi_dbl_allreduce_impl(double x, int type) { + double result; + MPI_Allreduce(&x, &result, 1, MPI_DOUBLE, type2OP(type), nrnmpi_comm); return result; } void nrnmpi_dbl_allreduce_vec_impl(double* src, double* dest, int cnt, int type) { - MPI_Op tt; assert(src != dest); - if (type == 1) { - tt = MPI_SUM; - } else if (type == 2) { - tt = MPI_MAX; - } else { - tt = MPI_MIN; - } - MPI_Allreduce(src, dest, cnt, MPI_DOUBLE, tt, nrnmpi_comm); - return; + MPI_Allreduce(src, dest, cnt, MPI_DOUBLE, type2OP(type), nrnmpi_comm); } void nrnmpi_long_allreduce_vec_impl(long* src, long* dest, int cnt, int type) { - MPI_Op tt; assert(src != dest); - if (type == 1) { - tt = MPI_SUM; - } else if (type == 2) { - tt = MPI_MAX; - } else { - tt = MPI_MIN; - } - MPI_Allreduce(src, dest, cnt, MPI_LONG, tt, nrnmpi_comm); - return; + MPI_Allreduce(src, dest, cnt, MPI_LONG, type2OP(type), nrnmpi_comm); } #if NRN_MULTISEND diff --git a/src/coreneuron/mpi/nrnmpi.h b/src/coreneuron/mpi/nrnmpi.h index 03a1d24613..a011b2ee8b 100644 --- a/src/coreneuron/mpi/nrnmpi.h +++ b/src/coreneuron/mpi/nrnmpi.h @@ -8,10 +8,9 @@ #pragma once +#include #include -#include -#include -#include +#include #include "coreneuron/mpi/nrnmpiuse.h" @@ -25,9 +24,7 @@ struct NRNMPI_Spikebuf { int gid[nrn_spikebuf_size]; double spiketime[nrn_spikebuf_size]; }; -} // namespace coreneuron -namespace coreneuron { struct NRNMPI_Spike { int gid; double spiketime; @@ -39,13 +36,17 @@ struct mpi_function_base; struct mpi_manager_t { void register_function(mpi_function_base* ptr) { - m_function_ptrs.push_back(ptr); + if (m_num_function_ptrs == max_mpi_functions) { + throw std::runtime_error("mpi_manager_t::max_mpi_functions reached"); + } + m_function_ptrs[m_num_function_ptrs++] = ptr; } void resolve_symbols(void* dlsym_handle); private: - std::vector m_function_ptrs; - // true when symbols are resolved + constexpr static auto max_mpi_functions = 128; + std::size_t m_num_function_ptrs{}; + std::array m_function_ptrs{}; }; inline mpi_manager_t& mpi_manager() { @@ -68,28 +69,36 @@ struct mpi_function_base { const char* m_name; }; -// This could be done with a simpler -// template struct function : function_base { ... }; -// pattern in C++17... -template -struct mpi_function {}; - -#define cnrn_make_integral_constant_t(x) std::integral_constant, x> - -template -struct mpi_function>: mpi_function_base { +#ifdef NRNMPI_DYNAMICLOAD +template +struct mpi_function: mpi_function_base { using mpi_function_base::mpi_function_base; template // in principle deducible from `function_ptr` auto operator()(Args&&... args) const { -#ifdef CORENEURON_ENABLE_MPI_DYNAMIC // Dynamic MPI, m_fptr should have been initialised via dlsym. assert(m_fptr); - return (*reinterpret_cast(m_fptr))(std::forward(args)...); + return (*reinterpret_cast(m_fptr))(std::forward(args)...); + } +}; +#define declare_mpi_method(x) \ + inline mpi_function x { \ +#x "_impl" \ + } #else +template +struct mpi_function: mpi_function_base { + using mpi_function_base::mpi_function_base; + template // in principle deducible from `function_ptr` + auto operator()(Args&&... args) const { // No dynamic MPI, use `fptr` directly. Will produce link errors if libmpi.so is not linked. 
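
For readers unfamiliar with the dynamic-MPI plumbing touched above (`mpi_function_base::resolve` and the `mpi_function` call operator), the following standalone sketch shows the same resolve-then-call pattern in isolation. It is not NEURON code: `function_stub` and its `call` helper are hypothetical names, and `getpid` is used only as a conveniently available symbol to resolve on a POSIX system.

```cpp
// Standalone sketch of the dlsym resolve-then-call pattern used by the
// dynamic-MPI wrappers above. Assumption: a POSIX dlfcn environment.
#include <dlfcn.h>
#include <unistd.h>

#include <cstdio>
#include <sstream>
#include <stdexcept>

struct function_stub {
    explicit function_stub(const char* name)
        : m_name{name} {}
    void resolve(void* lib_handle) {
        dlerror();  // clear any stale error state before the lookup
        m_fptr = dlsym(lib_handle, m_name);
        if (const char* error = dlerror()) {
            std::ostringstream oss;
            oss << "Could not get symbol '" << m_name << "' from handle '" << lib_handle
                << "': " << error;
            throw std::runtime_error(oss.str());
        }
    }
    // Call through the resolved pointer, casting it back to the expected signature.
    template <typename Ret, typename... Args>
    Ret call(Args... args) const {
        return reinterpret_cast<Ret (*)(Args...)>(m_fptr)(args...);
    }
    void* m_fptr{nullptr};
    const char* m_name;
};

int main() {
    // Handle covering the running program and its already-loaded dependencies.
    void* handle = dlopen(nullptr, RTLD_NOW | RTLD_GLOBAL);
    if (!handle) {
        return 1;
    }
    function_stub stub{"getpid"};  // stand-in symbol; the real code resolves *_impl symbols
    stub.resolve(handle);
    std::printf("resolved pid = %ld\n", static_cast<long>(stub.call<pid_t>()));
}
```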
return (*fptr)(std::forward(args)...); -#endif } }; +#define declare_mpi_method(x) \ + inline mpi_function x { \ +#x "_impl" \ + } +#endif + } // namespace coreneuron #include "coreneuron/mpi/nrnmpidec.h" diff --git a/src/coreneuron/mpi/nrnmpidec.h b/src/coreneuron/mpi/nrnmpidec.h index 26aebc358d..840da62210 100644 --- a/src/coreneuron/mpi/nrnmpidec.h +++ b/src/coreneuron/mpi/nrnmpidec.h @@ -22,17 +22,16 @@ struct nrnmpi_init_ret_t { int myid; }; extern "C" nrnmpi_init_ret_t nrnmpi_init_impl(int* pargc, char*** pargv, bool is_quiet); -extern mpi_function nrnmpi_init; +declare_mpi_method(nrnmpi_init); extern "C" void nrnmpi_finalize_impl(void); -extern mpi_function nrnmpi_finalize; +declare_mpi_method(nrnmpi_finalize); extern "C" void nrnmpi_check_threading_support_impl(); -extern mpi_function - nrnmpi_check_threading_support; +declare_mpi_method(nrnmpi_check_threading_support); // Write given buffer to a new file using MPI collective I/O extern "C" void nrnmpi_write_file_impl(const std::string& filename, const char* buffer, size_t length); -extern mpi_function nrnmpi_write_file; +declare_mpi_method(nrnmpi_write_file); /* from mpispike.cpp */ @@ -44,8 +43,7 @@ extern "C" int nrnmpi_spike_exchange_impl(int* nin, int nout, NRNMPI_Spikebuf* spbufout, NRNMPI_Spikebuf* spbufin); -extern mpi_function - nrnmpi_spike_exchange; +declare_mpi_method(nrnmpi_spike_exchange); extern "C" int nrnmpi_spike_exchange_compressed_impl(int, unsigned char*&, int, @@ -55,64 +53,58 @@ extern "C" int nrnmpi_spike_exchange_compressed_impl(int, int, unsigned char*, int& ovfl); -extern mpi_function - nrnmpi_spike_exchange_compressed; +declare_mpi_method(nrnmpi_spike_exchange_compressed); extern "C" int nrnmpi_int_allmax_impl(int i); -extern mpi_function nrnmpi_int_allmax; +declare_mpi_method(nrnmpi_int_allmax); extern "C" void nrnmpi_int_allgather_impl(int* s, int* r, int n); -extern mpi_function nrnmpi_int_allgather; +declare_mpi_method(nrnmpi_int_allgather); extern "C" void nrnmpi_int_alltoall_impl(int* s, int* r, int n); -extern mpi_function nrnmpi_int_alltoall; +declare_mpi_method(nrnmpi_int_alltoall); extern "C" void nrnmpi_int_alltoallv_impl(const int* s, const int* scnt, const int* sdispl, int* r, int* rcnt, int* rdispl); -extern mpi_function nrnmpi_int_alltoallv; +declare_mpi_method(nrnmpi_int_alltoallv); extern "C" void nrnmpi_dbl_alltoallv_impl(double* s, int* scnt, int* sdispl, double* r, int* rcnt, int* rdispl); -extern mpi_function nrnmpi_dbl_alltoallv; +declare_mpi_method(nrnmpi_dbl_alltoallv); extern "C" double nrnmpi_dbl_allmin_impl(double x); -extern mpi_function nrnmpi_dbl_allmin; +declare_mpi_method(nrnmpi_dbl_allmin); extern "C" double nrnmpi_dbl_allmax_impl(double x); -extern mpi_function nrnmpi_dbl_allmax; +declare_mpi_method(nrnmpi_dbl_allmax); extern "C" void nrnmpi_barrier_impl(void); -extern mpi_function nrnmpi_barrier; +declare_mpi_method(nrnmpi_barrier); extern "C" double nrnmpi_dbl_allreduce_impl(double x, int type); -extern mpi_function nrnmpi_dbl_allreduce; +declare_mpi_method(nrnmpi_dbl_allreduce); extern "C" void nrnmpi_dbl_allreduce_vec_impl(double* src, double* dest, int cnt, int type); -extern mpi_function - nrnmpi_dbl_allreduce_vec; +declare_mpi_method(nrnmpi_dbl_allreduce_vec); extern "C" void nrnmpi_long_allreduce_vec_impl(long* src, long* dest, int cnt, int type); -extern mpi_function - nrnmpi_long_allreduce_vec; +declare_mpi_method(nrnmpi_long_allreduce_vec); extern "C" bool nrnmpi_initialized_impl(); -extern mpi_function nrnmpi_initialized; 
+declare_mpi_method(nrnmpi_initialized); extern "C" void nrnmpi_abort_impl(int); -extern mpi_function nrnmpi_abort; +declare_mpi_method(nrnmpi_abort); extern "C" double nrnmpi_wtime_impl(); -extern mpi_function nrnmpi_wtime; +declare_mpi_method(nrnmpi_wtime); extern "C" int nrnmpi_local_rank_impl(); -extern mpi_function nrnmpi_local_rank; +declare_mpi_method(nrnmpi_local_rank); extern "C" int nrnmpi_local_size_impl(); -extern mpi_function nrnmpi_local_size; +declare_mpi_method(nrnmpi_local_size); #if NRN_MULTISEND extern "C" void nrnmpi_multisend_comm_impl(); -extern mpi_function - nrnmpi_multisend_comm; +declare_mpi_method(nrnmpi_multisend_comm); extern "C" void nrnmpi_multisend_impl(NRNMPI_Spike* spk, int n, int* hosts); -extern mpi_function nrnmpi_multisend; +declare_mpi_method(nrnmpi_multisend); extern "C" int nrnmpi_multisend_single_advance_impl(NRNMPI_Spike* spk); -extern mpi_function - nrnmpi_multisend_single_advance; +declare_mpi_method(nrnmpi_multisend_single_advance); extern "C" int nrnmpi_multisend_conserve_impl(int nsend, int nrecv); -extern mpi_function - nrnmpi_multisend_conserve; +declare_mpi_method(nrnmpi_multisend_conserve); #endif } // namespace coreneuron diff --git a/src/coreneuron/mpi/nrnmpiuse.h b/src/coreneuron/mpi/nrnmpiuse.h index 7253f7d615..eec7609094 100644 --- a/src/coreneuron/mpi/nrnmpiuse.h +++ b/src/coreneuron/mpi/nrnmpiuse.h @@ -19,14 +19,8 @@ #define NRN_MULTISEND 1 #endif -/* define to 1 if you want parallel distributed cells (and gap junctions) */ -#define PARANEURON 1 - /* define to 1 if you want the MUSIC - MUlti SImulation Coordinator */ #undef NRN_MUSIC /* define to the dll path if you want to load automatically */ #undef DLL_DEFAULT_FNAME - -/* Number of times to retry a failed open */ -#undef FILE_OPEN_RETRY diff --git a/src/coreneuron/network/netcon.hpp b/src/coreneuron/network/netcon.hpp index 75441aa2aa..613a454f43 100644 --- a/src/coreneuron/network/netcon.hpp +++ b/src/coreneuron/network/netcon.hpp @@ -11,9 +11,6 @@ #include "coreneuron/mpi/nrnmpi.h" #undef check -#if MAC -#define NetCon nrniv_Dinfo -#endif namespace coreneuron { class PreSyn; class InputPreSyn; diff --git a/src/coreneuron/nrniv/nrniv_decl.h b/src/coreneuron/nrniv/nrniv_decl.h index 3cc20db667..00cdaef16d 100644 --- a/src/coreneuron/nrniv/nrniv_decl.h +++ b/src/coreneuron/nrniv/nrniv_decl.h @@ -48,6 +48,7 @@ extern void nrn_set_extra_thread0_vdata(void); extern Point_process* nrn_artcell_instantiate(const char* mechname); extern int nrnmpi_spike_compress(int nspike, bool gidcompress, int xchng); extern bool nrn_use_bin_queue_; +extern std::vector& nrn_mech_random_indices(int type); extern void nrn_outputevent(unsigned char, double); extern void ncs2nrn_integrate(double tstop); diff --git a/src/coreneuron/sim/scopmath/crout_thread.hpp b/src/coreneuron/sim/scopmath/crout_thread.hpp index 3ac9c66f2c..fec10fe9b7 100644 --- a/src/coreneuron/sim/scopmath/crout_thread.hpp +++ b/src/coreneuron/sim/scopmath/crout_thread.hpp @@ -18,8 +18,8 @@ namespace coreneuron { #error "naming clash on crout_thread.hpp-internal macros" #endif #define scopmath_crout_b(arg) b[scopmath_crout_ix(arg)] -#define scopmath_crout_ix(arg) ((arg) *_STRIDE) -#define scopmath_crout_y(arg) _p[y[arg] * _STRIDE] +#define scopmath_crout_ix(arg) CNRN_FLAT_INDEX_IML_ROW(arg) +#define scopmath_crout_y(arg) _p[CNRN_FLAT_INDEX_IML_ROW(y[arg])] /** * Performs an LU triangular factorization of a real matrix by the Crout diff --git a/src/coreneuron/sim/scopmath/newton_thread.hpp 
b/src/coreneuron/sim/scopmath/newton_thread.hpp index dc70b643ae..44076c986a 100644 --- a/src/coreneuron/sim/scopmath/newton_thread.hpp +++ b/src/coreneuron/sim/scopmath/newton_thread.hpp @@ -21,9 +21,9 @@ namespace coreneuron { #if defined(scopmath_newton_ix) || defined(scopmath_newton_s) || defined(scopmath_newton_x) #error "naming clash on newton_thread.hpp-internal macros" #endif -#define scopmath_newton_ix(arg) ((arg) *_STRIDE) -#define scopmath_newton_s(arg) _p[s[arg] * _STRIDE] -#define scopmath_newton_x(arg) _p[(arg) *_STRIDE] +#define scopmath_newton_ix(arg) CNRN_FLAT_INDEX_IML_ROW(arg) +#define scopmath_newton_s(arg) _p[CNRN_FLAT_INDEX_IML_ROW(s[arg])] +#define scopmath_newton_x(arg) _p[CNRN_FLAT_INDEX_IML_ROW(arg)] namespace detail { /** * @brief Calculate the Jacobian matrix using finite central differences. diff --git a/src/coreneuron/sim/scopmath/sparse_thread.hpp b/src/coreneuron/sim/scopmath/sparse_thread.hpp index 8d84cbb0e8..71978197fe 100644 --- a/src/coreneuron/sim/scopmath/sparse_thread.hpp +++ b/src/coreneuron/sim/scopmath/sparse_thread.hpp @@ -436,7 +436,7 @@ inline void init_coef_list(SparseObj* so, int _iml) { defined(scopmath_sparse_x) #error "naming clash on sparse_thread.hpp-internal macros" #endif -#define scopmath_sparse_ix(arg) ((arg) *_STRIDE) +#define scopmath_sparse_ix(arg) CNRN_FLAT_INDEX_IML_ROW(arg) inline void subrow(SparseObj* so, Elm* pivot, Elm* rowsub, int _iml) { unsigned int const _cntml_padded{so->_cntml_padded}; double const r{rowsub->value[_iml] / pivot->value[_iml]}; @@ -602,7 +602,7 @@ int sparse_thread(SparseObj* so, #undef scopmath_sparse_d #undef scopmath_sparse_ix #undef scopmath_sparse_s -#define scopmath_sparse_x(arg) _p[x[arg] * _STRIDE] +#define scopmath_sparse_x(arg) _p[CNRN_FLAT_INDEX_IML_ROW(x[arg])] /* for solving ax=b */ template int _cvode_sparse_thread(void** vpr, int n, int* x, SPFUN fun, _threadargsproto_) { diff --git a/src/coreneuron/sim/scopmath/ssimplic_thread.hpp b/src/coreneuron/sim/scopmath/ssimplic_thread.hpp index cd341a6274..4b31dc4706 100644 --- a/src/coreneuron/sim/scopmath/ssimplic_thread.hpp +++ b/src/coreneuron/sim/scopmath/ssimplic_thread.hpp @@ -13,7 +13,7 @@ namespace coreneuron { #if defined(scopmath_ssimplic_s) #error "naming clash on ssimplic_thread.hpp-internal macros" #endif -#define scopmath_ssimplic_s(arg) _p[s[arg] * _STRIDE] +#define scopmath_ssimplic_s(arg) _p[CNRN_FLAT_INDEX_IML_ROW(s[arg])] static int check_state(int n, int* s, _threadargsproto_) { bool flag{true}; for (int i = 0; i < n; i++) { diff --git a/src/coreneuron/utils/memory.h b/src/coreneuron/utils/memory.h index 4f388c58dc..81a4ce3006 100644 --- a/src/coreneuron/utils/memory.h +++ b/src/coreneuron/utils/memory.h @@ -10,6 +10,7 @@ #include #include +#include #include #include "coreneuron/utils/nrn_assert.h" diff --git a/src/coreneuron/utils/nrnoc_aux.cpp b/src/coreneuron/utils/nrnoc_aux.cpp index 0ff43f3d2d..6d51dbe1f6 100644 --- a/src/coreneuron/utils/nrnoc_aux.cpp +++ b/src/coreneuron/utils/nrnoc_aux.cpp @@ -21,7 +21,7 @@ int v_structure_change; int diam_changed; #define MAXERRCOUNT 5 int hoc_errno_count; -const char* bbcore_write_version = "1.6"; // Allow multiple gid and PreSyn per real cell. 
+const char* bbcore_write_version = "1.7"; // NMODLRandom char* pnt_name(Point_process* pnt) { return corenrn.get_memb_func(pnt->_type).sym; diff --git a/src/coreneuron/utils/profile/profiler_interface.h b/src/coreneuron/utils/profile/profiler_interface.h index 2c68a0ae18..f4bdefcadc 100644 --- a/src/coreneuron/utils/profile/profiler_interface.h +++ b/src/coreneuron/utils/profile/profiler_interface.h @@ -11,7 +11,7 @@ #include #include -#if defined(CORENEURON_CALIPER) +#if defined(NRN_CALIPER) #include #endif @@ -141,7 +141,7 @@ struct Instrumentor { #pragma clang diagnostic pop }; -#if defined(CORENEURON_CALIPER) +#if defined(NRN_CALIPER) struct Caliper { inline static void phase_begin(const char* name) { @@ -267,7 +267,7 @@ struct NullInstrumentor { }; using InstrumentorImpl = detail::Instrumentor< -#if defined CORENEURON_CALIPER +#if defined NRN_CALIPER detail::Caliper, #endif #ifdef CORENEURON_CUDA_PROFILING diff --git a/src/coreneuron/utils/randoms/nrnran123.cpp b/src/coreneuron/utils/randoms/nrnran123.cpp index a618312a34..2f1b12cb3d 100644 --- a/src/coreneuron/utils/randoms/nrnran123.cpp +++ b/src/coreneuron/utils/randoms/nrnran123.cpp @@ -88,7 +88,7 @@ namespace random123_global { #else #define g_k_qualifiers #endif -g_k_qualifiers philox4x32_key_t g_k{}; +g_k_qualifiers philox4x32_key_t g_k{{0, 0}}; // Cannot refer to g_k directly from a nrn_pragma_acc(routine seq) method like // coreneuron_random123_philox4x32_helper, and cannot have this inlined there at diff --git a/src/coreneuron/utils/randoms/nrnran123.h b/src/coreneuron/utils/randoms/nrnran123.h index d4108612d0..efd4760691 100644 --- a/src/coreneuron/utils/randoms/nrnran123.h +++ b/src/coreneuron/utils/randoms/nrnran123.h @@ -41,10 +41,7 @@ of the full distribution available from #include -// Some files are compiled with DISABLE_OPENACC, and some builds have no GPU -// support at all. In these two cases, request that the random123 state is -// allocated using new/delete instead of CUDA unified memory. 
-#if defined(CORENEURON_ENABLE_GPU) && !defined(DISABLE_OPENACC) +#if defined(CORENEURON_ENABLE_GPU) #define CORENRN_RAN123_USE_UNIFIED_MEMORY true #else #define CORENRN_RAN123_USE_UNIFIED_MEMORY false @@ -137,6 +134,15 @@ inline double nrnran123_dblpick(nrnran123_State* s) { return nrnran123_uint2dbl(nrnran123_ipick(s)); } +// same as dblpick +inline double nrnran123_uniform(nrnran123_State* s) { + return nrnran123_uint2dbl(nrnran123_ipick(s)); +} + +inline double nrnran123_uniform(nrnran123_State* s, double low, double high) { + return low + nrnran123_uint2dbl(nrnran123_ipick(s)) * (high - low); +} + /* this could be called from openacc parallel construct (in INITIAL block) */ inline void nrnran123_setseq(nrnran123_State* s, uint32_t seq, char which) { if (which > 3) { @@ -148,6 +154,22 @@ inline void nrnran123_setseq(nrnran123_State* s, uint32_t seq, char which) { s->r = coreneuron_random123_philox4x32_helper(s); } +/* this could be called from openacc parallel construct (in INITIAL block) */ +inline void nrnran123_setseq(nrnran123_State* s, double seq34) { + if (seq34 < 0.0) { + seq34 = 0.0; + } + if (seq34 > double(0XffffffffffLL)) { + seq34 = 0.0; + } + + // at least 64 bits even on 32 bit machine (could be more) + unsigned long long x = ((unsigned long long) seq34) & 0X3ffffffffLL; + char which = x & 0X3; + uint32_t seq = x >> 2; + nrnran123_setseq(s, seq, which); +} + // nrnran123_negexp min value is 2.3283064e-10, max is 22.18071, mean 1.0 inline double nrnran123_negexp(nrnran123_State* s) { return -std::log(nrnran123_dblpick(s)); diff --git a/src/coreneuron/utils/units.hpp b/src/coreneuron/utils/units.hpp index de44343fe6..0e73d38dc0 100644 --- a/src/coreneuron/utils/units.hpp +++ b/src/coreneuron/utils/units.hpp @@ -8,14 +8,10 @@ #pragma once namespace coreneuron { namespace units { -#if CORENEURON_USE_LEGACY_UNITS == 1 -constexpr double faraday{96485.309}; -constexpr double gasconstant{8.3134}; -#else /* NMODL translated MOD files get unit constants typically from - * share/lib/nrnunits.lib.in. But there were other source files that hardcode + * share/lib/nrnunits.lib. But there were other source files that hardcode * some of the constants. Here we gather a few modern units into a single place - * (but, unfortunately, also in nrnunits.lib.in). Legacy units cannot be + * (but, unfortunately, also in nrnunits.lib). Legacy units cannot be * gathered here because they can differ slightly from place to place. * * These come from https://physics.nist.gov/cuu/Constants/index.html. @@ -33,6 +29,5 @@ constexpr double faraday{detail::electron_charge * detail::avogadro_number}; // // coulomb/mol constexpr double gasconstant{detail::boltzmann * detail::avogadro_number}; // 8.314462618... // joule/mol-K -#endif } // namespace units } // namespace coreneuron diff --git a/src/gnu/ACG.cpp b/src/gnu/ACG.cpp index ecba08420b..0f9136b347 100755 --- a/src/gnu/ACG.cpp +++ b/src/gnu/ACG.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> // This may look like C code, but it is really -*- C++ -*- /* Copyright (C) 1989 Free Software Foundation @@ -20,7 +19,7 @@ Foundation, 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. #ifdef __GNUG__ #pragma implementation #endif -#include +#include "ACG.h" #include // diff --git a/src/gnu/ACG.h b/src/gnu/ACG.h index 8cee68fafa..63230e0031 100755 --- a/src/gnu/ACG.h +++ b/src/gnu/ACG.h @@ -18,7 +18,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
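
The `nrnran123_setseq(nrnran123_State*, double seq34)` overload added above packs the 32-bit sequence counter and the 2-bit `which` selector into one double (`seq*4 + which`) so the pair can round-trip through interfaces that only carry doubles. The sketch below works through that arithmetic with hypothetical encode/decode helpers; the mask and shift mirror the inline function above.

```cpp
// Minimal sketch (hypothetical helpers, not NEURON API) of the seq*4+which
// packing: 34 bits total, the low 2 bits select `which` (0..3), the upper
// 32 bits are the Random123 sequence counter.
#include <cassert>
#include <cstdint>

double encode_seq4which(std::uint32_t seq, char which) {
    // Exactly representable: a 34-bit integer fits in a double's 53-bit mantissa.
    return static_cast<double>(seq) * 4.0 + which;
}

void decode_seq4which(double seq4which, std::uint32_t& seq, char& which) {
    auto x = static_cast<unsigned long long>(seq4which) & 0x3ffffffffULL;
    which = static_cast<char>(x & 0x3);        // low 2 bits: which word of the counter
    seq = static_cast<std::uint32_t>(x >> 2);  // remaining 32 bits: sequence number
}

int main() {
    std::uint32_t seq{};
    char which{};
    decode_seq4which(encode_seq4which(123456789u, 3), seq, which);
    assert(seq == 123456789u && which == 3);
}
```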
#ifndef _ACG_h #define _ACG_h 1 -#include +#include "RNG.h" #include #ifdef __GNUG__ //#pragma interface diff --git a/src/gnu/Binomial.cpp b/src/gnu/Binomial.cpp index 3794adc75a..4def0ff429 100755 --- a/src/gnu/Binomial.cpp +++ b/src/gnu/Binomial.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Binomial.h" double Binomial::operator()() { diff --git a/src/gnu/Binomial.h b/src/gnu/Binomial.h index 49d9c9170f..f787477ed8 100755 --- a/src/gnu/Binomial.h +++ b/src/gnu/Binomial.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _Binomial_h 1 -#include +#include "Random.h" class Binomial: public Random { protected: diff --git a/src/gnu/CMakeLists.txt b/src/gnu/CMakeLists.txt new file mode 100644 index 0000000000..e16ae608b9 --- /dev/null +++ b/src/gnu/CMakeLists.txt @@ -0,0 +1,28 @@ +add_library( + nrngnu STATIC + ACG.cpp + Binomial.cpp + DiscUnif.cpp + Erlang.cpp + Geom.cpp + HypGeom.cpp + isaac64.cpp + Isaac64RNG.cpp + LogNorm.cpp + MCellRan4RNG.cpp + mcran4.cpp + MLCG.cpp + NegExp.cpp + Normal.cpp + nrnisaac.cpp + nrnran123.cpp + Poisson.cpp + Rand.cpp + Random.cpp + RndInt.cpp + RNG.cpp + Uniform.cpp + Weibull.cpp) +set_property(TARGET nrngnu PROPERTY POSITION_INDEPENDENT_CODE ON) +target_include_directories(nrngnu SYSTEM PRIVATE "${PROJECT_SOURCE_DIR}/external/Random123/include") +target_compile_definitions(nrngnu PRIVATE ${NRN_R123_COMPILE_DEFS}) diff --git a/src/gnu/DiscUnif.cpp b/src/gnu/DiscUnif.cpp index 72a9df8222..b41d1ca9ec 100755 --- a/src/gnu/DiscUnif.cpp +++ b/src/gnu/DiscUnif.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "DiscUnif.h" double DiscreteUniform::operator()() { diff --git a/src/gnu/DiscUnif.h b/src/gnu/DiscUnif.h index 5fa98eb4a9..9f1bafc63c 100755 --- a/src/gnu/DiscUnif.h +++ b/src/gnu/DiscUnif.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _DiscreteUniform_h 1 -#include +#include "Random.h" // // The interval [lo..hi) diff --git a/src/gnu/Erlang.cpp b/src/gnu/Erlang.cpp index f9638df073..ce03930f35 100755 --- a/src/gnu/Erlang.cpp +++ b/src/gnu/Erlang.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Erlang.h" double Erlang::operator()() { diff --git a/src/gnu/Erlang.h b/src/gnu/Erlang.h index d6c8ccd5e3..f000d3a568 100755 --- a/src/gnu/Erlang.h +++ b/src/gnu/Erlang.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
#endif #define _Erlang_h 1 -#include +#include "Random.h" class Erlang: public Random { protected: diff --git a/src/gnu/Geom.cpp b/src/gnu/Geom.cpp index d2883f5469..6f90799e3b 100755 --- a/src/gnu/Geom.cpp +++ b/src/gnu/Geom.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Geom.h" double Geometric::operator()() { diff --git a/src/gnu/Geom.h b/src/gnu/Geom.h index 231744fe41..5f91085f51 100755 --- a/src/gnu/Geom.h +++ b/src/gnu/Geom.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _Geometric_h -#include +#include "Random.h" class Geometric: public Random { protected: diff --git a/src/gnu/HypGeom.cpp b/src/gnu/HypGeom.cpp index 9dc1ba5d2b..b3d0288f50 100755 --- a/src/gnu/HypGeom.cpp +++ b/src/gnu/HypGeom.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation @@ -19,8 +18,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "HypGeom.h" double HyperGeometric::operator()() { diff --git a/src/gnu/HypGeom.h b/src/gnu/HypGeom.h index ea99afe70b..cc91112e4b 100755 --- a/src/gnu/HypGeom.h +++ b/src/gnu/HypGeom.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _HyperGeometric_h -#include +#include "Random.h" class HyperGeometric: public Random { protected: diff --git a/src/gnu/Isaac64RNG.cpp b/src/gnu/Isaac64RNG.cpp new file mode 100644 index 0000000000..559c307d4d --- /dev/null +++ b/src/gnu/Isaac64RNG.cpp @@ -0,0 +1,20 @@ +#include "Isaac64RNG.hpp" + +uint32_t Isaac64::cnt_ = 0; + +Isaac64::Isaac64(std::uint32_t seed) { + if (cnt_ == 0) { + cnt_ = 0xffffffff; + } + --cnt_; + seed_ = seed; + if (seed_ == 0) { + seed_ = cnt_; + } + rng_ = nrnisaac_new(); + reset(); +} + +Isaac64::~Isaac64() { + nrnisaac_delete(rng_); +} diff --git a/src/gnu/Isaac64RNG.hpp b/src/gnu/Isaac64RNG.hpp new file mode 100644 index 0000000000..c825c741ca --- /dev/null +++ b/src/gnu/Isaac64RNG.hpp @@ -0,0 +1,33 @@ +#pragma once + +#include + +#include "RNG.h" +#include "nrnisaac.h" + +class Isaac64: public RNG { + public: + Isaac64(std::uint32_t seed = 0); + ~Isaac64(); + std::uint32_t asLong() { + return nrnisaac_uint32_pick(rng_); + } + void reset() { + nrnisaac_init(rng_, seed_); + } + double asDouble() { + return nrnisaac_dbl_pick(rng_); + } + std::uint32_t seed() { + return seed_; + } + void seed(std::uint32_t s) { + seed_ = s; + reset(); + } + + private: + std::uint32_t seed_; + void* rng_; + static std::uint32_t cnt_; +}; diff --git a/src/gnu/LogNorm.cpp b/src/gnu/LogNorm.cpp index 14f75f3fca..89d457a7e9 100755 --- a/src/gnu/LogNorm.cpp +++ b/src/gnu/LogNorm.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,10 +17,10 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
#ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Normal.h" -#include +#include "LogNorm.h" #ifndef M_E #define M_E 2.71828182845904523536 diff --git a/src/gnu/LogNorm.h b/src/gnu/LogNorm.h index ccc94ce4be..9218382617 100755 --- a/src/gnu/LogNorm.h +++ b/src/gnu/LogNorm.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _LogNormal_h -#include +#include "Normal.h" class LogNormal: public Normal { protected: diff --git a/src/gnu/MCellRan4RNG.cpp b/src/gnu/MCellRan4RNG.cpp new file mode 100644 index 0000000000..43b732cbe1 --- /dev/null +++ b/src/gnu/MCellRan4RNG.cpp @@ -0,0 +1,15 @@ +#include "MCellRan4RNG.hpp" + +MCellRan4::MCellRan4(std::uint32_t ihigh, std::uint32_t ilow) { + ++cnt_; + ilow_ = ilow; + ihigh_ = ihigh; + if (ihigh_ == 0) { + ihigh_ = cnt_; + ihigh_ = (std::uint32_t) asLong(); + } + orig_ = ihigh_; +} +MCellRan4::~MCellRan4() {} + +std::uint32_t MCellRan4::cnt_ = 0; diff --git a/src/gnu/MCellRan4RNG.hpp b/src/gnu/MCellRan4RNG.hpp new file mode 100644 index 0000000000..a4356562a9 --- /dev/null +++ b/src/gnu/MCellRan4RNG.hpp @@ -0,0 +1,34 @@ +#pragma once + +#include + +#include "RNG.h" +#include "mcran4.h" + +// The decision that has to be made is whether each generator instance +// should have its own seed or only one seed for all. We choose separate +// seed for each but if arg not present or 0 then seed chosen by system. + +// the addition of ilow > 0 means that value is used for the lowindex +// instead of the mcell_ran4_init global 32 bit lowindex. + +class MCellRan4: public RNG { + public: + MCellRan4(std::uint32_t ihigh = 0, std::uint32_t ilow = 0); + virtual ~MCellRan4(); + virtual std::uint32_t asLong() { + return (std::uint32_t) (ilow_ == 0 ? mcell_iran4(&ihigh_) : nrnRan4int(&ihigh_, ilow_)); + } + virtual void reset() { + ihigh_ = orig_; + } + virtual double asDouble() { + return (ilow_ == 0 ? mcell_ran4a(&ihigh_) : nrnRan4dbl(&ihigh_, ilow_)); + } + std::uint32_t ihigh_; + std::uint32_t orig_; + std::uint32_t ilow_; + + private: + static std::uint32_t cnt_; +}; diff --git a/src/gnu/MLCG.cpp b/src/gnu/MLCG.cpp index aa8b10f4b8..dce8358a43 100755 --- a/src/gnu/MLCG.cpp +++ b/src/gnu/MLCG.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> // This may look like C code, but it is really -*- C++ -*- /* Copyright (C) 1989 Free Software Foundation @@ -18,7 +17,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include +#include "MLCG.h" // // SEED_TABLE_SIZE must be a power of 2 // diff --git a/src/gnu/MLCG.h b/src/gnu/MLCG.h index e0ff5c4b62..2b566acb91 100755 --- a/src/gnu/MLCG.h +++ b/src/gnu/MLCG.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. //#pragma interface #endif -#include +#include "RNG.h" #include // diff --git a/src/gnu/NegExp.cpp b/src/gnu/NegExp.cpp index caf5afc913..99ad65cde0 100755 --- a/src/gnu/NegExp.cpp +++ b/src/gnu/NegExp.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "NegExp.h" double NegativeExpntl::operator()() { diff --git a/src/gnu/NegExp.h b/src/gnu/NegExp.h index 1be7d68e8b..1b8a48b032 100755 --- a/src/gnu/NegExp.h +++ b/src/gnu/NegExp.h @@ -27,7 +27,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
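
The new `Isaac64RNG.hpp` and `MCellRan4RNG.hpp` wrappers above expose their generators through the common `RNG` interface (`asLong`, `asDouble`, `reset`). Below is a hypothetical usage sketch, assuming the `src/gnu` headers added in this diff are on the include path and `nrngnu` is linked.

```cpp
// Hypothetical consumer of the RNG wrappers added in this diff.
#include <cstdio>

#include "Isaac64RNG.hpp"
#include "MCellRan4RNG.hpp"

int main() {
    Isaac64 isaac{42};      // explicit seed; 0 would let the class pick one from its counter
    MCellRan4 mcell{0, 7};  // ihigh == 0: the high index is chosen by the generator itself

    std::printf("isaac64:   %u %f\n", static_cast<unsigned>(isaac.asLong()), isaac.asDouble());
    std::printf("mcellran4: %u %f\n", static_cast<unsigned>(mcell.asLong()), mcell.asDouble());

    isaac.reset();  // both wrappers can be rewound to the start of their stream
    mcell.reset();
}
```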
// // -#include +#include "Random.h" class NegativeExpntl: public Random { protected: diff --git a/src/gnu/Normal.cpp b/src/gnu/Normal.cpp index 25aab78752..11ef2e21be 100755 --- a/src/gnu/Normal.cpp +++ b/src/gnu/Normal.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Normal.h" // // See Simulation, Modelling & Analysis by Law & Kelton, pp259 // diff --git a/src/gnu/Normal.h b/src/gnu/Normal.h index f96ed6dc81..fbbc8716be 100755 --- a/src/gnu/Normal.h +++ b/src/gnu/Normal.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _Normal_h -#include +#include "Random.h" class Normal: public Random { char haveCachedNormal; diff --git a/src/gnu/NrnRandom123RNG.cpp b/src/gnu/NrnRandom123RNG.cpp new file mode 100644 index 0000000000..e69de29bb2 diff --git a/src/gnu/NrnRandom123RNG.hpp b/src/gnu/NrnRandom123RNG.hpp new file mode 100644 index 0000000000..5e790c939c --- /dev/null +++ b/src/gnu/NrnRandom123RNG.hpp @@ -0,0 +1,28 @@ +#pragma once + +#include + +#include "nrnran123.h" +#include "RNG.h" + +class NrnRandom123: public RNG { + public: + NrnRandom123(std::uint32_t id1, std::uint32_t id2, std::uint32_t id3 = 0); + ~NrnRandom123(); + std::uint32_t asLong() { + return nrnran123_ipick(s_); + } + double asDouble() { + return nrnran123_dblpick(s_); + } + void reset() { + nrnran123_setseq(s_, 0, 0); + } + nrnran123_State* s_; +}; +NrnRandom123::NrnRandom123(std::uint32_t id1, std::uint32_t id2, std::uint32_t id3) { + s_ = nrnran123_newstream3(id1, id2, id3); +} +NrnRandom123::~NrnRandom123() { + nrnran123_deletestream(s_); +} diff --git a/src/gnu/Poisson.cpp b/src/gnu/Poisson.cpp index 77a3b96cdf..b032ebd12f 100755 --- a/src/gnu/Poisson.cpp +++ b/src/gnu/Poisson.cpp @@ -1,5 +1,3 @@ -#include <../../nrnconf.h> - /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -19,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Poisson.h" double Poisson::operator()() { diff --git a/src/gnu/Poisson.h b/src/gnu/Poisson.h index 4bc7054353..4aad14d158 100755 --- a/src/gnu/Poisson.h +++ b/src/gnu/Poisson.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _Poisson_h -#include +#include "Random.h" class Poisson: public Random { protected: diff --git a/src/gnu/RNG.cpp b/src/gnu/RNG.cpp index 95dd5b3b39..40c3057364 100755 --- a/src/gnu/RNG.cpp +++ b/src/gnu/RNG.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> // This may look like C code, but it is really -*- C++ -*- /* Copyright (C) 1989 Free Software Foundation @@ -19,7 +18,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #pragma implementation #endif #include -#include +#include "RNG.h" // These two static fields get initialized by RNG::RNG(). PrivateRNGSingleType RNG::singleMantissa; diff --git a/src/gnu/RNG.h b/src/gnu/RNG.h index 455aeeff5d..578be4bd85 100755 --- a/src/gnu/RNG.h +++ b/src/gnu/RNG.h @@ -26,18 +26,9 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. * int32_t and uint32_t have been defined by the configure procedure. Just * use these in place of the ones that libg++ used to provide. 
*/ -#if defined(HAVE_INTTYPES_H) -#include -#elif defined(HAVE_SYS_TYPES_H) -#include -#elif defined(HAVE__G_CONFIG_H) -#include <_G_config.h> -typedef _G_int32_t int32_t; -typedef _G_uint32_t uint32_t; -#endif - -#include -#include +#include +#include +#include union PrivateRNGSingleType { // used to access floats as unsigneds float s; diff --git a/src/gnu/Rand.cpp b/src/gnu/Rand.cpp new file mode 100644 index 0000000000..a3b526de5e --- /dev/null +++ b/src/gnu/Rand.cpp @@ -0,0 +1,19 @@ +#include "Rand.hpp" + +#include "ACG.h" +#include "Normal.h" + +Rand::Rand(unsigned long seed, int size, Object* obj) { + // printf("Rand\n"); + gen = new ACG(seed, size); + rand = new Normal(0., 1., gen); + type_ = 0; + obj_ = obj; +} + +Rand::~Rand() { + // printf("~Rand\n"); + delete gen; + delete rand; +} + diff --git a/src/ivoc/random1.h b/src/gnu/Rand.hpp similarity index 63% rename from src/ivoc/random1.h rename to src/gnu/Rand.hpp index 8fedd8f5b0..f5b47a6a61 100644 --- a/src/ivoc/random1.h +++ b/src/gnu/Rand.hpp @@ -1,14 +1,20 @@ -#ifndef random1_h -#define random1_h +#pragma once #include "RNG.h" #include "Random.h" struct Object; +/* type_: + * 0: ACG + * 1: MLCG + * 2: MCellRan4 + * 3: Isaac64 + * 4: Random123 + */ class Rand { public: - Rand(unsigned long seed = 0, int size = 55, Object* obj = NULL); + Rand(unsigned long seed = 0, int size = 55, Object* obj = nullptr); ~Rand(); RNG* gen; Random* rand; @@ -16,5 +22,3 @@ class Rand { // double* looks like random variable that gets changed on every fadvance Object* obj_; }; - -#endif diff --git a/src/gnu/Random.cpp b/src/gnu/Random.cpp index d0d52f1aff..1e12a096cb 100755 --- a/src/gnu/Random.cpp +++ b/src/gnu/Random.cpp @@ -1,5 +1,4 @@ -#include <../../nrnconf.h> #ifdef __GNUG__ #pragma implementation #endif -#include +#include "Random.h" diff --git a/src/gnu/Random.h b/src/gnu/Random.h index a4b9b5c5ec..fb6f7d067f 100755 --- a/src/gnu/Random.h +++ b/src/gnu/Random.h @@ -23,11 +23,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #include -#if MAC - #define Random gnu_Random -#endif - -#include +#include "RNG.h" class Random { protected: diff --git a/src/gnu/RndInt.cpp b/src/gnu/RndInt.cpp index 18d1c1b38b..9bdb39be72 100755 --- a/src/gnu/RndInt.cpp +++ b/src/gnu/RndInt.cpp @@ -1,5 +1,4 @@ -#include <../../nrnconf.h> #ifdef __GNUG__ #pragma implementation #endif -#include +#include "RndInt.h" diff --git a/src/gnu/RndInt.h b/src/gnu/RndInt.h index 97251ff666..c54b44de0e 100755 --- a/src/gnu/RndInt.h +++ b/src/gnu/RndInt.h @@ -44,7 +44,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. // override stored values #include -#include +#include "RNG.h" class RandomInteger { diff --git a/src/gnu/Uniform.cpp b/src/gnu/Uniform.cpp index ee76bce881..ea41b9610d 100755 --- a/src/gnu/Uniform.cpp +++ b/src/gnu/Uniform.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Uniform.h" double Uniform::operator()() { diff --git a/src/gnu/Uniform.h b/src/gnu/Uniform.h index c9f3ec63c9..b6a0016a09 100755 --- a/src/gnu/Uniform.h +++ b/src/gnu/Uniform.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. 
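
`Rand.hpp`/`Rand.cpp` above (moved from `src/ivoc/random1.h`) keep the historical default wiring: an ACG generator feeding a Normal(0, 1) distribution, with `type_` recording which backend (0 ACG ... 4 Random123) is currently attached. A hypothetical sketch of that default object, again assuming the `src/gnu` headers from this diff are available:

```cpp
// Hypothetical sketch of the default Rand wiring: an ACG generator owned by
// the Rand object, driving a Normal(0, 1) distribution.
#include <cstdio>

#include "Rand.hpp"

int main() {
    Rand r{/*seed=*/1, /*size=*/55, /*obj=*/nullptr};

    // r.rand is a Random*; operator() draws the next variate (normal by default).
    for (int i = 0; i < 3; ++i) {
        std::printf("normal draw: %f\n", (*r.rand)());
    }

    // r.gen is the underlying RNG*; asDouble() gives raw uniforms from the ACG.
    std::printf("raw uniform: %f\n", r.gen->asDouble());
}
```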
#endif #define _Uniform_h 1 -#include +#include "Random.h" // // The interval [lo..hi] diff --git a/src/gnu/Weibull.cpp b/src/gnu/Weibull.cpp index 8d1f6b4f74..1c408ac64d 100755 --- a/src/gnu/Weibull.cpp +++ b/src/gnu/Weibull.cpp @@ -1,4 +1,3 @@ -#include <../../nrnconf.h> /* Copyright (C) 1988 Free Software Foundation written by Dirk Grunwald (grunwald@cs.uiuc.edu) @@ -18,8 +17,8 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #ifdef __GNUG__ #pragma implementation #endif -#include -#include +#include "Random.h" +#include "Weibull.h" // // See Simulation, Modelling & Analysis by Law & Kelton, pp259 diff --git a/src/gnu/Weibull.h b/src/gnu/Weibull.h index 1cc8fd2b13..48ad831a56 100755 --- a/src/gnu/Weibull.h +++ b/src/gnu/Weibull.h @@ -21,7 +21,7 @@ Foundation, 675 Mass Ave, Cambridge, MA 02139, USA. #endif #define _Weibull_h -#include +#include "Random.h" class Weibull: public Random { protected: diff --git a/src/oc/isaac64.cpp b/src/gnu/isaac64.cpp similarity index 98% rename from src/oc/isaac64.cpp rename to src/gnu/isaac64.cpp index 579f52d4f0..b0c6c40317 100644 --- a/src/oc/isaac64.cpp +++ b/src/gnu/isaac64.cpp @@ -1,8 +1,3 @@ -#include <../../nrnconf.h> -#if HAVE_SYS_TYPES_H -#include -#endif - /* ------------------------------------------------------------------------------ isaac64.cpp: A Fast cryptographic random number generator diff --git a/src/oc/isaac64.h b/src/gnu/isaac64.h similarity index 97% rename from src/oc/isaac64.h rename to src/gnu/isaac64.h index d41a6f5edc..4af3b7586e 100644 --- a/src/oc/isaac64.h +++ b/src/gnu/isaac64.h @@ -1,3 +1,5 @@ +#pragma once + /* ------------------------------------------------------------------------------ isaac64.h: Definitions for a fast cryptographic random number generator @@ -12,13 +14,8 @@ Jenkins, R.J. (1996) ISAAC, in Fast Software Encryption, vol. 
1039, ------------------------------------------------------------------------------ */ -#ifndef ISAAC64_H -#define ISAAC64_H -#include -#if defined(HAVE_STDINT_H) #include -#endif #define RANDSIZL (4) /* I recommend 8 for crypto, 4 for simulations */ #define RANDSIZ (1 << RANDSIZL) @@ -89,5 +86,3 @@ Macros to get individual random numbers rng->randcnt = RANDMAX - 2, \ DBL64 * (*((ub8*) (((ub4*) (rng->randrsl)) + rng->randcnt))))) - -#endif /* ISAAC64_H */ diff --git a/src/oc/mcran4.cpp b/src/gnu/mcran4.cpp similarity index 86% rename from src/oc/mcran4.cpp rename to src/gnu/mcran4.cpp index 1b303164d6..a9553687f6 100644 --- a/src/oc/mcran4.cpp +++ b/src/gnu/mcran4.cpp @@ -39,17 +39,19 @@ contained the header: Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA */ -#include <../../nrnconf.h> #include #include #include #include #include -#include -#include "hocdec.h" +#include "mcran4.h" static uint32_t lowindex = 0; +double mcell_lowindex() { + return static_cast(lowindex); +} + void mcell_ran4_init(uint32_t low) { lowindex = low; } @@ -70,40 +72,6 @@ uint32_t mcell_iran4(uint32_t* high) { return nrnRan4int(high, lowindex); } -/* Hoc interface */ -extern double chkarg(); -extern int use_mcell_ran4_; - - -void hoc_mcran4() { - uint32_t idx; - double* xidx; - double x; - xidx = hoc_pgetarg(1); - idx = (uint32_t) (*xidx); - x = mcell_ran4a(&idx); - *xidx = idx; - hoc_ret(); - hoc_pushx(x); -} -void hoc_mcran4init() { - double prev = (double) lowindex; - if (ifarg(1)) { - uint32_t idx = (uint32_t) chkarg(1, 0., 4294967295.); - mcell_ran4_init(idx); - } - hoc_ret(); - hoc_pushx(prev); -} -void hoc_usemcran4() { - double prev = (double) use_mcell_ran4_; - if (ifarg(1)) { - use_mcell_ran4_ = (int) chkarg(1, 0., 1.); - } - hoc_ret(); - hoc_pushx(prev); -} - uint32_t nrnRan4int(uint32_t* idx1, uint32_t idx2) { uint32_t u, v, w, m, n; /* 64-bit hash */ diff --git a/src/oc/mcran4.h b/src/gnu/mcran4.h similarity index 98% rename from src/oc/mcran4.h rename to src/gnu/mcran4.h index 7d251b4cc1..9d7ff79dd9 100644 --- a/src/oc/mcran4.h +++ b/src/gnu/mcran4.h @@ -1,6 +1,7 @@ #pragma once #include +double mcell_lowindex(); void mcell_ran4_init(uint32_t); double mcell_ran4(uint32_t* idx1, double* x, unsigned int n, double range); double mcell_ran4a(uint32_t* idx1); diff --git a/src/gnu/nrnisaac.cpp b/src/gnu/nrnisaac.cpp new file mode 100644 index 0000000000..f0549a4d9a --- /dev/null +++ b/src/gnu/nrnisaac.cpp @@ -0,0 +1,25 @@ +#include +#include "nrnisaac.h" +#include "isaac64.h" + +using RNG = struct isaac64_state; + +void* nrnisaac_new() { + return new RNG; +} + +void nrnisaac_delete(void* v) { + delete static_cast(v); +} + +void nrnisaac_init(void* v, unsigned long int seed) { + isaac64_init(static_cast(v), seed); +} + +double nrnisaac_dbl_pick(void* v) { + return isaac64_dbl32(static_cast(v)); +} + +std::uint32_t nrnisaac_uint32_pick(void* v) { + return isaac64_uint32(static_cast(v)); +} diff --git a/src/gnu/nrnisaac.h b/src/gnu/nrnisaac.h new file mode 100644 index 0000000000..5322febafe --- /dev/null +++ b/src/gnu/nrnisaac.h @@ -0,0 +1,9 @@ +#pragma once + +#include + +void* nrnisaac_new(); +void nrnisaac_delete(void* rng); +void nrnisaac_init(void* rng, unsigned long int seed); +double nrnisaac_dbl_pick(void* rng); +std::uint32_t nrnisaac_uint32_pick(void* rng); diff --git a/src/gnu/nrnran123.cpp b/src/gnu/nrnran123.cpp new file mode 100644 index 0000000000..6180140c09 --- /dev/null +++ b/src/gnu/nrnran123.cpp @@ -0,0 +1,187 @@ +#include +#include "nrnran123.h" +#include 
+#include +#include + +using RNG = r123::Philox4x32; + +static RNG::key_type k = {{0}}; + +struct nrnran123_State { + RNG::ctr_type c; + RNG::ctr_type r; + char which_; +}; + +void nrnran123_set_globalindex(std::uint32_t gix) { + k[0] = gix; +} + +/* if one sets the global, one should reset all the stream sequences. */ +std::uint32_t nrnran123_get_globalindex() { + return k[0]; +} + +/* deprecated */ +nrnran123_State* nrnran123_newstream3(std::uint32_t id1, std::uint32_t id2, std::uint32_t id3) { + return nrnran123_newstream(id1, id2, id3); +} + +nrnran123_State* nrnran123_newstream() { + extern int nrnmpi_myid; + static std::uint32_t id3{}; + return nrnran123_newstream(1, nrnmpi_myid, ++id3); +} + +nrnran123_State* nrnran123_newstream(std::uint32_t id1, std::uint32_t id2, std::uint32_t id3) { + auto* s = new nrnran123_State; + s->c[1] = id3; + s->c[2] = id1; + s->c[3] = id2; + nrnran123_setseq(s, 0, 0); + return s; +} + +void nrnran123_deletestream(nrnran123_State* s) { + delete s; +} + +void nrnran123_getseq(nrnran123_State* s, std::uint32_t* seq, char* which) { + *seq = s->c[0]; + *which = s->which_; +} + +void nrnran123_setseq(nrnran123_State* s, std::uint32_t seq, char which) { + if (which > 3 || which < 0) { + s->which_ = 0; + } else { + s->which_ = which; + } + s->c[0] = seq; + s->r = philox4x32(s->c, k); +} + +/** @brief seq4which is 34 bit uint encoded as double(seq)*4 + which + * More convenient to get and set from interpreter +*/ +void nrnran123_setseq(nrnran123_State* s, double seq4which) { + if (seq4which < 0.0) { + seq4which = 0.0; + } + if (seq4which > double(0XffffffffffLL)) { + seq4which = 0.0; + } + // at least 64 bits even on 32 bit machine (could be more) + unsigned long long x = ((unsigned long long) seq4which) & 0X3ffffffffLL; + char which = x & 0X3; + uint32_t seq = x >> 2; + nrnran123_setseq(s, seq, which); +} + +void nrnran123_getids(nrnran123_State* s, std::uint32_t* id1, std::uint32_t* id2) { + *id1 = s->c[2]; + *id2 = s->c[3]; +} + +void nrnran123_getids(nrnran123_State* s, + std::uint32_t* id1, + std::uint32_t* id2, + std::uint32_t* id3) { + *id3 = s->c[1]; + *id1 = s->c[2]; + *id2 = s->c[3]; +} + +/* Deprecated */ +void nrnran123_getids3(nrnran123_State* s, + std::uint32_t* id1, + std::uint32_t* id2, + std::uint32_t* id3) { + nrnran123_getids(s, id1, id2, id3); +} + +void nrnran123_setids(nrnran123_State* s, std::uint32_t id1, std::uint32_t id2, std::uint32_t id3) { + s->c[1] = id3; + s->c[2] = id1; + s->c[3] = id2; +} + +std::uint32_t nrnran123_ipick(nrnran123_State* s) { + char which = s->which_; + std::uint32_t rval = s->r[which++]; + if (which > 3) { + which = 0; + s->c.incr(); + s->r = philox4x32(s->c, k); + } + s->which_ = which; + return rval; +} + +double nrnran123_dblpick(nrnran123_State* s) { + static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */ + auto u = nrnran123_ipick(s); + return ((double) u + 1.0) * SHIFT32; +} + +double nrnran123_uniform(nrnran123_State* s) { + return nrnran123_dblpick(s); +} + +double nrnran123_uniform(nrnran123_State* s, double a, double b) { + return a + nrnran123_dblpick(s) * (b - a); +} + +double nrnran123_negexp(nrnran123_State* s, double mean) { + /* min 2.3283064e-10 to max 22.18071 (if mean is 1) */ + return -std::log(nrnran123_dblpick(s)) * mean; +} + +double nrnran123_negexp(nrnran123_State* s) { + /* min 2.3283064e-10 to max 22.18071 */ + return -std::log(nrnran123_dblpick(s)); +} + +/* At cost of a cached value we could compute two at a time. 
*/ +/* But that would make it difficult to transfer to coreneuron for t > 0 */ +double nrnran123_normal(nrnran123_State* s) { + double w, x, y; + double u1, u2; + do { + u1 = nrnran123_dblpick(s); + u2 = nrnran123_dblpick(s); + u1 = 2. * u1 - 1.; + u2 = 2. * u2 - 1.; + w = (u1 * u1) + (u2 * u2); + } while (w > 1); + + y = std::sqrt((-2. * std::log(w)) / w); + x = u1 * y; + return x; +} + +double nrnran123_normal(nrnran123_State* s, double mu, double sigma) { + return mu + nrnran123_normal(s) * sigma; +} + +nrnran123_array4x32 nrnran123_iran(std::uint32_t seq, std::uint32_t id1, std::uint32_t id2) { + return nrnran123_iran3(seq, id1, id2, 0); +} +nrnran123_array4x32 nrnran123_iran3(std::uint32_t seq, + std::uint32_t id1, + std::uint32_t id2, + std::uint32_t id3) { + nrnran123_array4x32 a; + RNG::ctr_type c; + c[0] = seq; + c[1] = id3; + c[2] = id1; + c[3] = id2; + RNG::ctr_type r = philox4x32(c, k); + a.v[0] = r[0]; + a.v[1] = r[1]; + a.v[2] = r[2]; + a.v[3] = r[3]; + return a; +} diff --git a/src/gnu/nrnran123.h b/src/gnu/nrnran123.h new file mode 100644 index 0000000000..e087a1607e --- /dev/null +++ b/src/gnu/nrnran123.h @@ -0,0 +1,130 @@ +#ifndef nrnran123_h +#define nrnran123_h + +/* interface to Random123 */ +/* http://www.thesalmons.org/john/random123/papers/random123sc11.pdf */ + +/* +The 4x32 generators utilize a uint32x4 counter and uint32x4 key to transform +into an almost cryptographic quality uint32x4 random result. +There are many possibilites for balancing the sharing of the internal +state instances while reserving a uint32 counter for the stream sequence +and reserving other portions of the counter vector for stream identifiers +and global index used by all streams. + +We currently provide a single instance by default in which the policy is +to use the 0th counter uint32 as the stream sequence, words 2, 3 and 4 as the +stream identifier, and word 0 of the key as the global index. Unused words +are constant uint32 0. + +It is also possible to use Random123 directly without reference to this +interface. See Random123-1.02/docs/html/index.html +of the full distribution available from +http://www.deshawresearch.com/resources_random123.html +*/ + +#include + + +struct nrnran123_State; + +struct nrnran123_array4x32 { + std::uint32_t v[4]; +}; + +/* global index. eg. run number */ +/* all generator instances share this global index */ +extern void nrnran123_set_globalindex(std::uint32_t gix); +extern std::uint32_t nrnran123_get_globalindex(); + +/** Construct a new Random123 stream based on the philox4x32 generator. + * + * @param id1 stream ID + * @param id2 optional defaults to 0 + * @param id3 optional defaults to 0 + * @return an nrnran123_State object representing this stream + */ +extern nrnran123_State* nrnran123_newstream(std::uint32_t id1, + std::uint32_t id2 = 0, + std::uint32_t id3 = 0); + +/** @deprecated use nrnran123_newstream instead **/ +extern nrnran123_State* nrnran123_newstream3(std::uint32_t id1, + std::uint32_t id2, + std::uint32_t id3); + +/** Construct a new Random123 stream based on the philox4x32 generator. + * + * @note This overload constructs each stream instance as an independent stream. + * Independence is derived by using id1=1, id2=nrnmpi_myid, + * id3 = ++internal_static_uint32_initialized_to_0 + */ +extern nrnran123_State* nrnran123_newstream(); + +/** Destroys the given Random123 stream. 
*/ +extern void nrnran123_deletestream(nrnran123_State* s); + +/** Get sequence number and selector from an nrnran123_State object */ +extern void nrnran123_getseq(nrnran123_State* s, std::uint32_t* seq, char* which); + +/** Set a Random123 sequence for a sequnece ID and which selector. + * + * @param s an Random123 state object + * @param seq the sequence ID for which to initialize the random number sequence + * @param which the selector (0 <= which < 4) of the sequence + */ +extern void nrnran123_setseq(nrnran123_State* s, std::uint32_t seq, char which); + +/** Set a Random123 sequence for a sequnece ID and which selector. + * + * This overload encodes the sequence ID and which in one double. This is done specifically to be + * able to expose the Random123 API in HOC, which only supports real numbers. + * + * @param s an Random123 state object + * @param seq4which encodes both seq and which as seq*4+which + */ +extern void nrnran123_setseq(nrnran123_State* s, double seq4which); // seq*4+which); + +/** Get stream IDs from Random123 State object */ +extern void nrnran123_getids(nrnran123_State* s, std::uint32_t* id1, std::uint32_t* id2); + + +/** Get stream IDs from Random123 State object */ +extern void nrnran123_getids(nrnran123_State* s, + std::uint32_t* id1, + std::uint32_t* id2, + std::uint32_t* id3); + +/** @brief. Deprecated, use nrnran123_getids **/ +extern void nrnran123_getids3(nrnran123_State*, + std::uint32_t* id1, + std::uint32_t* id2, + std::uint32_t* id3); + +extern void nrnran123_setids(nrnran123_State*, std::uint32_t id1, std::uint32_t id2, std::uint32_t id3); + +// Get a random uint32_t in [0, 2^32-1] +extern std::uint32_t nrnran123_ipick(nrnran123_State*); +// Get a random double on [0, 1] +// nrnran123_dblpick minimum value is 2.3283064e-10 and max value is 1-min +extern double nrnran123_dblpick(nrnran123_State*); + +/* nrnran123_negexp min value is 2.3283064e-10, max is 22.18071 if mean is 1.0 */ +extern double nrnran123_negexp(nrnran123_State*); // mean = 1.0 +extern double nrnran123_negexp(nrnran123_State*, double mean); +extern double nrnran123_normal(nrnran123_State*); // mean = 0.0, std = 1.0 +extern double nrnran123_normal(nrnran123_State*, double mean, double std); + +extern double nrnran123_uniform(nrnran123_State*); // same as dblpick +extern double nrnran123_uniform(nrnran123_State*, double min, double max); + +/* more fundamental (stateless) (though the global index is still used) */ +extern nrnran123_array4x32 nrnran123_iran(std::uint32_t seq, std::uint32_t id1, std::uint32_t id2 = 0, std::uint32_t id3 = 0); + +/** @brief. 
Deprecated, use nrnran123_iran **/ +extern nrnran123_array4x32 nrnran123_iran3(std::uint32_t seq, + std::uint32_t id1, + std::uint32_t id2, + std::uint32_t id3); + +#endif diff --git a/src/ivoc/apwindow.cpp b/src/ivoc/apwindow.cpp index 1d9e7ab467..71c07bcf36 100644 --- a/src/ivoc/apwindow.cpp +++ b/src/ivoc/apwindow.cpp @@ -11,14 +11,9 @@ #include #include #else -#ifdef MAC -#include -#include -#else #include #include #endif -#endif #include #include #include @@ -46,10 +41,6 @@ extern void handle_old_focus(); extern int iv_mere_dismiss; #endif -#if MAC -extern void ivoc_dismiss_defer(); -#endif - // just because avoiding virtual resource /*static*/ class DBAction: public Action { public: @@ -104,10 +95,7 @@ void WinDismiss::execute() { if (win_) { win_->unmap(); } -#if MAC -#else Session::instance()->quit(); -#endif dismiss_defer(); win_defer_ = win_; win_ = NULL; @@ -179,11 +167,7 @@ DismissableWindow::DismissableWindow(Glyph* g, bool force_menubar) dbutton_ = NULL; Style* style = Session::instance()->style(); String str("Close"); -#if MAC - if (0) { -#else if ((style->find_attribute("dismiss_button", str) && str != "off") || force_menubar) { -#endif if (!PrintableWindow::leader()) { style->find_attribute("pwm_dismiss_button", str); } @@ -257,18 +241,10 @@ void DismissableWindow::name(const char* s) { { SetWindowText(hw, s); } - } else -#endif -#if MAC - Str255 st; - strncpy(&st[1], s, 254); - st[0] = strlen(s); - WindowPtr theWin = Window::rep()->macWindow(); - if (theWin) { - SetWTitle(theWin, st); - } -#endif + } else if (style()) { +#else // not WIN32 if (style()) { +#endif style()->attribute("name", s); set_props(); // replaces following two statements // rep()->wm_name(this); @@ -384,12 +360,6 @@ Glyph* PrintableWindow::print_glyph() { void PrintableWindow::map() { if (mappable_) { DismissableWindow::map(); -#if MAC - // just can't transform between top and bottom and also take into account decorations. - if (xplace_) { - xmove(xleft_, xtop_); - } -#endif single_event_run(); notify(); } else { @@ -435,13 +405,6 @@ bool PrintableWindow::receive(const Event& e) { return DismissableWindow::receive(e); } #else -#if MAC -bool PrintableWindow::receive(const Event& e) { - reconfigured(); - notify(); - return (false); -} -#else bool PrintableWindow::receive(const Event& e) { DismissableWindow::receive(e); if (e.type() == Event::other_event) { @@ -484,13 +447,12 @@ bool PrintableWindow::receive(const Event& e) { return false; } #endif -#endif void PrintableWindow::type(const char* s) { type_ = s; } const char* PrintableWindow::type() const { - return type_.string(); + return type_.c_str(); } // StandardWindow diff --git a/src/ivoc/apwindow.h b/src/ivoc/apwindow.h index a478212f86..5c173056a0 100644 --- a/src/ivoc/apwindow.h +++ b/src/ivoc/apwindow.h @@ -1,6 +1,8 @@ #ifndef dismiswin_h #define dismiswin_h +#include + #include #include @@ -103,7 +105,7 @@ class PrintableWindow: public DismissableWindow, public Observable { virtual void default_geometry(); private: - CopyString type_; + std::string type_; static OcGlyphContainer* intercept_; bool mappable_; bool xplace_; diff --git a/src/ivoc/checkpnt.cpp b/src/ivoc/checkpnt.cpp index d5bc91b47a..4ff1749d85 100644 --- a/src/ivoc/checkpnt.cpp +++ b/src/ivoc/checkpnt.cpp @@ -86,8 +86,6 @@ data depending on type. 
eg for VAR && NOTUSER it is */ -#ifndef MAC - #define HAVE_XDR 0 #include @@ -250,9 +248,6 @@ void PortablePointer::set(void* address, int type, unsigned long s) { } PortablePointer::~PortablePointer() {} -declareList(PPList, PortablePointer) -implementList(PPList, PortablePointer) - class OcCheckpoint { public: OcCheckpoint(); @@ -299,7 +294,6 @@ class OcCheckpoint { int cnt_; int nobj_; Objects* otable_; - PPList* ppl_; bool (OcCheckpoint::*func_)(Symbol*); Symbols* stable_; #if HAVE_XDR @@ -377,18 +371,7 @@ bool Checkpoint::xdr(Object*& o) { } } - -#else -void hoc_checkpoint(); -int hoc_readcheckpoint(char*); -void hoc_ret(); -void hoc_pushx(double); - - -#endif // from top of file - void hoc_checkpoint() { -#ifndef MAC if (!cp_) { cp_ = new OcCheckpoint(); } @@ -396,14 +379,9 @@ void hoc_checkpoint() { b = cp_->write(gargstr(1)); hoc_ret(); hoc_pushx(double(b)); -#else - hoc_ret(); - hoc_pushx(0.); -#endif } int hoc_readcheckpoint(char* fname) { -#ifndef MAC f_ = fopen(fname, "r"); if (!f_) { return 0; @@ -428,14 +406,9 @@ int hoc_readcheckpoint(char* fname) { delete rdckpt_; rdckpt_ = NULL; return rval; -#else - return 0; -#endif } -#ifndef MAC OcCheckpoint::OcCheckpoint() { - ppl_ = NULL; func_ = NULL; stable_ = NULL; otable_ = NULL; @@ -452,9 +425,6 @@ OcCheckpoint::OcCheckpoint() { } OcCheckpoint::~OcCheckpoint() { - if (ppl_) { - delete ppl_; - } if (stable_) { delete stable_; } @@ -1420,4 +1390,3 @@ bool OcReadChkPnt::get(Object*& o) { return true; } #endif -#endif diff --git a/src/ivoc/classreg.cpp b/src/ivoc/classreg.cpp index 0c37c6c313..d5e8c81776 100644 --- a/src/ivoc/classreg.cpp +++ b/src/ivoc/classreg.cpp @@ -3,7 +3,6 @@ #include #include -#include #include "classreg.h" #ifndef OC_CLASSES #define OC_CLASSES "occlass.h" diff --git a/src/ivoc/datapath.cpp b/src/ivoc/datapath.cpp index dea6e61b2d..c4da677e77 100644 --- a/src/ivoc/datapath.cpp +++ b/src/ivoc/datapath.cpp @@ -2,10 +2,7 @@ #include #include #include -#include -#include #include "hoclist.h" -#include #if HAVE_IV #include "graph.h" #endif @@ -23,24 +20,16 @@ extern Objectdata* hoc_top_level_data; /*static*/ class PathValue { public: PathValue(); - ~PathValue(); - CopyString* path; + ~PathValue() = default; + std::string path{}; Symbol* sym; double original; char* str; }; PathValue::PathValue() { - path = NULL; str = NULL; sym = NULL; } -PathValue::~PathValue() { - if (path) { - delete path; - } -} - -using StringList = std::vector; class HocDataPathImpl { private: @@ -63,7 +52,7 @@ class HocDataPathImpl { private: std::map table_; - StringList strlist_; + std::vector strlist_; int size_, count_, found_so_far_; int pathstyle_; }; @@ -111,14 +100,14 @@ void HocDataPaths::search() { } } -String* HocDataPaths::retrieve(double* pd) const { +std::string HocDataPaths::retrieve(double* pd) const { assert(impl_->pathstyle_ != 2); // printf("HocDataPaths::retrieve\n"); auto const it = impl_->table_.find(pd); if (it != impl_->table_.end()) { return it->second->path; } - return nullptr; + return {}; } Symbol* HocDataPaths::retrieve_sym(double* pd) const { @@ -140,13 +129,13 @@ void HocDataPaths::append(char** pd) { } } -String* HocDataPaths::retrieve(char** pd) const { +std::string HocDataPaths::retrieve(char** pd) const { // printf("HocDataPaths::retrieve\n"); auto const it = impl_->table_.find(pd); if (it != impl_->table_.end()) { return it->second->path; } - return nullptr; + return {}; } /*------------------------------*/ @@ -206,20 +195,20 @@ PathValue* HocDataPathImpl::found_v(void* v, const char* buf, Symbol* sym) { 
PathValue* pv; if (pathstyle_ != 2) { char path[500]; - CopyString cs(""); + std::string cs{}; for (const auto& str: strlist_) { - Sprintf(path, "%s%s.", cs.string(), str); + Sprintf(path, "%s%s.", cs.c_str(), str.c_str()); cs = path; } - Sprintf(path, "%s%s", cs.string(), buf); + Sprintf(path, "%s%s", cs.c_str(), buf); const auto& it = table_.find(v); if (it == table_.end()) { hoc_warning("table lookup failed for pointer for-", path); return nullptr; } pv = it->second; - if (!pv->path) { - pv->path = new CopyString(path); + if (pv->path.empty()) { + pv->path = path; pv->sym = sym; ++found_so_far_; } @@ -259,7 +248,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { Symbol* sym; int i, total; char buf[200]; - CopyString cs(""); + std::string cs{}; if (sl) for (sym = sl->first; sym; sym = sym->next) { if (sym->cpublic != 2) { @@ -279,7 +268,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { if (pd[i] == sentinal) { Sprintf(buf, "%s%s", sym->name, hoc_araystr(sym, i, od)); cs = buf; - found(pd + i, cs.string(), sym); + found(pd + i, cs.c_str(), sym); } } } break; @@ -288,7 +277,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { if (*pstr == NULL) { Sprintf(buf, "%s", sym->name); cs = buf; - found(pstr, cs.string(), sym); + found(pstr, cs.c_str(), sym); } } break; case OBJECTVAR: { @@ -305,7 +294,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { if (obp[i]->u.dataspace != od) { Sprintf(buf, "%s%s", sym->name, hoc_araystr(sym, i, od)); cs = buf; - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); obp[i]->recurse = 1; search(obp[i]->u.dataspace, obp[i]->ctemplate->symtable); obp[i]->recurse = 0; @@ -316,7 +305,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { if (t->is_point_) { Sprintf(buf, "%s%s", sym->name, hoc_araystr(sym, i, od)); cs = buf; - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); search((Point_process*) obp[i]->u.this_pointer, sym); strlist_.pop_back(); } @@ -331,7 +320,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { if (pitm[i]) { Sprintf(buf, "%s%s", sym->name, hoc_araystr(sym, i, od)); cs = buf; - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); search(hocSEC(pitm[i])); strlist_.pop_back(); } @@ -344,7 +333,7 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { Object* obj = OBJ(q); Sprintf(buf, "%s[%d]", sym->name, obj->index); cs = buf; - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); if (!t->constructor) { search(obj->u.dataspace, t->symtable); } else { @@ -362,14 +351,14 @@ void HocDataPathImpl::search(Objectdata* od, Symlist* sl) { void HocDataPathImpl::search_vectors() { char buf[200]; - CopyString cs(""); + std::string cs{}; cTemplate* t = sym_vec->u.ctemplate; hoc_Item* q; ITERATE(q, t->olist) { Object* obj = OBJ(q); Sprintf(buf, "%s[%d]", sym_vec->name, obj->index); cs = buf; - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); Vect* vec = (Vect*) obj->u.this_pointer; int size = vec->size(); double* pd = vector_vec(vec); @@ -385,14 +374,14 @@ void HocDataPathImpl::search_vectors() { void HocDataPathImpl::search_pysec() { #if USE_PYTHON - CopyString cs(""); + std::string cs{}; hoc_Item* qsec; // ForAllSections(sec) ITERATE(qsec, section_list) { Section* sec = hocSEC(qsec); if (sec->prop && sec->prop->dparam[PROP_PY_INDEX].get()) { cs = secname(sec); - strlist_.push_back((char*) cs.string()); + strlist_.push_back(cs); search(sec); strlist_.pop_back(); } @@ -419,10 +408,11 @@ void 
HocDataPathImpl::search(Section* sec) { } void HocDataPathImpl::search(Node* nd, double x) { char buf[100]; - CopyString cs(""); if (NODEV(nd) == sentinal) { Sprintf(buf, "v(%g)", x); - found(static_cast(&NODEV(nd)), buf, sym_v); + // the conversion below yields a pointer that is potentially invalidated + // by almost any Node operation + found(static_cast(nd->v_handle()), buf, sym_v); } #if EXTRACELLULAR @@ -472,7 +462,12 @@ void HocDataPathImpl::search(Prop* prop, double x) { if (memb_func[type].hoc_mech) { pd = prop->ob->u.dataspace[ir].pval; } else { - pd = prop->param + ir; + if (type == EXTRACELL && ir == neuron::extracellular::vext_pseudoindex()) { + // skip as it was handled by caller + continue; + } else { + pd = static_cast(prop->param_handle_legacy(ir)); + } } imax = hoc_total_array_data(psym, 0); for (i = 0; i < imax; ++i) { diff --git a/src/ivoc/datapath.h b/src/ivoc/datapath.h index 1e8d96920a..0f88d6e6d4 100644 --- a/src/ivoc/datapath.h +++ b/src/ivoc/datapath.h @@ -15,8 +15,8 @@ class HocDataPaths { void append(double*); void append(char**); void search(); - String* retrieve(double*) const; - String* retrieve(char**) const; + std::string retrieve(double*) const; + std::string retrieve(char**) const; Symbol* retrieve_sym(double*) const; int style(); diff --git a/src/ivoc/fourier.cpp b/src/ivoc/fourier.cpp index c14c1fb646..f3b0ec61c0 100644 --- a/src/ivoc/fourier.cpp +++ b/src/ivoc/fourier.cpp @@ -16,15 +16,7 @@ #undef DEBUG_SPCTRM #undef myfabs -#if MAC -#if __GNUC__ < 4 -#define myfabs std::fabs -#else -#define myfabs ::fabs -#endif -#else #define myfabs fabs -#endif #include "oc_ansi.h" diff --git a/src/ivoc/graph.cpp b/src/ivoc/graph.cpp index fb868ece01..3868bc0707 100644 --- a/src/ivoc/graph.cpp +++ b/src/ivoc/graph.cpp @@ -48,10 +48,6 @@ extern Image* gif_image(const char*); #include "classreg.h" #include "gui-redirect.h" -#include "treeset.h" - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); #if HAVE_IV #define Graph_Crosshair_ "Crosshair Graph" @@ -512,38 +508,39 @@ static void gr_add(void* v, bool var) { Object* obj = NULL; char* lab = NULL; char* expr = NULL; - int ioff = 0; // deal with 0, 1, or 2 optional arguments after first - double* pd = NULL; // pointer to varname if second arg is varname string + int ioff = 0; // deal with 0, 1, or 2 optional arguments after first + // pointer to varname if second arg is varname string + neuron::container::data_handle pd{}; int fixtype = g->labeltype(); // organize args for backward compatibility and the new // addexpr("label, "expr", obj,.... 
style if (ifarg(2)) { if (var) { // if string or address then variable and 1 was label - expr = gargstr(1); + expr = hoc_gargstr(1); if (hoc_is_str_arg(2)) { - pd = hoc_val_pointer(gargstr(2)); + pd = hoc_val_handle(hoc_gargstr(2)); ioff += 1; } else if (hoc_is_pdouble_arg(2)) { - pd = hoc_pgetarg(2); + pd = hoc_hgetarg(2); ioff += 1; } } else if (hoc_is_str_arg(2)) { // 1 label, 2 expression - lab = gargstr(1); - expr = gargstr(2); + lab = hoc_gargstr(1); + expr = hoc_gargstr(2); ioff += 1; if (ifarg(3) && hoc_is_object_arg(3)) { // object context obj = *hoc_objgetarg(3); ioff += 1; } } else if (hoc_is_object_arg(2)) { // 1 expr, 2 object context - expr = gargstr(1); + expr = hoc_gargstr(1); obj = *hoc_objgetarg(2); ioff += 1; } else { - expr = gargstr(1); + expr = hoc_gargstr(1); } } else { - expr = gargstr(1); + expr = hoc_gargstr(1); } if (ifarg(3 + ioff)) { if (ifarg(6 + ioff)) { @@ -625,7 +622,7 @@ static double gr_vector(void* v) { Graph* g = (Graph*) v; int n = int(chkarg(1, 1., 1.e5)); double* x = hoc_pgetarg(2); - double* y = hoc_pgetarg(3); + auto y_handle = hoc_hgetarg(3); GraphVector* gv = new GraphVector(""); if (ifarg(4)) { gv->color(colors->color(int(*getarg(4)))); @@ -635,7 +632,7 @@ static double gr_vector(void* v) { gv->brush(g->brush()); } for (int i = 0; i < n; ++i) { - gv->add(x[i], y + i); + gv->add(x[i], y_handle.next_array_element(i)); } // GLabel* glab = g->label(gv->name()); // ((GraphItem*)g->component(g->glyph_index(glab)))->save(false); @@ -1362,7 +1359,6 @@ void GraphItem::pick(Canvas* c, const Allocation& a, int depth, Hit& h) { } // Graph -implementPtrList(LineList, GraphLine); declareActionCallback(Graph); implementActionCallback(Graph); @@ -1372,8 +1368,7 @@ Graph::Graph(bool b) : Scene(0, 0, XSCENE, YSCENE) { loc_ = 0; x_expr_ = NULL; - x_pval_ = NULL; - var_name_ = NULL; + x_pval_ = {}; rvp_ = NULL; cross_action_ = NULL; vector_copy_ = false; @@ -1455,8 +1450,8 @@ Graph::Graph(bool b) Graph::~Graph() { // printf("~Graph\n"); - for (long i = 0; i < line_list_.count(); ++i) { - Resource::unref(line_list_.item(i)); + for (auto& item: line_list_) { + Resource::unref(item); } Resource::unref(keep_lines_toggle_); Resource::unref(x_); @@ -1467,20 +1462,13 @@ Graph::~Graph() { Resource::unref(sc_); Resource::unref(current_polyline_); Resource::unref(family_label_); - if (var_name_) { - delete var_name_; - } if (cross_action_) { delete cross_action_; } } void Graph::name(char* s) { - if (var_name_) { - *var_name_ = s; - } else { - var_name_ = new CopyString(s); - } + var_name_ = s; } void Graph::help() { @@ -1498,36 +1486,31 @@ void Graph::help() { } void Graph::delete_label(GLabel* glab) { - GraphLine* glin = NULL; - GlyphIndex i, cnt; - cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - if (line_list_.item(i)->label() == glab) { - glin = line_list_.item(i); - break; - } - } - if (glin) { - line_list_.remove(i); + GraphLine* glin = nullptr; + auto it = std::find_if(line_list_.begin(), line_list_.end(), [&](const auto& e) { + return e->label() == glab; + }); + if (it != line_list_.end()) { + glin = *it; + line_list_.erase(it); glin->unref(); - i = glyph_index(glin); - remove(i); + GlyphIndex index = glyph_index(glin); + remove(index); } if (!glin) { // but possibly a vector line - cnt = count(); - for (i = 0; i < cnt; ++i) { - GraphItem* gi = (GraphItem*) component(i); + for (GlyphIndex index = 0; index < count(); ++index) { + GraphItem* gi = (GraphItem*) component(index); if (gi->is_polyline()) { GPolyLine* gpl = (GPolyLine*) gi->body(); if (gpl->label() 
== glab) { - remove(i); + remove(index); break; } } } } - i = glyph_index(glab); - remove(i); + GlyphIndex index = glyph_index(glab); + remove(index); } GLabel* Graph::new_proto_label() const { @@ -1535,18 +1518,17 @@ GLabel* Graph::new_proto_label() const { } bool Graph::change_label(GLabel* glab, const char* text, GLabel* gl) { - GlyphIndex i, cnt = line_list_.count(); if (strcmp(glab->text(), text)) { - for (i = 0; i < cnt; ++i) { - if (line_list_.item(i)->label() == glab) { - if (!line_list_.item(i)->change_expr(text, &symlist_)) { + for (auto& line: line_list_) { + if (line->label() == glab) { + if (!line->change_expr(text, &symlist_)) { return false; } } } glab->text(text); } - i = glyph_index(glab); + GlyphIndex i = glyph_index(glab); if (glab->fixtype() != gl->fixtype()) { if (gl->fixed()) { glab->fixed(gl->scale()); @@ -1579,8 +1561,8 @@ void Graph::change_line_color(GPolyLine* glin) { } GlyphIndex Graph::glyph_index(const Glyph* gl) { - GlyphIndex i, cnt = count(); - for (i = 0; i < cnt; ++i) { + GlyphIndex cnt = count(); + for (GlyphIndex i = 0; i < cnt; ++i) { Glyph* g = ((GraphItem*) component(i))->body(); if (g == gl) { return i; @@ -1600,13 +1582,12 @@ std::ostream* Graph::ascii() { } void Graph::draw(Canvas* c, const Allocation& a) const { - long i, cnt = line_list_.count(); // if (!extension_flushed_) { Scene::draw(c, a); //} if (extension_flushed_) { - for (i = 0; i < cnt; ++i) { - line_list_.item(i)->extension()->draw(c, a); + for (auto& item: line_list_) { + item->extension()->draw(c, a); } } if (ascii_) { @@ -1615,7 +1596,7 @@ void Graph::draw(Canvas* c, const Allocation& a) const { } void Graph::ascii_save(std::ostream& o) const { - long line, lcnt = line_list_.count(); + long line, lcnt = line_list_.size(); int i, dcnt; if (lcnt == 0 || !x_ || family_label_) { // tries to print in matrix form is labels and each line the same @@ -1626,8 +1607,8 @@ void Graph::ascii_save(std::ostream& o) const { } if (lcnt) { o << lcnt << " addvar/addexpr lines:"; - for (i = 0; i < lcnt; ++i) { - o << " " << line_list_.item(i)->name(); + for (const auto& line: line_list_) { + o << " " << line->name(); } o << std::endl; } @@ -1733,15 +1714,15 @@ void Graph::ascii_save(std::ostream& o) const { } else { o << "x"; } - for (line = 0; line < lcnt; ++line) { - o << " " << line_list_.item(line)->name(); + for (const auto& item: line_list_) { + o << " " << item->name(); } o << std::endl; dcnt = x_->count(); for (i = 0; i < dcnt; ++i) { o << x_->get_val(i); - for (line = 0; line < lcnt; ++line) { - o << "\t" << line_list_.item(line)->y(i); + for (const auto& item: line_list_) { + o << "\t" << item->y(i); } o << std::endl; } @@ -1813,10 +1794,6 @@ void Graph::wholeplot(Coord& l, Coord& b, Coord& r, Coord& t) const { GraphLine* gl; l = b = 1e9; r = t = -1e9; -#if 0 - cnt = line_list_.count(); - if (!cnt) { -#endif cnt = count(); for (i = 0; i < cnt; ++i) { GraphItem* gi = (GraphItem*) component(i); @@ -1857,19 +1834,6 @@ void Graph::wholeplot(Coord& l, Coord& b, Coord& r, Coord& t) const { t = -1e30; } return; -#if 0 - } - for (i = 0; i < cnt; ++i) { - gl = line_list_.item(i); - l = std::min(l, gl->x_data()->min()); - b = std::min(b, gl->y_data()->min()); - r = std::max(r, gl->x_data()->max()); - t = std::max(t, gl->y_data()->max()); - } - if (l >= r || b >= t) { - Scene::wholeplot(l, b, r, t); - } -#endif } void Graph::axis(DimensionName d, @@ -1908,7 +1872,7 @@ GraphLine* Graph::add_var(const char* expr, const Brush* brush, bool usepointer, int fixtype, - double* pd, + 
neuron::container::data_handle pd, const char* lab, Object* obj) { GraphLine* gl = new GraphLine(expr, x_, &symlist_, color, brush, usepointer, pd, obj); @@ -1922,7 +1886,7 @@ GraphLine* Graph::add_var(const char* expr, ((GraphItem*) component(i))->save(false); glab->color(color); gl->label(glab); - line_list_.append(gl); + line_list_.push_back(gl); gl->ref(); Scene::append(new GPolyLineItem(gl)); return gl; @@ -1943,12 +1907,12 @@ void Graph::x_expr(const char* expr, bool usepointer) { hoc_execerror(expr, "not an expression"); } if (usepointer) { - x_pval_ = hoc_val_pointer(expr); + x_pval_ = hoc_val_handle(expr); if (!x_pval_) { hoc_execerror(expr, "is invalid left hand side of assignment statement"); } } else { - x_pval_ = 0; + x_pval_ = {}; } } @@ -1958,10 +1922,8 @@ void Graph::begin() { keep_lines(); family_value(); } - long count = line_list_.count(); int hem = hoc_execerror_messages; - for (long i = 0; i < count; ++i) { - GraphLine* gl = line_list_.item(i); + for (auto& gl: line_list_) { gl->erase(); if (family_on_) { ((GPolyLine*) gl)->color(color()); @@ -1990,9 +1952,8 @@ void Graph::plot(float x) { } else { x_->add(x); } - long count = line_list_.count(); - for (long i = 0; i < count; ++i) { - line_list_.item(i)->plot(); + for (auto& item: line_list_) { + item->plot(); } } void Graph::begin_line(const char* s) { @@ -2024,37 +1985,23 @@ void Graph::flush() { // damage_all();//too conservative. plots everything every time } void Graph::fast_flush() { -#if 0 - long i, cnt = line_list_.count(); - for (i=0; i < cnt; ++i) { - modified( - glyph_index( - line_list_.item(i)->extension() - ) - ); - } -#else - long i, cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - line_list_.item(i)->extension()->damage(this); + for (auto& item: line_list_) { + item->extension()->damage(this); } -#endif extension_flushed_ = true; } void Graph::extension_start() { x_->running_start(); - long i, cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - line_list_.item(i)->extension_start(); + for (auto& item: line_list_) { + item->extension_start(); } extension_flushed_ = false; } void Graph::extension_continue() { x_->running_start(); - long i, cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - line_list_.item(i)->extension_continue(); + for (auto& item: line_list_) { + item->extension_continue(); } extension_flushed_ = false; } @@ -2115,28 +2062,21 @@ void Graph::cross_action(char c, Coord x, Coord y) { } } void Graph::erase() { - long count = line_list_.count(); - for (long i = 0; i < count; ++i) { - line_list_.item(i)->erase(); + for (auto& item: line_list_) { + item->erase(); } damage_all(); } void Graph::erase_all() { - int i; -#if 0 - while(count()) { - remove(0); - } -#else - for (i = count() - 1; i >= 0; --i) { + for (int i = count() - 1; i >= 0; --i) { remove(i); } -#endif - while (line_list_.count()) { - Resource::unref(line_list_.item(0)); - line_list_.remove(0); + for (auto& item: line_list_) { + Resource::unref(item); } + line_list_.clear(); + line_list_.shrink_to_fit(); label_n_ = 0; } void Graph::family_value() { @@ -2207,9 +2147,7 @@ void Graph::family(bool i) { } else { family_on_ = false; keep_lines_toggle_->set(TelltaleState::is_chosen, false); - long count = line_list_.count(); - for (long i = 0; i < count; ++i) { - GraphLine* gl = line_list_.item(i); + for (auto& gl: line_list_) { gl->color(gl->save_color()); gl->brush(gl->save_brush()); } @@ -2325,18 +2263,14 @@ void Graph::erase_lines() { } } } - cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - GraphLine* gl 
= line_list_.item(i); + for (auto& gl: line_list_) { gl->label()->erase_flag(false); } cnt = count(); for (i = cnt - 1; i >= 0; --i) { ((GraphItem*) component(i))->erase(this, i, GraphItem::ERASE_LINE); } - cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - GraphLine* gl = line_list_.item(i); + for (auto& gl: line_list_) { Scene::append(new GPolyLineItem(gl)); } erase(); @@ -2421,14 +2355,14 @@ void Graph::save_phase2(std::ostream& o) { Sprintf(buf, "save_window_.family(\"%s\")", family_label_->text()); o << buf << std::endl; } - if (var_name_) { - if ((var_name_->string())[var_name_->length() - 1] == '.') { - Sprintf(buf, "%sappend(save_window_)", var_name_->string()); + if (!var_name_.empty()) { + if (var_name_.back() == '.') { + Sprintf(buf, "%sappend(save_window_)", var_name_.c_str()); } else { - Sprintf(buf, "%s = save_window_", var_name_->string()); + Sprintf(buf, "%s = save_window_", var_name_.c_str()); } o << buf << std::endl; - Sprintf(buf, "save_window_.save_name(\"%s\")", var_name_->string()); + Sprintf(buf, "save_window_.save_name(\"%s\")", var_name_.c_str()); o << buf << std::endl; } if (x_expr_) { @@ -2482,17 +2416,18 @@ void Graph::choose_sym() { w = v->canvas()->window(); } while ((w && sc_->post_for_aligned(w, .5, 1.)) || (!w && sc_->post_at(300, 300))) { - // printf("Graph selected %s\n", sc_->selected()->string()); char buf[256]; double* pd = sc_->selected_var(); + neuron::container::data_handle pd_handle{pd}; + if (sc_->selected_vector_count()) { - Sprintf(buf, "%s", sc_->selected()->string()); + Sprintf(buf, "%s", sc_->selected().c_str()); GraphVector* gv = new GraphVector(buf); gv->color(color()); gv->brush(brush()); int n = sc_->selected_vector_count(); for (int i = 0; i < n; ++i) { - gv->add(double(i), pd + i); + gv->add(double(i), pd_handle.next_array_element(i)); } GLabel* glab = label(gv->name()); ((GraphItem*) component(glyph_index(glab)))->save(false); @@ -2501,20 +2436,19 @@ void Graph::choose_sym() { flush(); break; } else if (pd) { - // add_var(sc_->selected()->string(), color(), brush(), 1, 2, pd); - add_var(sc_->selected()->string(), color(), brush(), 1, 2); + add_var(sc_->selected().c_str(), color(), brush(), 1, 2); break; } else { - CopyString s(*sc_->selected()); + auto s = sc_->selected(); // above required due to bug in mswindows version in which // sc_->selected seems volatile under some kinds of hoc // executions. 
- Sprintf(buf, "hoc_ac_ = %s\n", s.string()); + Sprintf(buf, "hoc_ac_ = %s\n", s.c_str()); if (oc.run(buf) == 0) { - add_var(s.string(), color(), brush(), 0, 2); + add_var(s.c_str(), color(), brush(), 0, 2); break; } - hoc_warning(s.string(), "is not an expression."); + hoc_warning(s.c_str(), "is not an expression."); } } // sc_->unref(); @@ -2533,12 +2467,12 @@ void Graph::family_label_chooser() { } while (fsc_->post_for_aligned(XYView::current_pick_view()->canvas()->window(), .5, 1.)) { char buf[256]; - Sprintf(buf, "hoc_ac_ = %s\n", fsc_->selected()->string()); + Sprintf(buf, "hoc_ac_ = %s\n", fsc_->selected().c_str()); if (oc.run(buf) == 0) { - family(fsc_->selected()->string()); + family(fsc_->selected().c_str()); break; } - hoc_warning(sc_->selected()->string(), "is not an expression."); + hoc_warning(sc_->selected().c_str(), "is not an expression."); } } @@ -2549,7 +2483,7 @@ GraphLine::GraphLine(const char* expr, const Color* c, const Brush* b, bool usepointer, - double* pd, + neuron::container::data_handle pd, Object* obj) : GPolyLine(x, c, b) { Oc oc; @@ -2561,16 +2495,16 @@ GraphLine::GraphLine(const char* expr, // char buf[256]; // Sprintf(buf, "%s", expr); // expr_ = oc.parseExpr(buf, symlist); - expr_ = NULL; + expr_ = nullptr; pval_ = pd; } else { expr_ = oc.parseExpr(expr, symlist); - pval_ = hoc_val_pointer(expr); + pval_ = hoc_val_handle(expr); if (!pval_) { hoc_execerror(expr, "is invalid left hand side of assignment statement"); } } - oc.notify_when_freed(pval_, this); + neuron::container::notify_when_handle_dies(pval_, this); } else { if (obj) { obj_ = obj; @@ -2580,7 +2514,7 @@ GraphLine::GraphLine(const char* expr, } else { expr_ = oc.parseExpr(expr, symlist); } - pval_ = 0; + pval_ = {}; } if (!pval_ && !expr_) { hoc_execerror(expr, "not an expression"); @@ -2674,7 +2608,7 @@ void GraphLine::simgraph_continuous(double tt) { void GraphLine::update(Observable*) { // *pval_ has been freed // printf("GraphLine::update pval_ has been freed\n"); - pval_ = NULL; + pval_ = {}; if (obj_) { expr_ = NULL; } @@ -2691,9 +2625,9 @@ bool GraphLine::change_expr(const char* expr, Symlist** symlist) { if (sym) { expr_ = sym; if (pval_) { - Oc oc; - oc.notify_pointer_disconnect(this); - pval_ = NULL; + // we are no longer interested in updates to pval_ + nrn_notify_pointer_disconnect(this); + pval_ = {}; } return true; } else { @@ -3068,7 +3002,7 @@ GLabel::~GLabel() { } Glyph* GLabel::clone() const { - return new GLabel(text_.string(), color_, fixtype_, scale_, x_align_, y_align_); + return new GLabel(text_.c_str(), color_, fixtype_, scale_, x_align_, y_align_); } void GLabel::save(std::ostream& o, Coord x, Coord y) { @@ -3080,7 +3014,7 @@ void GLabel::save(std::ostream& o, Coord x, Coord y) { "save_window_.label(%g, %g, \"%s\", %d, %g, %g, %g, %d)", x, y, - text_.string(), + text_.c_str(), fixtype_, scale_, x_align_, @@ -3108,7 +3042,7 @@ void GLabel::align(float x, float y) { void GLabel::color(const Color* c) { Resource::unref(label_); WidgetKit& kit = *WidgetKit::instance(); - label_ = new Label(text_, kit.font(), c); + label_ = new Label(text_.c_str(), kit.font(), c); label_->ref(); Resource::ref(c); Resource::unref(color_); @@ -3122,7 +3056,7 @@ void GLabel::text(const char* t) { Resource::unref(label_); WidgetKit& kit = *WidgetKit::instance(); text_ = t; - label_ = new Label(text_, kit.font(), color_); + label_ = new Label(text_.c_str(), kit.font(), color_); label_->ref(); } @@ -3164,7 +3098,7 @@ void GLabel::draw(Canvas* c, const Allocation& a1) const { // printf("transformer 
%g %g %g %g %g %g\n", a00, a01, a10, a11, a20, a21); label_->draw(c, a2); c->pop_transform(); - IfIdraw(text(c, text_.string(), t, NULL, color())); + IfIdraw(text(c, text_.c_str(), t, NULL, color())); } // DataVec------------------ @@ -3329,27 +3263,6 @@ void DataVec::write() { #endif } -DataPointers::DataPointers(int size) { - count_ = 0; - size_ = size; - px_ = new double*[size]; -} -DataPointers::~DataPointers() { - delete[] px_; -} -void DataPointers::add(double* pd) { - if (count_ == size_) { - size_ *= 2; - double** px = new double*[size_]; - for (int i = 0; i < count_; i++) { - px[i] = px_[i]; - } - delete[] px_; - px_ = px; - } - px_[count_++] = pd; -} - GraphVector::GraphVector(const char* name, const Color* color, const Brush* brush) : GPolyLine(new DataVec(50), color, brush) { dp_ = new DataPointers(); @@ -3367,7 +3280,7 @@ GraphVector::~GraphVector() { } const char* GraphVector::name() const { - return name_.string(); + return name_.c_str(); } void GraphVector::save(std::ostream&) {} @@ -3386,28 +3299,28 @@ void GraphVector::update(Observable*) { begin(); } -void GraphVector::add(float x, double* py) { +void GraphVector::add(float x, neuron::container::data_handle py) { if (disconnect_defer_) { Oc oc; oc.notify_pointer_disconnect(this); disconnect_defer_ = false; } - if (dp_->count() == 0 || py != dp_->p(dp_->count() - 1) + 1) { - Oc oc; - oc.notify_when_freed(py, this); + // Dubious + if (dp_->count() == 0 || + static_cast(py) != static_cast(dp_->p(dp_->count() - 1)) + 1) { + neuron::container::notify_when_handle_dies(py, this); } x_->add(x); - double* p = &zero; - if (py) { - p = py; + if (!py) { + py = {neuron::container::do_not_search, &zero}; } - dp_->add(p); - y_->add(float(*p)); + y_->add(*py); + dp_->add(std::move(py)); } bool GraphVector::trivial() const { for (int i = 0; i < dp_->count(); ++i) { - if (dp_->p(i) != &zero) { + if (static_cast(dp_->p(i)) != &zero) { return false; } } @@ -3490,46 +3403,4 @@ void Graph::change_prop() { } } -void Graph::update_ptrs() { - if (x_pval_) { - x_pval_ = nrn_recalc_ptr(x_pval_); - } - if (rvp_) { - rvp_->update_ptrs(); - } - GlyphIndex i, cnt = count(); - for (i = 0; i < cnt; ++i) { - GraphItem* gi = (GraphItem*) component(i); - if (gi->is_graphVector()) { - GraphVector* gv = (GraphVector*) (gi->body()); - if (gv) { - gv->update_ptrs(); - } - } - } - cnt = line_list_.count(); - for (i = 0; i < line_list_.count(); ++i) { - line_list_.item(i)->update_ptrs(); - } -} - -void DataPointers::update_ptrs() { - int i; - for (i = 0; i < count_; ++i) { - px_[i] = nrn_recalc_ptr(px_[i]); - } -} - -void GraphLine::update_ptrs() { - if (pval_) { - pval_ = nrn_recalc_ptr(pval_); - } -} - -void GraphVector::update_ptrs() { - if (dp_) { - dp_->update_ptrs(); - } -} - #endif /* HAVE_IV */ diff --git a/src/ivoc/graph.h b/src/ivoc/graph.h index 32b3cb14b8..72e58b5874 100644 --- a/src/ivoc/graph.h +++ b/src/ivoc/graph.h @@ -1,6 +1,7 @@ #ifndef graph_h #define graph_h +#include "neuron/container/data_handle.hpp" #include #include #include @@ -22,8 +23,6 @@ class LineExtension; class TelltaleState; struct Object; -declarePtrList(LineList, GraphLine); - // all Glyphs added to Graph must be enclosed in a GraphItem class GraphItem: public MonoGlyph { public: @@ -71,7 +70,7 @@ class Graph: public Scene { // Scene of GraphLines labels and polylines const Brush*, bool usepointer, int fixtype = 1, - double* p = NULL, + neuron::container::data_handle p = {}, const char* lab = NULL, Object* obj = NULL); void x_expr(const char*, bool usepointer); @@ -145,7 +144,6 
@@ class Graph: public Scene { // Scene of GraphLines labels and polylines void name(char*); void change_label_color(GLabel*); void change_line_color(GPolyLine*); - void update_ptrs(); virtual void save_phase1(std::ostream&); virtual void save_phase2(std::ostream&); @@ -166,13 +164,13 @@ class Graph: public Scene { // Scene of GraphLines labels and polylines private: Symlist* symlist_; - LineList line_list_; + std::vector line_list_; int loc_; DataVec* x_; bool extension_flushed_; SymChooser* sc_; static SymChooser* fsc_; - CopyString* var_name_; + std::string var_name_; GPolyLine* current_polyline_; const Color* color_; @@ -190,7 +188,7 @@ class Graph: public Scene { // Scene of GraphLines labels and polylines bool vector_copy_; Symbol* x_expr_; - double* x_pval_; + neuron::container::data_handle x_pval_; GraphVector* rvp_; static std::ostream* ascii_; @@ -232,26 +230,26 @@ class DataVec: public Resource { // info for single dimension class DataPointers: public Resource { // vector of pointers public: - DataPointers(int size = 50); - virtual ~DataPointers(); - void add(double*); + virtual ~DataPointers() {} + void add(neuron::container::data_handle dh) { + px_.push_back(std::move(dh)); + } void erase() { - count_ = 0; + px_.clear(); } - int size() { - return size_; + [[nodiscard]] std::size_t size() { + return px_.capacity(); } - int count() { - return count_; + [[nodiscard]] std::size_t count() { + return px_.size(); } - double* p(int i) { + [[nodiscard]] neuron::container::data_handle p(std::size_t i) { + assert(i < px_.size()); return px_[i]; } - void update_ptrs(); private: - int count_, size_; - double** px_; + std::vector> px_; }; class GPolyLine: public Glyph { @@ -332,7 +330,7 @@ class GraphLine: public GPolyLine, public Observer { // An oc variable to plot const Color* = NULL, const Brush* = NULL, bool usepointer = 0, - double* pd = NULL, + neuron::container::data_handle pd = {}, Object* obj = NULL); virtual ~GraphLine(); @@ -364,10 +362,9 @@ class GraphLine: public GPolyLine, public Observer { // An oc variable to plot void simgraph_activate(bool); void simgraph_init(); void simgraph_continuous(double); - void update_ptrs(); Symbol* expr_; - double* pval_; + neuron::container::data_handle pval_; Object* obj_; private: @@ -384,7 +381,7 @@ class GraphVector: public GPolyLine, public Observer { // fixed x and vector of virtual ~GraphVector(); virtual void request(Requisition&) const; void begin(); - void add(float, double*); + void add(float, neuron::container::data_handle); virtual void save(std::ostream&); const char* name() const; bool trivial() const; @@ -394,13 +391,12 @@ class GraphVector: public GPolyLine, public Observer { // fixed x and vector of DataPointers* py_data() { return dp_; } - void update_ptrs(); void record_install(); void record_uninstall(); private: DataPointers* dp_; - CopyString name_; + std::string name_; bool disconnect_defer_; }; @@ -451,7 +447,7 @@ class GLabel: public Glyph { return scale_; } const char* text() const { - return text_.string(); + return text_.c_str(); } int fixtype() const { return fixtype_; @@ -484,7 +480,7 @@ class GLabel: public Glyph { int fixtype_; float scale_; float x_align_, y_align_; - CopyString text_; + std::string text_; Glyph* label_; const Color* color_; GPolyLine* gpl_; diff --git a/src/ivoc/graphvec.cpp b/src/ivoc/graphvec.cpp index ebb9c8f92e..dcc9710048 100644 --- a/src/ivoc/graphvec.cpp +++ b/src/ivoc/graphvec.cpp @@ -10,9 +10,6 @@ #include "oc2iv.h" #include "ivocvect.h" -extern Object** (*nrnpy_gui_helper_)(const 
char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - Object** DataVec::new_vect(GLabel* gl) const { int i, cnt; Vect* vec; diff --git a/src/ivoc/grglyph.cpp b/src/ivoc/grglyph.cpp index ddc5b1f314..1c79e5da1b 100644 --- a/src/ivoc/grglyph.cpp +++ b/src/ivoc/grglyph.cpp @@ -26,9 +26,6 @@ class GrGlyph: public Resource { #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - double gr_addglyph(void* v) { TRY_GUI_REDIRECT_ACTUAL_DOUBLE("Graph.addglyph", v); #if HAVE_IV diff --git a/src/ivoc/grmanip.cpp b/src/ivoc/grmanip.cpp index 1d582d6d32..2462b8a9b3 100644 --- a/src/ivoc/grmanip.cpp +++ b/src/ivoc/grmanip.cpp @@ -44,7 +44,7 @@ class LineRubberMarker: public Rubberband { Label* label_; Coord x_, y_; int index_; -#if defined(WIN32) || MAC +#if defined(WIN32) CopyString def_str_; #endif }; @@ -280,7 +280,7 @@ bool LineRubberMarker::event(Event& e) { } return true; } else { -#if defined(WIN32) || MAC +#if defined(WIN32) if (e.type() == Event::down) { def_str_ = ((DismissableWindow*) canvas()->window())->name(); } else if (e.type() == Event::up) { @@ -297,7 +297,7 @@ void LineRubberMarker::undraw(Coord, Coord) { Transformer identity; c->push_transform(); c->transformer(identity); -#if !defined(WIN32) && !MAC +#if !defined(WIN32) Allocation a; a.allot_x(Allotment(x + 20, 0, 0)); a.allot_y(Allotment(y, 0, 0)); @@ -320,7 +320,7 @@ void LineRubberMarker::draw(Coord x, Coord y) { } char s[50]; -#if defined(WIN32) || MAC +#if defined(WIN32) Sprintf(s, "crosshair x=%g y=%g", x_, y_); ((DismissableWindow*) canvas()->window())->name(s); #else @@ -513,7 +513,7 @@ void MoveLabelBand::press(Event&) { } x0_ -= x_begin(); y0_ -= y_begin(); -#if !defined(WIN32) && !MAC +#if !defined(WIN32) undraw(x(), y()); // so initial draw does not make it disappear #endif } @@ -553,7 +553,7 @@ void MoveLabelBand::draw(Coord x, Coord y) { // printf("MoveLabelBand::draw(%g, %g)\n", x, y); a_.x_allotment().origin(x + x0_); a_.y_allotment().origin(y + y0_); -#if defined(WIN32) || MAC +#if defined(WIN32) c->rect(a_.x_allotment().begin(), a_.y_allotment().begin(), a_.x_allotment().end(), diff --git a/src/ivoc/idraw.cpp b/src/ivoc/idraw.cpp index 9ea3052f45..1216b226d1 100644 --- a/src/ivoc/idraw.cpp +++ b/src/ivoc/idraw.cpp @@ -38,7 +38,7 @@ void OcIdraw::prologue() { return; } name = expand_env_var(name.string()); -#if defined(WIN32) || defined(MAC) +#if defined(WIN32) if (!ibuf.open(name.string(), std::ios::in)) { #else if (!ibuf.open(name.string(), std::ios::in)) { @@ -429,7 +429,6 @@ void OcIdraw::brush(const Brush* b) { int i, p; p = 0; -#if !MAC if (b) for (i = 0; i < b->dash_count(); ++i) { int nbit = b->dash_list(i); @@ -437,15 +436,12 @@ void OcIdraw::brush(const Brush* b) { p = ((p << 1) | ((i + 1) % 2)); } } -#endif Sprintf(buf, "%%I b %d\n%d 0 0 [", p, int(w)); out << buf; -#if !MAC if (b) for (i = 0; i < b->dash_count(); ++i) { out << b->dash_list(i) << " "; } -#endif Sprintf(buf, "] 0 SetB"); out << buf << std::endl; } diff --git a/src/ivoc/ivoc.cpp b/src/ivoc/ivoc.cpp index 93570908d0..46fc0de8c5 100644 --- a/src/ivoc/ivoc.cpp +++ b/src/ivoc/ivoc.cpp @@ -4,14 +4,13 @@ #include #include #include +#include "neuron/container/soa_container.hpp" #include #include "oc2iv.h" #include "ocfunc.h" #include "ocnotify.h" #include "oc_ansi.h" - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); +#include "ocjump.h" #if HAVE_IV #include 
"utility.h" @@ -31,6 +30,9 @@ static FList* f_list; static nrn::tool::bimap* pvob; static nrn::tool::bimap* pdob; +using identifier_observer_bimap = + nrn::tool::bimap; +static identifier_observer_bimap* phob; // fast insert, find, and remove of (double*, Observer*) using either as // a key. Use pair of multimap since there can be many observers of the @@ -40,10 +42,6 @@ static nrn::tool::bimap* pdob; int nrn_err_dialog_active_; - -void* (*nrnpy_save_thread)(); -void (*nrnpy_restore_thread)(void*); - void nrn_notify_freed(PF pf) { if (!f_list) { f_list = new FList; @@ -77,16 +75,67 @@ void nrn_notify_pointer_disconnect(Observer* ob) { if (pdob) { pdob->obremove(ob); } + if (phob) { + phob->obremove(ob); + } MUTUNLOCK } +namespace neuron::container { +/** + * @brief Register that `obs` should be notified when `dh` dies. + * + * In general this should happen less often than before, as data_handle can remain valid + * even when the pointed-to value changes address. + */ +void notify_when_handle_dies(data_handle dh, Observer* obs) { + if (dh.refers_to_a_modern_data_structure()) { + assert(dh); // strange to set up notification-on-death for something that's already dead + MUTLOCK + if (!phob) { + phob = new identifier_observer_bimap{}; + } + phob->insert(dh.identifier(), obs); + MUTUNLOCK + } else { + // The handle is wrapping a raw pointer, fall back to the old code + nrn_notify_when_double_freed(static_cast(dh), obs); + } +} +namespace detail { +/** + * @brief Respond to the news that data_handles relying on `p` are now dead. + * + * The data_handle and generic_data_handle wrappers ultimately hold something like `vector_ptr` + * and `p`, where `p` is basically `std::size_t*`, and yield vector_ptr->at(*p). When the relevant + * value gets deleted, `*p` is set to a sentinel value, then this method is called, and then `p` + * is transferred to a garbage heap. + */ +void notify_handle_dying(non_owning_identifier_without_container p) { + // call Observer::update on everything that was observing `p`, and remove those entries from the + // table + if (!phob) { + return; + } + MUTLOCK + non_owning_identifier_without_container pv; + Observer* ob; + while (phob->find(p, pv, ob)) { + ob->update(nullptr); + phob->remove(pv, ob); + } + MUTUNLOCK +} +} // namespace detail +} // namespace neuron::container + void notify_pointer_freed(void* pt) { if (pvob) { MUTLOCK void* pv; Observer* ob; while (pvob->find(pt, pv, ob)) { - ob->update(NULL); + ob->update(nullptr); pvob->remove(pv, ob); } MUTUNLOCK @@ -124,7 +173,6 @@ char* cxx_char_alloc(size_t sz) { #ifndef MINGW // actual implementation in ivocwin.cpp -void nrniv_bind_thread(void); void nrniv_bind_thread() { hoc_pushx(1.); hoc_ret(); @@ -184,7 +232,6 @@ void ivoc_style(); // because NEURON can no longer maintain its own copy of dialogs.cpp // we communicate with the InterViews version through a callback. 
extern bool (*IVDialog_setAcceptInput)(bool); -bool setAcceptInputCallback(bool); bool setAcceptInputCallback(bool b) { Oc oc; return oc.setAcceptInput(b); @@ -211,7 +258,7 @@ if (WidgetKit::instance()->style()->find_attribute(gargstr(1)+1, s)) { hoc_pushx(1.); } -#if !defined(MINGW) && !defined(MAC) +#if !defined(MINGW) /*static*/ class ReqErr1: public ReqErr { public: ReqErr1(); @@ -246,10 +293,6 @@ void ReqErr1::Error() { static ReqErr1* reqerr1; #endif -#if MAC -static HandleStdin* hsd_; -#endif - #ifdef MINGW static HandleStdin* hsd_; void winio_key_press() { @@ -275,11 +318,11 @@ Oc::Oc(Session* s, const char* pname, const char** env) { notify_change_ = new Observable(); if (s) { helpmode_ = false; -#if !defined(WIN32) && !defined(MAC) +#if !defined(WIN32) reqerr1 = new ReqErr1; reqerr1->Install(); #endif -#if defined(MINGW) || defined(MAC) +#if defined(MINGW) hsd_ = handleStdin_ = new HandleStdin; #else handleStdin_ = new HandleStdin; @@ -287,9 +330,6 @@ Oc::Oc(Session* s, const char* pname, const char** env) { Dispatcher::instance().link(0, Dispatcher::ExceptMask, handleStdin_); #endif hoc_interviews = 1; -#if MAC - hoc_print_first_instance = 0; -#endif String str; if (session_->style()->find_attribute("first_instance_message", str)) { if (str == "on") { @@ -306,7 +346,7 @@ Oc::Oc(Session* s, const char* pname, const char** env) { Oc::~Oc() { MUTLOCK if (--refcnt_ == 0) { -#if !defined(MINGW) && !defined(MAC) +#if !defined(MINGW) if (reqerr1 && reqerr1->count()) { fprintf(stderr, "total X Errors: %d\n", reqerr1->count()); } @@ -325,8 +365,25 @@ int Oc::run(int argc, const char** argv) { } int Oc::run(const char* buf, bool show_err_mes) { + int hem = hoc_execerror_messages; hoc_execerror_messages = show_err_mes; - return hoc_oc(buf); + int err{}; + try_catch_depth_increment tell_children_we_will_catch{}; + try { + err = hoc_oc(buf); + } catch (std::exception const& e) { + if (show_err_mes) { + std::cerr << "Oc::run: caught exception"; + std::string_view what{e.what()}; + if (!what.empty()) { + std::cerr << ": " << what; + } + std::cerr << std::endl; + } + err = 1; + } + hoc_execerror_messages = hem; + return err; } Symbol* Oc::parseExpr(const char* expr, Symlist** ps) { @@ -355,10 +412,6 @@ void Oc::notify_when_freed(void* p, Observer* ob) { nrn_notify_when_void_freed(p, ob); } -void Oc::notify_when_freed(double* p, Observer* ob) { - nrn_notify_when_double_freed(p, ob); -} - void Oc::notify_pointer_disconnect(Observer* ob) { nrn_notify_pointer_disconnect(ob); } @@ -393,7 +446,7 @@ void ivoc_cleanup() {} int run_til_stdin() { Session* session = Oc::getSession(); -#if defined(WIN32) || MAC +#if defined(WIN32) Oc oc; oc.notify(); #endif @@ -402,18 +455,10 @@ int run_til_stdin() { #endif session->run(); WinDismiss::dismiss_defer(); // in case window was dismissed -#if MAC - extern Boolean IVOCGoodLine; - if (IVOCGoodLine) { - return 1; - } else { - return 0; - } -#endif #ifdef WIN32 return 0; #else - return Oc::getStdinSeen(); // MAC should not reach this point + return Oc::getStdinSeen(); #endif } @@ -423,12 +468,6 @@ void single_event_run() { Event e; // actually run till no more events Oc::setAcceptInput(false); -#if MAC - extern bool read_if_pending(Event&); - while (!session->done() && read_if_pending(e)) { - e.handle(); - } -#else bool dsav = session->done(); session->unquit(); while (session->pending() && !session->done()) { @@ -438,13 +477,8 @@ void single_event_run() { if (dsav) { session->quit(); } -#endif Oc::setAcceptInput(true); - ; HocPanel::keep_updated(); -#if MAC - 
Session::instance()->screen_update(); -#endif WinDismiss::dismiss_defer(); // in case window was dismissed } diff --git a/src/ivoc/ivoc.h b/src/ivoc/ivoc.h index acfd66d459..c4875b634a 100644 --- a/src/ivoc/ivoc.h +++ b/src/ivoc/ivoc.h @@ -56,7 +56,6 @@ class Oc { void notify_freed(void (*pf)(void*, int)); // register a callback func void notify_when_freed(void* p, Observer*); - void notify_when_freed(double* p, Observer*); void notify_pointer_disconnect(Observer*); static Session* getSession(); diff --git a/src/ivoc/ivocconf.h b/src/ivoc/ivocconf.h old mode 100755 new mode 100644 diff --git a/src/ivoc/ivocmac.cpp b/src/ivoc/ivocmac.cpp index f452b0f835..0ebc0345d4 100644 --- a/src/ivoc/ivocmac.cpp +++ b/src/ivoc/ivocmac.cpp @@ -246,10 +246,6 @@ void Rubberband::rubber_on(Canvas* c) { void Rubberband::rubber_off(Canvas* c) { // c->back_buffer(); SetGWorld(cg_, gd_); -#ifdef MAC - // this prevents failure for all future paints ... I am not sure this is what we want - c->damage_all(); -#endif // printf("Rubberband::rubber_off\n"); } diff --git a/src/ivoc/ivocmain.cpp b/src/ivoc/ivocmain.cpp index 5b0e34419e..315a375eda 100644 --- a/src/ivoc/ivocmain.cpp +++ b/src/ivoc/ivocmain.cpp @@ -1,7 +1,8 @@ #include <../../nrnconf.h> -#include <../nrnpython/nrnpython_config.h> #include "nrn_ansi.h" +#include "../utils/profile/profiler_interface.h" + long hoc_nframe, hoc_nstack; #if !HAVE_IV @@ -32,10 +33,10 @@ void iv_display_scale(float); #include "idraw.h" #include #endif -#include #include "string.h" #include "oc2iv.h" #include "nrnmpi.h" +#include "nrnpy.h" #if defined(IVX11_DYNAM) #include @@ -147,8 +148,7 @@ extern const char* nrn_mech_dll; #if defined(USE_PYTHON) int nrn_nopython; extern int use_python_interpreter; -extern int (*p_nrnpython_start)(int); -char* nrnpy_pyexe; +std::string nrnpy_pyexe; #endif /*****************************************************************************/ @@ -177,21 +177,12 @@ extern double hoc_default_dll_loaded_; extern int hoc_print_first_instance; int nrnpy_nositeflag; -#if !defined(MINGW) && !MAC +#if !defined(MINGW) extern void setneuronhome(const char*) { neuron_home = getenv("NEURONHOME"); } #endif -#if 0 -void penv() { - int i; - for (i=0; environ[i]; ++i) { - printf("%p %s\n", environ[i], environ[i]); - } -} -#endif - #if DARWIN || defined(__linux__) #include "nrnwrap_dlfcn.h" #include @@ -229,37 +220,6 @@ const char* path_prefix_to_libnrniv() { } #endif // DARWIN || defined(__linux__) -#if MAC -#include -#include -extern bool mac_load_dll(const char*); -void mac_open_doc(const char* s) { - // only chdir and load dll on the first opendoc - static bool done = false; - char cs[256]; - strncpy(cs, s, 256); - char* cp = strrchr(cs, ':'); - if (cp && !done) { - *cp = '\0'; - if (chdir(cs) == 0) { - done = true; - printf("current directory is \"%s\"\n", cs); - if (mac_load_dll("nrnmac.dll")) { - hoc_default_dll_loaded_ = 1.; - } - } - } - hoc_xopen1(s, 0); -} -void mac_open_app() { - hoc_xopen1(":lib:hoc:macload.hoc", 0); -} -#endif - -#ifdef MAC -#pragma export on -#endif - int ivocmain(int, const char**, const char**); int ivocmain_session(int, const char**, const char**, int start_session); int (*p_neosim_main)(int, const char**, const char**); @@ -270,7 +230,7 @@ extern int nrn_is_python_extension; extern void hoc_nrnmpi_init(); #if NRNMPI_DYNAMICLOAD extern void nrnmpi_stubs(); -extern std::string nrnmpi_load(int is_python); +extern std::string nrnmpi_load(); #endif // some things are defined in libraries earlier than they are used so... 
@@ -292,10 +252,6 @@ static bool isdir(const char* p) { } #endif -#ifdef MAC -#pragma export off -#endif - // in case we are running without IV then get some important args this way static bool nrn_optarg_on(const char* opt, int* argc, char** argv); static char* nrn_optarg(const char* opt, int* argc, char** argv); @@ -350,22 +306,12 @@ static int nrn_optargint(const char* opt, int* pargc, const char** argv, int dfl void nrn_InitializeJavaVM(); #endif -#if 0 // for debugging -void prargs(const char* s, int argc, const char** argv) { - int i; - printf("%s argc=%d\n", s, argc); - for (i=0; i < argc; ++i) { - printf(" %d |%s|\n", i, argv[i]); - } -} -#endif - void hoc_nrnmpi_init() { #if NRNMPI if (!nrnmpi_use) { #if NRNMPI_DYNAMICLOAD nrnmpi_stubs(); - auto const pmes = nrnmpi_load(1); + auto const pmes = nrnmpi_load(); if (!pmes.empty()) { std::cout << pmes << std::endl; } @@ -427,6 +373,8 @@ int ivocmain(int argc, const char** argv, const char** env) { * \return 0 on success, otherwise error code. */ int ivocmain_session(int argc, const char** argv, const char** env, int start_session) { + nrn::Instrumentor::init_profile(); + // third arg should not be used as it might become invalid // after putenv or setenv. Instead, if necessary use // #include @@ -546,7 +494,7 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se const char** our_argv = argv; int exit_status = 0; Session* session = NULL; -#if !defined(MINGW) && !defined(MAC) +#if !defined(MINGW) // Gary Holt's first pass at this was: // // Set the NEURONHOME environment variable. This should override any setting @@ -565,14 +513,6 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se #endif // putenv and setenv may invalidate env but we no longer // use it so following should not be needed -#if 0 -#if HAVE_UNISTD_H && !defined(__APPLE__) - env = environ; -#endif -#if defined(__APPLE__) - env = (*_NSGetEnviron()); -#endif -#endif } #else // Not unix: @@ -604,13 +544,6 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se our_argv[1] = ":lib:hoc:macload.hoc"; session = new Session("NEURON", our_argc, our_argv, options, properties); #else -#if MAC - our_argc = 1; - our_argv = new char*[1]; - our_argv[0] = "Neuron"; - session = new Session("NEURON", our_argc, our_argv, options, properties); - SIOUXSettings.asktosaveonclose = false; -#else #if defined(WIN32) IFGUI session = new Session("NEURON", our_argc, (char**) our_argv, options, properties); @@ -626,7 +559,6 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se hoc_usegui = 0; } ENDGUI -#endif #endif auto const nrn_props_size = strlen(neuron_home) + 20; char* nrn_props = new char[nrn_props_size]; @@ -655,13 +587,11 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se #else session->style()->load_file(String(nrn_props), -5); #endif -#if !MAC char* h = getenv("HOME"); if (h) { std::snprintf(nrn_props, nrn_props_size, "%s/%s", h, ".nrn.defaults"); session->style()->load_file(String(nrn_props), -5); } -#endif } delete[] nrn_props; @@ -692,7 +622,7 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se } String str; if (session->style()->find_attribute("pyexe", str)) { - nrnpy_pyexe = strdup(str.string()); + nrnpy_pyexe = str.string(); } } else #endif @@ -700,9 +630,8 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se if (nrn_optarg_on("-nopython", &our_argc, our_argv)) { nrn_nopython 
= 1; } - const char* buf = nrn_optarg("-pyexe", &our_argc, our_argv); - if (buf) { - nrnpy_pyexe = strdup(buf); + if (const char* buf = nrn_optarg("-pyexe", &our_argc, our_argv)) { + nrnpy_pyexe = buf; } } } @@ -724,8 +653,8 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se nrn_optarg_on("-mpi", &our_argc, our_argv); #if (defined(NRNMECH_DLL_STYLE) || defined(WIN32)) - String str; #if HAVE_IV + String str; if (session) { if (session->style()->find_attribute("nrnmechdll", str)) { nrn_mech_dll = str.string(); @@ -800,11 +729,10 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se if (nrn_is_python_extension) { return 0; } - // printf("p_nrnpython_start = %p\n", p_nrnpython_start); - if (p_nrnpython_start) { - (*p_nrnpython_start)(1); + if (neuron::python::methods.interpreter_start) { + neuron::python::methods.interpreter_start(1); } - if (use_python_interpreter && !p_nrnpython_start) { + if (use_python_interpreter && !neuron::python::methods.interpreter_start) { fprintf(stderr, "Python not available\n"); exit(1); } @@ -833,18 +761,21 @@ int ivocmain_session(int argc, const char** argv, const char** env, int start_se #if defined(USE_PYTHON) if (use_python_interpreter) { // process the .py files and an interactive interpreter - if (p_nrnpython_start && (*p_nrnpython_start)(2) != 0) { + if (neuron::python::methods.interpreter_start && + neuron::python::methods.interpreter_start(2) != 0) { // We encountered an error when processing the -c argument or Python // script given on the commandline. exit_status = 1; } } - if (p_nrnpython_start) { - (*p_nrnpython_start)(0); + if (neuron::python::methods.interpreter_start) { + neuron::python::methods.interpreter_start(0); } #endif hoc_final_exit(); ivoc_final_exit(); + nrn::Instrumentor::finalize_profile(); + return exit_status; } diff --git a/src/ivoc/ivocrand.cpp b/src/ivoc/ivocrand.cpp index 416a6b1a7c..f250049299 100644 --- a/src/ivoc/ivocrand.cpp +++ b/src/ivoc/ivocrand.cpp @@ -4,19 +4,19 @@ #include #include -#include "random1.h" +#include "Rand.hpp" #include #include "classreg.h" #include "oc2iv.h" #include "nrnisaac.h" +#include "utils/enumerate.h" #include #include #include "ocobserv.h" #include -#include #include #include #include @@ -32,6 +32,9 @@ #include #include #include +#include +#include +#include #if HAVE_IV #include "ivoc.h" @@ -44,156 +47,38 @@ extern "C" void nrn_random_play(); class RandomPlay: public Observer, public Resource { public: - RandomPlay(Rand*, double*); - virtual ~RandomPlay(); + RandomPlay(Rand*, neuron::container::data_handle px); + virtual ~RandomPlay() {} void play(); void list_remove(); virtual void update(Observable*); private: Rand* r_; - double* px_; + neuron::container::data_handle px_; }; using RandomPlayList = std::vector; static RandomPlayList* random_play_list_; -#include - -class NrnRandom123: public RNG { - public: - NrnRandom123(uint32_t id1, uint32_t id2, uint32_t id3 = 0); - virtual ~NrnRandom123(); - virtual uint32_t asLong() { - return nrnran123_ipick(s_); - } - virtual double asDouble() { - return nrnran123_dblpick(s_); - } - virtual void reset() { - nrnran123_setseq(s_, 0, 0); - } - nrnran123_State* s_; -}; -NrnRandom123::NrnRandom123(uint32_t id1, uint32_t id2, uint32_t id3) { - s_ = nrnran123_newstream3(id1, id2, id3); -} -NrnRandom123::~NrnRandom123() { - nrnran123_deletestream(s_); -} - - -// The decision that has to be made is whether each generator instance -// should have its own seed or only one seed for all. 
We choose separate -// seed for each but if arg not present or 0 then seed chosen by system. - -// the addition of ilow > 0 means that value is used for the lowindex -// instead of the mcell_ran4_init global 32 bit lowindex. - -class MCellRan4: public RNG { - public: - MCellRan4(uint32_t ihigh = 0, uint32_t ilow = 0); - virtual ~MCellRan4(); - virtual uint32_t asLong() { - return (uint32_t) (ilow_ == 0 ? mcell_iran4(&ihigh_) : nrnRan4int(&ihigh_, ilow_)); - } - virtual void reset() { - ihigh_ = orig_; - } - virtual double asDouble() { - return (ilow_ == 0 ? mcell_ran4a(&ihigh_) : nrnRan4dbl(&ihigh_, ilow_)); - } - uint32_t ihigh_; - uint32_t orig_; - uint32_t ilow_; - - private: - static uint32_t cnt_; -}; - -MCellRan4::MCellRan4(uint32_t ihigh, uint32_t ilow) { - ++cnt_; - ilow_ = ilow; - ihigh_ = ihigh; - if (ihigh_ == 0) { - ihigh_ = cnt_; - ihigh_ = (uint32_t) asLong(); - } - orig_ = ihigh_; -} -MCellRan4::~MCellRan4() {} - -uint32_t MCellRan4::cnt_ = 0; - -class Isaac64: public RNG { - public: - Isaac64(uint32_t seed = 0); - virtual ~Isaac64(); - virtual uint32_t asLong() { - return (uint32_t) nrnisaac_uint32_pick(rng_); - } - virtual void reset() { - nrnisaac_init(rng_, seed_); - } - virtual double asDouble() { - return nrnisaac_dbl_pick(rng_); - } - uint32_t seed() { - return seed_; - } - void seed(uint32_t s) { - seed_ = s; - reset(); - } - - private: - uint32_t seed_; - void* rng_; - static uint32_t cnt_; -}; - -Isaac64::Isaac64(uint32_t seed) { - if (cnt_ == 0) { - cnt_ = 0xffffffff; - } - --cnt_; - seed_ = seed; - if (seed_ == 0) { - seed_ = cnt_; - } - rng_ = nrnisaac_new(); - reset(); -} -Isaac64::~Isaac64() { - nrnisaac_delete(rng_); -} - -uint32_t Isaac64::cnt_ = 0; - -RandomPlay::RandomPlay(Rand* r, double* px) { - // printf("RandomPlay\n"); - r_ = r; - px_ = px; +RandomPlay::RandomPlay(Rand* r, neuron::container::data_handle px) + : r_{r} + , px_{std::move(px)} { random_play_list_->push_back(this); ref(); - nrn_notify_when_double_freed(px_, this); - nrn_notify_when_void_freed((void*) r->obj_, this); -} -RandomPlay::~RandomPlay() { - // printf("~RandomPlay\n"); + neuron::container::notify_when_handle_dies(px_, this); + nrn_notify_when_void_freed(r->obj_, this); } void RandomPlay::play() { // printf("RandomPlay::play\n"); *px_ = (*(r_->rand))(); } void RandomPlay::list_remove() { - for (auto it = random_play_list_->begin(); it != random_play_list_->end(); ++it) { - if (*it == (RandomPlay*) this) { - // printf("RandomPlay %p removed from list cnt=%d i=%d %p\n", this, cnt, i); - random_play_list_->erase(it); - unref_deferred(); - break; - } + if (auto it = std::find(random_play_list_->begin(), random_play_list_->end(), this); + it != random_play_list_->end()) { + // printf("RandomPlay %p removed from list cnt=%d i=%d %p\n", this, cnt, i); + random_play_list_->erase(it); + unref_deferred(); } } void RandomPlay::update(Observable*) { @@ -202,27 +87,6 @@ void RandomPlay::update(Observable*) { list_remove(); } -Rand::Rand(unsigned long seed, int size, Object* obj) { - // printf("Rand\n"); - gen = new ACG(seed, size); - rand = new Normal(0., 1., gen); - type_ = 0; - obj_ = obj; -} - -Rand::~Rand() { - // printf("~Rand\n"); - delete gen; - delete rand; -} - -// constructor for a random number generator based on the RNG class -// from the gnu c++ class library -// defaults to the ACG generator (see below) - -// syntax: -// a = new Rand([seed],[size]) - static void* r_cons(Object* obj) { unsigned long seed = 0; int size = 55; @@ -342,8 +206,12 @@ static double r_nrnran123(void* r) { 
id2 = (uint32_t) (chkarg(2, 0., dmaxuint)); if (ifarg(3)) id3 = (uint32_t) (chkarg(3, 0., dmaxuint)); - NrnRandom123* r123 = new NrnRandom123(id1, id2, id3); - x->rand->generator(r123); + try { + NrnRandom123* r123 = new NrnRandom123(id1, id2, id3); + x->rand->generator(r123); + } catch (const std::bad_alloc& e) { + hoc_execerror("Bad allocation for 'NrnRandom123'", e.what()); + } delete x->gen; x->gen = x->rand->generator(); x->type_ = 4; @@ -407,14 +275,22 @@ static double r_Isaac64(void* r) { uint32_t seed1 = 0; - if (ifarg(1)) - seed1 = (uint32_t) (*getarg(1)); - Isaac64* mcr = new Isaac64(seed1); - x->rand->generator(mcr); - delete x->gen; - x->gen = x->rand->generator(); - x->type_ = 3; - return (double) mcr->seed(); + if (ifarg(1)) { + seed1 = static_cast(*getarg(1)); + } + + double seed{}; + try { + Isaac64* mcr = new Isaac64(seed1); + x->rand->generator(mcr); + delete x->gen; + x->gen = x->rand->generator(); + x->type_ = 3; + seed = mcr->seed(); + } catch (const std::bad_alloc& e) { + hoc_execerror("Bad allocation for Isaac64 generator", e.what()); + } + return seed; } // Pick again from the distribution last used @@ -601,7 +477,7 @@ static double r_weibull(void* r) { } static double r_play(void* r) { - new RandomPlay((Rand*) r, hoc_pgetarg(1)); + new RandomPlay(static_cast(r), hoc_hgetarg(1)); return 0.; } diff --git a/src/ivoc/ivocvect.cpp b/src/ivoc/ivocvect.cpp index 43436ecadf..866fab6836 100644 --- a/src/ivoc/ivocvect.cpp +++ b/src/ivoc/ivocvect.cpp @@ -1,6 +1,7 @@ #include <../../nrnconf.h> //#include +#include #include #include #include @@ -26,8 +27,6 @@ //#include #include -#else -#include #endif #if defined(SVR4) @@ -43,9 +42,6 @@ extern void exit(int status); #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - #ifndef PI #ifndef M_PI #define M_PI 3.14159265358979323846 @@ -116,7 +112,7 @@ static double dmaxint_ = 9007199254740992; #include "ivocvect.h" // definition of random numer generator -#include "random1.h" +#include "Rand.hpp" #include #if HAVE_IV @@ -401,6 +397,16 @@ int is_vector_arg(int i) { return 1; } +Object** new_vect(Vect* v, ssize_t delta, ssize_t start, ssize_t step) { + // Creates a new vector of values delta steps from start + std::size_t size{(size_t) delta}; + auto* y = new Vect(size); + for (int i = 0; i < delta; ++i) { + y->elem(i) = v->elem(int(i * step + start)); + } + return y->temp_objvar(); +} + int vector_arg_px(int i, double** px) { Vect* x = vector_arg(i); *px = x->data(); @@ -977,18 +983,27 @@ static Object** v_plot(void* v) { // passed a vector Vect* vp2 = vector_arg(2); n = std::min(n, vp2->size()); - for (i = 0; i < n; ++i) - gv->add(vp2->elem(i), y + i); + for (i = 0; i < n; ++i) { + gv->add(vp2->elem(i), + neuron::container::data_handle{neuron::container::do_not_search, + y + i}); + } } else { // passed xinterval double interval = *getarg(2); - for (i = 0; i < n; ++i) - gv->add(i * interval, y + i); + for (i = 0; i < n; ++i) { + gv->add(i * interval, + neuron::container::data_handle{neuron::container::do_not_search, + y + i}); + } } } else { // passed line attributes or nothing - for (i = 0; i < n; ++i) - gv->add(i, y + i); + for (i = 0; i < n; ++i) { + gv->add(i, + neuron::container::data_handle{neuron::container::do_not_search, + y + i}); + } } if (vp->label_) { @@ -1559,7 +1574,6 @@ static Object** v_copy(void* v) { return y->temp_objvar(); } - static Object** v_at(void* v) { auto* x = static_cast(v); std::size_t start{}; @@ -1570,15 
+1584,12 @@ static Object** v_at(void* v) { if (ifarg(2)) { end = chkarg(2, start, x->size() - 1) + 1.0; } - std::size_t size{end - start}; - auto* y = new Vect(size); - // ZFM: fixed bug -- ielem(i) = x->elem(i + start); - } - return y->temp_objvar(); + // Creation of a new vector has been moved to new_vect to allow slicing + ssize_t delta = end - start; + return new_vect(x, delta, start, 1); } + typedef struct { double x; int i; @@ -1673,7 +1684,6 @@ static Object** v_interpolate(void* v) { if (flag) { delete ys; } - return yd->temp_objvar(); } @@ -2358,7 +2368,11 @@ static Object** v_mul(void* v1) { static Object** v_div(void* v1) { Vect* x = (Vect*) v1; if (hoc_argtype(1) == NUMBER) { - std::for_each(x->begin(), x->end(), [](double& d) { d /= *getarg(1); }); + if (*getarg(1) == 0.0) { + hoc_execerror("Vector", "Division by zero"); + } else { + std::for_each(x->begin(), x->end(), [](double& d) { d /= *getarg(1); }); + } } if (hoc_is_object_arg(1)) { Vect* y = vector_arg(1); @@ -3862,198 +3876,9 @@ void Vector_reg() { #endif } -// hacked version of gsort from ../gnu/d_vec.cpp -// the transformation is that everything that used to be a double* becomes -// an int* and cmp(*arg1, *arg2) becomes cmp(vec[*arg1], vec[*arg2]) -// I am not sure what to do about the BYTES_PER_WORD - -// An adaptation of Schmidt's new quicksort - -static inline void SWAP(int* A, int* B) { - int tmp = *A; - *A = *B; - *B = tmp; -} - -/* This should be replaced by a standard ANSI macro. */ -#define BYTES_PER_WORD 8 -#define BYTES_PER_LONG 4 - -/* The next 4 #defines implement a very fast in-line stack abstraction. */ - -#define STACK_SIZE (BYTES_PER_WORD * BYTES_PER_LONG) -#define PUSH(LOW, HIGH) \ - do { \ - top->lo = LOW; \ - top++->hi = HIGH; \ - } while (0) -#define POP(LOW, HIGH) \ - do { \ - LOW = (--top)->lo; \ - HIGH = top->hi; \ - } while (0) -#define STACK_NOT_EMPTY (stack < top) - -/* Discontinue quicksort algorithm when partition gets below this size. - This particular magic number was chosen to work best on a Sun 4/260. */ -#define MAX_THRESH 4 - - -/* Order size using quicksort. This implementation incorporates - four optimizations discussed in Sedgewick: - - 1. Non-recursive, using an explicit stack of pointer that - store the next array partition to sort. To save time, this - maximum amount of space required to store an array of - MAX_INT is allocated on the stack. Assuming a 32-bit integer, - this needs only 32 * sizeof (stack_node) == 136 bits. Pretty - cheap, actually. - - 2. Chose the pivot element using a median-of-three decision tree. - This reduces the probability of selecting a bad pivot value and - eliminates certain extraneous comparisons. - - 3. Only quicksorts TOTAL_ELEMS / MAX_THRESH partitions, leaving - insertion sort to order the MAX_THRESH items within each partition. - This is a big win, since insertion sort is faster for small, mostly - sorted array segements. - - 4. The larger of the two sub-partitions is always pushed onto the - stack first, with the algorithm then concentrating on the - smaller partition. This *guarantees* no more than log (n) - stack size is needed! */ - int nrn_mlh_gsort(double* vec, int* base_ptr, int total_elems, int (*cmp)(double, double)) { - /* Stack node declarations used to store unfulfilled partition obligations. 
*/ - struct stack_node { - int* lo; - int* hi; - }; - int pivot_buffer; - int max_thresh = MAX_THRESH; - - if (total_elems > MAX_THRESH) { - int* lo = base_ptr; - int* hi = lo + (total_elems - 1); - int* left_ptr; - int* right_ptr; - stack_node stack[STACK_SIZE]; /* Largest size needed for 32-bit int!!! */ - stack_node* top = stack + 1; - - while (STACK_NOT_EMPTY) { - { - int* pivot = &pivot_buffer; - { - /* Select median value from among LO, MID, and HI. Rearrange - LO and HI so the three values are sorted. This lowers the - probability of picking a pathological pivot value and - skips a comparison for both the LEFT_PTR and RIGHT_PTR. */ - - int* mid = lo + ((hi - lo) >> 1); - - if (cmp(vec[*mid], vec[*lo]) < 0) - SWAP(mid, lo); - if (cmp(vec[*hi], vec[*mid]) < 0) { - SWAP(mid, hi); - if (cmp(vec[*mid], vec[*lo]) < 0) - SWAP(mid, lo); - } - *pivot = *mid; - pivot = &pivot_buffer; - } - left_ptr = lo + 1; - right_ptr = hi - 1; - - /* Here's the famous ``collapse the walls'' section of quicksort. - Gotta like those tight inner loops! They are the main reason - that this algorithm runs much faster than others. */ - do { - while (cmp(vec[*left_ptr], vec[*pivot]) < 0) - left_ptr += 1; - - while (cmp(vec[*pivot], vec[*right_ptr]) < 0) - right_ptr -= 1; - - if (left_ptr < right_ptr) { - SWAP(left_ptr, right_ptr); - left_ptr += 1; - right_ptr -= 1; - } else if (left_ptr == right_ptr) { - left_ptr += 1; - right_ptr -= 1; - break; - } - } while (left_ptr <= right_ptr); - } - - /* Set up pointers for next iteration. First determine whether - left and right partitions are below the threshold size. If so, - ignore one or both. Otherwise, push the larger partition's - bounds on the stack and continue sorting the smaller one. */ - - if ((right_ptr - lo) <= max_thresh) { - if ((hi - left_ptr) <= max_thresh) /* Ignore both small partitions. */ - POP(lo, hi); - else /* Ignore small left partition. */ - lo = left_ptr; - } else if ((hi - left_ptr) <= max_thresh) /* Ignore small right partition. */ - hi = right_ptr; - else if ((right_ptr - lo) > (hi - left_ptr)) /* Push larger left partition indices. */ - { - PUSH(lo, right_ptr); - lo = left_ptr; - } else /* Push larger right partition indices. */ - { - PUSH(left_ptr, hi); - hi = right_ptr; - } - } - } - - /* Once the BASE_PTR array is partially sorted by quicksort the rest - is completely sorted using insertion sort, since this is efficient - for partitions below MAX_THRESH size. BASE_PTR points to the beginning - of the array to sort, and END_PTR points at the very last element in - the array (*not* one beyond it!). */ - - - { - int* end_ptr = base_ptr + 1 * (total_elems - 1); - int* run_ptr; - int* tmp_ptr = base_ptr; - int* thresh = (end_ptr < (base_ptr + max_thresh)) ? end_ptr : (base_ptr + max_thresh); - - /* Find smallest element in first threshold and place it at the - array's beginning. This is the smallest array element, - and the operation speeds up insertion sort's inner loop. */ - - for (run_ptr = tmp_ptr + 1; run_ptr <= thresh; run_ptr += 1) - if (cmp(vec[*run_ptr], vec[*tmp_ptr]) < 0) - tmp_ptr = run_ptr; - - if (tmp_ptr != base_ptr) - SWAP(tmp_ptr, base_ptr); - - /* Insertion sort, running from left-hand-side up to `right-hand-side.' - Pretty much straight out of the original GNU qsort routine. 
*/ - - for (run_ptr = base_ptr + 1; (tmp_ptr = run_ptr += 1) <= end_ptr;) { - while (cmp(vec[*run_ptr], vec[*(tmp_ptr -= 1)]) < 0) - ; - - if ((tmp_ptr += 1) != run_ptr) { - int* trav; - - for (trav = run_ptr + 1; --trav >= run_ptr;) { - int c = *trav; - int *hi, *lo; - - for (hi = lo = trav; (lo -= 1) >= tmp_ptr; hi = lo) - *hi = *lo; - *hi = c; - } - } - } - } + std::sort(base_ptr, base_ptr + total_elems, [&](int a, int b) { + return cmp(vec[a], vec[b]) < 0; + }); return 1; } diff --git a/src/ivoc/ivocvect.h b/src/ivoc/ivocvect.h index 810c18f005..418ec3789b 100644 --- a/src/ivoc/ivocvect.h +++ b/src/ivoc/ivocvect.h @@ -114,6 +114,8 @@ extern void vector_delete(IvocVect*); extern Object** vector_temp_objvar(IvocVect*); extern int is_vector_arg(int); +extern Object** new_vect(IvocVect* v, ssize_t delta, ssize_t start, ssize_t step); + extern char* vector_get_label(IvocVect*); extern void vector_set_label(IvocVect*, char*); diff --git a/src/ivoc/ivocwin.cpp b/src/ivoc/ivocwin.cpp index 232b927e83..7953646743 100644 --- a/src/ivoc/ivocwin.cpp +++ b/src/ivoc/ivocwin.cpp @@ -215,9 +215,6 @@ extern int (*iv_bind_enqueue_)(void (*)(void*), void* w); extern void iv_bind_call(void* w, int type); extern void nrnpy_setwindowtext(void*); -extern void* (*nrnpy_save_thread)(); -extern void (*nrnpy_restore_thread)(void*); - static void* w_; static void (*nrn_gui_exec_)(void*); @@ -245,14 +242,14 @@ int iv_bind_enqueue(void (*cb)(void*), void* w) { void nrn_gui_exec(void (*cb)(void*), void* v) { assert(GetCurrentThreadId() != bind_tid_); // wait for the gui thread to handle the operation - void* gs = (*nrnpy_save_thread)(); + auto* const gs = neuron::python::methods.save_thread(); { std::unique_lock lock{mut_}; w_ = v; nrn_gui_exec_ = cb; cond_->wait(lock, [] { return !w_; }); } - (*nrnpy_restore_thread)(gs); + neuron::python::methods.restore_thread(gs); } void nrniv_bind_call() { diff --git a/src/ivoc/matrix.cpp b/src/ivoc/matrix.cpp index ee65d49fdd..3c66406403 100644 --- a/src/ivoc/matrix.cpp +++ b/src/ivoc/matrix.cpp @@ -16,17 +16,6 @@ extern int hoc_return_type_code; extern double hoc_scan(FILE*); extern Object** hoc_temp_objptr(Object*); -#if 0 - extern void install_matrix_method(const char* name, double (*)(...)); - extern void* matrix_arg(int); - extern double* matrix_pelm(void*, int i, int j); - extern int matrix_nrow(void*); - extern int matrix_ncol(void*); - extern int matrix_type(void*); // FULL 1, SPARSE 2, BAND 3 - extern MAT* matrix_full(void*); // hoc_execerror if void* not right type - extern SPMAT* matrix_sparse(void*); -#endif - static void check_domain(int i, int j) { if (i > j || i < 0) { auto const tmp = "index=" + std::to_string(i) + " max_index=" + std::to_string(j) + "\n"; @@ -48,14 +37,13 @@ Matrix* matrix_arg(int i) { return (Matrix*) (ob->u.this_pointer); } -Object** Matrix::temp_objvar() { - Matrix* m = (Matrix*) this; +static Object** temp_objvar(Matrix* m) { Object** po; if (m->obj_) { po = hoc_temp_objptr(m->obj_); } else { po = hoc_temp_objvar(nrn_matrix_sym, (void*) m); - obj_ = *po; + m->obj_ = *po; } return po; } @@ -189,7 +177,7 @@ static double m_scanf(void* v) { static Object** m_resize(void* v) { Matrix* m = (Matrix*) v; m->resize((int) (chkarg(1, 1., 1e9) + EPS), (int) (chkarg(2, 1., 1e9) + EPS)); - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_mulv(void* v) { @@ -265,7 +253,7 @@ static Object** m_add(void* v) { out = matrix_arg(2); } m->add(matrix_arg(1), out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** 
m_bcopy(void* v) { @@ -287,7 +275,7 @@ static Object** m_bcopy(void* v) { } out = get_out_mat(m, m0, n0, i); m->bcopy(out, i0, j0, m0, n0, i1, j1); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_mulm(void* v) { @@ -305,14 +293,14 @@ static Object** m_mulm(void* v) { out->resize(m->nrow(), in->ncol()); check_domain(m->ncol(), in->nrow()); m->mulm(in, out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_c(void* v) { Matrix* m = (Matrix*) v; Matrix* out = get_out_mat(m, 1); m->copy(out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_transpose(void* v) { @@ -320,7 +308,7 @@ static Object** m_transpose(void* v) { Matrix* out = get_out_mat(m, 1); out->resize(m->ncol(), m->nrow()); m->transpose(out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_symmeig(void* v) { @@ -370,12 +358,8 @@ static Object** m_muls(void* v) { if (ifarg(2)) { out = matrix_arg(2); } - // I believe meschach does this for us - // if (out != m) { - // out->resize(... - // } m->muls(*getarg(1), out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_getrow(void* v) { @@ -444,7 +428,7 @@ static Object** m_setrow(void* v) { #endif m->setrow(k, in); } - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_setcol(void* v) { @@ -461,7 +445,7 @@ static Object** m_setcol(void* v) { #endif m->setcol(k, in); } - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_setdiag(void* v) { @@ -478,7 +462,7 @@ static Object** m_setdiag(void* v) { #endif m->setdiag(k, in); } - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_getdiag(void* v) { @@ -510,20 +494,20 @@ static Object** m_getdiag(void* v) { static Object** m_zero(void* v) { Matrix* m = (Matrix*) v; m->zero(); - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_ident(void* v) { Matrix* m = (Matrix*) v; m->ident(); - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_exp(void* v) { Matrix* m = (Matrix*) v; Matrix* out = get_out_mat(m, 1, "exponentiation"); m->exp(out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_pow(void* v) { @@ -531,14 +515,14 @@ static Object** m_pow(void* v) { int k = (int) chkarg(1, 0., 100.); Matrix* out = get_out_mat(m, 2, "raising to a power"); m->pow(k, out); - return out->temp_objvar(); + return temp_objvar(out); } static Object** m_inverse(void* v) { Matrix* m = (Matrix*) v; Matrix* out = get_out_mat(m, 1); m->inverse(out); - return out->temp_objvar(); + return temp_objvar(out); } static double m_det(void* v) { @@ -619,7 +603,7 @@ static Object** m_set(void* v) { *(m->mep(i, j)) = *getarg(++k); } } - return m->temp_objvar(); + return temp_objvar(m); } static Object** m_to_vector(void* v) { @@ -657,7 +641,7 @@ static Object** m_from_vector(void* v) { for (i = 0; i < nrow; ++i) { *(m->mep(i, j)) = ve[k++]; } - return m->temp_objvar(); + return temp_objvar(m); } static Member_func m_members[] = { diff --git a/src/ivoc/mlinedit.cpp b/src/ivoc/mlinedit.cpp index 3c47f87aac..52e289761c 100644 --- a/src/ivoc/mlinedit.cpp +++ b/src/ivoc/mlinedit.cpp @@ -22,10 +22,6 @@ extern int hoc_return_type_code; #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); -extern char** (*nrnpy_gui_helper3_str_)(const char* name, Object* obj, int handle_strptr); - #if HAVE_IV class OcText: public Text { public: diff --git 
a/src/ivoc/mymath.h b/src/ivoc/mymath.h index e60e5e3053..a9bfb8d040 100644 --- a/src/ivoc/mymath.h +++ b/src/ivoc/mymath.h @@ -70,7 +70,7 @@ class MyMath { }; inline void MyMath::extend(Extension& e, const Extension& x) { - e.set_xy(NULL, + e.set_xy(nullptr, e.left() + x.left(), e.bottom() + x.bottom(), e.right() + x.right(), @@ -78,7 +78,7 @@ inline void MyMath::extend(Extension& e, const Extension& x) { } inline void MyMath::extend(Extension& e, Coord x) { - e.set_xy(NULL, e.left() - x, e.bottom() - x, e.right() + x, e.top() + x); + e.set_xy(nullptr, e.left() - x, e.bottom() - x, e.right() + x, e.top() + x); } inline void MyMath::minmax(Coord& x, Coord& y) { diff --git a/src/ivoc/nrnmain.cpp b/src/ivoc/nrnmain.cpp index 5b5b7ee5d6..153e9d4559 100644 --- a/src/ivoc/nrnmain.cpp +++ b/src/ivoc/nrnmain.cpp @@ -11,7 +11,7 @@ extern int nrn_main_launch; extern int nrn_noauto_dlopen_nrnmech; #if NRNMPI_DYNAMICLOAD void nrnmpi_stubs(); -void nrnmpi_load_or_exit(bool is_python); +void nrnmpi_load_or_exit(); #if NRN_MUSIC void nrnmusic_load(); #endif // NRN_MUSIC @@ -27,19 +27,13 @@ int main(int argc, char** argv, char** env) { nrn_noauto_dlopen_nrnmech = 1; #endif -#if 0 -printf("argc=%d\n", argc); -for (int i=0; i < argc; ++i) { -printf("argv[%d]=|%s|\n", i, argv[i]); -} -#endif #if NRNMPI #if NRNMPI_DYNAMICLOAD nrnmpi_stubs(); bool mpi_loaded = false; for (int i = 0; i < argc; ++i) { if (strcmp("-mpi", argv[i]) == 0) { - nrnmpi_load_or_exit(false); + nrnmpi_load_or_exit(); mpi_loaded = true; break; } @@ -65,7 +59,7 @@ printf("argv[%d]=|%s|\n", i, argv[i]); } if (load_music) { if (!mpi_loaded) { - nrnmpi_load_or_exit(false); + nrnmpi_load_or_exit(); } nrnmusic_load(); } @@ -80,42 +74,3 @@ printf("argv[%d]=|%s|\n", i, argv[i]); #if USENCS void nrn2ncs_outputevent(int, double) {} #endif - -// moving following to src/oc/ockludge.cpp since on -// Darwin Kernel Version 8.9.1 on apple i686 (and the newest config.guess -// thinks it is a i386, but that is a different story) -// including mpi.h gives some errors like: -// /Users/hines/mpich2-1.0.5p4/instl/include/mpicxx.h:26:2: error: #error -// SEEK_SET is #defined but must not be for the C++ binding of MPI" - -#if 0 && NRNMPI && DARWIN -// For DARWIN I do not really know the proper way to avoid -// dyld: lazy symbol binding failed: Symbol not found: _MPI_Init -// when the MPI functions are all used in the libnrnmpi.dylib -// but the libmpi.a is statically linked. Therefore I am forcing -// the linking here by listing all the MPI functions being used. 
-#include -static void work_around() { - MPI_Comm c = MPI_COMM_WORLD; - MPI_Init(0, 0); - MPI_Comm_rank(c, 0); - MPI_Comm_size(c, 0); - MPI_Wtime(); - MPI_Finalize(); - MPI_Unpack(0, 0, 0, 0, 0, 0, c); - MPI_Pack(0, 0, 0, 0, 0, 0, c); - MPI_Pack_size(0, 0, c, 0); - MPI_Send(0,0,0,0,0,c); - MPI_Probe(0, 0, c, 0); - MPI_Get_count(0, 0, 0); - MPI_Recv(0,0,0,0,0,c,0); - MPI_Sendrecv(0,0,0,0,0,0,0,0,0,0,c,0); - MPI_Iprobe(0,0,c,0,0); - MPI_Get_address(0,0); - MPI_Type_create_struct(0,0,0,0,0); - MPI_Type_commit(0); - MPI_Allgather(0,0,0,0,0,0,c); - MPI_Allgatherv(0,0,0,0,0,0,0,c); - MPI_Allreduce(0,0,0,0,0,c); -} -#endif diff --git a/src/ivoc/nrnmutdec.h b/src/ivoc/nrnmutdec.h index cce25e65ff..77ea448375 100644 --- a/src/ivoc/nrnmutdec.h +++ b/src/ivoc/nrnmutdec.h @@ -4,15 +4,15 @@ #include #include -#define MUTDEC std::unique_ptr mut_; +#define MUTDEC std::unique_ptr mut_; #define MUTCONSTRUCTED static_cast(mut_) -#define MUTCONSTRUCT(mkmut) \ - { \ - if (mkmut) { \ - mut_ = std::make_unique(); \ - } else { \ - mut_.reset(); \ - } \ +#define MUTCONSTRUCT(mkmut) \ + { \ + if (mkmut) { \ + mut_ = std::make_unique(); \ + } else { \ + mut_.reset(); \ + } \ } #define MUTDESTRUCT mut_.reset(); #define MUTLOCK \ diff --git a/src/ivoc/nrnsymdiritem.h b/src/ivoc/nrnsymdiritem.h index a5f7424235..bb7ab96871 100644 --- a/src/ivoc/nrnsymdiritem.h +++ b/src/ivoc/nrnsymdiritem.h @@ -3,9 +3,6 @@ // allow communication between src/ivoc/symdir.cpp and src/nrniv/pysecname.cpp -#include -#include - class SymbolItem { public: SymbolItem(const char*, int whole_array = 0); @@ -19,7 +16,7 @@ class SymbolItem { return ob_; } void no_object(); - const String& name() const { + const std::string& name() const { return name_; } bool is_directory() const; @@ -30,15 +27,13 @@ class SymbolItem { int pysec_type_; /* PYSECOBJ (cell prefix) or PYSECNAME (Section) */ void* pysec_; /* Name2Section* or Section* */ private: - CopyString name_; + std::string name_; Symbol* symbol_; int index_; Object* ob_; int whole_array_; }; -declarePtrList(SymbolList, SymbolItem); - -void nrn_symdir_load_pysec(SymbolList& sl, void*); +void nrn_symdir_load_pysec(std::vector& sl, void*); #endif diff --git a/src/ivoc/objcmd.cpp b/src/ivoc/objcmd.cpp index 1f7772b3fb..9b30de6fa1 100644 --- a/src/ivoc/objcmd.cpp +++ b/src/ivoc/objcmd.cpp @@ -10,14 +10,11 @@ #include #include #include +#include "nrnpy.h" #include "objcmd.h" #include "oc2iv.h" extern Object* hoc_thisobject; -int (*nrnpy_hoccommand_exec)(Object*); -int (*nrnpy_hoccommand_exec_strret)(Object*, char*, int); -void (*nrnpy_cmdtool)(Object*, int type, double x, double y, int kd); -double (*nrnpy_func_call)(Object*, int, int*); HocCommand::HocCommand(const char* cmd) { init(cmd, hoc_thisobject); @@ -34,12 +31,11 @@ HocCommand::HocCommand(Object* pobj) { } po_ = pobj; hoc_obj_ref(po_); - s_ = NULL; obj_ = NULL; } void HocCommand::init(const char* cmd, Object* obj) { - s_ = new CopyString(cmd); + s_ = std::make_unique(cmd); obj_ = obj; po_ = NULL; if (obj_) { @@ -49,17 +45,13 @@ void HocCommand::init(const char* cmd, Object* obj) { void HocCommand::update(Observable*) { // obj_ has been freed obj_ = NULL; - delete s_; - s_ = new CopyString(""); + s_ = std::make_unique(""); } HocCommand::~HocCommand() { if (obj_) { nrn_notify_pointer_disconnect(this); } - if (s_) { - delete s_; - } if (po_) { hoc_obj_unref(po_); } @@ -69,9 +61,9 @@ void HocCommand::help() { #if HAVE_IV char buf[200]; if (obj_) { - Sprintf(buf, "%s %s", s_->string(), obj_->ctemplate->sym->name); + Sprintf(buf, "%s %s", 
s_->c_str(), obj_->ctemplate->sym->name); } else { - Sprintf(buf, "%s", s_->string()); + Sprintf(buf, "%s", s_->c_str()); } Oc::help(buf); #endif @@ -80,7 +72,7 @@ void HocCommand::help() { const char* ccc = "PythonObject"; const char* HocCommand::name() { if (po_ == NULL) { - return s_->string(); + return s_->c_str(); } else { return ccc; } @@ -102,14 +94,14 @@ void HocCommand::audit() { int HocCommand::execute(bool notify) { int err; if (po_) { - assert(nrnpy_hoccommand_exec); - err = (*nrnpy_hoccommand_exec)(po_); + assert(neuron::python::methods.hoccommand_exec); + err = neuron::python::methods.hoccommand_exec(po_); } else { if (!s_) { return 0; } char buf[256]; - Sprintf(buf, "{%s}\n", s_->string()); + Sprintf(buf, "{%s}\n", s_->c_str()); err = hoc_obj_run(buf, obj_); } #if HAVE_IV @@ -122,7 +114,7 @@ int HocCommand::execute(bool notify) { } int HocCommand::exec_strret(char* buf, int size, bool notify) { assert(po_); - int err = (*nrnpy_hoccommand_exec_strret)(po_, buf, size); + int err = neuron::python::methods.hoccommand_exec_strret(po_, buf, size); #if HAVE_IV if (notify) { Oc oc; @@ -147,8 +139,8 @@ int HocCommand::execute(const char* s, bool notify) { double HocCommand::func_call(int narg, int* perr) { if (po_) { - if (nrnpy_func_call) { - return (*nrnpy_func_call)(po_, narg, perr); + if (neuron::python::methods.call_func) { + return neuron::python::methods.call_func(po_, narg, perr); } *perr = 1; return 0.0; @@ -219,7 +211,7 @@ bool HocCommandTool::event(Event& e) { #endif } if (hc_->pyobject()) { - (*nrnpy_cmdtool)(hc_->pyobject(), e.type(), x, y, kd); + neuron::python::methods.cmdtool(hc_->pyobject(), e.type(), x, y, kd); Oc oc; oc.notify(); } else { diff --git a/src/ivoc/objcmd.h b/src/ivoc/objcmd.h index b3bfd1985e..bb54aefa51 100644 --- a/src/ivoc/objcmd.h +++ b/src/ivoc/objcmd.h @@ -1,7 +1,8 @@ #ifndef objcmd_h #define objcmd_h -#include +#include + #include #if HAVE_IV #include @@ -39,7 +40,7 @@ class HocCommand: public Observer { private: Object* obj_; - CopyString* s_; + std::unique_ptr s_{}; Object* po_; }; diff --git a/src/ivoc/oc2iv.h b/src/ivoc/oc2iv.h index dd15ccfbe6..d75483a7b4 100644 --- a/src/ivoc/oc2iv.h +++ b/src/ivoc/oc2iv.h @@ -20,7 +20,10 @@ extern void hoc_ivfixedvalue(CChar* name, bool deflt = false, bool usepointer = false); extern void hoc_ivvalue_keep_updated(CChar* name, CChar* variable, Object* pyvar = 0); -extern void hoc_ivpvalue(CChar* name, double*, bool deflt = false, HocSymExtension* extra = NULL); +void hoc_ivpvalue(CChar* name, + neuron::container::data_handle, + bool deflt = false, + HocSymExtension* extra = NULL); extern void hoc_ivvaluerun(CChar* name, CChar* variable, CChar* action, @@ -29,32 +32,32 @@ extern void hoc_ivvaluerun(CChar* name, bool usepointer = false, Object* pyvar = 0, Object* pyact = 0); -extern void hoc_ivvaluerun_ex(CChar* name, - CChar* var, - double* pvar, - Object* pyvar, - CChar* action, - Object* pyact, - bool deflt = false, - bool canrun = false, - bool usepointer = false, - HocSymExtension* extra = NULL); -extern void hoc_ivpvaluerun(CChar* name, - double*, - CChar* action, - bool deflt = false, - bool canrun = false, - HocSymExtension* extra = NULL); +void hoc_ivvaluerun_ex(CChar* name, + CChar* var, + neuron::container::data_handle pvar, + Object* pyvar, + CChar* action, + Object* pyact, + bool deflt = false, + bool canrun = false, + bool usepointer = false, + HocSymExtension* extra = NULL); +void hoc_ivpvaluerun(CChar* name, + neuron::container::data_handle, + CChar* action, + bool deflt = false, + bool 
canrun = false, + HocSymExtension* extra = NULL); extern void hoc_ivlabel(CChar*); extern void hoc_ivvarlabel(char**, Object* pyvar = 0); -extern void hoc_ivstatebutton(double*, +extern void hoc_ivstatebutton(neuron::container::data_handle, CChar* name, CChar* action, int style, Object* pyvar = 0, Object* pyact = 0); -extern void hoc_ivslider(double*, +extern void hoc_ivslider(neuron::container::data_handle, float low = 0, float high = 100, float resolution = 1, diff --git a/src/ivoc/ocbox.cpp b/src/ivoc/ocbox.cpp index c776610ff8..5d21d49114 100644 --- a/src/ivoc/ocbox.cpp +++ b/src/ivoc/ocbox.cpp @@ -25,9 +25,6 @@ extern int hoc_return_type_code; -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - #if HAVE_IV class NrnFixedLayout: public Layout { @@ -777,13 +774,8 @@ void OcBox::save(std::ostream& o) { #endif window()->save_left(), window()->save_bottom(), -#if MAC - window()->canvas()->width(), - window()->canvas()->height()); -#else window()->width(), window()->height()); -#endif o << buf << std::endl; } else { o << "ocbox_.map()\n}" << std::endl; diff --git a/src/ivoc/ocdeck.cpp b/src/ivoc/ocdeck.cpp index 3673ae6643..3df06db1fe 100644 --- a/src/ivoc/ocdeck.cpp +++ b/src/ivoc/ocdeck.cpp @@ -15,9 +15,6 @@ #include "classreg.h" #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - #if HAVE_IV class SpecialPatch: public Patch { public: diff --git a/src/ivoc/ocfile.cpp b/src/ivoc/ocfile.cpp index 0a8bd28b9a..61216c9694 100644 --- a/src/ivoc/ocfile.cpp +++ b/src/ivoc/ocfile.cpp @@ -2,7 +2,7 @@ #include #include -#if MAC || defined(HAVE_UNISTD_H) +#if defined(HAVE_UNISTD_H) #include #endif @@ -36,9 +36,6 @@ extern int hoc_return_type_code; #endif #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - static Symbol* file_class_sym_; extern char* ivoc_get_temp_file(); @@ -314,7 +311,7 @@ void OcFile::close() { } void OcFile::set_name(const char* s) { close(); - if (s != filename_.string()) { + if (s != filename_.c_str()) { filename_ = s; } } @@ -328,14 +325,7 @@ void OcFile::binary_mode() { Use File.seek(0) after opening or use a binary style read/write as first\n\ access to file."); } -#if defined(__MWERKS__) - // printf("can't switch to binary mode. 
No setmode\n"); - mode_[1] = 'b'; - mode_[2] = '\0'; - file_ = freopen(filename_.string(), mode_, file()); -#else setmode(fileno(file()), O_BINARY); -#endif binary_ = true; } } @@ -348,20 +338,6 @@ bool OcFile::open(const char* name, const char* type) { strcpy(mode_, type); #endif file_ = fopen(expand_env_var(name), type); -#if defined(FILE_OPEN_RETRY) && FILE_OPEN_RETRY > 0 - int i; - for (i = 0; !file_ && i < FILE_OPEN_RETRY; ++i) { - // retry occasionally needed on BlueGene - file_ = fopen(expand_env_var(name), type); - } - if (i > 0) { - if (file_) { - printf("%d opened %s after %d retries\n", nrnmpi_myid_world, name, i); - } else { - printf("%d open %s failed after %d retries\n", nrnmpi_myid_world, name, i); - } - } -#endif return is_open(); } @@ -474,13 +450,13 @@ void OcFile::file_chooser_style(const char* type, const char* OcFile::dir() { #if HAVE_IV if (fc_) { - dirname_ = *fc_->dir(); + dirname_ = *fc_->dir()->string(); } else #endif { dirname_ = ""; } - return dirname_.string(); + return dirname_.c_str(); } bool OcFile::file_chooser_popup() { diff --git a/src/ivoc/ocfile.h b/src/ivoc/ocfile.h index f7269adcbb..046fe083fc 100644 --- a/src/ivoc/ocfile.h +++ b/src/ivoc/ocfile.h @@ -1,7 +1,6 @@ #ifndef ocfile_h #define ocfile_h -#include #include class File; class FileChooser; @@ -13,7 +12,7 @@ class OcFile { bool open(const char* filename, const char* type); void set_name(const char* s); const char* get_name() { - return filename_.string(); + return filename_.c_str(); } const char* dir(); void close(); @@ -48,8 +47,8 @@ class OcFile { #if HAVE_IV int chooser_type_; #endif - CopyString filename_; - CopyString dirname_; + std::string filename_; + std::string dirname_; FILE* file_; #ifdef WIN32 bool binary_; diff --git a/src/ivoc/ochelp.cpp b/src/ivoc/ochelp.cpp index fa9b5d540e..33c6c6974e 100644 --- a/src/ivoc/ochelp.cpp +++ b/src/ivoc/ochelp.cpp @@ -15,10 +15,6 @@ static FILE* help_pipe; -#if MAC -#define WIN32 1 -#endif - #if defined(WIN32) && !defined(MINGW) #include "nrnbbs.h" #endif @@ -30,55 +26,13 @@ static FILE* help_pipe; extern const char* hoc_current_xopen(); -declareList(CopyStringList, CopyString) -implementList(CopyStringList, CopyString) - -static CopyStringList* filequeue; - void ivoc_help(const char* s) { -#if 1 // printf("online help not currently working\n"); return; -#else - char buf[256]; - strncpy(buf, s + 4, 256); - char* p; - for (p = buf; *p; ++p) { // eliminate trailing newline - if (*p == '\n') { - *p = '\0'; - break; - } - } - for (p = buf; *p; ++p) { // start at first character - if (!isspace(*p)) { - break; - } - } - // queue up the help files if haven't invoked help - if (!help_pipe) { - if (!filequeue) { - filequeue = new CopyStringList(); - } - if (strncmp(p, "?0", 2) == 0) { - Sprintf(buf, "?0 %s", hoc_current_xopen()); - String str(buf); - filequeue->append(str); - return; - } else if (strncmp(p, "?1", 2) == 0) { - filequeue->append(p); - return; - } - } - if (*p) { - Oc::help(p); - } else { - Oc::help("Help_root"); - } -#endif } static void readmore() { -#if !defined(WIN32) && !defined(MAC) +#if !defined(WIN32) char buf[1024]; char* cmd = "ls $NEURONHOME/doc/help/*.help"; FILE* p = popen(cmd, "r"); @@ -92,89 +46,15 @@ static void readmore() { #endif } -#if !defined(WIN32) && !defined(MAC) +#if !defined(WIN32) void Oc::help(const char* s) { -#if 1 printf("online help not currently working\n"); -#else - if (help_pipe && ferror(help_pipe)) { - printf( - "error on the help pipe, restarting\n\ -but will be missing this sessions hoc help text\n"); - 
pclose(help_pipe); - help_pipe = NULL; - } - if (!help_pipe) { - printf("Starting the help system\n"); - char buf[200]; - Sprintf(buf, "%s/ochelp", "$NEURONHOME/bin/$CPU"); - if ((help_pipe = popen(buf, "w")) == (FILE*) 0) { - printf("Could not start %s\n", buf); - } - // printf("help_pipe = %p\n", help_pipe); - readmore(); - if (filequeue) { - for (long i = 0; i < filequeue->count(); ++i) { - fprintf(help_pipe, "%s\n", filequeue->item_ref(i).string()); - } - filequeue->remove_all(); - } - } - if (help_pipe) { - // printf("|%s|\n", s); - if (strncmp(s, "?0", 2) == 0) { - char buf[1024]; - Sprintf(buf, "?0 %s", hoc_current_xopen()); - fprintf(help_pipe, "%s\n", buf); - } else { - fprintf(help_pipe, "%s\n", s); - } - fflush(help_pipe); - } -#endif } #endif -#if defined(WIN32) || defined(MAC) - -void Oc::help(const char* s) { -#if 0 -#ifndef MINGW - static bool ran_ochelp = false; - char buf[1024]; - nrnbbs_connect(); // benign if already connected - if (!nrnbbs_connected()) { - printf("Could not connect to nrnbbs service\n"); - return; - } - if (!ran_ochelp && !nrnbbs_look("ochelp running")) { - ran_ochelp = true; - printf("Starting the help system\n"); - nrnbbs_exec("ochelp"); - }else if (!nrnbbs_look("ochelp running")) { - printf("proper ochelp version not running\n"); - return; - } - - readmore(); - if (filequeue) { - for (long i = 0; i < filequeue->count(); ++i) { -Sprintf(buf,"%s\n", filequeue->item_ref(i).string()); - nrnbbs_post_string("ochelp", buf); - } - filequeue->remove_all(); - } - - if (strncmp(s, "?0", 2) == 0) { - Sprintf(buf,"?0 %s", hoc_current_xopen()); - nrnbbs_post_string("ochelp", buf); - }else{ - nrnbbs_post_string("ochelp", s); - } -#endif // MINGW -#endif -} -#endif // WIN32 or MAC +#if defined(WIN32) +void Oc::help(const char* s) {} +#endif // WIN32 void Oc::helpmode(bool b) { helpmode_ = b; @@ -213,7 +93,7 @@ static const CursorPattern question_pat = {0x0000, static const CursorPattern question_mask = { // 0x0000, 0x0000, 0x7c00, 0xfe00, 0xff00, 0xcf00, 0x0f00, 0x3e00, // 0x7c00, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000, 0xf000 -#if !defined(UNIX) && (defined(WIN32) || defined(MAC)) +#if !defined(UNIX) && defined(WIN32) 0xffff, 0xffff, 0xffff, diff --git a/src/ivoc/ocinput.h b/src/ivoc/ocinput.h deleted file mode 100644 index 74d956d512..0000000000 --- a/src/ivoc/ocinput.h +++ /dev/null @@ -1,59 +0,0 @@ -#ifndef ocinput_h -#define ocinput_h - -#include -#include -#include - -class HandlerList; - -public -StandardInputHandler: public InputHandler { - public: - StandardInputHandler(Glyph*, Style*); - virtual ~StandardInputHandler(); - - virtual void bind_select(Handler * h) { - bind_press(Event::left, h); - } - virtual void bind_adjust(Handler * h) { - bind_press(Event::middle, h); - } - virtual void bind_menu(Handler * h) { - bind_press(Event::right, h); - } - - virtual void move(const Event& e) { - mouse(0, e); - } - virtual void press(const Event& e) { - mouse(1, e); - } - virtual void drag(const Event& e) { - mouse(2, e); - } - virtual void release(const Event& e) { - mouse(3, e); - } - void mouse(int, const Event&); - - void bind_move(EventButton eb, Handler * h) { - bind(0, eb, h); - } - void bind_press(EventButton eb, Handler * h) { - bind(0, eb, h); - } - void bind_drag(EventButton eb, Handler * h) { - bind(0, eb, h); - } - void bind_release(EventButton eb, Handler * h) { - bind(0, eb, h); - } - void bind(int, EventButton eb, Handler* h) { - bind(0, eb, h); - } - void remove_all(EventButton); - - private: - HandlerList* handlers_[4]; -}; diff --git 
a/src/ivoc/oclist.cpp b/src/ivoc/oclist.cpp index 886917bf5d..0d80eee882 100644 --- a/src/ivoc/oclist.cpp +++ b/src/ivoc/oclist.cpp @@ -1,7 +1,6 @@ #include <../../nrnconf.h> #include -#include #include "classreg.h" #include "oclist.h" #include "oc2iv.h" @@ -12,9 +11,7 @@ #include #include #include "ocglyph.h" -#if !MAC #include "checkpnt.h" -#endif #include "apwindow.h" #include "ocbrowsr.h" #include "objcmd.h" @@ -27,10 +24,6 @@ extern Object** hoc_temp_objptr(Object*); extern Symlist* hoc_top_level_symlist; int ivoc_list_count(Object*); - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - extern int hoc_return_type_code; void handle_old_focus(); @@ -461,7 +454,7 @@ OcList::~OcList() { } static int l_chkpt(void** vp) { -#if HAVE_IV && !MAC +#if HAVE_IV OcList* o; Checkpoint& chk = *Checkpoint::instance(); if (chk.out()) { diff --git a/src/ivoc/ocmatrix.cpp b/src/ivoc/ocmatrix.cpp index a735199fa2..7afa87498a 100644 --- a/src/ivoc/ocmatrix.cpp +++ b/src/ivoc/ocmatrix.cpp @@ -4,47 +4,25 @@ #define v_elem(v, i) (*(vector_vec(v) + i)) -#include "ivocvect.h" -#include "oc2iv.h" - -#undef error +#include +#include -extern "C" { -#undef OUT /* /usr/x86_64-w64-mingw32/sys-root/mingw/include/windef.h */ -#include "matrix.h" //meschach -#include "matrix2.h" -#include "sparse.h" -#include "sparse2.h" -extern MAT* m_get(int, int); -} // extern "C" - -int nrn_matrix_dim(void*, int); +#include "ivocvect.h" +#include "oc_ansi.h" #include "ocmatrix.h" -using std::vector; int nrn_matrix_dim(void* vm, int d) { OcMatrix* m = (OcMatrix*) vm; return d ? m->ncol() : m->nrow(); } -static void Vect2VEC(Vect* v1, VEC& v2) { -#ifdef WIN32 - v2.ve = vector_vec(v1); - v2.dim = vector_capacity(v1); - v2.max_dim = vector_buffer_size(v1); -#else - v2.ve = v1->data(); - v2.dim = v1->size(); - v2.max_dim = v1->buffer_size(); -#endif +static Eigen::Map Vect2VEC(Vect* v1) { + return Eigen::Map(v1->data(), v1->size()); } -OcMatrix::OcMatrix(int type) { - obj_ = NULL; - type_ = type; -} -OcMatrix::~OcMatrix() {} +OcMatrix::OcMatrix(int type) + : type_(type) {} OcMatrix* OcMatrix::instance(int nrow, int ncol, int type) { switch (type) { @@ -60,12 +38,12 @@ void OcMatrix::unimp() { hoc_execerror("Matrix method not implemented for this type matrix", 0); } -void OcMatrix::nonzeros(vector& m, vector& n) { +void OcMatrix::nonzeros(std::vector& m, std::vector& n) { m.clear(); n.clear(); for (int i = 0; i < nrow(); i++) { for (int j = 0; j < ncol(); j++) { - if (getval(i, j)) { + if (getval(i, j) != 0) { m.push_back(i); n.push_back(j); } @@ -73,21 +51,6 @@ void OcMatrix::nonzeros(vector& m, vector& n) { } } -void OcSparseMatrix::nonzeros(vector& m, vector& n) { - m.clear(); - n.clear(); - for (int i = 0; i < m_->m; i++) { - SPROW* const r = m_->row + i; - row_elt* r_elt = r->elt; - for (int k = 0; k < r->len; k++) { - int j = r_elt[k].col; - m.push_back(i); - n.push_back(j); - } - } -} - - OcFullMatrix* OcMatrix::full() { if (type_ != MFULL) { // could clone one maybe hoc_execerror("Matrix is not a FULL matrix (type 1)", 0); @@ -96,248 +59,176 @@ OcFullMatrix* OcMatrix::full() { } OcFullMatrix::OcFullMatrix(int nrow, int ncol) - : OcMatrix(MFULL) { - lu_factor_ = NULL; - lu_pivot_ = NULL; - m_ = m_get(nrow, ncol); -} -OcFullMatrix::~OcFullMatrix() { - if (lu_factor_) { - M_FREE(lu_factor_); - PX_FREE(lu_pivot_); - } - M_FREE(m_); + : OcMatrix(MFULL) + , m_(nrow, ncol) { + // The constructor of Eigen::Matrix does not initialize values + m_.setZero(); } + 
double* OcFullMatrix::mep(int i, int j) { - return &m_->me[i][j]; + return &m_(i, j); } double OcFullMatrix::getval(int i, int j) { - return m_->me[i][j]; + return m_(i, j); } int OcFullMatrix::nrow() { - return m_->m; + return m_.rows(); } int OcFullMatrix::ncol() { - return m_->n; + return m_.cols(); } void OcFullMatrix::resize(int i, int j) { - m_resize(m_, i, j); + // This is here because we want that new values are initialized to 0 + auto v = Eigen::MatrixXd::Zero(i, j); + m_.conservativeResizeLike(v); } void OcFullMatrix::mulv(Vect* vin, Vect* vout) { - VEC v1, v2; - Vect2VEC(vin, v1); - Vect2VEC(vout, v2); - mv_mlt(m_, &v1, &v2); + auto v1 = Vect2VEC(vin); + auto v2 = Vect2VEC(vout); + v2 = m_ * v1; } void OcFullMatrix::mulm(Matrix* in, Matrix* out) { - m_mlt(m_, in->full()->m_, out->full()->m_); + out->full()->m_ = m_ * in->full()->m_; } void OcFullMatrix::muls(double s, Matrix* out) { - sm_mlt(s, m_, out->full()->m_); + out->full()->m_ = s * m_; } void OcFullMatrix::add(Matrix* in, Matrix* out) { - m_add(m_, in->full()->m_, out->full()->m_); + out->full()->m_ = m_ + in->full()->m_; } void OcFullMatrix::copy(Matrix* out) { - m_copy(m_, out->full()->m_); + out->full()->m_ = m_; } void OcFullMatrix::bcopy(Matrix* out, int i0, int j0, int n0, int m0, int i1, int j1) { - m_move(m_, i0, j0, n0, m0, out->full()->m_, i1, j1); + out->full()->m_.block(i1, j1, n0, m0) = m_.block(i0, j0, n0, m0); } void OcFullMatrix::transpose(Matrix* out) { - m_transp(m_, out->full()->m_); + if (out->full()->m_ == m_) { + m_.transposeInPlace(); + } else { + out->full()->m_ = m_.transpose(); + } } +// As only symmetric matrix are accepted, eigenvalues are not complex void OcFullMatrix::symmeigen(Matrix* mout, Vect* vout) { - VEC v1; - Vect2VEC(vout, v1); - symmeig(m_, mout->full()->m_, &v1); + auto v1 = Vect2VEC(vout); + Eigen::EigenSolver es(m_); + v1 = es.eigenvalues().real(); + mout->full()->m_ = es.eigenvectors().real(); } void OcFullMatrix::svd1(Matrix* u, Matrix* v, Vect* d) { - VEC v1; - Vect2VEC(d, v1); - svd(m_, u ? u->full()->m_ : NULL, v ? 
v->full()->m_ : NULL, &v1); + auto v1 = Vect2VEC(d); + Eigen::JacobiSVD svd(m_, Eigen::ComputeFullU | Eigen::ComputeFullV); + v1 = svd.singularValues(); + if (u) { + u->full()->m_ = svd.matrixU().transpose(); + } + if (v) { + v->full()->m_ = svd.matrixV().transpose(); + } } void OcFullMatrix::getrow(int k, Vect* out) { - VEC v1; - Vect2VEC(out, v1); - get_row(m_, k, &v1); + auto v1 = Vect2VEC(out); + v1 = m_.row(k); } void OcFullMatrix::getcol(int k, Vect* out) { - VEC v1; - Vect2VEC(out, v1); - get_col(m_, k, &v1); + auto v1 = Vect2VEC(out); + v1 = m_.col(k); } void OcFullMatrix::getdiag(int k, Vect* out) { - int i, j, row, col; - row = nrow(); - col = ncol(); + auto vout = m_.diagonal(k); if (k >= 0) { - for (i = 0, j = k; i < row && j < col; ++i, ++j) { -#ifdef WIN32 - v_elem(out, i) = m_entry(m_, i, j); -#else - out->elem(i) = m_entry(m_, i, j); -#endif + for (int i = 0, j = k; i < nrow() && j < ncol(); ++i, ++j) { + out->elem(i) = vout(i); } } else { - for (i = -k, j = 0; i < row && j < col; ++i, ++j) { -#ifdef WIN32 - v_elem(out, i) = m_entry(m_, i, j); -#else - out->elem(i) = m_entry(m_, i, j); -#endif + for (int i = -k, j = 0; i < nrow() && j < ncol(); ++i, ++j) { + out->elem(i) = vout(j); } } } void OcFullMatrix::setrow(int k, Vect* in) { - VEC v1; - Vect2VEC(in, v1); - set_row(m_, k, &v1); + auto v1 = Vect2VEC(in); + m_.block(k, 0, 1, v1.size()) = v1.transpose(); } void OcFullMatrix::setcol(int k, Vect* in) { - VEC v1; - Vect2VEC(in, v1); - set_col(m_, k, &v1); + auto v1 = Vect2VEC(in); + m_.block(0, k, v1.size(), 1) = v1; } void OcFullMatrix::setdiag(int k, Vect* in) { - int i, j, row, col; - row = nrow(); - col = ncol(); + auto out = m_.diagonal(k); if (k >= 0) { - for (i = 0, j = k; i < row && j < col; ++i, ++j) { -#ifdef WIN32 - m_set_val(m_, i, j, v_elem(in, i)); -#else - m_set_val(m_, i, j, in->elem(i)); -#endif + for (int i = 0, j = k; i < nrow() && j < ncol() && i < in->size(); ++i, ++j) { + out(i) = in->elem(i); } } else { - for (i = -k, j = 0; i < row && j < col; ++i, ++j) { -#ifdef WIN32 - m_set_val(m_, i, j, v_elem(in, i)); -#else - m_set_val(m_, i, j, in->elem(i)); -#endif + for (int i = -k, j = 0; i < nrow() && j < ncol() && i < in->size(); ++i, ++j) { + out(j) = in->elem(i); } } + m_.diagonal(k) = out; } void OcFullMatrix::setrow(int k, double in) { - int i, col = ncol(); - for (i = 0; i < col; ++i) { - m_set_val(m_, k, i, in); - } + m_.row(k).fill(in); } void OcFullMatrix::setcol(int k, double in) { - int i, row = nrow(); - for (i = 0; i < row; ++i) { - m_set_val(m_, i, k, in); - } + m_.col(k).fill(in); } void OcFullMatrix::setdiag(int k, double in) { - int i, j, row, col; - row = nrow(); - col = ncol(); - if (k >= 0) { - for (i = 0, j = k; i < row && j < col; ++i, ++j) { - m_set_val(m_, i, j, in); - } - } else { - for (i = -k, j = 0; i < row && j < col; ++i, ++j) { - m_set_val(m_, i, j, in); - } - } + m_.diagonal(k).fill(in); } void OcFullMatrix::zero() { - m_zero(m_); + m_.setZero(); } void OcFullMatrix::ident() { - m_ident(m_); + m_.setIdentity(); } void OcFullMatrix::exp(Matrix* out) { - m_exp(m_, 0., out->full()->m_); + out->full()->m_ = m_.exp(); } void OcFullMatrix::pow(int i, Matrix* out) { - m_pow(m_, i, out->full()->m_); + out->full()->m_ = m_.pow(i).eval(); } void OcFullMatrix::inverse(Matrix* out) { - m_inverse(m_, out->full()->m_); + out->full()->m_ = m_.inverse(); } void OcFullMatrix::solv(Vect* in, Vect* out, bool use_lu) { - bool call_lufac = true; - if (!lu_factor_) { - lu_factor_ = m_get(nrow(), nrow()); - lu_pivot_ = px_get(nrow()); - } else if 
(use_lu && lu_factor_->m == nrow()) { - call_lufac = false; + if (!lu_ || !use_lu || lu_->rows() != m_.rows()) { + lu_ = std::make_unique>(m_); } - VEC v1, v2; - Vect2VEC(in, v1); - Vect2VEC(out, v2); - if (call_lufac) { - m_resize(lu_factor_, nrow(), nrow()); - m_copy(m_, lu_factor_); - px_resize(lu_pivot_, nrow()); - LUfactor(lu_factor_, lu_pivot_); - } - LUsolve(lu_factor_, lu_pivot_, &v1, &v2); + auto v1 = Vect2VEC(in); + auto v2 = Vect2VEC(out); + v2 = lu_->solve(v1); } double OcFullMatrix::det(int* e) { - int n = nrow(); - MAT* lu = m_get(n, n); - PERM* piv = px_get(n); - m_copy(m_, lu); - LUfactor(lu, piv); -#if 0 -printf("LU\n"); -for (int i = 0; i < n; ++i) { - for (int j = 0; j < n; ++j) { - printf(" %g", lu->me[i][j]); - } - printf("\t%d\n", piv->pe[i]); -} -#endif - double m = 1.0; *e = 0; - for (int i = 0; i < n; ++i) { - m *= lu->me[i][i]; - if (m == 0.0) { - break; - } - while (std::abs(m) >= 1e12) { - m *= 1e-12; - *e += 12; - } - while (std::abs(m) < 1e-12) { - m *= 1e12; - *e -= 12; - } - } + double m = m_.determinant(); if (m) { while (std::abs(m) >= 10.0) { m *= 0.1; @@ -348,211 +239,142 @@ for (int i = 0; i < n; ++i) { *e -= 1; } } - m *= double(px_sign(piv)); - M_FREE(lu); - PX_FREE(piv); return m; } //-------------------------- OcSparseMatrix::OcSparseMatrix(int nrow, int ncol) - : OcMatrix(MSPARSE) { - /* sp_get -- get sparse matrix - -- len is number of elements available for each row without - allocating further memory */ - - int len = 4; - m_ = sp_get(nrow, ncol, len); - lu_factor_ = NULL; - lu_pivot_ = NULL; -} -OcSparseMatrix::~OcSparseMatrix() { - if (lu_factor_) { - SP_FREE(lu_factor_); - PX_FREE(lu_pivot_); - } - SP_FREE(m_); -} - -// returns pointer to sparse element. NULL if it does not exist. -double* OcSparseMatrix::pelm(int i, int j) { - SPROW* r = m_->row + i; - int idx = sprow_idx(r, j); - if (idx >= 0) { - return &r->elt[idx].val; - } else { - return NULL; - } -} + : OcMatrix(MSPARSE) + , m_(nrow, ncol) {} double* OcSparseMatrix::mep(int i, int j) { - SPROW* r = m_->row + i; - int idx = sprow_idx(r, j); - if (idx >= 0) { - return &r->elt[idx].val; - } - // does not exist so create it with a value of 0 - sp_set_val(m_, i, j, 0.); - // and try again - idx = sprow_idx(r, j); - return &r->elt[idx].val; + return &m_.coeffRef(i, j); } void OcSparseMatrix::zero() { - sp_zero(m_); + for (int k = 0; k < m_.outerSize(); ++k) { + for (decltype(m_)::InnerIterator it(m_, k); it; ++it) { + it.valueRef() = 0.; + } + } } double OcSparseMatrix::getval(int i, int j) { - return sp_get_val(m_, i, j); + return m_.coeff(i, j); } + int OcSparseMatrix::nrow() { - return m_->m; + return m_.rows(); } + int OcSparseMatrix::ncol() { - return m_->n; + return m_.cols(); } + void OcSparseMatrix::mulv(Vect* vin, Vect* vout) { - VEC v1, v2; - Vect2VEC(vin, v1); - Vect2VEC(vout, v2); - sp_mv_mlt(m_, &v1, &v2); + auto v1 = Vect2VEC(vin); + auto v2 = Vect2VEC(vout); + v2 = m_ * v1; } void OcSparseMatrix::solv(Vect* in, Vect* out, bool use_lu) { - bool call_lufac = true; - if (!lu_factor_) { - lu_factor_ = sp_get(nrow(), nrow(), 4); - lu_pivot_ = px_get(nrow()); - } else if (use_lu && lu_factor_->m == nrow()) { - call_lufac = false; + if (!lu_ || !use_lu || lu_->rows() != m_.rows()) { + m_.makeCompressed(); + lu_ = std::make_unique>(m_); } - VEC v1, v2; - Vect2VEC(in, v1); - Vect2VEC(out, v2); - if (call_lufac) { - sp_resize(lu_factor_, nrow(), nrow()); - sp_copy2(m_, lu_factor_); - px_resize(lu_pivot_, nrow()); - spLUfactor(lu_factor_, lu_pivot_, .9); - } - spLUsolve(lu_factor_, 
lu_pivot_, &v1, &v2); + auto v1 = Vect2VEC(in); + auto v2 = Vect2VEC(out); + v2 = lu_->solve(v1); } void OcSparseMatrix::setrow(int k, Vect* in) { - VEC v1; - Vect2VEC(in, v1); - int i, n = ncol(); - double* p; - for (i = 0; i < n; ++i) { - if ((p = pelm(k, i)) != NULL) { -#ifdef WIN32 - *p = v_elem(in, i); - } else if (v_elem(in, i)) { - sp_set_val(m_, k, i, v_elem(in, i)); -#else - *p = in->elem(i); - } else if (in->elem(i)) { - sp_set_val(m_, k, i, in->elem(i)); -#endif - } + int col = m_.cols(); + for (int i = 0; i < col; ++i) { + m_.coeffRef(k, i) = in->elem(i); } } void OcSparseMatrix::setcol(int k, Vect* in) { - VEC v1; - Vect2VEC(in, v1); - int i, n = nrow(); - double* p; - for (i = 0; i < n; ++i) { - if ((p = pelm(i, k)) != NULL) { -#ifdef WIN32 - *p = v_elem(in, i); - } else if (v_elem(in, i)) { - sp_set_val(m_, i, k, v_elem(in, i)); -#else - *p = in->elem(i); - } else if (in->elem(i)) { - sp_set_val(m_, i, k, in->elem(i)); -#endif - } + int row = m_.rows(); + for (int i = 0; i < row; ++i) { + m_.coeffRef(i, k) = in->elem(i); } } void OcSparseMatrix::setdiag(int k, Vect* in) { - int i, j, row, col; - row = nrow(); - col = ncol(); - double* p; + int row = m_.rows(); + int col = m_.cols(); if (k >= 0) { - for (i = 0, j = k; i < row && j < col; ++i, ++j) { - if ((p = pelm(i, j)) != NULL) { -#ifdef WIN32 - *p = v_elem(in, i); - } else if (v_elem(in, i)) { - sp_set_val(m_, i, j, v_elem(in, i)); -#else - *p = in->elem(i); - } else if (in->elem(i)) { - sp_set_val(m_, i, j, in->elem(i)); -#endif - } + for (int i = 0, j = k; i < row && j < col; ++i, ++j) { + m_.coeffRef(i, j) = in->elem(i); } } else { - for (i = -k, j = 0; i < row && j < col; ++i, ++j) { - if ((p = pelm(i, j)) != NULL) { -#ifdef WIN32 - *p = v_elem(in, i); - } else if (v_elem(in, i)) { - sp_set_val(m_, i, j, v_elem(in, i)); -#else - *p = in->elem(i); - } else if (in->elem(i)) { - sp_set_val(m_, i, j, in->elem(i)); -#endif - } + for (int i = -k, j = 0; i < row && j < col; ++i, ++j) { + m_.coeffRef(i, j) = in->elem(i); } } } void OcSparseMatrix::setrow(int k, double in) { - int i, col = ncol(); - for (i = 0; i < col; ++i) { - sp_set_val(m_, k, i, in); + int col = m_.cols(); + for (int i = 0; i < col; ++i) { + m_.coeffRef(k, i) = in; } } void OcSparseMatrix::setcol(int k, double in) { - int i, row = nrow(); - for (i = 0; i < row; ++i) { - sp_set_val(m_, i, k, in); + int row = m_.rows(); + for (int i = 0; i < row; ++i) { + m_.coeffRef(i, k) = in; } } void OcSparseMatrix::ident(void) { - setdiag(0, 1); + m_.setIdentity(); } void OcSparseMatrix::setdiag(int k, double in) { - int i, j, row, col; - row = nrow(); - col = ncol(); + int row = m_.rows(); + int col = m_.cols(); if (k >= 0) { - for (i = 0, j = k; i < row && j < col; ++i, ++j) { - sp_set_val(m_, i, j, in); + for (int i = 0, j = k; i < row && j < col; ++i, ++j) { + m_.coeffRef(i, j) = in; } } else { - for (i = -k, j = 0; i < row && j < col; ++i, ++j) { - sp_set_val(m_, i, j, in); + for (int i = -k, j = 0; i < row && j < col; ++i, ++j) { + m_.coeffRef(i, j) = in; } } } int OcSparseMatrix::sprowlen(int i) { - return m_->row[i].len; + int acc = 0; + for (decltype(m_)::InnerIterator it(m_, i); it; ++it) { + acc += 1; + } + return acc; } double OcSparseMatrix::spgetrowval(int i, int jindx, int* j) { - *j = m_->row[i].elt[jindx].col; - return m_->row[i].elt[jindx].val; + int acc = 0; + for (decltype(m_)::InnerIterator it(m_, i); it; ++it) { + if (acc == jindx) { + *j = it.col(); + return it.value(); + } + acc += 1; + } + return 0; +} + +void 
OcSparseMatrix::nonzeros(std::vector& m, std::vector& n) { + m.clear(); + n.clear(); + for (int k = 0; k < m_.outerSize(); ++k) { + for (decltype(m_)::InnerIterator it(m_, k); it; ++it) { + m.push_back(it.row()); + n.push_back(it.col()); + } + } } diff --git a/src/ivoc/ocmatrix.h b/src/ivoc/ocmatrix.h index 994c691176..5754e51b96 100644 --- a/src/ivoc/ocmatrix.h +++ b/src/ivoc/ocmatrix.h @@ -1,30 +1,29 @@ #ifndef ocmatrix_h #define ocmatrix_h -#ifndef MATRIXH -#define MAT void -#define SPMAT void -#define PERM void -#endif - +#include #include -using std::vector; + +#include +#include +#include struct Object; class IvocVect; +class OcMatrix; +using Matrix = OcMatrix; class OcFullMatrix; -#define Vect IvocVect -#define Matrix OcMatrix +using Vect = IvocVect; class OcMatrix { public: enum { MFULL = 1, MSPARSE, MBAND }; static OcMatrix* instance(int nrow, int ncol, int type = MFULL); - virtual ~OcMatrix(); + virtual ~OcMatrix() = default; virtual double* mep(int i, int j) { unimp(); - return NULL; + return nullptr; } // matrix element pointer inline double& operator()(int i, int j) { return *mep(i, j); @@ -46,7 +45,7 @@ class OcMatrix { unimp(); } - virtual void nonzeros(vector& m, vector& n); + virtual void nonzeros(std::vector& m, std::vector& n); OcFullMatrix* full(); @@ -139,100 +138,91 @@ class OcMatrix { } void unimp(); - Object** temp_objvar(); protected: OcMatrix(int type); public: - Object* obj_; + Object* obj_{}; private: - int type_; + int type_{}; }; extern Matrix* matrix_arg(int); -class OcFullMatrix: public OcMatrix { // type 1 +class OcFullMatrix final: public OcMatrix { // type 1 public: OcFullMatrix(int, int); - virtual ~OcFullMatrix(); - - virtual double* mep(int, int); - virtual double getval(int i, int j); - virtual int nrow(); - virtual int ncol(); - virtual void resize(int, int); - - virtual void mulv(Vect* in, Vect* out); - virtual void mulm(Matrix* in, Matrix* out); - virtual void muls(double, Matrix* out); - virtual void add(Matrix*, Matrix* out); - virtual void getrow(int, Vect* out); - virtual void getcol(int, Vect* out); - virtual void getdiag(int, Vect* out); - virtual void setrow(int, Vect* in); - virtual void setcol(int, Vect* in); - virtual void setdiag(int, Vect* in); - virtual void setrow(int, double in); - virtual void setcol(int, double in); - virtual void setdiag(int, double in); - virtual void zero(); - virtual void ident(); - virtual void exp(Matrix* out); - virtual void pow(int, Matrix* out); - virtual void inverse(Matrix* out); - virtual void solv(Vect* vin, Vect* vout, bool use_lu); - virtual void copy(Matrix* out); - virtual void bcopy(Matrix* mout, int i0, int j0, int n0, int m0, int i1, int j1); - virtual void transpose(Matrix* out); - virtual void symmeigen(Matrix* mout, Vect* vout); - virtual void svd1(Matrix* u, Matrix* v, Vect* d); - virtual double det(int* exponent); + ~OcFullMatrix() override = default; + + double* mep(int, int) override; + double getval(int i, int j) override; + int nrow() override; + int ncol() override; + void resize(int, int) override; + + void mulv(Vect* in, Vect* out) override; + void mulm(Matrix* in, Matrix* out) override; + void muls(double, Matrix* out) override; + void add(Matrix*, Matrix* out) override; + void getrow(int, Vect* out) override; + void getcol(int, Vect* out) override; + void getdiag(int, Vect* out) override; + void setrow(int, Vect* in) override; + void setcol(int, Vect* in) override; + void setdiag(int, Vect* in) override; + void setrow(int, double in) override; + void setcol(int, double in) 
override; + void setdiag(int, double in) override; + void zero() override; + void ident() override; + void exp(Matrix* out) override; + void pow(int, Matrix* out) override; + void inverse(Matrix* out) override; + void solv(Vect* vin, Vect* vout, bool use_lu) override; + void copy(Matrix* out) override; + void bcopy(Matrix* mout, int i0, int j0, int n0, int m0, int i1, int j1) override; + void transpose(Matrix* out) override; + void symmeigen(Matrix* mout, Vect* vout) override; + void svd1(Matrix* u, Matrix* v, Vect* d) override; + double det(int* exponent) override; private: - MAT* m_; - MAT* lu_factor_; - PERM* lu_pivot_; + Eigen::Matrix m_{}; + std::unique_ptr> lu_{}; }; -class OcSparseMatrix: public OcMatrix { // type 2 +class OcSparseMatrix final: public OcMatrix { // type 2 public: OcSparseMatrix(int, int); - virtual ~OcSparseMatrix(); + ~OcSparseMatrix() override = default; - virtual double* mep(int, int); - virtual double* pelm(int, int); // NULL if element does not exist - virtual int nrow(); - virtual int ncol(); - virtual double getval(int, int); - virtual void ident(void); - virtual void mulv(Vect* in, Vect* out); - virtual void solv(Vect* vin, Vect* vout, bool use_lu); + double* mep(int, int) override; + int nrow() override; + int ncol() override; + double getval(int, int) override; + void ident(void) override; + void mulv(Vect* in, Vect* out) override; + void solv(Vect* vin, Vect* vout, bool use_lu) override; - virtual void setrow(int, Vect* in); - virtual void setcol(int, Vect* in); - virtual void setdiag(int, Vect* in); - virtual void setrow(int, double in); - virtual void setcol(int, double in); - virtual void setdiag(int, double in); + void setrow(int, Vect* in) override; + void setcol(int, Vect* in) override; + void setdiag(int, Vect* in) override; + void setrow(int, double in) override; + void setcol(int, double in) override; + void setdiag(int, double in) override; - virtual void nonzeros(vector& m, vector& n); + void nonzeros(std::vector& m, std::vector& n) override; - virtual int sprowlen(int); // how many elements in row - virtual double spgetrowval(int i, int jindx, int* j); + int sprowlen(int) override; // how many elements in row + double spgetrowval(int i, int jindx, int* j) override; - virtual void zero(); + void zero() override; private: - SPMAT* m_; - SPMAT* lu_factor_; - PERM* lu_pivot_; + Eigen::SparseMatrix m_{}; + std::unique_ptr> lu_{}; }; -#ifndef MATRIXH -#undef MAT -#undef SPMAT -#endif - #endif diff --git a/src/ivoc/ocnoiv1.cpp b/src/ivoc/ocnoiv1.cpp index 82563dfd5a..ccbe2820ed 100644 --- a/src/ivoc/ocnoiv1.cpp +++ b/src/ivoc/ocnoiv1.cpp @@ -5,13 +5,11 @@ // things we DO NOT want #include "hocdec.h" +#include "nrnpy.h" extern void hoc_ret(); extern void hoc_pushx(double); extern "C" void nrn_shape_update(); -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern Object** (*nrnpy_gui_helper3_)(const char* name, Object* obj, int handle_strptr); -extern double (*nrnpy_object_to_double_)(Object*); void ivoc_help(const char*) {} void ivoc_cleanup() {} @@ -25,153 +23,115 @@ void hoc_notify_iv() { hoc_pushx(0.); } void hoc_xpvalue() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xpvalue", NULL); - } + neuron::python::methods.try_gui_helper("xpvalue", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xlabel() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xlabel", NULL); - } + neuron::python::methods.try_gui_helper("xlabel", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xbutton() { - if (nrnpy_gui_helper_) { - 
nrnpy_gui_helper_("xbutton", NULL); - } + neuron::python::methods.try_gui_helper("xbutton", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xcheckbox() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xcheckbox", NULL); - } + neuron::python::methods.try_gui_helper("xcheckbox", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xstatebutton() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xstatebutton", NULL); - } + neuron::python::methods.try_gui_helper("xstatebutton", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xmenu() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xmenu", NULL); - } + neuron::python::methods.try_gui_helper("xmenu", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xvalue() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xvalue", NULL); - } + neuron::python::methods.try_gui_helper("xvalue", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xpanel() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xpanel", NULL); - } + neuron::python::methods.try_gui_helper("xpanel", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xradiobutton() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xradiobutton", NULL); - } + neuron::python::methods.try_gui_helper("xradiobutton", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xfixedvalue() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xfixedvalue", NULL); - } + neuron::python::methods.try_gui_helper("xfixedvalue", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_xvarlabel() { - if (nrnpy_gui_helper3_) { - nrnpy_gui_helper3_("xvarlabel", NULL, 1); + if (neuron::python::methods.gui_helper3) { + neuron::python::methods.gui_helper3("xvarlabel", nullptr, 1); } hoc_ret(); hoc_pushx(0.); } void hoc_xslider() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("xslider", NULL); - } + neuron::python::methods.try_gui_helper("xslider", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_boolean_dialog() { - if (nrnpy_gui_helper_) { - Object** const result = nrnpy_gui_helper_("boolean_dialog", NULL); - if (result) { - hoc_ret(); - hoc_pushx(nrnpy_object_to_double_(*result)); - return; - } + if (auto* const result = neuron::python::methods.try_gui_helper("boolean_dialog", nullptr)) { + hoc_ret(); + hoc_pushx(neuron::python::methods.object_to_double(*result)); + } else { + hoc_ret(); + hoc_pushx(0.); } - hoc_ret(); - hoc_pushx(0.); } void hoc_continue_dialog() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("continue_dialog", NULL); - } + neuron::python::methods.try_gui_helper("continue_dialog", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_string_dialog() { // TODO: needs to work with strrefs so can actually change the string - if (nrnpy_gui_helper_) { - Object** const result = nrnpy_gui_helper_("string_dialog", NULL); - if (result) { - hoc_ret(); - hoc_pushx(nrnpy_object_to_double_(*result)); - } + if (auto* const result = neuron::python::methods.try_gui_helper("string_dialog", nullptr); + result) { + hoc_ret(); + hoc_pushx(neuron::python::methods.object_to_double(*result)); + } else { + hoc_ret(); + hoc_pushx(0.); } - hoc_ret(); - hoc_pushx(0.); } void hoc_checkpoint() { // not redirecting checkpoint because not a GUI function - /*if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("checkpoint", NULL); - } */ + // neuron::python::methods.try_gui_helper("checkpoint", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_pwman_place() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("pwman_place", NULL); - } + neuron::python::methods.try_gui_helper("pwman_place", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_save_session() { - if (nrnpy_gui_helper_) { - 
nrnpy_gui_helper_("save_session", NULL); - } + neuron::python::methods.try_gui_helper("save_session", nullptr); hoc_ret(); hoc_pushx(0.); } void hoc_print_session() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("print_session", NULL); - } + neuron::python::methods.try_gui_helper("print_session", nullptr); hoc_ret(); hoc_pushx(0.); } void ivoc_style() { - if (nrnpy_gui_helper_) { - nrnpy_gui_helper_("ivoc_style", NULL); - } + neuron::python::methods.try_gui_helper("ivoc_style", nullptr); hoc_ret(); hoc_pushx(0.); } diff --git a/src/ivoc/ocnotify.h b/src/ivoc/ocnotify.h index 96459b6686..80cdad2401 100644 --- a/src/ivoc/ocnotify.h +++ b/src/ivoc/ocnotify.h @@ -1,5 +1,6 @@ #pragma once #include +#include "neuron/container/data_handle.hpp" #include // std::size_t @@ -10,3 +11,7 @@ void nrn_notify_pointer_disconnect(Observer* ob); void notify_pointer_freed(void* pt); void notify_freed(void* p); void notify_freed_val_array(double* p, std::size_t); + +namespace neuron::container { +void notify_when_handle_dies(data_handle, Observer*); +} diff --git a/src/ivoc/ocpicker.cpp b/src/ivoc/ocpicker.cpp index 1896d3fe64..1271335d3d 100644 --- a/src/ivoc/ocpicker.cpp +++ b/src/ivoc/ocpicker.cpp @@ -1,6 +1,7 @@ #include <../../nrnconf.h> #if HAVE_IV // to end of file +#include #include #include #include @@ -36,20 +37,16 @@ ButtonHandler::~ButtonHandler() { Resource::unref(rband_); } -declarePtrList(HandlerList, ButtonHandler); -implementPtrList(HandlerList, ButtonHandler); - StandardPicker::StandardPicker() { ms_ = unknown; for (int i = 0; i < unknown; ++i) { - handlers_[i] = new HandlerList(1); + handlers_[i] = new std::vector(); } } StandardPicker::~StandardPicker() { for (int i = 0; i < unknown; ++i) { - long cnt = handlers_[i]->count(); - for (long j = 0; j < cnt; j++) { - delete handlers_[i]->item(j); + for (auto& item: *handlers_[i]) { + delete item; } delete handlers_[i]; } @@ -65,15 +62,13 @@ bool StandardPicker::pick(Canvas* c, Glyph* glyph, int depth, Hit& h) { } event(e); - long cnt = handlers_[ms_]->count(); - for (long i = 0; i < cnt; ++i) { - ButtonHandler& b = *handlers_[ms_]->item(i); - if (b.eb_ == Event::any || b.eb_ == mb_) { - if (b.handler_) { - h.target(depth, glyph, 0, b.handler_); + for (const auto& b: *handlers_[ms_]) { + if (b->eb_ == Event::any || b->eb_ == mb_) { + if (b->handler_) { + h.target(depth, glyph, 0, b->handler_); } else { - b.rband_->canvas(c); - h.target(depth, glyph, 0, b.rband_); + b->rband_->canvas(c); + h.target(depth, glyph, 0, b->rband_); } return true; } @@ -109,23 +104,20 @@ void StandardPicker::event(const Event& e) { } void StandardPicker::unbind(int m, EventButton eb) { - long cnt = handlers_[m]->count(); - long i, j; - for (i = 0, j = 0; i < cnt; ++i) { - ButtonHandler& b = *handlers_[m]->item(j); - if (b.eb_ == Event::any || b.eb_ == eb) { - delete handlers_[m]->item(j); - handlers_[m]->remove(j); - } else { - ++j; + for (auto& b: *handlers_[m]) { + if (b->eb_ == Event::any || b->eb_ == eb) { + delete b; + b = nullptr; } } + handlers_[m]->erase(std::remove(handlers_[m]->begin(), handlers_[m]->end(), nullptr), + handlers_[m]->end()); } void StandardPicker::bind(int m, EventButton eb, OcHandler* h) { unbind(m, eb); if (h) { - handlers_[m]->append(new ButtonHandler(eb, h)); + handlers_[m]->push_back(new ButtonHandler(eb, h)); } } @@ -133,7 +125,7 @@ void StandardPicker::bind_press(EventButton eb, Rubberband* rb) { int m = 1; unbind(m, eb); if (rb) { - handlers_[m]->append(new ButtonHandler(eb, rb)); + handlers_[m]->push_back(new ButtonHandler(eb, rb)); 
} } diff --git a/src/ivoc/ocpicker.h b/src/ivoc/ocpicker.h index daa2323cbe..c885d12eb6 100644 --- a/src/ivoc/ocpicker.h +++ b/src/ivoc/ocpicker.h @@ -1,15 +1,17 @@ #ifndef ocpicker_h #define ocpicker_h +#include + #include #include #include #include "rubband.h" -class HandlerList; class Canvas; class Allocation; class Hit; +class ButtonHandler; /* steer to the right method in response to a mouse action */ @@ -66,6 +68,6 @@ class StandardPicker { enum { motion, press, drag, release, unknown }; State ms_; EventButton mb_; - HandlerList* handlers_[unknown]; + std::vector* handlers_[unknown]; }; #endif diff --git a/src/ivoc/ocpointer.cpp b/src/ivoc/ocpointer.cpp index b1edf7be92..a1cc1b04f9 100644 --- a/src/ivoc/ocpointer.cpp +++ b/src/ivoc/ocpointer.cpp @@ -117,13 +117,12 @@ void OcPointer_reg() { sv->u.ctemplate->steer = steer_val; } -StmtInfo::StmtInfo(const char* s) { - stmt_ = new CopyString(s); +StmtInfo::StmtInfo(const char* s) + : stmt_(s) { parse(); } StmtInfo::~StmtInfo() { - delete stmt_; hoc_free_list(&symlist_); } @@ -134,7 +133,7 @@ void StmtInfo::parse() { symlist_ = NULL; ParseTopLevel ptl; bool see_arg = false; - for (s = stmt_->string(), d = buf; *s; ++s, ++d) { + for (s = stmt_.c_str(), d = buf; *s; ++s, ++d) { if (*s == '$' && s[1] == '1') { strcpy(d, "hoc_ac_"); s++; diff --git a/src/ivoc/ocpointer.h b/src/ivoc/ocpointer.h index 09529bc325..9a7dabf905 100644 --- a/src/ivoc/ocpointer.h +++ b/src/ivoc/ocpointer.h @@ -2,7 +2,6 @@ #define ocpointer_h #include -#include #include "oc2iv.h" class StmtInfo; @@ -24,7 +23,7 @@ class StmtInfo { virtual ~StmtInfo(); void play_one(double); void parse(); - CopyString* stmt_; + std::string stmt_{}; Symlist* symlist_; Symbol* symstmt_; }; diff --git a/src/ivoc/ocptrvector.cpp b/src/ivoc/ocptrvector.cpp index 30d07a4acb..04932301a9 100644 --- a/src/ivoc/ocptrvector.cpp +++ b/src/ivoc/ocptrvector.cpp @@ -18,8 +18,6 @@ #include "graph.h" #endif #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); extern int hoc_return_type_code; @@ -27,78 +25,46 @@ static double dummy; static Symbol* pv_class_sym_; -OcPtrVector::OcPtrVector(int sz) { - label_ = NULL; - pd_ = new double*[sz]; - size_ = sz; - update_cmd_ = NULL; - for (int i = 0; i < sz; ++i) { - pd_[i] = &dummy; - } -} +OcPtrVector::OcPtrVector(std::size_t sz) + : pd_{sz, neuron::container::data_handle{neuron::container::do_not_search, &dummy}} {} OcPtrVector::~OcPtrVector() { - delete[] pd_; - ptr_update_cmd(NULL); if (label_) { free(label_); } // allocated by strdup } void OcPtrVector::resize(int sz) { - if (size_ == sz) { - return; - } - delete[] pd_; - pd_ = new double*[sz]; - size_ = sz; - for (int i = 0; i < sz; ++i) { - pd_[i] = &dummy; - } + pd_.resize(sz, + neuron::container::data_handle{neuron::container::do_not_search, &dummy}); } -void OcPtrVector::ptr_update_cmd(HocCommand* hc) { - if (update_cmd_) { - delete update_cmd_; - update_cmd_ = NULL; - } - update_cmd_ = hc; -} - -void OcPtrVector::ptr_update() { - if (update_cmd_) { - update_cmd_->execute(false); - } else { - hoc_warning("PtrVector has no ptr_update callback", NULL); - } -} - -void OcPtrVector::pset(int i, double* px) { - assert(i < size_); - pd_[i] = px; +void OcPtrVector::pset(int i, neuron::container::data_handle dh) { + assert(i < pd_.size()); + pd_[i] = std::move(dh); } void OcPtrVector::scatter(double* src, int sz) { - assert(size_ == sz); + assert(pd_.size() == sz); for (int i = 0; i < sz; ++i) { *pd_[i] = src[i]; } } 
void OcPtrVector::gather(double* dest, int sz) { - assert(size_ == sz); + assert(pd_.size() == sz); for (int i = 0; i < sz; ++i) { dest[i] = *pd_[i]; } } void OcPtrVector::setval(int i, double x) { - assert(i < size_); + assert(i < pd_.size()); *pd_[i] = x; } double OcPtrVector::getval(int i) { - assert(i < size_); + assert(i < pd_.size()); return *pd_[i]; } @@ -132,7 +98,7 @@ static double get_size(void* v) { static double pset(void* v) { OcPtrVector* opv = (OcPtrVector*) v; int i = int(chkarg(1, 0., opv->size())); - opv->pset(i, hoc_pgetarg(2)); + opv->pset(i, hoc_hgetarg(2)); return opv->getval(i); } @@ -163,22 +129,6 @@ static double gather(void* v) { return 0.; } -static double ptr_update_callback(void* v) { - OcPtrVector* opv = (OcPtrVector*) v; - HocCommand* hc = NULL; - if (ifarg(1) && hoc_is_object_arg(1)) { - hc = new HocCommand(*hoc_objgetarg(1)); - } else if (ifarg(1)) { - Object* o = NULL; - if (ifarg(2)) { - o = *hoc_objgetarg(2); - } - hc = new HocCommand(hoc_gargstr(1), o); - } - opv->ptr_update_cmd(hc); - return 0.; -} - // a copy of ivocvect::v_plot with y+i replaced by y[i] static int narg() { int i = 0; @@ -193,8 +143,8 @@ static double ptr_plot(void* v) { #if HAVE_IV IFGUI int i; - double** y = opv->pd_; - auto n = opv->size_; + auto const& y = opv->pd_; + auto n = opv->size(); char* label = opv->label_; Object* ob1 = *hoc_objgetarg(1); @@ -255,7 +205,6 @@ static Member_func members[] = {{"size", get_size}, {"getval", getval}, {"scatter", scatter}, {"gather", gather}, - {"ptr_update_callback", ptr_update_callback}, {"plot", ptr_plot}, {0, 0}}; diff --git a/src/ivoc/ocptrvector.h b/src/ivoc/ocptrvector.h index 0e45013dfa..b0ee978d05 100644 --- a/src/ivoc/ocptrvector.h +++ b/src/ivoc/ocptrvector.h @@ -1,30 +1,23 @@ -#ifndef ocptrvector_h -#define ocptrvector_h +#pragma once +#include "neuron/container/data_handle.hpp" +#include "objcmd.h" -#include "oc2iv.h" -class HocCommand; +#include -class OcPtrVector { - public: - OcPtrVector(int sz); +struct OcPtrVector { + OcPtrVector(std::size_t sz); virtual ~OcPtrVector(); - int size() { - return size_; + [[nodiscard]] std::size_t size() const { + return pd_.size(); } void resize(int); - void pset(int i, double*); - double getval(int); + void pset(int i, neuron::container::data_handle dh); + [[nodiscard]] double getval(int); void setval(int, double); void scatter(double*, int sz); void gather(double*, int sz); - void ptr_update_cmd(HocCommand*); - void ptr_update(); public: - size_t size_; - double** pd_; - HocCommand* update_cmd_; - char* label_; + std::vector> pd_{}; + char* label_{}; }; - -#endif diff --git a/src/ivoc/pwman.cpp b/src/ivoc/pwman.cpp index 98c3ad9541..898ece83f1 100644 --- a/src/ivoc/pwman.cpp +++ b/src/ivoc/pwman.cpp @@ -5,13 +5,13 @@ extern char* ivoc_get_temp_file(); extern int hoc_return_type_code; #if HAVE_IV -#if MAC || defined(WIN32) +#if defined(WIN32) #define MACPRINT 1 #else #define MACPRINT 0 #endif -#if defined(WIN32) || MAC +#if defined(WIN32) #define SNAPSHOT 0 #else #define SNAPSHOT 1 @@ -41,19 +41,10 @@ void iv_display_scale(Coord, Coord); // Make if fit into the screen char* hoc_back2forward(char*); #endif -#if MAC -#include -#include -#define IOS_OUT (ios::out | ios::trunc) -extern char* mktemp(char*); -extern int unlink(const char*); -#include -extern void debugfile(const char*, ...); -#else //! 
MAC +#ifdef HAVE_UNISTD_H #include +#endif #define IOS_OUT std::ios::out -#endif // MAC - #include #include @@ -433,10 +424,6 @@ char* hoc_back2forward(char*); #endif #endif // HAVE_IV -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); -extern char** (*nrnpy_gui_helper3_str_)(const char* name, Object* obj, int handle_strptr); - static void* pwman_cons(Object*) { TRY_GUI_REDIRECT_OBJ("PWManager", NULL); void* v = NULL; @@ -1166,13 +1153,11 @@ PrintableWindowManager::PrintableWindowManager() { Menu *mbar, *mprint, *mses, *mother; #if 0 -#if !MAC if (q->value_is_on("pwm_help")) { vb->append(kit.push_button("Help", new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::help) )); } -#endif #endif hb->append(mbar = kit.menubar()); @@ -1212,7 +1197,7 @@ PrintableWindowManager::PrintableWindowManager() { mi = K::menu_item("To Printer"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::do_print0)); -#if 1 || !MAC + mi = K::menu_item("PostScript"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::file_control)); @@ -1221,7 +1206,6 @@ PrintableWindowManager::PrintableWindowManager() { mi = K::menu_item("PS snapshot"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::snapshot_control)); -#endif #endif mi = K::menu_item("Idraw"); @@ -1231,15 +1215,10 @@ PrintableWindowManager::PrintableWindowManager() { mi = K::menu_item("Ascii"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::ascii_control)); -#if MAC - mi = K::menu_item("Setup Printer"); - mprint->append_item(mi); - mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::paperscale)); -#else + mi = K::menu_item("Select Printer"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::printer_control)); -#endif mi = K::check_menu_item("Window Titles Printed"); mprint->append_item(mi); @@ -1273,11 +1252,9 @@ PrintableWindowManager::PrintableWindowManager() { mses->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::virt_screen)); -#if 1 || !MAC mi = K::menu_item("Land/Port"); mprint->append_item(mi); mi->action(new ActionCallback(PWMImpl)(pwmi_, &PWMImpl::landscape)); -#endif mi = K::menu_item("Tray"); mses->append_item(mi); @@ -1296,12 +1273,8 @@ PrintableWindowManager::PrintableWindowManager() { } #if OCSMALL pwmi_->w_->xplace(-800, 0); -#else -#if MAC - pwmi_->w_->xplace(0, 40); #else pwmi_->w_->xplace(0, 0); -#endif #endif // pwmi_->w_->map(); PrintableWindow::intercept(ocg); @@ -1430,9 +1403,6 @@ void PrintableWindowManager::xplace(int left, int top, bool m) { } else { w->hide(); } -#if MAC - reconfigured(w); -#endif } void PrintableWindowManager::update(Observable* o) { @@ -1631,13 +1601,7 @@ void PWMImpl::do_print0() { if (none_selected("No windows to print", "Print Anyway")) { return; } -#if MAC - if (!mprinter_) { - continue_dialog("First select SetupPrinter"); - } else { - mac_do_print(); - } -#else + if (!b_printer_) { printer_control(); if (!printer_control_accept_) { @@ -1648,7 +1612,6 @@ void PWMImpl::do_print0() { } CopyString name(b_printer_->text()->string()); do_print(use_printer, name.string()); -#endif } else { if (!fc_print_) { file_control(); @@ -1659,12 +1622,6 @@ void PWMImpl::do_print0() { } void PWMImpl::do_print(bool use_printer, const char* name) { -#if MAC - if (use_printer) { - mac_do_print(); - return; - } -#endif #if defined(WIN32) if (use_printer && strcmp(name, "Windows") == 0) { 
mac_do_print(); @@ -1699,12 +1656,7 @@ void PWMImpl::do_print_session(bool also_leader) { float yoff = mprinter()->height() / 2 / sfac - (e.top() + e.bottom() + 23.) / 2.; Transformer t; t.translate(xoff, yoff); -#if MAC - mprinter()->prolog(); - t.scale(sfac, sfac); -#else mprinter()->prolog(sfac); -#endif mprinter()->push_transform(); mprinter()->transform(t); common_print(mprinter(), false, true); @@ -1712,7 +1664,7 @@ void PWMImpl::do_print_session(bool also_leader) { mprinter()->epilog(); #endif -#if (!MAC || DARWIN) && !defined(WIN32) +#if !defined(WIN32) // must be a postscript printer so can use landscape mode if (!b_printer_) { printer_control(); @@ -1743,9 +1695,6 @@ void PWMImpl::ps_file_print(bool use_printer, const char* name, bool land_style, Style* s = Session::instance()->style(); static char* tmpfile = (char*) 0; std::filebuf obuf; -#if MAC && !DARWIN - obuf.open(name, std::ios::out | std::ios::trunc); -#else if (!tmpfile) { tmpfile = ivoc_get_temp_file(); } @@ -1753,7 +1702,6 @@ void PWMImpl::ps_file_print(bool use_printer, const char* name, bool land_style, unlink(tmpfile); #endif obuf.open(tmpfile, IOS_OUT); -#endif std::ostream o(&obuf); Printer* pr = new Printer(&o); pr->prolog(); @@ -1795,7 +1743,7 @@ void PWMImpl::ps_file_print(bool use_printer, const char* name, bool land_style, } pr->epilog(); obuf.close(); -#if !MAC || DARWIN + String filt("cat"); s->find_attribute("pwm_postscript_filter", filt); auto const buf_size = 200 + strlen(name) + strlen(filt.string()) + 2 * strlen(tmpfile); @@ -1820,7 +1768,6 @@ void PWMImpl::ps_file_print(bool use_printer, const char* name, bool land_style, delete[] buf; #ifdef WIN32 unlink(tmpfile); -#endif #endif delete pr; // input handlers later crash doing pr->damage() } @@ -1934,9 +1881,7 @@ void PWMImpl::common_print(Printer* pr, bool land_style, bool ses_style) { // flush the allocation tables for InputHandler glyphs so // no glyphs try to use the Printer after it has been deleted pw->print_glyph()->undraw(); -#if !MAC redraw(pw); -#endif // print the window titles if ((ses_style || p_title_->test(TelltaleState::is_chosen) == true) #if DECO @@ -2058,7 +2003,7 @@ void PrintableWindowManager::psfilter(const char* filename) { } } -#if defined(WIN32) || MAC +#if defined(WIN32) void pwmimpl_redraw(Window* pw); #endif @@ -2069,7 +2014,7 @@ void PWMImpl::redraw(Window* pw) { } Canvas* c = pw->canvas(); c->damage_all(); -#if defined(WIN32) || MAC +#if defined(WIN32) pwmimpl_redraw(pw); #else Requisition req; @@ -3391,22 +3336,10 @@ Window* PWMImpl::snap_owned(Printer* pr, Window* wp) { char* ivoc_get_temp_file() { char* tmpfile; -#if MAC - FSSpec spec; - tmpfile = new char[512]; - __temp_file_name(tmpfile, &spec); -#else const char* tdir = getenv("TEMP"); if (!tdir) { tdir = "/tmp"; } -#if defined(WIN32) && defined(__MWERKS__) - char tname[L_tmpnam + 1]; - tmpnam(tname); - auto const length = strlen(tdir) + 1 + strlen(tname) + 1; - tmpfile = new char[length]; - std::snprintf(tmpfile, length, "%s/%s", tdir, tname); -#else auto const length = strlen(tdir) + 1 + 9 + 1; tmpfile = new char[length]; std::snprintf(tmpfile, length, "%s/nrnXXXXXX", tdir); @@ -3419,10 +3352,8 @@ char* ivoc_get_temp_file() { #else mktemp(tmpfile); #endif -#endif #if defined(WIN32) tmpfile = hoc_back2forward(tmpfile); -#endif #endif return tmpfile; } diff --git a/src/ivoc/rect.h b/src/ivoc/rect.h index 37715f3d0e..74865e1db7 100644 --- a/src/ivoc/rect.h +++ b/src/ivoc/rect.h @@ -36,11 +36,6 @@ class Appear: public Glyph { static const Brush* db_; }; -#if 
defined(__MWERKS__) -#undef Rect -#define Rect ivoc_Rect -#endif - class Rect: public Appear { public: Rect(Coord left, @@ -63,11 +58,6 @@ class Rect: public Appear { Coord l_, b_, w_, h_; }; -#if defined(__MWERKS__) -#undef Line -#define Line ivoc_Line -#endif - class Line: public Appear { public: Line(Coord dx, Coord dy, const Color* color = NULL, const Brush* brush = NULL); @@ -117,11 +107,6 @@ class Triangle: public Appear { bool filled_; }; -#if defined(__MWERKS__) -#undef Rectangle -#define Rectangle ivoc_Rectangle -#endif - class Rectangle: public Appear { public: Rectangle(float height, diff --git a/src/ivoc/rubband.h b/src/ivoc/rubband.h index 0ae95f840f..50b9ca721b 100644 --- a/src/ivoc/rubband.h +++ b/src/ivoc/rubband.h @@ -123,7 +123,7 @@ inline Coord Rubberband::y_begin() const { * Used the FieldEditorAction as a template */ -#if defined(__STDC__) || defined(__ANSI_CPP__) || defined(WIN32) || MAC +#if defined(__STDC__) || defined(__ANSI_CPP__) || defined(WIN32) #define __RubberCallback(T) T##_RubberCallback #define RubberCallback(T) __RubberCallback(T) #define __RubberMemberFunction(T) T##_RubberMemberFunction diff --git a/src/ivoc/scene.cpp b/src/ivoc/scene.cpp index df7c84cecd..8011ae81eb 100644 --- a/src/ivoc/scene.cpp +++ b/src/ivoc/scene.cpp @@ -51,6 +51,7 @@ #include "scenepic.h" #include "idraw.h" #include "ivoc.h" +#include "utils/enumerate.h" #define Scene_Move_Text_ "MoveText Graph" #define Scene_ChangeColor_ "ChangeColor Graph" @@ -92,15 +93,8 @@ SceneInfo::SceneInfo(Glyph* g, Coord x, Coord y) { status_ = SceneInfoShowing; } -declareList(SceneInfo_List, SceneInfo); -implementList(SceneInfo_List, SceneInfo); -declarePtrList(XYView_PtrList, XYView); -implementPtrList(XYView_PtrList, XYView); -declarePtrList(Scene_PtrList, Scene); -implementPtrList(Scene_PtrList, Scene); - static const float epsilon = 0.001; -static Scene_PtrList* scene_list; +static std::vector* scene_list; Coord Scene::mbs_; Coord Scene::mbs() const { @@ -112,7 +106,7 @@ static const Color* mb_color_; void Scene::check_allocation(GlyphIndex index) { // will not redraw unless allocation is changed // use damage(index) to do a definite redraw on a constant allocation - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = (*info_)[index]; Requisition s; info.glyph_->request(s); Allocation a_old = info.allocation_; @@ -139,7 +133,7 @@ void Scene::check_allocation(GlyphIndex index) { void Scene::modified(GlyphIndex index) { // will not redraw unless allocation is changed // use damage(index) to do a definite redraw on a constant allocation - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = (*info_)[index]; Requisition s; info.glyph_->request(s); Allocation a_old = info.allocation_; @@ -199,8 +193,8 @@ Scene::Scene(Coord x1, Coord y1, Coord x2, Coord y2, Glyph* bg) tool_ = NOTOOL; background_ = NULL; background(bg); - info_ = new SceneInfo_List(); - views_ = new XYView_PtrList(); + info_ = new std::vector(); + views_ = new std::vector(); x1_orig_ = x1; x2_orig_ = x2; y1_orig_ = y1; @@ -210,12 +204,9 @@ Scene::Scene(Coord x1, Coord y1, Coord x2, Coord y2, Glyph* bg) y1_ = y1; y2_ = y2; if (!scene_list) { - scene_list = new Scene_PtrList; + scene_list = new std::vector(); } if (mbs_ == 0.) { -#if MAC - mbs_ = 10.; -#endif Session::instance()->style()->find_attribute("scene_menu_box_size", mbs_); if (mbs_ > 0.) 
{ mb_color_ = new Color(ColorIntensity(.5), ColorIntensity(.5), ColorIntensity(.5)); @@ -227,7 +218,7 @@ Scene::Scene(Coord x1, Coord y1, Coord x2, Coord y2, Glyph* bg) } // printf ("mbs_=%g\n", mbs_); } - scene_list->append(this); + scene_list->push_back(this); picker_ = NULL; mark_ = false; hoc_obj_ptr_ = NULL; @@ -269,15 +260,14 @@ void Scene::help() { } XYView* Scene::sceneview(int i) const { - if (views_->count()) { - return views_->item(i); + if (views_->size() > i) { + return views_->at(i); } else { return NULL; } } void Scene::new_size(Coord x1, Coord y1, Coord x2, Coord y2) { -#if 1 if (x1 == x2) { x1 -= 1.; x2 += 1.; @@ -290,11 +280,10 @@ void Scene::new_size(Coord x1, Coord y1, Coord x2, Coord y2) { y1_ = y1; x2_ = x2; y2_ = y2; -#endif -#if 1 + // resize first view - if (views_->count()) { - XYView* v = views_->item(0); + if (!views_->empty()) { + XYView* v = views_->front(); // v->origin(x1, y1); // v->x_span(x2 - x1); // v->y_span(y2 - y1); @@ -303,34 +292,17 @@ void Scene::new_size(Coord x1, Coord y1, Coord x2, Coord y2) { v->damage_all(); } } -#endif -#if 0 - //resize all views to correspond to the new size - damage_all(); - for (long i = 0; i < views_->count(); ++i) { - XYView* v = views_->item(i); - v->x_span(x2 - x1); - v->y_span(y2 - y1); - v->origin(x1, y1); - } - GlyphIndex count = info_->count(); - for (i=0; i < count; ++i) { - modified(i); - } -#endif notify(); } Scene::~Scene() { // printf("~Scene\n"); - GlyphIndex count = info_->count(); - for (GlyphIndex i = 0; i < count; ++i) { - SceneInfo& info = info_->item_ref(i); - Resource::unref(info.glyph_); + for (auto& item: *info_) { + Resource::unref(item.glyph_); } delete info_; - info_ = NULL; + info_ = nullptr; Resource::unref(background_); if (picker_) { delete picker_; @@ -338,22 +310,9 @@ Scene::~Scene() { // only xyview can manipulate this list. when xyview is deleted it // will remove itself from this list. There is no way to delete scene // without first deleteing all the views. 
- assert(views_->count() == 0); + assert(views_->empty()); -#if 0 - count = views_->count(); - for (i = 0; i < count; ++i) { - XYView* view = views_->item(i); - Resource::unref(view); - } - views_->remove_all(); -#endif - for (long j = 0; j < scene_list->count(); ++j) { - if (scene_list->item(j) == this) { - scene_list->remove(j); - break; - } - } + erase_first(*scene_list, this); delete views_; } @@ -365,29 +324,21 @@ void Scene::wholeplot(Coord& l, Coord& b, Coord& r, Coord& t) const { } int Scene::view_count() const { - return int(views_->count()); + return int(views_->size()); } void Scene::append_view(XYView* v) { - views_->append(v); + views_->push_back(v); // Resource::ref(v); } void Scene::remove_view(XYView* v) { - long count = views_->count(); - for (long i = 0; i < count; ++i) { - if (v == views_->item(i)) { - views_->remove(i); - break; - // Resource::unref(v); - } - } + erase_first(*views_, v); } void Scene::dismiss() { - long count = views_->count(); - for (long i = count - 1; i >= 0; --i) { - OcViewGlyph* g = views_->item(i)->parent(); + for (auto&& item: reverse(*views_)) { + OcViewGlyph* g = item->parent(); if (g && g->has_window()) { g->window()->dismiss(); g->window(NULL); @@ -396,13 +347,11 @@ void Scene::dismiss() { } void Scene::damage(GlyphIndex index) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); Allocation& a = info.allocation_; - long count = views_->count(); - for (long i = 0; i < count; ++i) { + for (auto& item: *views_) { // printf("damage view\n"); - XYView* view = views_->item(i); - view->damage(info.glyph_, + item->damage(info.glyph_, a, (info.status_ & SceneInfoFixed) != 0, (info.status_ & SceneInfoViewFixed) != 0); @@ -410,11 +359,9 @@ void Scene::damage(GlyphIndex index) { } void Scene::damage(GlyphIndex index, const Allocation& a) { - SceneInfo& info = info_->item_ref(index); - long count = views_->count(); - for (long i = 0; i < count; ++i) { - XYView* view = views_->item(i); - view->damage(info.glyph_, + SceneInfo& info = info_->at(index); + for (auto& item: *views_) { + item->damage(info.glyph_, a, (info.status_ & SceneInfoFixed) != 0, (info.status_ & SceneInfoViewFixed) != 0); @@ -422,24 +369,21 @@ void Scene::damage(GlyphIndex index, const Allocation& a) { } void Scene::damage_all() { - for (long i = 0; i < views_->count(); ++i) { - XYView* v = views_->item(i); - if (v->canvas()) { - v->damage_all(); + for (auto& item: *views_) { + if (item->canvas()) { + item->damage_all(); } } } void Scene::damage(Coord x1, Coord y1, Coord x2, Coord y2) { - long count = views_->count(); - for (long i = 0; i < count; ++i) { - XYView* view = views_->item(i); - view->damage(x1, y1, x2, y2); + for (auto& item: *views_) { + item->damage(x1, y1, x2, y2); } } void Scene::show(GlyphIndex index, bool showing) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); if (((info.status_ & SceneInfoShowing) == SceneInfoShowing) != showing) { // printf("show %d showing=%d want %d\n", index, (info.status_ & SceneInfoHidden) == 0, // showing); info.pinfo(); @@ -453,11 +397,11 @@ void Scene::show(GlyphIndex index, bool showing) { } bool Scene::showing(GlyphIndex index) const { - return (info_->item_ref(index).status_ & SceneInfoShowing) != 0; + return (info_->at(index).status_ & SceneInfoShowing) != 0; } void Scene::move(GlyphIndex index, Coord x, Coord y) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); float x1 = info.x_, y1 = info.y_; info.x_ = x; info.y_ = y; @@ -468,21 +412,21 
@@ void Scene::move(GlyphIndex index, Coord x, Coord y) { } void Scene::location(GlyphIndex index, Coord& x, Coord& y) const { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); x = info.x_; y = info.y_; } GlyphIndex Scene::count() const { - return info_->count(); + return info_->size(); } Glyph* Scene::component(GlyphIndex index) const { - return info_->item_ref(index).glyph_; + return info_->at(index).glyph_; } void Scene::allotment(GlyphIndex index, DimensionName res, Allotment& a) const { - a = info_->item_ref(index).allocation_.allotment(res); + a = info_->at(index).allocation_.allotment(res); } void Scene::change(GlyphIndex index) { @@ -490,7 +434,7 @@ void Scene::change(GlyphIndex index) { } void Scene::change_to_fixed(GlyphIndex index, XYView* v) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); if (info.status_ & SceneInfoViewFixed) { info.status_ &= ~SceneInfoViewFixed; printf("changed to fixed\n"); @@ -502,7 +446,7 @@ void Scene::change_to_fixed(GlyphIndex index, XYView* v) { } void Scene::change_to_vfixed(GlyphIndex index, XYView* v) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); if (!(info.status_ & SceneInfoViewFixed)) { info.status_ |= SceneInfoViewFixed; info.status_ |= SceneInfoFixed; @@ -515,51 +459,48 @@ void Scene::change_to_vfixed(GlyphIndex index, XYView* v) { void Scene::append(Glyph* glyph) { SceneInfo info(glyph); - info_->append(info); + info_->push_back(info); Resource::ref(glyph); - // modified(info_->count() - 1); } void Scene::append_fixed(Glyph* glyph) { SceneInfo info(glyph); info.status_ |= SceneInfoFixed; - info_->append(info); + info_->push_back(info); Resource::ref(glyph); - // modified(info_->count() - 1); } void Scene::append_viewfixed(Glyph* glyph) { // printf("Scene::append_viewfixed\n"); SceneInfo info(glyph); info.status_ |= SceneInfoFixed | SceneInfoViewFixed; - info_->append(info); + info_->push_back(info); Resource::ref(glyph); - // modified(info_->count() - 1); } void Scene::prepend(Glyph* glyph) { SceneInfo info(glyph); - info_->prepend(info); + info_->insert(info_->begin(), info); Resource::ref(glyph); // modified(0); } void Scene::insert(GlyphIndex index, Glyph* glyph) { SceneInfo info(glyph); - info_->insert(index, info); + info_->insert(info_->begin() + index, info); Resource::ref(glyph); // modified(index); } void Scene::remove(GlyphIndex index) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); damage(index); Resource::unref(info.glyph_); - info_->remove(index); + info_->erase(info_->begin() + index); } void Scene::replace(GlyphIndex index, Glyph* glyph) { - SceneInfo& info = info_->item_ref(index); + SceneInfo& info = info_->at(index); damage(index); Resource::ref(glyph); Resource::unref(info.glyph_); @@ -568,10 +509,8 @@ void Scene::replace(GlyphIndex index, Glyph* glyph) { } GlyphIndex Scene::glyph_index(const Glyph* g) { - GlyphIndex i, cnt = info_->count(); - ; - for (i = 0; i < cnt; ++i) { - if (info_->item_ref(i).glyph_ == g) { + for (const auto&& [i, info]: enumerate(*info_)) { + if (info.glyph_ == g) { return i; } } @@ -593,8 +532,7 @@ void Scene::request(Requisition& req) const { void Scene::allocate(Canvas* c, const Allocation& a, Extension& ext) { // printf("Scene::allocate\n"); - GlyphIndex count = info_->count(); - for (GlyphIndex index = 0; index < count; ++index) { + for (std::size_t index: range(*info_)) { check_allocation(index); } ext.set(c, a); @@ -633,18 +571,14 @@ void 
Scene::draw(Canvas* canvas, const Allocation& a) const { canvas->pop_transform(); } } - GlyphIndex count = info_->count(); bool are_fixed = false; - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto& info: *info_) { if (info.status_ & SceneInfoFixed) { are_fixed = true; } else if (info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { Allocation& a = info.allocation_; Extension b; b.set(canvas, a); - // printf("%d alloc %g %g %g %g\n", index, a.left(), a.bottom(), a.right(), a.top()); - // printf("%d exten %g %g %g %g\n", index, b.left(), b.bottom(), b.right(), b.top()); if (canvas->damaged(b)) { info.glyph_->draw(canvas, a); } @@ -659,8 +593,7 @@ void Scene::draw(Canvas* canvas, const Allocation& a) const { const Transformer& tv = XYView::current_draw_view()->s2o(); canvas->transform(tv); IfIdraw(pict(tv)); - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto& info: *info_) { if ((info.status_ & SceneInfoFixed) && info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { Allocation a = info.allocation_; @@ -692,10 +625,8 @@ void Scene::print(Printer* canvas, const Allocation& a) const { if (background_ != NULL) { background_->print(canvas, a); } - GlyphIndex count = info_->count(); bool are_fixed = false; - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto& info: *info_) { if (info.status_ & SceneInfoFixed) { are_fixed = true; } else if (info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { @@ -715,8 +646,7 @@ void Scene::print(Printer* canvas, const Allocation& a) const { // view_transform(canvas, 2, tv); const Transformer& tv = XYView::current_draw_view()->s2o(); canvas->transform(tv); - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto& info: *info_) { if ((info.status_ & SceneInfoFixed) && info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { Allocation a = info.allocation_; @@ -759,30 +689,12 @@ void Scene::pick(Canvas* c, const Allocation& a, int depth, Hit& h) { if (background_ != NULL) { background_->pick(c, a, depth, h); } - GlyphIndex count = info_->count(); -#if 0 - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); - if (info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { - Allocation& a = info.allocation_; - if ( - h.right() >= a.left() && h.left() < a.right() - && h.top() >= a.bottom() && h.bottom() < a.top() - ) { - h.begin(depth, this, index); - info.glyph_->pick(c, a, depth + 1, h); - h.end(); - } - } - } -#else // pick with some extra epsilon in canvas coords Coord epsx = XYView::current_pick_view()->x_pick_epsilon(); Coord epsy = XYView::current_pick_view()->y_pick_epsilon(); bool are_fixed = false; - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto&& [index, info]: enumerate(*info_)) { if (info.status_ & SceneInfoFixed) { are_fixed = true; } else if (info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { @@ -802,8 +714,7 @@ void Scene::pick(Canvas* c, const Allocation& a, int depth, Hit& h) { const Transformer& tv = XYView::current_pick_view()->s2o(); float scx, scy, tmp; tv.matrix(scx, tmp, tmp, scy, tmp, tmp); - for (GlyphIndex index = 0; index < count; ++index) { - SceneInfo& info = info_->item_ref(index); + for (auto&& [index, info]: enumerate(*info_)) { if ((info.status_ & SceneInfoFixed) && 
info.glyph_ != NULL && (info.status_ & SceneInfoShowing)) { Allocation a = info.allocation_; @@ -831,17 +742,14 @@ void Scene::pick(Canvas* c, const Allocation& a, int depth, Hit& h) { } } } -#endif } long Scene::scene_list_index(Scene* s) { - long i, cnt = scene_list->count(); - for (i = 0; i < cnt; ++i) { - if (s == scene_list->item(i)) { - return i; - } + auto it = std::find(scene_list->begin(), scene_list->end(), s); + if (it == scene_list->end()) { + return -1; } - return -1; + return std::distance(scene_list->begin(), it); } void Scene::save_all(std::ostream& o) { @@ -850,19 +758,16 @@ void Scene::save_all(std::ostream& o) { if (!scene_list) { return; } - long count = scene_list->count(); - if (count) { - Sprintf(buf, "objectvar scene_vector_[%ld]", count); + if (!scene_list->empty()) { + Sprintf(buf, "objectvar scene_vector_[%ld]", scene_list->size()); o << buf << std::endl; } - for (long i = 0; i < count; ++i) { - Scene* s = scene_list->item(i); - s->mark(false); + for (auto& scene: *scene_list) { + scene->mark(false); } } void Scene::save_class(std::ostream& o, const char* s) { - long count = views_->count(); // PrintableWindow* w = (PrintableWindow*)canvas()->window(); o << "save_window_ = new " << s << "(0)" << std::endl; char buf[256]; @@ -884,8 +789,8 @@ void Scene::save_phase1(std::ostream&) {} void Scene::save_phase2(std::ostream&) {} void Scene::printfile(const char* fname) { - if (view_count()) { - views_->item(0)->printfile(fname); + if (!views_->empty()) { + views_->front()->printfile(fname); } } diff --git a/src/ivoc/scenepic.cpp b/src/ivoc/scenepic.cpp index ac071c35db..bd120ca726 100644 --- a/src/ivoc/scenepic.cpp +++ b/src/ivoc/scenepic.cpp @@ -16,6 +16,7 @@ #include "apwindow.h" #include "utility.h" #include "oc2iv.h" +#include "utils/enumerate.h" #define Scene_SceneMover_ "Translate Scene" #define Scene_SceneZoom_ "ZoomInOut Scene" @@ -85,9 +86,6 @@ GlyphIndex ButtonItemInfo::menu_index() { return -1; } -declarePtrList(ButtonItemInfoList, ButtonItemInfo) -implementPtrList(ButtonItemInfoList, ButtonItemInfo) - /*static*/ class SceneMover: public OcHandler { public: SceneMover(); @@ -193,7 +191,7 @@ ScenePicker* Scene::picker() { Scene* scene_; TelltaleGroup* tg_; CopyString sel_name_; - ButtonItemInfoList* bil_; + std::vector* bil_; static DismissableWindow* window_; }; @@ -276,7 +274,7 @@ MenuItem* ScenePicker::add_menu(const char* name, MenuItem* mi, Menu* m) { mm = spi_->menu_->menu(); } mm->append_item(mi); - spi_->bil_->append(new ButtonItemInfo(name, mi->action(), mi->state(), mi, mm)); + spi_->bil_->push_back(new ButtonItemInfo(name, mi->action(), mi->state(), mi, mm)); return mi; } @@ -294,7 +292,7 @@ Button* ScenePicker::radio_button(const char* name, Action* a) { Button* mi = WidgetKit::instance()->radio_button(spi_->tg_, name, new RadioSelect(name, a, spi_->scene_)); - spi_->bil_->append(new ButtonItemInfo(name, mi->action(), mi->state())); + spi_->bil_->push_back(new ButtonItemInfo(name, mi->action(), mi->state())); return mi; } MenuItem* ScenePicker::add_radio_menu(const char* name, @@ -312,10 +310,7 @@ MenuItem* ScenePicker::add_radio_menu(const char* name, OcHandler* h, int tool, } long ScenePickerImpl::info_index(const char* name) { - long i, cnt; - cnt = bil_->count(); - for (i = 0; i < cnt; ++i) { - ButtonItemInfo* b = bil_->item(i); + for (const auto&& [i, b]: enumerate(*bil_)) { if (strcmp(b->name_.string(), name) == 0) { return i; } @@ -337,7 +332,7 @@ void ScenePicker::exec_item(const char* name) { } i = spi_->info_index(name); if (i > -1) { - 
ButtonItemInfo* b = spi_->bil_->item(i); + ButtonItemInfo* b = spi_->bil_->at(i); TelltaleState* t = b->s_; bool chosen = t->test(TelltaleState::is_chosen); bool act = !chosen; @@ -355,11 +350,10 @@ void ScenePicker::exec_item(const char* name) { } void ScenePicker::remove_item(const char* name) { - long i; - i = spi_->info_index(name); + long i = spi_->info_index(name); if (i > -1) { - ButtonItemInfo* b = spi_->bil_->item(i); - spi_->bil_->remove(i); + ButtonItemInfo* b = spi_->bil_->at(i); + spi_->bil_->erase(spi_->bil_->begin() + i); GlyphIndex j = b->menu_index(); if (j > -1) { b->parent_->remove_item(j); @@ -375,11 +369,11 @@ void ScenePicker::insert_item(const char* insert, const char* name, MenuItem* mi long i; i = spi_->info_index(insert); if (i > -1) { - ButtonItemInfo* b = spi_->bil_->item(i); + ButtonItemInfo* b = spi_->bil_->at(i); GlyphIndex j = b->menu_index(); if (j > -1) { b->parent_->insert_item(j, mi); - spi_->bil_->insert(i, + spi_->bil_->insert(spi_->bil_->begin() + i, new ButtonItemInfo(name, mi->action(), mi->state(), mi, b->parent_)); } } @@ -408,14 +402,15 @@ ScenePickerImpl::ScenePickerImpl(Scene* scene) tg_ = new TelltaleGroup(); tg_->ref(); scene_ = scene; // not ref'ed since picker deleted when scene is deleted - bil_ = new ButtonItemInfoList(20); + bil_ = new std::vector(); + bil_->reserve(20); } ScenePickerImpl::~ScenePickerImpl() { Resource::unref(menu_); Resource::unref(tg_); - for (long i = bil_->count() - 1; i >= 0; --i) { - delete bil_->item(i); + for (ButtonItemInfo* bii: *bil_) { + delete bii; } delete bil_; } @@ -798,11 +793,6 @@ PopupMenu::~PopupMenu() { bool PopupMenu::event(Event& e) { if (!w_) { w_ = new PopupWindow(menu_); -#if MAC - w_->place(10000, 10000); - w_->map(); - w_->unmap(); -#endif } switch (e.type()) { case Event::down: @@ -810,7 +800,7 @@ bool PopupMenu::event(Event& e) { Coord l, b; w_->place(e.pointer_root_x(), e.pointer_root_y()); w_->align(.8, .9); -#if defined(WIN32) || MAC +#if defined(WIN32) l = w_->left(); b = w_->bottom(); if (b < 0. || l < 0.) { diff --git a/src/ivoc/scenevie.h b/src/ivoc/scenevie.h index ffffc88fb6..51de7ebaf7 100644 --- a/src/ivoc/scenevie.h +++ b/src/ivoc/scenevie.h @@ -74,11 +74,11 @@ with the common append/move. 
#include #include "apwindow.h" #include "ocglyph.h" +#include #undef Scene class Scene; -class SceneInfo_List; class SceneInfo; class XYView; class XYView_PtrList; @@ -330,8 +330,8 @@ class Scene: public Glyph, public Observable { private: Coord x1_, y1_, x2_, y2_; - SceneInfo_List* info_; - XYView_PtrList* views_; + std::vector* info_; + std::vector* views_; Glyph* background_; ScenePicker* picker_; int tool_; diff --git a/src/ivoc/strfun.cpp b/src/ivoc/strfun.cpp index f9c1618d93..cb5375eb82 100644 --- a/src/ivoc/strfun.cpp +++ b/src/ivoc/strfun.cpp @@ -1,11 +1,10 @@ #include <../../nrnconf.h> -#include -#include #include #include #include "classreg.h" #include "oc2iv.h" #include +#include // for alias #include #include @@ -21,10 +20,6 @@ extern int nrn_is_artificial(int); extern int hoc_return_type_code; -inline unsigned long key_to_hash(String& s) { - return s.hash(); -} - static double l_substr(void*) { char* s1 = gargstr(1); char* s2 = gargstr(2); @@ -43,51 +38,94 @@ static double l_len(void*) { } static double l_head(void*) { - String text(gargstr(1)); - Regexp r(gargstr(2)); - r.Search(text.string(), text.length(), 0, text.length()); - int i = r.BeginningOfMatch(); - // text.set_to_left(i); doesnt work - char** head = hoc_pgargstr(3); - if (i > 0) { - char* buf = new char[i + 1]; - strncpy(buf, text.string(), i); - buf[i] = '\0'; - hoc_assign_str(head, buf); - delete[] buf; - } else { - hoc_assign_str(head, ""); + std::string text(gargstr(1)); + { // Clean the text so we keep only the first line + // Imitation of std::multiline in our case + std::regex r("^(.*)(\n|$)"); + std::smatch sm; + std::regex_search(text, sm, r); + text = sm[1]; } + int i = -1; + std::string result{}; + try { + std::regex r(gargstr(2), std::regex::egrep); + if (std::smatch sm; std::regex_search(text, sm, r)) { + i = sm.position(); + result = sm.prefix().str(); + } + } catch (const std::regex_error& e) { + std::cerr << e.what() << std::endl; + } + char** head = hoc_pgargstr(3); + hoc_assign_str(head, result.c_str()); hoc_return_type_code = 1; // integer return double(i); } static double l_tail(void*) { - CopyString text(gargstr(1)); - Regexp r(gargstr(2)); - r.Search(text.string(), text.length(), 0, text.length()); - int i = r.EndOfMatch(); - char** tail = hoc_pgargstr(3); - if (i >= 0) { - hoc_assign_str(tail, text.string() + i); - } else { - hoc_assign_str(tail, ""); + std::string text(gargstr(1)); + { // Clean the text so we keep only the first line + // Imitation of std::multiline in our case + std::regex r("^(.*)(\n|$)"); + std::smatch sm; + std::regex_search(text, sm, r); + text = sm[1]; + } + int i = -1; + std::string result{}; + try { + std::regex r(gargstr(2), std::regex::egrep); + if (std::smatch sm; std::regex_search(text, sm, r)) { + i = sm.position() + sm.length(); + result = sm.suffix().str(); + } + } catch (const std::regex_error& e) { + std::cerr << e.what() << std::endl; } + char** tail = hoc_pgargstr(3); + hoc_assign_str(tail, result.c_str()); hoc_return_type_code = 1; // integer return double(i); } +static double l_ltrim(void*) { + std::string s(gargstr(1)); + std::string chars = " \r\n\t\f\v"; + if (ifarg(3)) { + chars = gargstr(3); + } + s.erase(0, s.find_first_not_of(chars)); + + char** ret = hoc_pgargstr(2); + hoc_assign_str(ret, s.c_str()); + return 0.; +} + +static double l_rtrim(void*) { + std::string s(gargstr(1)); + std::string chars = " \r\n\t\f\v"; + if (ifarg(3)) { + chars = gargstr(3); + } + s.erase(s.find_last_not_of(chars) + 1); + + char** ret = hoc_pgargstr(2); + 
hoc_assign_str(ret, s.c_str()); + return 0.; +} + static double l_left(void*) { - CopyString text(gargstr(1)); - CopyString newtext = text.left(int(chkarg(2, 0, strlen(gargstr(1))))); - hoc_assign_str(hoc_pgargstr(1), newtext.string()); + std::string text(gargstr(1)); + std::string newtext = text.substr(0, int(chkarg(2, 0, strlen(gargstr(1))))); + hoc_assign_str(hoc_pgargstr(1), newtext.c_str()); return 1.; } static double l_right(void*) { - CopyString text(gargstr(1)); - CopyString newtext = text.right(int(chkarg(2, 0, strlen(gargstr(1))))); - hoc_assign_str(hoc_pgargstr(1), newtext.string()); + std::string text(gargstr(1)); + std::string newtext = text.substr(int(chkarg(2, 0, strlen(gargstr(1))))); + hoc_assign_str(hoc_pgargstr(1), newtext.c_str()); return 1.; } @@ -102,11 +140,12 @@ extern Object* hoc_newobj1(Symbol*, int); extern Symlist* hoc_top_level_symlist; extern Symbol* ivoc_alias_lookup(const char* name, Object* ob) { + Symbol* s{}; IvocAliases* a = (IvocAliases*) ob->aliases; if (a) { - return a->lookup(name); + s = a->lookup(name); } - return NULL; + return s; } extern void ivoc_free_alias(Object* ob) { @@ -159,14 +198,12 @@ static Object** l_alias_list(void*) { Symbol* sl = hoc_lookup("List"); Symbol* st = hoc_table_lookup("String", hoc_top_level_symlist); if (!st || st->type != TEMPLATE) { - printf("st=%p %s %d\n", st, st ? st->name : "NULL", st ? st->type : 0); - hoc_execerror("String is not a template", 0); + hoc_execerror("String is not a HOC template", 0); } Object** po = hoc_temp_objvar(sl, list); (*po)->refcount++; int id = (*po)->index; if (a) { - char buf[256]; for (auto& kv: a->symtab_) { Symbol* sym = kv.second; hoc_pushstr(&sym->name); @@ -321,6 +358,8 @@ static Member_func l_members[] = {{"substr", l_substr}, {"len", l_len}, {"head", l_head}, {"tail", l_tail}, + {"ltrim", l_ltrim}, + {"rtrim", l_rtrim}, {"right", l_right}, {"left", l_left}, {"is_name", l_is_name}, @@ -333,13 +372,11 @@ static Member_func l_members[] = {{"substr", l_substr}, static Member_ret_obj_func l_obj_members[] = {{"alias_list", l_alias_list}, {0, 0}}; static void* l_cons(Object*) { - return NULL; + return nullptr; } -static void l_destruct(void*) {} - void StringFunctions_reg() { - class2oc("StringFunctions", l_cons, l_destruct, l_members, NULL, l_obj_members, NULL); + class2oc("StringFunctions", l_cons, nullptr, l_members, nullptr, l_obj_members, nullptr); } @@ -349,7 +386,7 @@ IvocAliases::IvocAliases(Object* ob) { } IvocAliases::~IvocAliases() { - ob_->aliases = NULL; + ob_->aliases = nullptr; for (auto& kv: symtab_) { Symbol* sym = kv.second; hoc_free_symspace(sym); @@ -358,8 +395,7 @@ IvocAliases::~IvocAliases() { } } Symbol* IvocAliases::lookup(const char* name) { - String s(name); - const auto& it = symtab_.find(s); + const auto& it = symtab_.find(name); if (it != symtab_.end()) { return it->second; } @@ -372,16 +408,14 @@ Symbol* IvocAliases::install(const char* name) { strcpy(sp->name, name); sp->type = VARALIAS; sp->cpublic = 0; // cannot be 2 or cannot be freed - sp->extra = 0; - sp->arayinfo = 0; - String s(sp->name); - symtab_.emplace(s, sp); + sp->extra = nullptr; + sp->arayinfo = nullptr; + symtab_.try_emplace(sp->name, sp); return sp; } void IvocAliases::remove(Symbol* sym) { hoc_free_symspace(sym); - String s(sym->name); - auto it = symtab_.find(s); + auto it = symtab_.find(sym->name); symtab_.erase(it); free(sym->name); free(sym); diff --git a/src/ivoc/symchoos.cpp b/src/ivoc/symchoos.cpp index 4dff156668..ea9048bf58 100644 --- a/src/ivoc/symchoos.cpp +++ 
b/src/ivoc/symchoos.cpp @@ -60,8 +60,6 @@ #include "classreg.h" #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); #if HAVE_IV @@ -83,7 +81,7 @@ class SymChooserImpl { int* filter_map_; SymDirectory** dir_; SymChooserAction* action_; - const String* selected_; + std::string selected_; CopyString last_selected_; int last_index_; Style* style_; @@ -202,7 +200,7 @@ static double text(void* v) { #if HAVE_IV IFGUI SymChooser* sc = (SymChooser*) v; - hoc_assign_str(hoc_pgargstr(1), sc->selected()->string()); + hoc_assign_str(hoc_pgargstr(1), sc->selected().c_str()); ENDGUI return 0.; #else @@ -249,7 +247,7 @@ SymChooser::~SymChooser() { delete impl_; } -const String* SymChooser::selected() const { +const std::string& SymChooser::selected() const { return impl_->selected_; } @@ -508,7 +506,7 @@ void SymChooserImpl::load(int bindex) { filter_map_ = index; // printf("loading %d\n", bindex); for (int i = 0; i < dircount; i++) { - const String& f = d.name(i); + const String& f = d.name(i).c_str(); bool is_dir = d.is_directory(i); if ((is_dir && filtered(f, directory_filter_)) || (!is_dir && filtered(f, filter_))) { Glyph* name = kit.label(f); @@ -531,7 +529,7 @@ void SymChooserImpl::load(int bindex) { // to avoid a premature browser request which ends up showing an // empty list. fbrowser_[bindex]->refresh(); - editor_->field(d.path()); + editor_->field(d.path().c_str()); kit.pop_style(); } @@ -565,7 +563,7 @@ bool SymChooserImpl::filtered(const String& name, FieldEditor* e) { if (s == NULL || s->length() == 0) { return true; } - return s == NULL || s->length() == 0 || SymDirectory::match(name, *s); + return s == NULL || s->length() == 0 || SymDirectory::match(name.string(), s->string()); } void SymChooserImpl::accept_browser_index(int bindex) { @@ -575,8 +573,8 @@ void SymChooserImpl::accept_browser_index(int bindex) { } // i = filter_map_[i]; SymDirectory* dir = dir_[bindex]; - const String& path = dir->path(); - const String& name = dir->name(i); + const String& path = dir->path().c_str(); + const String& name = dir->name(i).c_str(); Symbol* sym = dir->symbol(i); int length = path.length() + name.length(); auto const tmp_len = length + 2; @@ -586,7 +584,7 @@ void SymChooserImpl::accept_browser_index(int bindex) { editor_->field(tmp); last_selected_ = tmp; last_index_ = i; - selected_ = editor_->text(); + selected_ = editor_->text()->string(); if (dir->is_directory(i)) { if (chdir(bindex, i)) { fchooser_->focus(editor_); @@ -602,7 +600,7 @@ void SymChooserImpl::accept_browser_index(int bindex) { } double* SymChooserImpl::selected_var() { - if (last_index_ != -1 && strcmp(selected_->string(), last_selected_.string()) == 0) { + if (last_index_ != -1 && selected_ == last_selected_.string()) { SymDirectory* dir = dir_[browser_index_]; return dir->variable(last_index_); } else { @@ -611,7 +609,7 @@ double* SymChooserImpl::selected_var() { } int SymChooserImpl::selected_vector_count() { - if (last_index_ != -1 && strcmp(selected_->string(), last_selected_.string()) == 0) { + if (last_index_ != -1 && selected_ == last_selected_.string()) { SymDirectory* dir = dir_[browser_index_]; return dir->whole_vector(last_index_); } else { @@ -627,15 +625,15 @@ void SymChooserImpl::accept_browser() { return; } // i = filter_map_[i]; - const String& path = dir_[bi]->path(); - const String& name = dir_[bi]->name(i); + const String& path = dir_[bi]->path().c_str(); + const String& name = dir_[bi]->name(i).c_str(); int length 
= path.length() + name.length(); char* tmp = new char[length + 1]; std::snprintf( tmp, length + 1, "%.*s%.*s", path.length(), path.string(), name.length(), name.string()); // printf("accept_browser %s\n", tmp); editor_->field(tmp); - selected_ = editor_->text(); + selected_ = editor_->text()->string(); if (dir_[bi]->is_directory(i)) { if (chdir(bi, i)) { fchooser_->focus(editor_); @@ -649,21 +647,19 @@ void SymChooserImpl::accept_browser() { } void SymChooserImpl::cancel_browser() { - selected_ = NULL; + selected_.clear(); fchooser_->dismiss(false); } void SymChooserImpl::editor_accept(FieldEditor* e) { - int i; - int bi = browser_index_; - if ((i = dir_[bi]->index(*e->text())) >= 0) { - if (!chdir(bi, i)) { - selected_ = &dir_[bi]->name(i); + if (int i = dir_[browser_index_]->index(e->text()->string()); i >= 0) { + if (!chdir(browser_index_, i)) { + selected_ = dir_[browser_index_]->name(i); fchooser_->dismiss(true); } return; } else { - selected_ = e->text(); + selected_ = e->text()->string(); fchooser_->dismiss(true); } } diff --git a/src/ivoc/symchoos.h b/src/ivoc/symchoos.h index 2d94449d09..dc465fe355 100644 --- a/src/ivoc/symchoos.h +++ b/src/ivoc/symchoos.h @@ -55,7 +55,7 @@ class SymChooser: public Dialog { SymChooser(SymDirectory*, WidgetKit*, Style*, SymChooserAction* = NULL, int nbrowser = 3); virtual ~SymChooser(); - virtual const String* selected() const; + virtual const std::string& selected() const; virtual double* selected_var(); virtual int selected_vector_count(); virtual void reread(); diff --git a/src/ivoc/symdir.cpp b/src/ivoc/symdir.cpp index 6c2ce29db9..5fb09510a9 100644 --- a/src/ivoc/symdir.cpp +++ b/src/ivoc/symdir.cpp @@ -1,15 +1,14 @@ #include <../../nrnconf.h> #include #include -#include -#include #include #include "ocobserv.h" +#include "utils/enumerate.h" +#include "nrniv_mf.h" #include "nrnoc2iv.h" #include "membfunc.h" -extern double* point_process_pointer(Point_process*, Symbol*, int); #include "parse.hpp" #include "hoclist.h" extern Symlist* hoc_symlist; @@ -19,7 +18,6 @@ extern Symlist *hoc_built_in_symlist, *hoc_top_level_symlist; #include "symdir.h" #include "nrnsymdiritem.h" -implementPtrList(SymbolList, SymbolItem); const char* concat(const char* s1, const char* s2) { static char* tmp = 0; @@ -43,8 +41,8 @@ class SymDirectoryImpl: public Observer { Object* obj_; cTemplate* t_; - SymbolList symbol_list_; - CopyString path_; + std::vector symbol_lists_; + std::string path_; void load(int type); void load(int type, Symlist*); @@ -61,37 +59,20 @@ class SymDirectoryImpl: public Observer { void sort(); }; -static int compare_entries(const void* k1, const void* k2) { - SymbolItem* e1 = *((SymbolItem**) k1); - SymbolItem* e2 = *((SymbolItem**) k2); - int i = strcmp(e1->name().string(), e2->name().string()); +static int compare_entries(const SymbolItem* e1, const SymbolItem* e2) { + int i = strcmp(e1->name().c_str(), e2->name().c_str()); if (i == 0) { - if (e1->array_index() > e2->array_index()) { - i = 1; - } else { - i = -1; - } + return e1->array_index() > e2->array_index(); } - return i; + return i > 0; }; void SymDirectoryImpl::sort() { - long cnt, i; - cnt = symbol_list_.count(); - SymbolItem** slist = new SymbolItem*[cnt]; - for (i = 0; i < cnt; ++i) { - slist[i] = symbol_list_.item(i); - } - qsort(slist, cnt, sizeof(SymbolItem*), compare_entries); - symbol_list_.remove_all(); - for (i = 0; i < cnt; ++i) { - symbol_list_.append(slist[i]); - } - delete[] slist; + std::sort(symbol_lists_.begin(), symbol_lists_.end(), compare_entries); } // SymDirectory 
-SymDirectory::SymDirectory(const String& parent_path, +SymDirectory::SymDirectory(const std::string& parent_path, Object* parent_obj, Symbol* sym, int array_index, @@ -111,7 +92,7 @@ SymDirectory::SymDirectory(const String& parent_path, if (sym->type == TEMPLATE) { suffix = '_'; } - impl_->make_pathname(parent_path.string(), + impl_->make_pathname(parent_path.c_str(), sym->name, hoc_araystr(sym, array_index, obd), suffix); @@ -143,7 +124,7 @@ SymDirectory::SymDirectory(const String& parent_path, } break; default: - hoc_execerror("Don't know how to make a directory out of", path().string()); + hoc_execerror("Don't know how to make a directory out of", path().c_str()); break; } impl_->sort(); @@ -161,21 +142,21 @@ SymDirectory::SymDirectory(Object* ob) { } bool SymDirectory::is_pysec(int index) const { - SymbolItem* si = impl_->symbol_list_.item(index); + SymbolItem* si = impl_->symbol_lists_.at(index); return si->pysec_ ? true : false; } SymDirectory* SymDirectory::newsymdir(int index) { - SymbolItem* si = impl_->symbol_list_.item(index); + SymbolItem* si = impl_->symbol_lists_.at(index); SymDirectory* d = new SymDirectory(); if (si->pysec_type_ == PYSECOBJ) { - nrn_symdir_load_pysec(d->impl_->symbol_list_, si->pysec_); + nrn_symdir_load_pysec(d->impl_->symbol_lists_, si->pysec_); } else { d->impl_->sec_ = (Section*) si->pysec_; section_ref(d->impl_->sec_); d->impl_->load_section(); } - d->impl_->path_ = concat(path().string(), si->name().string()); - d->impl_->path_ = concat(d->impl_->path_.string(), "."); + d->impl_->path_ = concat(path().c_str(), si->name().c_str()); + d->impl_->path_ = concat(d->impl_->path_.c_str(), "."); d->impl_->sort(); return d; } @@ -201,11 +182,11 @@ SymDirectory::SymDirectory(int type) { } SymDirectory::~SymDirectory() { - long cnt = count(); - for (long i = 0; i < cnt; ++i) { - delete impl_->symbol_list_.item(i); + for (auto& item: impl_->symbol_lists_) { + delete item; } - impl_->symbol_list_.remove_all(); + impl_->symbol_lists_.clear(); + impl_->symbol_lists_.shrink_to_fit(); if (impl_->obj_) { ObjObservable::Detach(impl_->obj_, impl_); } @@ -218,11 +199,11 @@ SymDirectory::~SymDirectory() { delete impl_; } void SymDirectoryImpl::disconnect(Observable*) { - long cnt = symbol_list_.count(); - for (long i = 0; i < cnt; ++i) { - delete symbol_list_.item(i); + for (auto& item: symbol_lists_) { + delete item; } - symbol_list_.remove_all(); + symbol_lists_.clear(); + symbol_lists_.shrink_to_fit(); obj_ = NULL; } @@ -268,15 +249,14 @@ double* SymDirectory::variable(int index) { } case RANGEVAR: if (ob && ob->ctemplate->is_point_) { - return point_process_pointer((Point_process*) ob->u.this_pointer, - sym, - array_index(index)); + return static_cast(point_process_pointer( + (Point_process*) ob->u.this_pointer, sym, array_index(index))); } break; } else { char buf[256], *cp; - Sprintf(buf, "%s%s", path().string(), name(index).string()); + Sprintf(buf, "%s%s", path().c_str(), name(index).c_str()); if (whole_vector(index)) { // rangevar case for [all] // replace [all] with [0] cp = strstr(buf, "[all]"); @@ -293,51 +273,50 @@ double* SymDirectory::variable(int index) { } int SymDirectory::whole_vector(int index) { - return impl_->symbol_list_.item(index)->whole_vector(); + return impl_->symbol_lists_.at(index)->whole_vector(); } -const String& SymDirectory::path() const { +const std::string& SymDirectory::path() const { return impl_->path_; } int SymDirectory::count() const { - return impl_->symbol_list_.count(); + return impl_->symbol_lists_.size(); } -const String& 
SymDirectory::name(int index) const { - return impl_->symbol_list_.item(index)->name(); +const std::string& SymDirectory::name(int index) const { + return impl_->symbol_lists_.at(index)->name(); } int SymDirectory::array_index(int i) const { - return impl_->symbol_list_.item(i)->array_index(); + return impl_->symbol_lists_.at(i)->array_index(); } -int SymDirectory::index(const String& name) const { - long cnt = count(); - for (long i = 0; i < cnt; ++i) { - if (name == impl_->symbol_list_.item(i)->name()) { +int SymDirectory::index(const std::string& name) const { + for (const auto&& [i, symbol]: enumerate(impl_->symbol_lists_)) { + if (name == symbol->name()) { return i; } } return -1; } -void SymDirectory::whole_name(int index, CopyString& s) const { - const String& s1 = impl_->path_; - const String& s2 = name(index); - s = concat(s1.string(), s2.string()); +void SymDirectory::whole_name(int index, std::string& s) const { + auto s1 = impl_->path_; + auto s2 = name(index); + s = s1 + s2; } bool SymDirectory::is_directory(int index) const { - return impl_->symbol_list_.item(index)->is_directory(); + return impl_->symbol_lists_.at(index)->is_directory(); } -bool SymDirectory::match(const String&, const String&) { +bool SymDirectory::match(const std::string&, const std::string&) { return true; } Symbol* SymDirectory::symbol(int index) const { - return impl_->symbol_list_.item(index)->symbol(); + return impl_->symbol_lists_.at(index)->symbol(); } Object* SymDirectory::object() const { return impl_->obj_; } Object* SymDirectory::obj(int index) { - return impl_->symbol_list_.item(index)->object(); + return impl_->symbol_lists_.at(index)->object(); } // SymbolItem @@ -437,7 +416,7 @@ void SymDirectoryImpl::load(int type) { break; case PYSEC: path_ = "_pysec."; - nrn_symdir_load_pysec(symbol_list_, NULL); + nrn_symdir_load_pysec(symbol_lists_, NULL); break; default: load(type, hoc_symlist); @@ -454,8 +433,7 @@ void SymDirectoryImpl::load(int type) { } void SymDirectoryImpl::load(int type, Symlist* sl) { - Symbol* sym; - for (sym = sl->first; sym; sym = sym->next) { + for (Symbol* sym = sl->first; sym; sym = sym->next) { if (type == -1) { switch (sym->type) { case SECTION: @@ -493,9 +471,8 @@ void SymDirectoryImpl::load_aliases() { IvocAliases* a = (IvocAliases*) obj_->aliases; if (!a) return; - for (const auto& kv: a->symtab_) { - Symbol* s = kv.second; - append(s, NULL, obj_); + for (const auto& [_, s]: a->symtab_) { + append(s, nullptr, obj_); } } @@ -516,7 +493,7 @@ void SymDirectoryImpl::load_section() { double x = nrn_arc_position(sec, sec->pnode[0]); Sprintf(xarg, "( %g )", x); Sprintf(buf, "v%s", xarg); - symbol_list_.append(new SymbolItem(buf)); + symbol_lists_.push_back(new SymbolItem(buf)); nrn_pushsec(sec); Node* nd = sec->pnode[i]; for (Prop* p = nd->prop; p; p = p->next) { @@ -537,15 +514,15 @@ void SymDirectoryImpl::load_mechanism(Prop* p, int type, const char* xarg) { int n = hoc_total_array_data(sym, 0); if (n > 5) { Sprintf(buf, "%s[all]%s", sym->name, xarg); - symbol_list_.append(new SymbolItem(buf, n)); + symbol_lists_.push_back(new SymbolItem(buf, n)); } Sprintf(buf, "%s[%d]%s", sym->name, 0, xarg); - symbol_list_.append(new SymbolItem(buf)); + symbol_lists_.push_back(new SymbolItem(buf)); Sprintf(buf, "%s[%d]%s", sym->name, n - 1, xarg); - symbol_list_.append(new SymbolItem(buf)); + symbol_lists_.push_back(new SymbolItem(buf)); } else { Sprintf(buf, "%s%s", sym->name, xarg); - symbol_list_.append(new SymbolItem(buf)); + symbol_lists_.push_back(new SymbolItem(buf)); } } } @@ 
-563,30 +540,29 @@ void SymDirectoryImpl::append(Symbol* sym, Objectdata* od, Object* o) { } } if (n > 5 && sym->type == VAR) { - symbol_list_.append(new SymbolItem(sym, od, 0, n)); + symbol_lists_.push_back(new SymbolItem(sym, od, 0, n)); } for (i = 0; i < n; ++i) { - symbol_list_.append(new SymbolItem(sym, od, i)); + symbol_lists_.push_back(new SymbolItem(sym, od, i)); if (i > 5) { break; } } if (i < n - 1) { - symbol_list_.append(new SymbolItem(sym, od, n - 1)); + symbol_lists_.push_back(new SymbolItem(sym, od, n - 1)); } } else { - symbol_list_.append(new SymbolItem(sym, od, 0)); + symbol_lists_.push_back(new SymbolItem(sym, od, 0)); } } void SymDirectoryImpl::append(Object* ob) { - symbol_list_.append(new SymbolItem(ob)); + symbol_lists_.push_back(new SymbolItem(ob)); } void SymDirectoryImpl::un_append(Object* ob) { - long i, cnt = symbol_list_.count(); - for (i = 0; i < cnt; ++i) { - if (symbol_list_.item(i)->object() == ob) { - symbol_list_.item(i)->no_object(); + for (auto& symbol: symbol_lists_) { + if (symbol->object() == ob) { + symbol->no_object(); break; } } diff --git a/src/ivoc/symdir.h b/src/ivoc/symdir.h index 20b269b4fc..02b1c1e44a 100644 --- a/src/ivoc/symdir.h +++ b/src/ivoc/symdir.h @@ -3,7 +3,6 @@ #include #include -#include struct Object; class SymDirectoryImpl; @@ -20,14 +19,14 @@ class IvocAliases { Symbol* symbol(int); Object* ob_; // not referenced - std::map symtab_; + std::map symtab_; }; /* List of Symbols considered as a directory */ class SymDirectory: public Resource { public: - SymDirectory(const String& parent_path, + SymDirectory(const std::string& parent_path, Object* parent_object, Symbol*, int array_index = 0, @@ -37,16 +36,16 @@ class SymDirectory: public Resource { SymDirectory(); virtual ~SymDirectory(); - virtual const String& path() const; + virtual const std::string& path() const; virtual int count() const; - virtual const String& name(int index) const; - virtual int index(const String&) const; - virtual void whole_name(int index, CopyString&) const; + virtual const std::string& name(int index) const; + virtual int index(const std::string&) const; + virtual void whole_name(int index, std::string&) const; virtual bool is_directory(int index) const; virtual double* variable(int index); virtual int whole_vector(int index); - static bool match(const String& name, const String& pattern); + static bool match(const std::string& name, const std::string& pattern); Symbol* symbol(int index) const; int array_index(int index) const; Object* object() const; // the parent_object diff --git a/src/ivoc/utility.cpp b/src/ivoc/utility.cpp index 82008dd816..8a1a4a9e5b 100644 --- a/src/ivoc/utility.cpp +++ b/src/ivoc/utility.cpp @@ -23,10 +23,6 @@ #include "oc2iv.h" #include "ivoc.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); -extern Object** (*nrnpy_gui_helper3_)(const char* name, Object* obj, int handle_strptr); - bool nrn_spec_dialog_pos(Coord& x, Coord& y) { Style* s = Session::instance()->style(); if (s->value_is_on("dialog_spec_position")) { @@ -286,13 +282,10 @@ void FieldDialog::cancel(FieldEditor*) { void hoc_boolean_dialog() { bool b = false; - if (nrnpy_gui_helper_) { - Object** const result = nrnpy_gui_helper_("boolean_dialog", NULL); - if (result) { - hoc_ret(); - hoc_pushx(nrnpy_object_to_double_(*result)); - return; - } + if (auto* const result = neuron::python::methods.try_gui_helper("boolean_dialog", nullptr)) { + hoc_ret(); + 
hoc_pushx(neuron::python::methods.object_to_double(*result)); + return; } IFGUI if (ifarg(3)) { diff --git a/src/ivoc/xmenu.cpp b/src/ivoc/xmenu.cpp index 360343eb0c..82d50997f9 100644 --- a/src/ivoc/xmenu.cpp +++ b/src/ivoc/xmenu.cpp @@ -1,18 +1,9 @@ - #include <../../nrnconf.h> #include "oc2iv.h" #include "classreg.h" -double (*nrnpy_guigetval)(Object*); -void (*nrnpy_guisetval)(Object*, double); -int (*nrnpy_guigetstr)(Object*, char**); - #include "gui-redirect.h" - -Object** (*nrnpy_gui_helper_)(const char* name, Object* obj) = NULL; -double (*nrnpy_object_to_double_)(Object*) = NULL; -Object** (*nrnpy_gui_helper3_)(const char* name, Object* obj, int handle_strptr) = NULL; -char** (*nrnpy_gui_helper3_str_)(const char* name, Object* obj, int handle_strptr) = NULL; +#include "utils/enumerate.h" #if HAVE_IV // to end of file except for a few small fragments. @@ -46,12 +37,11 @@ char** (*nrnpy_gui_helper3_str_)(const char* name, Object* obj, int handle_strpt #include "datapath.h" #include "ivoc.h" #include "bndedval.h" +#include "nrnpy.h" #include "objcmd.h" #include "parse.hpp" #include "utility.h" #include "scenepic.h" -#include "treeset.h" - // The problem this overcomes is that the pick of an input handler normally // succeeds for a keystroke only if the mouse is over one of the child @@ -270,7 +260,8 @@ void hoc_xstatebutton() { s1 = gargstr(1); if (hoc_is_object_arg(2)) { - hoc_ivstatebutton(NULL, + neuron::container::data_handle ptr1{}; + hoc_ivstatebutton(ptr1, s1, NULL, HocStateButton::PALETTE, @@ -280,7 +271,7 @@ void hoc_xstatebutton() { if (ifarg(3)) { s2 = gargstr(3); } - hoc_ivstatebutton(hoc_pgetarg(2), s1, s2, HocStateButton::PALETTE); + hoc_ivstatebutton(hoc_hgetarg(2), s1, s2, HocStateButton::PALETTE); } ENDGUI hoc_ret(); @@ -303,7 +294,8 @@ void hoc_xcheckbox() { s1 = gargstr(1); if (hoc_is_object_arg(2)) { - hoc_ivstatebutton(NULL, + neuron::container::data_handle ptr1{}; + hoc_ivstatebutton(ptr1, s1, NULL, HocStateButton::CHECKBOX, @@ -313,7 +305,7 @@ void hoc_xcheckbox() { if (ifarg(3)) { s2 = gargstr(3); } - hoc_ivstatebutton(hoc_pgetarg(2), s1, s2, HocStateButton::CHECKBOX); + hoc_ivstatebutton(hoc_hgetarg(2), s1, s2, HocStateButton::CHECKBOX); } ENDGUI hoc_ret(); @@ -354,7 +346,8 @@ static void hoc_xvalue_helper() { IFGUI // prompt, variable, deflt,action,canrun,usepointer char *s1, *s2, *s3; - double* ptr2 = NULL; /*allow variable arg2 to be double* */ + // allow variable arg2 to be data_handle + neuron::container::data_handle ptr2{}; Object* pyvar = NULL; Object* pyact = NULL; s2 = s3 = NULL; @@ -363,7 +356,7 @@ static void hoc_xvalue_helper() { if (hoc_is_object_arg(2)) { pyvar = *hoc_objgetarg(2); } else if (hoc_is_pdouble_arg(2)) { - ptr2 = hoc_pgetarg(2); + ptr2 = hoc_hgetarg(2); } else { s2 = gargstr(2); } @@ -432,15 +425,15 @@ static void hoc_xpvalue_helper() { IFGUI // prompt,variable,deflt,action,canrun char *s1, *s3; - double* pd; + neuron::container::data_handle pd{}; HocSymExtension* extra = NULL; Symbol* sym; s1 = gargstr(1); if (ifarg(2)) { - pd = hoc_pgetarg(2); + pd = hoc_hgetarg(2); sym = hoc_get_last_pointer_symbol(); } else { - pd = hoc_val_pointer(s1); + pd = hoc_val_handle(s1); sym = hoc_get_symbol(s1); } if (sym) { @@ -511,7 +504,7 @@ void hoc_xslider() { int nsteps = 10; char* send = NULL; Object* pysend = NULL; - double* pval = NULL; + neuron::container::data_handle pval{}; Object* pyvar = NULL; bool vert = 0; if (ifarg(3)) { @@ -539,7 +532,7 @@ void hoc_xslider() { if (hoc_is_object_arg(1)) { pyvar = *hoc_objgetarg(1); } else { - pval = 
hoc_pgetarg(1); + pval = hoc_hgetarg(1); } hoc_ivslider(pval, low, high, resolution, nsteps, send, vert, slow, pyvar, pysend); ENDGUI @@ -577,8 +570,7 @@ void HocButton::print(Printer* pr, const Allocation& a) const { l_->print(pr, a); } -implementPtrList(HocPanelList, HocPanel); -static HocPanelList* hoc_panel_list; +static std::vector* hoc_panel_list; static HocPanel* curHocPanel; static HocValEditor* last_fe_constructed_; static void checkOpenPanel() { @@ -587,40 +579,38 @@ static void checkOpenPanel() { } } -declarePtrList(HocMenuList, HocMenu) -implementPtrList(HocMenuList, HocMenu) /*static*/ class MenuStack { public: bool isEmpty() { - return l_.count() == 0; + return l_.empty(); } void push(HocMenu* m); void pop() { - if (l_.count()) { - l_.item(0)->unref(); - l_.remove(0); + if (!l_.empty()) { + l_.front()->unref(); + l_.erase(l_.begin()); } } Menu* top() { - return (l_.count()) ? l_.item(0)->menu() : NULL; + return (l_.empty()) ? nullptr : l_.front()->menu(); } HocItem* hoc_item() { - return (l_.count()) ? l_.item(0) : NULL; + return (l_.empty()) ? nullptr : l_.front(); } void clean(); private: - HocMenuList l_; + std::vector l_; }; void MenuStack::push(HocMenu* m) { m->ref(); - l_.prepend(m); + l_.insert(l_.begin(), m); } void MenuStack::clean() { - for (long i = 0; i < l_.count(); i++) { - l_.item(i)->unref(); + for (auto& item: l_) { + item->unref(); } - l_.remove_all(); + l_.clear(); } static MenuStack* menuStack; static Menu* hocmenubar; @@ -776,7 +766,7 @@ void hoc_ivbutton(const char* name, const char* action, Object* pyact) { } } -void hoc_ivstatebutton(double* pd, +void hoc_ivstatebutton(neuron::container::data_handle pd, const char* name, const char* action, int style, @@ -837,7 +827,7 @@ void hoc_ivvalue_keep_updated(const char* name, const char* variable, Object* py variable, NULL, false, - hoc_val_pointer(variable), + hoc_val_handle(variable), false, true, (s ? 
s->extra : NULL), @@ -852,7 +842,10 @@ void hoc_ivfixedvalue(const char* name, const char* variable, bool deflt, bool u hoc_ivvaluerun(name, variable, NULL, deflt, false, usepointer); } -void hoc_ivpvalue(const char* name, double* pd, bool deflt, HocSymExtension* extra) { +void hoc_ivpvalue(const char* name, + neuron::container::data_handle pd, + bool deflt, + HocSymExtension* extra) { hoc_ivpvaluerun(name, pd, 0, deflt, false, extra); } @@ -864,12 +857,12 @@ void hoc_ivvaluerun(const char* name, bool usepointer, Object* pyvar, Object* pyact) { - hoc_ivvaluerun_ex(name, variable, NULL, pyvar, action, pyact, deflt, canRun, usepointer); + hoc_ivvaluerun_ex(name, variable, {}, pyvar, action, pyact, deflt, canRun, usepointer); } void hoc_ivvaluerun_ex(CChar* name, CChar* variable, - double* pvar, + neuron::container::data_handle pvar, Object* pyvar, CChar* action, Object* pyact, @@ -883,7 +876,7 @@ void hoc_ivvaluerun_ex(CChar* name, if (!pvar && !pyvar) { s = hoc_get_symbol(variable); if (usepointer) { - pvar = hoc_val_pointer(variable); + pvar = hoc_val_handle(variable); } } HocSymExtension* xtra = extra; @@ -894,7 +887,7 @@ void hoc_ivvaluerun_ex(CChar* name, } void hoc_ivpvaluerun(const char* name, - double* pd, + neuron::container::data_handle pd, const char* action, bool deflt, bool canRun, @@ -917,7 +910,7 @@ void hoc_ivvarlabel(char** s, Object* pyvar) { } // ZFM added vert -void hoc_ivslider(double* pd, +void hoc_ivslider(neuron::container::data_handle pd, float low, float high, float resolution, @@ -957,55 +950,19 @@ void HocPanel::save_all(std::ostream&) { long i, cnt; HocDataPaths* data_paths = new HocDataPaths(); - cnt = hoc_panel_list->count(); - if (hoc_panel_list) - for (i = 0; i < cnt; ++i) { - hoc_panel_list->item(i)->data_path(data_paths, true); - } - data_paths->search(); - if (hoc_panel_list) - for (i = 0; i < cnt; ++i) { - hoc_panel_list->item(i)->data_path(data_paths, false); - } - delete data_paths; -} - -void HocPanel::update_ptrs() { - if (!hoc_panel_list) - return; - int i, j; - for (i = 0; i < hoc_panel_list->count(); ++i) { - HocUpdateItemList& ul = hoc_panel_list->item(i)->elist_; - for (j = 0; j < ul.count(); ++j) { - ul.item(j)->update_ptrs(); + if (hoc_panel_list) { + for (auto& item: *hoc_panel_list) { + item->data_path(data_paths, true); } } -} - -#if MAC -void HocPanel::mac_menubar() { - int i = 1; - int mindex = 0; - printf("menubar 0 %s\n", getName()); - mac_menubar(mindex, i, 0); -} - -void HocPanel::mac_menubar(int& mindex, int& i, int m) { - int mr; - int mi = 0; - while (i < ilist_.count()) { - mr = ilist_.item(i)->mac_menubar(mindex, m, mi); - ++i; - ++mi; - if (mr > m) { - mac_menubar(mindex, i, mr); - } else if (mr < m) { - return; + data_paths->search(); + if (hoc_panel_list) { + for (auto& item: *hoc_panel_list) { + item->data_path(data_paths, false); } } - return; + delete data_paths; } -#endif void HocPanel::map_window(int scroll) { // switch to scrollbox if too many items @@ -1033,15 +990,12 @@ void HocPanel::map_window(int scroll) { } // HocPanel - -implementPtrList(HocUpdateItemList, HocUpdateItem); -implementPtrList(HocItemList, HocItem); - static void var_freed(void* pd, int size) { - if (hoc_panel_list) - for (long i = hoc_panel_list->count() - 1; i >= 0; --i) { - hoc_panel_list->item(i)->check_valid_pointers(pd, size); + if (hoc_panel_list) { + for (auto&& elem: reverse(*hoc_panel_list)) { + elem->check_valid_pointers(pd, size); } + } } HocPanel::HocPanel(const char* name, bool h) @@ -1061,11 +1015,11 @@ HocPanel::HocPanel(const char* 
name, bool h) wk.background()), wk.style())); if (!hoc_panel_list) { - hoc_panel_list = new HocPanelList; + hoc_panel_list = new std::vector; Oc oc; oc.notify_freed(var_freed); } - hoc_panel_list->append(this); + hoc_panel_list->push_back(this); item_append(new HocItem(name)); left_ = -1000.; bottom_ = -1000.; @@ -1073,22 +1027,18 @@ HocPanel::HocPanel(const char* name, bool h) } HocPanel::~HocPanel() { - long i; box_->unref(); - for (i = 0; i < ilist_.count(); i++) { - ilist_.item(i)->HocItem::unref(); - } - for (i = 0; i < elist_.count(); i++) { - elist_.item(i)->HocItem::unref(); + for (auto& item: ilist_) { + item->HocItem::unref(); } - for (i = 0; i < hoc_panel_list->count(); ++i) { - if (hoc_panel_list->item(i) == this) { - hoc_panel_list->remove(i); - break; - } + for (auto& item: elist_) { + item->HocItem::unref(); } - ilist_.remove_all(); - elist_.remove_all(); + erase_first(*hoc_panel_list, this); + ilist_.clear(); + ilist_.shrink_to_fit(); + elist_.clear(); + elist_.shrink_to_fit(); // printf("~HocPanel\n"); } @@ -1103,31 +1053,24 @@ void HocUpdateItem::check_pointer(void*, int) {} void HocUpdateItem::data_path(HocDataPaths*, bool) {} // ones that get updated on every doEvents() -HocUpdateItemList* HocPanel::update_list_; +std::vector* HocPanel::update_list_; void HocPanel::keep_updated() { static int cnt = 0; if (update_list_ && (++cnt % 10 == 0)) { - long i, cnt = update_list_->count(); - if (cnt) - for (i = 0; i < cnt; ++i) { - update_list_->item(i)->update_hoc_item(); - } + for (auto& item: *update_list_) { + item->update_hoc_item(); + } } } void HocPanel::keep_updated(HocUpdateItem* hui, bool add) { if (!update_list_) { - update_list_ = new HocUpdateItemList(); + update_list_ = new std::vector(); } if (add) { - update_list_->append(hui); + update_list_->push_back(hui); } else { - for (long i = 0; i < update_list_->count(); ++i) { - if (update_list_->item(i) == hui) { - update_list_->remove(i); - break; - } - } + erase_first(*update_list_, hui); } } @@ -1161,11 +1104,11 @@ PolyGlyph* HocPanel::box() { } const char* HocPanel::getName() { - return ilist_.item(0)->getStr(); + return ilist_.front()->getStr(); } HocItem* HocPanel::hoc_item() { - return ilist_.item(0); + return ilist_.front(); } void HocPanel::pushButton(const char* name, const char* action, bool activate, Object* pyact) { @@ -1201,13 +1144,6 @@ void HocPushButton::write(std::ostream& o) { o << buf << std::endl; } -#if MAC -int HocPushButton::mac_menubar(int&, int m, int mi) { - printf("button item %d in menu %d \"%s\", \"%s\"\n", mi, m, getStr(), hideQuote(a_->name())); - return m; -} -#endif - HocRadioButton::HocRadioButton(const char* name, HocRadioAction* a, HocItem* hi) : HocItem(name, hi) { a_ = a; @@ -1224,13 +1160,6 @@ void HocRadioButton::write(std::ostream& o) { o << buf << std::endl; } -#if MAC -int HocRadioButton::mac_menubar(int&, int m, int mi) { - printf("radio item %d in menu %d \"%s\", \"%s\"\n", mi, m, getStr(), hideQuote(a_->name())); - return m; -} -#endif - void HocPanel::label(const char* name) { box()->append(LayoutKit::instance()->margin(WidgetKit::instance()->label(name), 3)); item_append(new HocLabel(name)); @@ -1239,12 +1168,12 @@ void HocPanel::label(const char* name) { void HocPanel::var_label(char** name, Object* pyvar) { HocVarLabel* l = new HocVarLabel(name, box(), pyvar); item_append(l); - elist_.append(l); + elist_.push_back(l); l->ref(); } // ZFM added vert -void HocPanel::slider(double* pd, +void HocPanel::slider(neuron::container::data_handle pd, float low, float high, float 
resolution, @@ -1269,7 +1198,7 @@ void HocPanel::slider(double* pd, wk->end_style(); } item_append(s); - elist_.append(s); + elist_.push_back(s); s->ref(); } @@ -1341,14 +1270,6 @@ void HocMenu::write(std::ostream& o) { o << buf << std::endl; } -#if MAC -int HocMenu::mac_menubar(int& mindex, int m, int mi) { - ++mindex; - printf("menu %d is item %d in %d %s\n", mindex, mi, m, getStr()); - return mindex; -} -#endif - static Coord xvalue_field_size; void HocPanel::valueEd(const char* prompt, @@ -1357,14 +1278,14 @@ void HocPanel::valueEd(const char* prompt, bool canrun, bool deflt, bool keep_updated) { - valueEd(prompt, NULL, NULL, canrun, NULL, deflt, keep_updated, NULL, pyvar, pyact); + valueEd(prompt, NULL, NULL, canrun, {}, deflt, keep_updated, NULL, pyvar, pyact); } void HocPanel::valueEd(const char* name, const char* variable, const char* action, bool canrun, - double* pd, + neuron::container::data_handle pd, bool deflt, bool keep_updated, HocSymExtension* extra, @@ -1408,7 +1329,7 @@ void HocPanel::valueEd(const char* name, fe = new HocValEditor(name, variable, vel, act, pd, canrun, hoc_item(), pyvar); } ih_->append_input_handler(fe->field_editor()); - elist_.append(fe); + elist_.push_back(fe); fe->ref(); act->setFieldSEditor(fe); // so button can change the editor LayoutKit* lk = LayoutKit::instance(); @@ -1445,12 +1366,13 @@ void HocPanel::save(std::ostream& o) { void HocPanel::write(std::ostream& o) { Oc oc; char buf[200]; - long i; // o << "xpanel(\"" << getName() << "\")" << std::endl; Sprintf(buf, "xpanel(\"%s\", %d)", getName(), horizontal_); o << buf << std::endl; - for (i = 1; i < ilist_.count(); i++) { - ilist_.item(i)->write(o); + if (ilist_.size() > 1) { + for (std::size_t i = 1; i < ilist_.size(); ++i) { + ilist_[i]->write(o); + } } if (has_window()) { Sprintf(buf, "xpanel(%g,%g)", window()->save_left(), window()->save_bottom()); @@ -1462,7 +1384,7 @@ void HocPanel::write(std::ostream& o) { void HocPanel::item_append(HocItem* hi) { hi->ref(); - ilist_.append(hi); + ilist_.push_back(hi); } // HocItem @@ -1477,18 +1399,6 @@ void HocItem::write(std::ostream& o) { o << str_.string() << std::endl; } -#if MAC -int HocItem::mac_menubar(int&, int m, int mi) { - if (strcmp(getStr(), "xmenu()") == 0) { - printf("end menu %d\n", m); - return -1; - } else { - printf("invalid menuitem %s\n", getStr()); - } - return m; -} -#endif - const char* HocItem::getStr() { return str_.string(); } @@ -1542,11 +1452,10 @@ HocVarLabel::HocVarLabel(char** cpp, PolyGlyph* pg, Object* pyvar) cp_ = NULL; if (pyvar_) { hoc_obj_ref(pyvar_); - (*nrnpy_guigetstr)(pyvar_, &cp_); + neuron::python::methods.guigetstr(pyvar_, &cp_); } else { cp_ = *cpp_; } - variable_ = NULL; p_ = new Patch(LayoutKit::instance()->margin(WidgetKit::instance()->label(cp_), 3)); p_->ref(); pg->append(p_); @@ -1554,9 +1463,6 @@ HocVarLabel::HocVarLabel(char** cpp, PolyGlyph* pg, Object* pyvar) HocVarLabel::~HocVarLabel() { p_->unref(); - if (variable_) { - delete variable_; - } if (pyvar_) { hoc_obj_unref(pyvar_); if (cp_) { @@ -1566,9 +1472,9 @@ HocVarLabel::~HocVarLabel() { } void HocVarLabel::write(std::ostream& o) { - if (variable_ && cpp_) { + if (!variable_.empty() && cpp_) { char buf[256]; - Sprintf(buf, "xvarlabel(%s)", variable_->string()); + Sprintf(buf, "xvarlabel(%s)", variable_.c_str()); o << buf << std::endl; } else { o << "xlabel(\"\")" << std::endl; @@ -1577,7 +1483,7 @@ void HocVarLabel::write(std::ostream& o) { void HocVarLabel::update_hoc_item() { if (pyvar_) { - if ((*nrnpy_guigetstr)(pyvar_, &cp_)) { + if 
(neuron::python::methods.guigetstr(pyvar_, &cp_)) { p_->body(LayoutKit::instance()->margin(WidgetKit::instance()->label(cp_), 3)); p_->redraw(); p_->reallocate(); @@ -1779,7 +1685,7 @@ HocDefaultValEditor::HocDefaultValEditor(const char* name, const char* variable, ValEdLabel* prompt, HocValAction* a, - double* pd, + neuron::container::data_handle pd, bool canrun, HocItem* hi, Object* pyvar) @@ -1866,7 +1772,7 @@ HocValEditorKeepUpdated::HocValEditorKeepUpdated(const char* name, const char* variable, ValEdLabel* prompt, HocValAction* act, - double* pd, + neuron::container::data_handle pd, HocItem* hi, Object* pyvar) : HocValEditor(name, variable, prompt, act, pd, false, hi, pyvar) { @@ -1951,11 +1857,12 @@ HocValEditor::HocValEditor(const char* name, const char* variable, ValEdLabel* prompt, HocValAction* a, - double* pd, + neuron::container::data_handle pd, bool canrun, HocItem* hi, Object* pyvar) - : HocUpdateItem(name, hi) { + : HocUpdateItem(name, hi) + , pval_{pd} { if (!xvalue_format) { set_format(); } @@ -1968,16 +1875,11 @@ HocValEditor::HocValEditor(const char* name, canrun_ = canrun; active_ = false; domain_limits_ = NULL; - variable_ = NULL; pyvar_ = pyvar; - pval_ = NULL; - if (pd) { - pval_ = pd; - } if (pyvar) { hoc_obj_ref(pyvar); } else if (variable) { - variable_ = new CopyString(variable); + variable_ = variable; Symbol* sym = hoc_get_symbol(variable); if (sym && sym->extra) { domain_limits_ = sym->extra->parmlimits; @@ -1989,9 +1891,6 @@ HocValEditor::HocValEditor(const char* name, HocValEditor::~HocValEditor() { // printf("~HocValEditor\n"); - if (variable_) { - delete variable_; - } if (pyvar_) { hoc_obj_unref(pyvar_); } @@ -2028,15 +1927,15 @@ void HocValEditor::print(Printer* p, const Allocation& a) const { void HocValEditor::set_val(double x) { char buf[200]; if (pyvar_) { - (*nrnpy_guisetval)(pyvar_, x); + neuron::python::methods.guisetval(pyvar_, x); return; } hoc_ac_ = x; Oc oc; if (pval_) { *pval_ = hoc_ac_; - } else if (variable_) { - Sprintf(buf, "%s = hoc_ac_\n", variable_->string()); + } else if (!variable_.empty()) { + Sprintf(buf, "%s = hoc_ac_\n", variable_.c_str()); oc.run(buf); } } @@ -2044,12 +1943,12 @@ void HocValEditor::set_val(double x) { double HocValEditor::get_val() { char buf[200]; if (pyvar_) { - return (*nrnpy_guigetval)(pyvar_); + return neuron::python::methods.guigetval(pyvar_); } else if (pval_) { return *pval_; - } else if (variable_) { + } else if (!variable_.empty()) { Oc oc; - Sprintf(buf, "hoc_ac_ = %s\n", variable_->string()); + Sprintf(buf, "hoc_ac_ = %s\n", variable_.c_str()); oc.run(buf); return hoc_ac_; } else { @@ -2073,15 +1972,16 @@ void HocValEditor::evalField() { } void HocValEditor::audit() { - char buf[200]; + auto sout = std::stringstream{}; if (pyvar_) { return; - } else if (variable_) { - Sprintf(buf, "%s = %s\n", variable_->string(), fe_->text()->string()); + } else if (!variable_.empty()) { + sout << variable_ << " = " << fe_->text()->string(); } else if (pval_) { - Sprintf(buf, "// %p pointer set to %s\n", pval_, fe_->text()->string()); + sout << "// " << pval_ << " set to " << fe_->text()->string(); } - hoc_audit_command(buf); + auto buf = sout.str(); + hoc_audit_command(buf.c_str()); } void HocValEditor::updateField() { @@ -2094,9 +1994,9 @@ void HocValEditor::updateField() { } else if (pval_) { Sprintf(buf, xvalue_format->string(), *pval_); hoc_ac_ = *pval_; - } else if (variable_) { + } else if (!variable_.empty()) { Oc oc; - Sprintf(buf, "hoc_ac_ = %s\n", variable_->string()); + Sprintf(buf, "hoc_ac_ = %s\n", 
variable_.c_str()); if (oc.run(buf, 0)) { strcpy(buf, "Doesn't exist"); } else { @@ -2113,10 +2013,10 @@ void HocValEditor::updateField() { void HocValEditor::write(std::ostream& o) { char buf[200]; Oc oc; - if (variable_) { - Sprintf(buf, "hoc_ac_ = %s\n", variable_->string()); + if (!variable_.empty()) { + Sprintf(buf, "hoc_ac_ = %s\n", variable_.c_str()); oc.run(buf); - Sprintf(buf, "%s = %g", variable_->string(), hoc_ac_); + Sprintf(buf, "%s = %g", variable_.c_str(), hoc_ac_); } else if (pval_) { Sprintf(buf, "/* don't know the hoc path to %g", *pval_); return; @@ -2136,7 +2036,7 @@ void HocValEditor::write(std::ostream& o) { 200, "xvalue(\"%s\",\"%s\", %d,\"%s\", %d, %d )", getStr(), - variable_->string(), + variable_.c_str(), hoc_default_val_editor(), hideQuote(action_->name()), (int) canrun_, @@ -2145,8 +2045,8 @@ void HocValEditor::write(std::ostream& o) { } const char* HocValEditor::variable() const { - if (variable_) { - return variable_->string(); + if (!variable_.empty()) { + return variable_.c_str(); } else { return NULL; } @@ -2364,36 +2264,39 @@ void Oc::notifyHocValue() { // printf("notifyHocValue %d\n", ++j); ParseTopLevel ptl; ptl.save(); - if (hoc_panel_list) - for (long i = hoc_panel_list->count() - 1; i >= 0; --i) { - hoc_panel_list->item(i)->notifyHocValue(); + if (hoc_panel_list) { + for (auto&& e: reverse(*hoc_panel_list)) { + e->notifyHocValue(); } + } ptl.restore(); } void HocPanel::notifyHocValue() { - for (long i = elist_.count() - 1; i >= 0; --i) { - elist_.item(i)->update_hoc_item(); + for (auto&& e: reverse(elist_)) { + e->update_hoc_item(); } } void HocPanel::check_valid_pointers(void* v, int size) { - for (long i = elist_.count() - 1; i >= 0; --i) { - elist_.item(i)->check_pointer(v, size); + for (auto&& e: reverse(elist_)) { + e->check_pointer(v, size); } } void HocValEditor::check_pointer(void* v, int size) { - if (pval_) { - double* pd = (double*) v; + auto* const pval_raw = static_cast(pval_); + if (pval_raw) { + auto* const pd = static_cast(v); if (size == 1) { - if (pd != pval_) + if (pd != pval_raw) { return; + } } else { - if (pval_ < pd || pval_ >= pd + size) + if (pval_raw < pd || pval_raw >= pd + size) return; } - pval_ = 0; + pval_ = {}; } } void HocVarLabel::check_pointer(void* v, int) { @@ -2404,33 +2307,28 @@ void HocVarLabel::check_pointer(void* v, int) { } void HocPanel::data_path(HocDataPaths* hdp, bool append) { - for (long i = elist_.count() - 1; i >= 0; --i) { - elist_.item(i)->data_path(hdp, append); + for (auto&& e: reverse(elist_)) { + e->data_path(hdp, append); } } void HocValEditor::data_path(HocDataPaths* hdp, bool append) { - if (!variable_) { + if (variable_.empty()) { + auto* const pval_raw = static_cast(pval_); if (append) { - hdp->append(pval_); + hdp->append(pval_raw); } else { - String* s = hdp->retrieve(pval_); - if (s) { - variable_ = new CopyString(s->string()); - } + variable_ = hdp->retrieve(pval_raw); } } } void HocVarLabel::data_path(HocDataPaths* hdp, bool append) { - if (cpp_ && !variable_) { + if (cpp_ && variable_.empty()) { if (append) { hdp->append(cpp_); } else { - String* s = hdp->retrieve(cpp_); - if (s) { - variable_ = new CopyString(s->string()); - } + variable_ = hdp->retrieve(cpp_); } } } @@ -2714,7 +2612,7 @@ void HocValStepper::right() {} // OcSlider // ZFM added vert_ -OcSlider::OcSlider(double* pd, +OcSlider::OcSlider(neuron::container::data_handle pd, float low, float high, float resolution, @@ -2726,7 +2624,6 @@ OcSlider::OcSlider(double* pd, Object* pysend) : HocUpdateItem("") { resolution_ = 
resolution; - variable_ = NULL; pval_ = pd; pyvar_ = pyvar; if (pyvar_) { @@ -2751,9 +2648,6 @@ OcSlider::~OcSlider() { delete send_; } delete bv_; - if (variable_) { - delete variable_; - } if (pyvar_) { hoc_obj_unref(pyvar_); } @@ -2768,7 +2662,7 @@ void OcSlider::update(Observable*) { if (pval_) { *pval_ = x; } else if (pyvar_) { - (*nrnpy_guisetval)(pyvar_, x); + neuron::python::methods.guisetval(pyvar_, x); } else { return; } @@ -2789,13 +2683,16 @@ void OcSlider::update(Observable*) { } void OcSlider::audit() { + auto sout = std::stringstream{}; char buf[200]; - if (variable_) { - Sprintf(buf, "%s = %g\n", variable_->string(), *pval_); + Sprintf(buf, "%g", *pval_); + if (!variable_.empty()) { + sout << variable_.c_str() << " = " << buf << "\n"; } else if (pval_) { - Sprintf(buf, "// %p pointer set to %g\n", pval_, *pval_); + sout << "// " << pval_ << " set to " << buf << "\n"; } - hoc_audit_command(buf); + auto str = sout.str(); + hoc_audit_command(str.c_str()); if (send_) { send_->audit(); } @@ -2816,10 +2713,11 @@ double OcSlider::slider_val() { void OcSlider::update_hoc_item() { Coord x = 0.; if (pyvar_) { - x = Coord((*nrnpy_guigetval)(pyvar_)); + x = Coord(neuron::python::methods.guigetval(pyvar_)); } else if (pval_) { x = Coord(*pval_); } else { + pval_ = {}; return; } if (x != bv_->cur_lower(Dimension_X)) { @@ -2830,37 +2728,36 @@ void OcSlider::update_hoc_item() { } } void OcSlider::check_pointer(void* v, int size) { - if (pval_) { - double* pd = (double*) v; + auto* const pval_raw = static_cast(pval_); + if (pval_raw) { + auto* const pd = static_cast(v); if (size == 1) { - if (pd != pval_) + if (pd != pval_raw) return; } else { - if (pval_ < pd || pval_ >= pd + size) + if (pval_raw < pd || pval_raw >= pd + size) return; } - pval_ = 0; + pval_ = {}; } } void OcSlider::data_path(HocDataPaths* hdp, bool append) { - if (!variable_ && pval_) { + if (variable_.empty() && pval_) { + auto* const pval_raw = static_cast(pval_); if (append) { - hdp->append(pval_); + hdp->append(pval_raw); } else { - String* s = hdp->retrieve(pval_); - if (s) { - variable_ = new CopyString(s->string()); - } + variable_ = hdp->retrieve(pval_raw); } } } void OcSlider::write(std::ostream& o) { - if (variable_) { + if (!variable_.empty()) { char buf[256]; if (send_) { Sprintf(buf, "xslider(&%s, %g, %g, \"%s\", %d, %d)", - variable_->string(), + variable_.c_str(), bv_->lower(Dimension_X), bv_->upper(Dimension_X), hideQuote(send_->name()), @@ -2869,7 +2766,7 @@ void OcSlider::write(std::ostream& o) { } else { Sprintf(buf, "xslider(&%s, %g, %g, %d, %d)", - variable_->string(), + variable_.c_str(), bv_->lower(Dimension_X), bv_->upper(Dimension_X), vert_, @@ -2882,7 +2779,7 @@ void OcSlider::write(std::ostream& o) { // Button with state -void HocPanel::stateButton(double* pd, +void HocPanel::stateButton(neuron::container::data_handle pd, const char* name, const char* action, int style, @@ -2898,14 +2795,14 @@ void HocPanel::stateButton(double* pd, box()->append(button); HocStateButton* hsb = new HocStateButton(pd, name, button, act, style, hoc_item(), pyvar); item_append(hsb); - elist_.append(hsb); + elist_.push_back(hsb); hsb->ref(); } declareActionCallback(HocStateButton); implementActionCallback(HocStateButton); -HocStateButton::HocStateButton(double* pd, +HocStateButton::HocStateButton(neuron::container::data_handle pd, const char* text, Button* button, HocAction* action, @@ -2919,7 +2816,6 @@ HocStateButton::HocStateButton(double* pd, if (pyvar_) { hoc_obj_ref(pyvar_); } - variable_ = NULL; name_ = new 
CopyString(text); action_ = action; action->hoc_item(this); @@ -2932,8 +2828,6 @@ HocStateButton::HocStateButton(double* pd, HocStateButton::~HocStateButton() { - if (variable_) - delete variable_; if (pyvar_) { hoc_obj_unref(pyvar_); } @@ -2961,16 +2855,15 @@ void HocStateButton::button_action() { b_->state()->set(TelltaleState::is_chosen, !chosen()); return; } - if (pval_) { + if (pyvar_) { TelltaleState* t = b_->state(); - if (chosen() != bool(*pval_)) { - *pval_ = double(chosen()); + if (chosen() != bool(neuron::python::methods.guigetval(pyvar_))) { + neuron::python::methods.guisetval(pyvar_, double(chosen())); } - } - if (pyvar_) { + } else if (pval_) { TelltaleState* t = b_->state(); - if (chosen() != bool((*nrnpy_guigetval)(pyvar_))) { - (*nrnpy_guisetval)(pyvar_, double(chosen())); + if (chosen() != bool(*pval_)) { + *pval_ = double(chosen()); } } if (action_) { @@ -2986,9 +2879,13 @@ void HocStateButton::button_action() { void HocStateButton::update_hoc_item() { double x = 0.; if (pyvar_) { - x = nrnpy_guigetval(pyvar_); + x = neuron::python::methods.guigetval(pyvar_); } else if (pval_) { x = *pval_; + } else { // not (no longer) valid + pval_ = {}; + b_->state()->set(TelltaleState::is_enabled_visible_active_chosen, false); + return; } if (x) { b_->state()->set(TelltaleState::is_chosen, true); @@ -2998,45 +2895,44 @@ void HocStateButton::update_hoc_item() { } void HocStateButton::check_pointer(void* v, int size) { - if (pval_) { - double* pd = (double*) v; + auto* const pval_raw = static_cast(pval_); + if (pval_raw) { + auto* const pd = static_cast(v); if (size == 1) { - if (pd != pval_) + if (pd != pval_raw) return; } else { - if (pval_ < pd || pval_ >= pd + size) + if (pval_raw < pd || pval_raw >= pd + size) return; } - pval_ = 0; + pval_ = {}; } } void HocStateButton::data_path(HocDataPaths* hdp, bool append) { - if (!variable_ && pval_) { + if (variable_.empty() && pval_) { + auto* const pval_raw = static_cast(pval_); if (append) { - hdp->append(pval_); + hdp->append(pval_raw); } else { - String* s = hdp->retrieve(pval_); - if (s) { - variable_ = new CopyString(s->string()); - } + variable_ = hdp->retrieve(pval_raw); } } } void HocStateButton::write(std::ostream& o) { - if (variable_) { + if (!variable_.empty()) { char buf[256]; if (style_ == PALETTE) { Sprintf(buf, "xstatebutton(\"%s\",&%s,\"%s\")", name_->string(), - variable_->string(), + variable_.c_str(), hideQuote(action_->name())); } else { Sprintf(buf, "xcheckbox(\"%s\",&%s,\"%s\")", name_->string(), - variable_->string(), + variable_.c_str(), hideQuote(action_->name())); } o << buf << std::endl; @@ -3047,7 +2943,7 @@ void HocStateButton::write(std::ostream& o) { // menu item with state -MenuItem* HocPanel::menuStateItem(double* pd, +MenuItem* HocPanel::menuStateItem(neuron::container::data_handle pd, const char* name, const char* action, Object* pyvar, @@ -3056,7 +2952,7 @@ MenuItem* HocPanel::menuStateItem(double* pd, HocAction* act = new HocAction(action, pyact); HocStateMenuItem* hsb = new HocStateMenuItem(pd, name, mi, act, hoc_item(), pyvar); item_append(hsb); - elist_.append(hsb); + elist_.push_back(hsb); hsb->ref(); return mi; } @@ -3065,7 +2961,7 @@ MenuItem* HocPanel::menuStateItem(double* pd, declareActionCallback(HocStateMenuItem); implementActionCallback(HocStateMenuItem); -HocStateMenuItem::HocStateMenuItem(double* pd, +HocStateMenuItem::HocStateMenuItem(neuron::container::data_handle pd, const char* text, MenuItem* mi, HocAction* action, @@ -3077,7 +2973,6 @@ HocStateMenuItem::HocStateMenuItem(double* 
pd, if (pyvar_) { hoc_obj_ref(pyvar_); } - variable_ = NULL; name_ = new CopyString(text); action_ = action; action->hoc_item(this); @@ -3090,8 +2985,6 @@ HocStateMenuItem::HocStateMenuItem(double* pd, HocStateMenuItem::~HocStateMenuItem() { - if (variable_) - delete variable_; delete name_; if (pyvar_) { hoc_obj_unref(pyvar_); @@ -3127,8 +3020,8 @@ void HocStateMenuItem::button_action() { } if (pyvar_) { TelltaleState* t = b_->state(); - if (chosen() != bool((*nrnpy_guigetval)(pyvar_))) { - (*nrnpy_guisetval)(pyvar_, double(chosen())); + if (chosen() != bool(neuron::python::methods.guigetval(pyvar_))) { + neuron::python::methods.guisetval(pyvar_, double(chosen())); } } if (action_) { @@ -3144,9 +3037,13 @@ void HocStateMenuItem::button_action() { void HocStateMenuItem::update_hoc_item() { double x = 0.; if (pyvar_) { - x = nrnpy_guigetval(pyvar_); + x = neuron::python::methods.guigetval(pyvar_); } else if (pval_) { x = *pval_; + } else { // not (no longer) valid + pval_ = {}; + b_->state()->set(TelltaleState::is_enabled_visible_active_chosen, false); + return; } if (x) { b_->state()->set(TelltaleState::is_chosen, true); @@ -3156,39 +3053,38 @@ void HocStateMenuItem::update_hoc_item() { } void HocStateMenuItem::check_pointer(void* v, int size) { - if (pval_) { - double* pd = (double*) v; + auto* const pval_raw = static_cast(pval_); + if (pval_raw) { + auto* const pd = static_cast(v); if (size == 1) { - if (pd != pval_) + if (pd != pval_raw) return; } else { - if (pval_ < pd || pval_ >= pd + size) + if (pval_raw < pd || pval_raw >= pd + size) return; } - pval_ = 0; + pval_ = {}; } } void HocStateMenuItem::data_path(HocDataPaths* hdp, bool append) { - if (!variable_ && pval_) { + if (variable_.empty() && pval_) { + auto* const pval_raw = static_cast(pval_); if (append) { - hdp->append(pval_); + hdp->append(pval_raw); } else { - String* s = hdp->retrieve(pval_); - if (s) { - variable_ = new CopyString(s->string()); - } + variable_ = hdp->retrieve(pval_raw); } } } void HocStateMenuItem::write(std::ostream& o) { - if (variable_) { + if (!variable_.empty()) { char buf[256]; Sprintf(buf, "xcheckbox(\"%s\",&%s,\"%s\")", name_->string(), - variable_->string(), + variable_.c_str(), hideQuote(action_->name())); o << buf << std::endl; @@ -3239,29 +3135,3 @@ static Member_func vfe_members[] = {{"default", vfe_default}, {0, 0}}; void ValueFieldEditor_reg() { class2oc("ValueFieldEditor", vfe_cons, vfe_destruct, vfe_members, NULL, NULL, NULL); } - -#if HAVE_IV -void HocValEditor::update_ptrs() { - update_ptrs_helper(&pval_); -} - -void OcSlider::update_ptrs() { - update_ptrs_helper(&pval_); -} - -void HocStateButton::update_ptrs() { - update_ptrs_helper(&pval_); -} - -void HocStateMenuItem::update_ptrs() { - update_ptrs_helper(&pval_); -} - -void HocUpdateItem::update_ptrs_helper(double** p) { - if (*p) { - double* pd = nrn_recalc_ptr(*p); - *p = pd; - } -} - -#endif diff --git a/src/ivoc/xmenu.h b/src/ivoc/xmenu.h index 2754ef3e37..929bb84511 100644 --- a/src/ivoc/xmenu.h +++ b/src/ivoc/xmenu.h @@ -9,7 +9,7 @@ #include #include #include -#if defined(MINGW) || defined(MAC) +#if defined(MINGW) #define UseFieldEditor 1 #else #define UseFieldEditor 0 // Use the FieldSEditor @@ -42,9 +42,6 @@ class ValEdLabel; class ScenePicker; struct HocSymExtension; -declarePtrList(HocUpdateItemList, HocUpdateItem) -declarePtrList(HocItemList, HocItem) -declarePtrList(HocPanelList, HocPanel) class HocPanel: public OcGlyph { public: @@ -53,7 +50,7 @@ class HocPanel: public OcGlyph { virtual void map_window(int scroll = -1); 
// -1 leave up to panel_scroll attribute void pushButton(const char* name, const char* action, bool activate = false, Object* pyact = 0); - void stateButton(double* pd, + void stateButton(neuron::container::data_handle pd, const char* name, const char* action, int style, @@ -64,7 +61,7 @@ class HocPanel: public OcGlyph { const char* action, bool activate = false, Object* pyact = 0); - MenuItem* menuStateItem(double* pd, + MenuItem* menuStateItem(neuron::container::data_handle pd, const char* name, const char* action, Object* pyvar = NULL, @@ -73,7 +70,7 @@ class HocPanel: public OcGlyph { const char* variable, const char* action = 0, bool canrun = false, - double* pd = NULL, + neuron::container::data_handle pd = {}, bool deflt = false, bool keep_updated = false, HocSymExtension* extra = NULL, @@ -87,7 +84,7 @@ class HocPanel: public OcGlyph { bool keep_updated = false); // ZFM added vert - void slider(double*, + void slider(neuron::container::data_handle, float low = 0, float high = 100, float resolution = 1, @@ -111,10 +108,6 @@ class HocPanel: public OcGlyph { static void save_all(std::ostream&); void data_path(HocDataPaths*, bool); void item_append(HocItem*); -#if MAC - void mac_menubar(); - void mac_menubar(int&, int&, int); // recurse per menu through list -#endif static void keep_updated(); static void keep_updated(HocUpdateItem*, bool); static void paneltool(const char* name, @@ -123,13 +116,12 @@ class HocPanel: public OcGlyph { ScenePicker*, Object* pycallback = NULL, Object* pyselact = NULL); - static void update_ptrs(); private: PolyGlyph* box_; - HocUpdateItemList elist_; - HocItemList ilist_; - static HocUpdateItemList* update_list_; + std::vector elist_; + std::vector ilist_; + static std::vector* update_list_; bool horizontal_; InputHandler* ih_; }; @@ -142,9 +134,7 @@ class HocItem: public Resource { const char* getStr(); virtual void help(const char* childpath = NULL); virtual void help_parent(HocItem*); -#if MAC - virtual int mac_menubar(int&, int, int); -#endif + private: CopyString str_; HocItem* help_parent_; @@ -155,9 +145,7 @@ class HocPushButton: public HocItem { HocPushButton(const char*, HocAction*, HocItem* parent = NULL); virtual ~HocPushButton(); virtual void write(std::ostream&); -#if MAC - virtual int mac_menubar(int&, int, int); -#endif + private: HocAction* a_; }; @@ -167,9 +155,7 @@ class HocRadioButton: public HocItem { HocRadioButton(const char*, HocRadioAction*, HocItem* parent = NULL); virtual ~HocRadioButton(); virtual void write(std::ostream&); -#if MAC - virtual int mac_menubar(int&, int, int); -#endif + private: HocRadioAction* a_; }; @@ -185,9 +171,7 @@ class HocMenu: public HocItem { virtual MenuItem* item() { return mi_; } -#if MAC - virtual int mac_menubar(int&, int, int); -#endif + private: MenuItem* mi_; Menu* menu_; @@ -201,8 +185,6 @@ class HocUpdateItem: public HocItem { virtual void update_hoc_item(); virtual void check_pointer(void*, int vector_size); virtual void data_path(HocDataPaths*, bool); - virtual void update_ptrs() {} - void update_ptrs_helper(double**); }; class HocLabel: public HocItem { @@ -225,7 +207,7 @@ class HocVarLabel: public HocUpdateItem { Patch* p_; char** cpp_; char* cp_; - CopyString* variable_; + std::string variable_{}; Object* pyvar_; }; @@ -311,7 +293,7 @@ class HocValEditor: public HocUpdateItem { const char* variable, ValEdLabel*, HocValAction*, - double* pd = 0, + neuron::container::data_handle pd = {}, bool canrun = false, HocItem* parent = NULL, Object* pvar = NULL); @@ -342,7 +324,6 @@ class HocValEditor: 
public HocUpdateItem { bool active() { return active_; } - virtual void update_ptrs(); private: friend class HocEditorForItem; @@ -351,8 +332,8 @@ class HocValEditor: public HocUpdateItem { bool active_; bool canrun_; HocAction* action_; - CopyString* variable_; - double* pval_; + std::string variable_{}; + neuron::container::data_handle pval_; ValEdLabel* prompt_; float* domain_limits_; Object* pyvar_; @@ -364,7 +345,7 @@ class HocDefaultValEditor: public HocValEditor { const char* variable, ValEdLabel*, HocValAction*, - double* pd = 0, + neuron::container::data_handle pd = {}, bool canrun = false, HocItem* parent = NULL, Object* pyvar = NULL); @@ -396,7 +377,7 @@ class HocValEditorKeepUpdated: public HocValEditor { const char* variable, ValEdLabel*, HocValAction*, - double*, + neuron::container::data_handle, HocItem* parent = NULL, Object* pyvar = NULL); virtual ~HocValEditorKeepUpdated(); @@ -433,7 +414,7 @@ class HocValAction: public HocAction { // ZFM added vert_ class OcSlider: public HocUpdateItem, public Observer { public: - OcSlider(double*, + OcSlider(neuron::container::data_handle, float low, float high, float resolution, @@ -454,7 +435,6 @@ class OcSlider: public HocUpdateItem, public Observer { virtual void check_pointer(void*, int vector_size); virtual void data_path(HocDataPaths*, bool); virtual double slider_val(); - virtual void update_ptrs(); private: void audit(); @@ -463,9 +443,9 @@ class OcSlider: public HocUpdateItem, public Observer { float resolution_; BoundedValue* bv_; HocCommand* send_; - double* pval_; + neuron::container::data_handle pval_; Object* pyvar_; - CopyString* variable_; + std::string variable_{}; bool scrolling_; bool vert_; bool slow_; @@ -474,7 +454,7 @@ class OcSlider: public HocUpdateItem, public Observer { class HocStateButton: public HocUpdateItem, public Observer { public: - HocStateButton(double*, + HocStateButton(neuron::container::data_handle, const char*, Button*, HocAction*, @@ -491,14 +471,13 @@ class HocStateButton: public HocUpdateItem, public Observer { virtual void check_pointer(void*, int); virtual void data_path(HocDataPaths*, bool); virtual void print(Printer*, const Allocation&) const; - virtual void update_ptrs(); enum { CHECKBOX, PALETTE }; private: int style_; - CopyString* variable_; + std::string variable_{}; CopyString* name_; - double* pval_; + neuron::container::data_handle pval_; Object* pyvar_; Button* b_; HocAction* action_; @@ -507,7 +486,7 @@ class HocStateButton: public HocUpdateItem, public Observer { class HocStateMenuItem: public HocUpdateItem, public Observer { public: - HocStateMenuItem(double*, + HocStateMenuItem(neuron::container::data_handle, const char*, MenuItem*, HocAction*, @@ -523,12 +502,11 @@ class HocStateMenuItem: public HocUpdateItem, public Observer { virtual void check_pointer(void*, int); virtual void data_path(HocDataPaths*, bool); virtual void print(Printer*, const Allocation&) const; - virtual void update_ptrs(); private: - CopyString* variable_; + std::string variable_{}; CopyString* name_; - double* pval_; + neuron::container::data_handle pval_; Object* pyvar_; MenuItem* b_; HocAction* action_; diff --git a/src/ivos/InterViews/_defines.h b/src/ivos/InterViews/_defines.h index 902e1da73c..9b2a95fa6f 100755 --- a/src/ivos/InterViews/_defines.h +++ b/src/ivos/InterViews/_defines.h @@ -5,9 +5,7 @@ #define point _lib_iv(point) #define points _lib_iv(points) #define RasterRect _lib_iv(RasterRect) -#if !MAC #define Point _lib_iv(Point) -#endif #define Line _lib_iv(Line) #define Ellipse 
_lib_iv(Ellipse) #define MultiLine _lib_iv(MultiLine) @@ -219,7 +217,6 @@ #define Raster _lib_iv(Raster) #define RasterRep _lib_iv(RasterRep) #define Reducer _lib_iv(Reducer) -#define Regexp _lib_iv(Regexp) #define ReqErr _lib_iv(ReqErr) #define Requirement _lib_iv(Requirement) #define Requisition _lib_iv(Requisition) diff --git a/src/ivos/InterViews/_undefs.h b/src/ivos/InterViews/_undefs.h index 4c4fc13634..6891baa495 100755 --- a/src/ivos/InterViews/_undefs.h +++ b/src/ivos/InterViews/_undefs.h @@ -5,9 +5,7 @@ #undef point #undef points #undef RasterRect -#if !MAC #undef Point -#endif #undef Line #undef Ellipse #undef MultiLine @@ -219,7 +217,6 @@ #undef Raster #undef RasterRep #undef Reducer -#undef Regexp #undef ReqErr #undef Requirement #undef Requisition diff --git a/src/ivos/InterViews/enter-scope.h b/src/ivos/InterViews/enter-scope.h index ac33ced9e6..530363dd6c 100644 --- a/src/ivos/InterViews/enter-scope.h +++ b/src/ivos/InterViews/enter-scope.h @@ -38,14 +38,9 @@ #ifndef iv_os__scope_h -/* - * Use OS bool and String definitions. - */ - #include #include -#define String _lib_os(String) #define u_char _lib_os(u_char) #endif diff --git a/src/ivos/InterViews/geometry.h b/src/ivos/InterViews/geometry.h index 26ceb621df..50c5be3eee 100644 --- a/src/ivos/InterViews/geometry.h +++ b/src/ivos/InterViews/geometry.h @@ -24,9 +24,6 @@ #ifndef iv_geometry_h #define iv_geometry_h -#if MAC -#undef require -#endif #include diff --git a/src/ivos/InterViews/observe.h b/src/ivos/InterViews/observe.h index 8a733226df..48490e717f 100755 --- a/src/ivos/InterViews/observe.h +++ b/src/ivos/InterViews/observe.h @@ -32,30 +32,30 @@ #include #include +#include class Observer; -class ObserverList; class Observable { public: - Observable(); + Observable() = default; virtual ~Observable(); virtual void attach(Observer*); virtual void detach(Observer*); virtual void notify(); private: - ObserverList* observers_; + std::vector observers_; }; class Observer { protected: - Observer(); + Observer() = default; public: - virtual ~Observer(); + virtual ~Observer() = default; - virtual void update(Observable*); - virtual void disconnect(Observable*); + virtual void update(Observable*) {}; + virtual void disconnect(Observable*) {}; }; #include diff --git a/src/ivos/InterViews/regexp.h b/src/ivos/InterViews/regexp.h deleted file mode 100755 index c1466fd7dc..0000000000 --- a/src/ivos/InterViews/regexp.h +++ /dev/null @@ -1,80 +0,0 @@ -/* - * Copyright (c) 1987, 1988, 1989, 1990, 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
- * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -/* - * Regexp - regular expression searching - */ - -#ifndef iv_regexp_h -#define iv_regexp_h - -#include - -/* - * These definitions are from Henry Spencers public-domain regular - * expression matching routines. - * - * Definitions etc. for regexp(3) routines. - * - * Caveat: this is V8 regexp(3) [actually, a reimplementation thereof], - * not the System V one. - */ -#define NSUBEXP 10 -struct regexp { - char *startp[NSUBEXP]; - char *endp[NSUBEXP]; - char *textStart; - char regstart; /* Internal use only. */ - char reganch; /* Internal use only. */ - char *regmust; /* Internal use only. */ - int regmlen; /* Internal use only. */ - char program[1]; /* Unwarranted chumminess with compiler. */ -}; - -/* - * The first byte of the regexp internal "program" is actually this magic - * number; the start node begins in the second byte. This used to be the octal - * integer literal 0234 = 156, which would be implicitly converted to -100 when - * narrowing to signed 8 bit char. This conversion was implementation defined - * before C++20. - */ -#define REGEXP_MAGIC static_cast(-100) - -class Regexp { -public: - Regexp(const char*); - Regexp(const char*, int length); - ~Regexp(); - - const char* pattern() const; - int Search(const char* text, int length, int index, int range); - int Match(const char* text, int length, int index); - int BeginningOfMatch(int subexp = 0); - int EndOfMatch(int subexp = 0); -private: - char* pattern_; - regexp* c_pattern; -}; - -#endif diff --git a/src/ivos/InterViews/resource.h b/src/ivos/InterViews/resource.h index ffa147deca..bf487f2dfb 100644 --- a/src/ivos/InterViews/resource.h +++ b/src/ivos/InterViews/resource.h @@ -33,8 +33,8 @@ class Resource { public: - Resource(); - virtual ~Resource(); + Resource() = default; + virtual ~Resource() = default; virtual void ref() const; virtual void unref() const; @@ -54,7 +54,7 @@ class Resource { virtual void Reference() const { ref(); } virtual void Unreference() const { unref(); } private: - unsigned refcount_; + unsigned refcount_{}; private: /* prohibit default assignment */ Resource& operator =(const Resource&); diff --git a/src/ivos/OS/_defines.h b/src/ivos/OS/_defines.h index e6098beebe..ca24eed4cb 100755 --- a/src/ivos/OS/_defines.h +++ b/src/ivos/OS/_defines.h @@ -1,6 +1 @@ #define u_char _lib_os(u_char) -#define CopyString _lib_os(CopyString) -#define List _lib_os(List) -#define NullTerminatedString _lib_os(NullTerminatedString) -#define PtrList _lib_os(PtrList) -#define String _lib_os(String) diff --git a/src/ivos/OS/_undefs.h b/src/ivos/OS/_undefs.h index 195b4e9ae6..0c2e5c7b88 100755 --- a/src/ivos/OS/_undefs.h +++ b/src/ivos/OS/_undefs.h @@ -1,6 +1 @@ #undef u_char -#undef CopyString -#undef List -#undef NullTerminatedString -#undef PtrList -#undef String diff --git a/src/ivos/OS/enter-scope.h b/src/ivos/OS/enter-scope.h index 7e96a28f03..856b669595 100755 --- a/src/ivos/OS/enter-scope.h +++ b/src/ivos/OS/enter-scope.h @@ -27,7 +27,6 @@ #include -#undef String #undef u_char #include diff --git a/src/ivos/OS/list.h b/src/ivos/OS/list.h deleted file mode 100644 index f5f09ca845..0000000000 --- a/src/ivos/OS/list.h +++ 
/dev/null @@ -1,312 +0,0 @@ -/* - * Copyright (c) 1987, 1988, 1989, 1990, 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -/* - * Generic list implemented as dynamic array - */ - -#ifndef os_list_h -#define os_list_h - -#include - -extern void ListImpl_range_error(long index); -extern long ListImpl_best_new_count(long count, unsigned int size, unsigned int m = 1); - -#if 1 || defined(__STDC__) || defined(__ANSI_CPP__) -#define __ListItr(List) List##_Iterator -#define ListItr(List) __ListItr(List) -#define __ListUpdater(List) List##_Updater -#define ListUpdater(List) __ListUpdater(List) -#else -#define __ListItr(List) List/**/_Iterator -#define ListItr(List) __ListItr(List) -#define __ListUpdater(List) List/**/_Updater -#define ListUpdater(List) __ListUpdater(List) -#endif - -#define declareList(List,T) \ -class List { \ -public: \ - List(long size = 0); \ - ~List(); \ -\ - long count() const; \ - T item(long index) const; \ - T& item_ref(long index) const; \ -\ - void prepend(const T&); \ - void append(const T&); \ - void insert(long index, const T&); \ - void remove(long index); \ - void remove_all(); \ -private: \ - T* items_; \ - long size_; \ - long count_; \ - long free_; \ -}; \ -\ -inline long List::count() const { return count_; } \ -\ -inline T List::item(long index) const { \ - if (index < 0 || index >= count_) { \ - ListImpl_range_error(index); \ - } \ - long i = index < free_ ? index : index + size_ - count_; \ - return items_[i]; \ -} \ -inline T& List::item_ref(long index) const { \ - if (index < 0 || index >= count_) { \ - ListImpl_range_error(index); \ - } \ - long i = index < free_ ? 
index : index + size_ - count_; \ - return items_[i]; \ -} \ -\ -inline void List::append(const T& item) { insert(count_, item); } \ -inline void List::prepend(const T& item) { insert(0, item); } \ -\ -class ListItr(List) { \ -public: \ - ListItr(List)(const List&); \ -\ - bool more() const; \ - T cur() const; \ - T& cur_ref() const; \ - void next(); \ -private: \ - const List* list_; \ - long cur_; \ -}; \ -\ -inline bool ListItr(List)::more() const { return cur_ < list_->count(); } \ -inline T ListItr(List)::cur() const { return list_->item(cur_); } \ -inline T& ListItr(List)::cur_ref() const { \ - return list_->item_ref(cur_); \ -} \ -inline void ListItr(List)::next() { ++cur_; } \ -\ -class ListUpdater(List) { \ -public: \ - ListUpdater(List)(List&); \ -\ - bool more() const; \ - T cur() const; \ - T& cur_ref() const; \ - void remove_cur(); \ - void next(); \ -private: \ - List* list_; \ - long cur_; \ -}; \ -\ -inline bool ListUpdater(List)::more() const { \ - return cur_ < list_->count(); \ -} \ -inline T ListUpdater(List)::cur() const { return list_->item(cur_); } \ -inline T& ListUpdater(List)::cur_ref() const { \ - return list_->item_ref(cur_); \ -} \ -inline void ListUpdater(List)::remove_cur() { list_->remove(cur_); } \ -inline void ListUpdater(List)::next() { ++cur_; } - -/* - * Lists of pointers - * - * Don't ask me to explain the AnyPtr nonsense. C++ compilers - * have a hard time deciding between (const void*)& and const (void*&). - * Typedefs help, though still keep me guessing. - */ - -typedef void* __AnyPtr; - -declareList(__AnyPtrList,__AnyPtr) - -#define declarePtrList(PtrList,T) \ -class PtrList { \ -public: \ - PtrList(long size = 0); \ -\ - long count() const; \ - T* item(long index) const; \ -\ - void prepend(T*); \ - void append(T*); \ - void insert(long index, T*); \ - void remove(long index); \ - void remove_all(); \ -private: \ - __AnyPtrList impl_; \ -}; \ -\ -inline PtrList::PtrList(long size) : impl_(size) { } \ -inline long PtrList::count() const { return impl_.count(); } \ -inline T* PtrList::item(long index) const { return (T*)impl_.item(index); } \ -inline void PtrList::append(T* item) { insert(impl_.count(), item); } \ -inline void PtrList::prepend(T* item) { insert(0, item); } \ -inline void PtrList::remove(long index) { impl_.remove(index); } \ -inline void PtrList::remove_all() { impl_.remove_all(); } \ -\ -class ListItr(PtrList) { \ -public: \ - ListItr(PtrList)(const PtrList&); \ -\ - bool more() const; \ - T* cur() const; \ - void next(); \ -private: \ - const PtrList* list_; \ - long cur_; \ -}; \ -\ -inline bool ListItr(PtrList)::more() const { \ - return cur_ < list_->count(); \ -} \ -inline T* ListItr(PtrList)::cur() const { return list_->item(cur_); } \ -inline void ListItr(PtrList)::next() { ++cur_; } \ -\ -class ListUpdater(PtrList) { \ -public: \ - ListUpdater(PtrList)(PtrList&); \ -\ - bool more() const; \ - T* cur() const; \ - void remove_cur(); \ - void next(); \ -private: \ - PtrList* list_; \ - long cur_; \ -}; \ -\ -inline bool ListUpdater(PtrList)::more() const { \ - return cur_ < list_->count(); \ -} \ -inline T* ListUpdater(PtrList)::cur() const { return list_->item(cur_); } \ -inline void ListUpdater(PtrList)::remove_cur() { list_->remove(cur_); } \ -inline void ListUpdater(PtrList)::next() { ++cur_; } - -/* - * List implementation - */ - -#define implementList(List,T) \ -List::List(long size) { \ - if (size > 0) { \ - size_ = ListImpl_best_new_count(size, sizeof(T)); \ - items_ = new T[size_]; \ - } else { \ - size_ = 0; \ - 
items_ = 0; \ - } \ - count_ = 0; \ - free_ = 0; \ -} \ -\ -List::~List() { \ - delete [] items_; \ -} \ -\ -void List::insert(long index, const T& item) { \ - if (count_ == size_) { \ - long size = ListImpl_best_new_count(size_ + 1, sizeof(T), 2); \ - T* items = new T[size]; \ - if (items_ != 0) { \ - long i; \ - for (i = 0; i < free_; ++i) { \ - items[i] = items_[i]; \ - } \ - for (i = 0; i < count_ - free_; ++i) { \ - items[free_ + size - count_ + i] = \ - items_[free_ + size_ - count_ + i]; \ - } \ - delete [] items_; \ - } \ - items_ = items; \ - size_ = size; \ - } \ - if (index >= 0 && index <= count_) { \ - if (index < free_) { \ - for (long i = free_ - index - 1; i >= 0; --i) { \ - items_[index + size_ - count_ + i] = items_[index + i]; \ - } \ - } else if (index > free_) { \ - for (long i = 0; i < index - free_; ++i) { \ - items_[free_ + i] = items_[free_ + size_ - count_ + i]; \ - } \ - } \ - free_ = index + 1; \ - count_ += 1; \ - items_[index] = item; \ - } \ -} \ -\ -void List::remove(long index) { \ - if (index >= 0 && index <= count_) { \ - if (index < free_) { \ - for (long i = free_ - index - 2; i >= 0; --i) { \ - items_[size_ - count_ + index + 1 + i] = \ - items_[index + 1 + i]; \ - } \ - } else if (index > free_) { \ - for (long i = 0; i < index - free_; ++i) { \ - items_[free_ + i] = items_[free_ + size_ - count_ + i]; \ - } \ - } \ - free_ = index; \ - count_ -= 1; \ - } \ -} \ -\ -void List::remove_all() { \ - count_ = 0; \ - free_ = 0; \ -} \ -\ -ListItr(List)::ListItr(List)(const List& list) { \ - list_ = &list; \ - cur_ = 0; \ -} \ -\ -ListUpdater(List)::ListUpdater(List)(List& list) { \ - list_ = &list; \ - cur_ = 0; \ -} - -#define implementPtrList(PtrList,T) \ -void PtrList::insert(long index, T* item) { \ - const __AnyPtr p = item; \ - impl_.insert(index, p); \ -} \ -ListItr(PtrList)::ListItr(PtrList)(const PtrList& list) { \ - list_ = &list; \ - cur_ = 0; \ -} \ -\ -ListUpdater(PtrList)::ListUpdater(PtrList)(PtrList& list) { \ - list_ = &list; \ - cur_ = 0; \ -} - -#endif diff --git a/src/ivos/OS/string.h b/src/ivos/OS/string.h deleted file mode 100644 index 97ec9a1b86..0000000000 --- a/src/ivos/OS/string.h +++ /dev/null @@ -1,154 +0,0 @@ -/* - * Copyright (c) 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. 
- */ - -#ifndef os_string_h -#define os_string_h - -/* - * String - simple (non-copying) string class - */ - -#include - -class String { -public: -#ifdef _DELTA_EXTENSIONS -#pragma __static_class -#endif - String(); - String(const char*); - String(const char*, int length); - String(const String&); - virtual ~String(); - - const char* string() const; - int length() const; - - virtual unsigned long hash() const; - virtual String& operator =(const String&); - virtual String& operator =(const char*); - virtual bool operator ==(const String&) const; - virtual bool operator ==(const char*) const; - virtual bool operator !=(const String&) const; - virtual bool operator !=(const char*) const; - virtual bool operator >(const String&) const; - virtual bool operator >(const char*) const; - virtual bool operator >=(const String&) const; - virtual bool operator >=(const char*) const; - virtual bool operator <(const String&) const; - virtual bool operator <(const char*) const; - virtual bool operator <=(const String&) const; - virtual bool operator <=(const char*) const; - - virtual bool case_insensitive_equal(const String&) const; - virtual bool case_insensitive_equal(const char*) const; - - u_char operator [](int index) const; - virtual String substr(int start, int length) const; - String left(int length) const; - String right(int start) const; - - virtual void set_to_substr(int start, int length); - void set_to_left(int length); - void set_to_right(int start); - - virtual int search(int start, u_char) const; - int index(u_char) const; - int rindex(u_char) const; - - virtual bool convert(int&) const; - virtual bool convert(long&) const; - virtual bool convert(float&) const; - virtual bool convert(double&) const; - - virtual bool null_terminated() const; -protected: - virtual void set_value(const char*); - virtual void set_value(const char*, int); -private: - const char* data_; - int length_; -}; - -class CopyString : public String { -public: -#ifdef _DELTA_EXTENSIONS -#pragma __static_class -#endif - CopyString(); - CopyString(const char*); - CopyString(const char*, int length); - CopyString(const String&); - CopyString(const CopyString&); - virtual ~CopyString(); - - virtual String& operator =(const CopyString&); - virtual String& operator =(const String&); - virtual String& operator =(const char*); - - virtual bool null_terminated() const; -protected: - virtual void set_value(const char*); - virtual void set_value(const char*, int); -private: - void strfree(); -}; - -class NullTerminatedString : public String { -public: -#ifdef _DELTA_EXTENSIONS -#pragma __static_class -#endif - NullTerminatedString(); - NullTerminatedString(const String&); - NullTerminatedString(const NullTerminatedString&); - virtual ~NullTerminatedString(); - - virtual String& operator =(const String&); - virtual String& operator =(const char*); - - virtual bool null_terminated() const; -private: - bool allocated_; - - void assign(const String&); - void strfree(); -}; - -inline const char* String::string() const { return data_; } -inline int String::length() const { return length_; } -inline u_char String::operator [](int index) const { - return ((u_char*)data_)[index]; -} - -inline String String::left(int length) const { return substr(0, length); } -inline String String::right(int start) const { return substr(start, -1); } - -inline void String::set_to_left(int length) { set_to_substr(0, length); } -inline void String::set_to_right(int start) { set_to_substr(start, -1); } - -inline int String::index(u_char c) const { return 
search(0, c); } -inline int String::rindex(u_char c) const { return search(-1, c); } - -#endif diff --git a/src/ivos/listimpl.cpp b/src/ivos/listimpl.cpp deleted file mode 100755 index 6a490b36e8..0000000000 --- a/src/ivos/listimpl.cpp +++ /dev/null @@ -1,61 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include <../../config.h> -#endif -/* - * Copyright (c) 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -/* - * Support routines for lists. - */ - -#include -#include -#include - -implementList(__AnyPtrList,__AnyPtr) - -static long ListImpl_best_new_sizes[] = { - 48, 112, 240, 496, 1008, 2032, 4080, 8176, - 16368, 32752, 65520, 131056, 262128, 524272, 1048560, - 2097136, 4194288, 8388592, 16777200, 33554416, 67108848, - 134217712, 268435440, 536870896, 1073741808, 2147483632 -}; - -long ListImpl_best_new_count(long count, unsigned int size, unsigned int m) { - for (int i = 0; i < sizeof(ListImpl_best_new_sizes)/sizeof(long); i++) { - if (count * size < ListImpl_best_new_sizes[i]) { - return ListImpl_best_new_sizes[i] / size; - } - } - return count*m; -} - -void ListImpl_range_error(long i) { -#if defined(WIN32) || MAC - printf("internal error: list index %ld out of range\n", i); -#else - fprintf(stderr, "internal error: list index %ld out of range\n", i); -#endif - abort(); -} diff --git a/src/ivos/observe.cpp b/src/ivos/observe.cpp index c6b09e53bb..e5e991233f 100755 --- a/src/ivos/observe.cpp +++ b/src/ivos/observe.cpp @@ -30,58 +30,28 @@ */ #include -#include - -declarePtrList(ObserverList,Observer) -implementPtrList(ObserverList,Observer) - -Observable::Observable() { - observers_ = nil; -} +#include "utils/enumerate.h" Observable::~Observable() { - ObserverList* list = observers_; - if (list != nil) { - // in case a disconnect removes items from the ObserverList - for (long i = list->count() - 1; i >= 0; --i) { - list->item(i)->disconnect(this); - if (i > list->count()) { i = list->count(); } + // in case a disconnect removes items from the observers + for (long long i = static_cast(observers_.size()) - 1; i >= 0; --i) { + observers_[i]->disconnect(this); + if (i > observers_.size()) { + i = observers_.size(); + } } - delete list; - } } void Observable::attach(Observer* o) { - ObserverList* list = observers_; - if (list == nil) { - list = new ObserverList(5); - observers_ = list; - } - list->append(o); + observers_.push_back(o); } void Observable::detach(Observer* o) { - 
ObserverList* list = observers_; - if (list != nil) { - for (ListUpdater(ObserverList) i(*list); i.more(); i.next()) { - if (i.cur() == o) { - i.remove_cur(); - break; - } - } - } + erase_first(observers_, o); } void Observable::notify() { - ObserverList* list = observers_; - if (list != nil) { - for (ListItr(ObserverList) i(*list); i.more(); i.next()) { - i.cur()->update(this); - } + for (auto& obs: observers_) { + obs->update(this); } } - -Observer::Observer() { } -Observer::~Observer() { } -void Observer::update(Observable*) { } -void Observer::disconnect(Observable*) { } diff --git a/src/ivos/regexp.cpp b/src/ivos/regexp.cpp deleted file mode 100644 index 4954f7b603..0000000000 --- a/src/ivos/regexp.cpp +++ /dev/null @@ -1,1219 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif -/* - * Copyright (c) 1987, 1988, 1989, 1990, 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. - * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -/* - * Regexp - regular expression searching - */ - -#include -#include -#include - -/* - * This version is based on the Henry Spencers public domain reimplementation - * of the regular expression matching subroutines. They are included as - * static subroutines after the externally accessible routines. - */ - -/* - * Forward declarations for regcomp()'s friends. 
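
As an aside on the Observable/Observer refactor shown above (ObserverList replaced by a std::vector of Observer pointers, with defaulted constructors and no-op virtual update/disconnect): the following is a minimal, self-contained sketch of how the vector-backed pair is expected to behave. CountingObserver and the std::find based detach are illustrative stand-ins only; the diff itself relies on the project's erase_first() helper from utils/enumerate.h, and only the Observable/Observer interface mirrors the patched header.

// Stand-alone sketch, not part of the patch.
#include <algorithm>
#include <iostream>
#include <vector>

class Observable;

class Observer {
  protected:
    Observer() = default;
  public:
    virtual ~Observer() = default;
    virtual void update(Observable*) {}
    virtual void disconnect(Observable*) {}
};

class Observable {
  public:
    Observable() = default;
    virtual ~Observable() {
        // the patched destructor walks the vector backwards so that a
        // disconnect() which detaches is safe; kept simple here because
        // the default disconnect() is a no-op
        for (auto* o: observers_) {
            o->disconnect(this);
        }
    }
    void attach(Observer* o) { observers_.push_back(o); }
    void detach(Observer* o) {
        // stand-in for erase_first(observers_, o)
        auto it = std::find(observers_.begin(), observers_.end(), o);
        if (it != observers_.end()) {
            observers_.erase(it);
        }
    }
    void notify() {
        for (auto* o: observers_) {
            o->update(this);
        }
    }
  private:
    std::vector<Observer*> observers_;
};

struct CountingObserver: Observer {
    int updates = 0;
    void update(Observable*) override { ++updates; }
};

int main() {
    Observable subject;
    CountingObserver watcher;
    subject.attach(&watcher);
    subject.notify();             // watcher.updates == 1
    subject.detach(&watcher);
    subject.notify();             // no further updates
    std::cout << watcher.updates << '\n';  // prints 1
}
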
- */ -static regexp* regcomp(const char* exp); -static char* reg(int paren, int* flagp); -static char* regbranch(int* flagp); -static char* regpiece(int* flagp); -static char* regatom(int* flagp); -static char* regnode(char op); -static char* regnext(char* p); -static void regc(char b); -static void reginsert(char op, char* opnd); -static void regtail(char* p, char* val); -static void regoptail(char* p, char* val); -static void regerror(const char* s); -static int regexec(regexp* prog, char* string); -static int regtry(regexp* prog, char* string); -static int regmatch(char* prog); -static int regrepeat(char* p); - - -inline char * -FindNewline(char* s) { - return strchr(s, '\n'); -} - -inline char * -NextLine(char* s) { - char* newstart; - - if ((newstart = FindNewline(s)) != nil) - newstart++; - return newstart; -} - -Regexp::Regexp (const char* pat) { - int length = strlen(pat); - pattern_ = new char[length+1]; - strncpy(pattern_, pat, length); - pattern_[length] = '\0'; - c_pattern = regcomp(pattern_); - if (!c_pattern) { - delete [] pattern_; - pattern_ = nil; - } -} - -Regexp::Regexp (const char* pat, int length) { - pattern_ = new char[length+1]; - strncpy(pattern_, pat, length); - pattern_[length] = '\0'; - c_pattern = regcomp(pattern_); - if (!c_pattern) { - delete [] pattern_; - pattern_ = nil; - } -} - -Regexp::~Regexp () { - if (pattern_) { - delete [] pattern_; - } - if (c_pattern) { - delete [] c_pattern; - } -} - -const char* Regexp::pattern() const { return pattern_; } - -int Regexp::Search (const char* text, int length, int index, int range) { - bool forwardSearch; - bool frontAnchored; - bool endAnchored; - char* searchStart; - char* searchLimit; - char* endOfLine = nil; - char* lastMatch = nil; - char csave; - - /* - * A small sanity check. Otherwise length is unused in this function. - * This is really what the logic embedded in the old version of this - * routine enforced. 
- */ - if (index + range > length) { - range = length - index; - if (range < 0) - return -1; - } - - if (c_pattern == nil) { - return -1; - } - - c_pattern->startp[0] = nil; - - if (range < 0) { - forwardSearch = false; - searchLimit = (char *) text + index; - searchStart = (char *) searchLimit + range; /* range is negative */ - } else { - forwardSearch = true; - searchStart = (char *) text + index; - searchLimit = (char *) searchStart + range; - } - - /* Mark end of text string so search will stop */ - char save = *searchLimit; - *searchLimit = '\0'; - - frontAnchored = pattern_[0] == '^'; - endAnchored = pattern_[strlen(pattern_)-1] == '$'; - if (frontAnchored && (searchStart != text || searchStart[-1] == '\n')) { - searchStart = NextLine(searchStart); - } - - while (searchStart && searchStart < searchLimit) { - int result; - - if (endAnchored && (endOfLine = FindNewline(searchStart)) != nil) { - csave = *endOfLine; - *endOfLine = '\0'; - } - - result = regexec(c_pattern, searchStart); - - if (endOfLine) - *endOfLine = csave; - - if (result) { - /* Found a match */ - if (forwardSearch) - break; /* Done */ - else { - lastMatch = c_pattern->startp[0]; - searchStart = c_pattern->endp[0]; - if (frontAnchored) - searchStart = NextLine(searchStart); - continue; - } - } - /* Did not find a match */ - if (frontAnchored || endAnchored) - searchStart = NextLine(searchStart); - else - break; - } - - if (!forwardSearch && lastMatch) { - if (endAnchored && (endOfLine = FindNewline(lastMatch)) != nil) { - csave = *endOfLine; - *endOfLine = '\0'; - } - (void) regexec(c_pattern, lastMatch); /* Refill startp and endp */ - if (endOfLine) - *endOfLine = csave; - } - - *searchLimit = save; - c_pattern->textStart = (char *) text; - - return c_pattern->startp[0] - c_pattern->textStart; -} - -int Regexp::Match (const char* text, int length, int index) { - - if (c_pattern == nil) - return -1; - - c_pattern->startp[0] = nil; - - char save = *(text+length); - *(char*)(text+length) = '\0'; - - c_pattern->textStart = (char *) text; - (void) regexec(c_pattern, (char *) text + index); - - *(char*)(text+length) = save; - - if (c_pattern->startp[0] != nil) - return c_pattern->endp[0] - c_pattern->startp[0]; - else - return -1; -} - -int Regexp::BeginningOfMatch (int subexp) { - if (subexp < 0 || subexp > NSUBEXP || - c_pattern == nil || c_pattern->startp[0] == nil) - return -1; - return c_pattern->startp[subexp] - c_pattern->textStart; -} - -int Regexp::EndOfMatch (int subexp) { - if (subexp < 0 || subexp > NSUBEXP || - c_pattern == nil || c_pattern->startp[0] == nil) - return -1; - return c_pattern->endp[subexp] - c_pattern->textStart; -} - -/* - * regcomp and regexec - * - * Copyright (c) 1986 by University of Toronto. - * Written by Henry Spencer. Not derived from licensed software. - * - * Permission is granted to anyone to use this software for any - * purpose on any computer system, and to redistribute it freely, - * subject to the following restrictions: - * - * 1. The author is not responsible for the consequences of use of - * this software, no matter how awful, even if they arise - * from defects in it. - * - * 2. The origin of this software must not be misrepresented, either - * by explicit claim or by omission. - * - * 3. Altered versions must be plainly marked as such, and must not - * be misrepresented as being the original software. - * - * Beware that some of this code is subtly aware of the way operator - * precedence is structured in regular expressions. 
Serious changes in - * regular-expression syntax might require a total rethink. - */ - -/* - * The "internal use only" fields in regexp.h are present to pass info from - * compile to execute that permits the execute phase to run lots faster on - * simple cases. They are: - * - * regstart char that must begin a match; '\0' if none obvious - * reganch is the match anchored (at beginning-of-line only)? - * regmust string (pointer into program) that match must include, or nil - * regmlen length of regmust string - * - * Regstart and reganch permit very fast decisions on suitable starting points - * for a match, cutting down the work a lot. Regmust permits fast rejection - * of lines that cannot possibly match. The regmust tests are costly enough - * that regcomp() supplies a regmust only if the r.e. contains something - * potentially expensive (at present, the only such thing detected is * or + - * at the start of the r.e., which can involve a lot of backup). Regmlen is - * supplied because the test in regexec() needs it and regcomp() is computing - * it anyway. - */ - -/* - * Structure for regexp "program". This is essentially a linear encoding - * of a nondeterministic finite-state machine (aka syntax charts or - * "railroad normal form" in parsing technology). Each node is an opcode - * plus a "next" pointer, possibly plus an operand. "Next" pointers of - * all nodes except BRANCH implement concatenation; a "next" pointer with - * a BRANCH on both ends of it is connecting two alternatives. (Here we - * have one of the subtle syntax dependencies: an individual BRANCH (as - * opposed to a collection of them) is never concatenated with anything - * because of operator precedence.) The operand of some types of node is - * a literal string; for others, it is a node leading into a sub-FSM. In - * particular, the operand of a BRANCH node is the first node of the branch. - * (NB this is *not* a tree structure: the tail of the branch connects - * to the thing following the set of BRANCHes.) The opcodes are: - */ - -/* definition number opnd? meaning */ -#define END 0 /* no End of program. */ -#define BOL 1 /* no Match "" at beginning of line. */ -#define EOL 2 /* no Match "" at end of line. */ -#define ANY 3 /* no Match any one character. */ -#define ANYOF 4 /* str Match any character in this string. */ -#define ANYBUT 5 /* str Match any character not in this string. */ -#define BRANCH 6 /* node Match this alternative, or the next... */ -#define BACK 7 /* no Match "", "next" ptr points backward. */ -#define EXACTLY 8 /* str Match this string. */ -#define NOTHING 9 /* no Match empty string. */ -#define STAR 10 /* node Match this (simple) thing 0 or more times. */ -#define PLUS 11 /* node Match this (simple) thing 1 or more times. */ -#define OPEN 20 /* no Mark this point in input as start of #n. */ - /* OPEN+1 is number 1, etc. */ -#define CLOSE 30 /* no Analogous to OPEN. */ - -/* - * Opcode notes: - * - * BRANCH The set of branches constituting a single choice are hooked - * together with their "next" pointers, since precedence prevents - * anything being concatenated to any individual branch. The - * "next" pointer of the last BRANCH in a choice points to the - * thing following the whole choice. This is also where the - * final "next" pointer of each individual branch points; each - * branch starts with the operand node of a BRANCH node. - * - * BACK Normal "next" pointers all implicitly point forward; BACK - * exists to make loop structures possible. 
- * - * STAR,PLUS '?', and complex '*' and '+', are implemented as circular - * BRANCH structures using BACK. Simple cases (one character - * per match) are implemented with STAR and PLUS for speed - * and to minimize recursive plunges. - * - * OPEN,CLOSE ...are numbered at compile time. - */ - -/* - * A node is one char of opcode followed by two chars of "next" pointer. - * "Next" pointers are stored as two 8-bit pieces, high order first. The - * value is a positive offset from the opcode of the node containing it. - * An operand, if any, simply follows the node. (Note that much of the - * code generation knows about this implicit relationship.) - * - * Using two bytes for the "next" pointer is vast overkill for most things, - * but allows patterns to get big without disasters. - */ -#define OP(p) (*(p)) -#define NEXT(p) (((*((p)+1)&0377)<<8) + (*((p)+2)&0377)) -#define OPERAND(p) ((p) + 3) - -/* - * Utility definitions. - */ - -/** - * This replaces a macro of the same name with some bit manipulation magic in - * it. The does not seem well-suited now, but it's not clear that it was before - * either. - */ -inline int UCHARAT(const char* p) { - return *p; -} - -#define FAIL(m) { regerror(m); return(nil); } -#define ISMULT(c) ((c) == '*' || (c) == '+' || (c) == '?') -#define META "^$.[()|?+*\\" - -/* - * Flags to be passed up and down. - */ -#define HASWIDTH 01 /* Known never to match null string. */ -#define SIMPLE 02 /* Simple enough to be STAR/PLUS operand. */ -#define SPSTART 04 /* Starts with * or +. */ -#define WORST 0 /* Worst case. */ - -/* - * Global work variables for regcomp(). - */ -static const char *regparse; /* Input-scan pointer. */ -static int regnpar; /* () count. */ -static char regdummy; -static char *regcode; /* Code-emit pointer; ®dummy = don't. */ -static long regsize; /* Code size. */ - -/* - - regcomp - compile a regular expression into internal code - * - * We can't allocate space until we know how big the compiled form will be, - * but we can't compile it (and thus know how big it is) until we've got a - * place to put the code. So we cheat: we compile it twice, once with code - * generation turned off and size counting turned on, and once "for real". - * This also means that we don't allocate space until we are sure that the - * thing really will compile successfully, and we never have to move the - * code and thus invalidate pointers into it. (Note that it has to be in - * one piece because free() must be able to free it all.) - * - * Beware that the optimization-preparation code in here knows about some - * of the structure of the compiled regexp. - */ -static regexp * -regcomp(const char* exp) { - regexp *r; - char *scan; - char *longest; - int len; - int flags; - - if (exp == nil) - FAIL("nil argument"); - - /* First pass: determine size, legality. */ - regparse = exp; - regnpar = 1; - regsize = 0L; - regcode = ®dummy; - regc(REGEXP_MAGIC); - if (reg(0, &flags) == nil) - return(nil); - - /* Small enough for pointer-storage convention? */ - if (regsize >= 32767L) /* Probably could be 65535L. */ - FAIL("regexp too big"); - - /* Allocate space. */ - r = (regexp *) new char[sizeof(regexp) + (unsigned)regsize]; - - /* Second pass: emit code. */ - regparse = exp; - regnpar = 1; - regcode = r->program; - regc(REGEXP_MAGIC); - if (reg(0, &flags) == nil) { - delete [] r; - return(nil); - } - - /* Dig out information for optimizations. */ - r->regstart = '\0'; /* Worst-case defaults. 
*/ - r->reganch = 0; - r->regmust = nil; - r->regmlen = 0; - scan = r->program+1; /* First BRANCH. */ - if (OP(regnext(scan)) == END) { /* Only one top-level choice. */ - scan = OPERAND(scan); - - /* Starting-point info. */ - if (OP(scan) == EXACTLY) - r->regstart = *OPERAND(scan); - else if (OP(scan) == BOL) - r->reganch++; - - /* - * If there's something expensive in the r.e., find the - * longest literal string that must appear and make it the - * regmust. Resolve ties in favor of later strings, since - * the regstart check works with the beginning of the r.e. - * and avoiding duplication strengthens checking. Not a - * strong reason, but sufficient in the absence of others. - */ - if (flags&SPSTART) { - longest = nil; - len = 0; - for (; scan != nil; scan = regnext(scan)) - if (OP(scan) == EXACTLY && strlen(OPERAND(scan)) >= len) { - longest = OPERAND(scan); - len = strlen(OPERAND(scan)); - } - r->regmust = longest; - r->regmlen = len; - } - } - - return(r); -} - -/* - - reg - regular expression, i.e. main body or parenthesized thing - * - * Caller must absorb opening parenthesis. - * - * Combining parenthesis handling with the base level of regular expression - * is a trifle forced, but the need to tie the tails of the branches to what - * follows makes it hard to avoid. - */ -static char * -reg(int paren, int* flagp) { - char *ret; - char *br; - char *ender; - int parno; - int flags; - - *flagp = HASWIDTH; /* Tentatively. */ - - /* Make an OPEN node, if parenthesized. */ - if (paren) { - if (regnpar >= NSUBEXP) - FAIL("too many ()"); - parno = regnpar; - regnpar++; - ret = regnode(OPEN+parno); - } else - ret = nil; - - /* Pick up the branches, linking them together. */ - br = regbranch(&flags); - if (br == nil) - return(nil); - if (ret != nil) - regtail(ret, br); /* OPEN -> first. */ - else - ret = br; - if (!(flags&HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags&SPSTART; - while (*regparse == '|') { - regparse++; - br = regbranch(&flags); - if (br == nil) - return(nil); - regtail(ret, br); /* BRANCH -> BRANCH. */ - if (!(flags&HASWIDTH)) - *flagp &= ~HASWIDTH; - *flagp |= flags&SPSTART; - } - - /* Make a closing node, and hook it on the end. */ - ender = regnode((paren) ? CLOSE+parno : END); - regtail(ret, ender); - - /* Hook the tails of the branches to the closing node. */ - for (br = ret; br != nil; br = regnext(br)) - regoptail(br, ender); - - /* Check for proper termination. */ - if (paren && *regparse++ != ')') { - FAIL("unmatched ()"); - } else if (!paren && *regparse != '\0') { - if (*regparse == ')') { - FAIL("unmatched ()"); - } else - FAIL("junk on end"); /* "Can't happen". */ - /* NOTREACHED */ - } - - return(ret); -} - -/* - - regbranch - one alternative of an | operator - * - * Implements the concatenation operator. - */ -static char * -regbranch(int* flagp) { - char *ret; - char *chain; - char *latest; - int flags; - - *flagp = WORST; /* Tentatively. */ - - ret = regnode(BRANCH); - chain = nil; - while (*regparse != '\0' && *regparse != '|') { - if (*regparse == '\\' && regparse[1] == ')') { - regparse++; - break; - } - latest = regpiece(&flags); - if (latest == nil) - return(nil); - *flagp |= flags&HASWIDTH; - if (chain == nil) /* First piece. */ - *flagp |= flags&SPSTART; - else - regtail(chain, latest); - chain = latest; - } - if (chain == nil) /* Loop ran zero times. */ - (void) regnode(NOTHING); - - return(ret); -} - -/* - - regpiece - something followed by possible [*+?] - * - * Note that the branching code sequences used for ? 
and the general cases - * of * and + are somewhat optimized: they use the same NOTHING node as - * both the endmarker for their branch list and the body of the last branch. - * It might seem that this node could be dispensed with entirely, but the - * endmarker role is not redundant. - */ -static char * -regpiece(int* flagp) { - char *ret; - char op; - char *next; - int flags; - - ret = regatom(&flags); - if (ret == nil) - return(nil); - - op = *regparse; - if (!ISMULT(op)) { - *flagp = flags; - return(ret); - } - - if (!(flags&HASWIDTH) && op != '?') - FAIL("*+ operand could be empty"); - *flagp = (op != '+') ? (WORST|SPSTART) : (WORST|HASWIDTH); - - if (op == '*' && (flags&SIMPLE)) - reginsert(STAR, ret); - else if (op == '*') { - /* Emit x* as (x&|), where & means "self". */ - reginsert(BRANCH, ret); /* Either x */ - regoptail(ret, regnode(BACK)); /* and loop */ - regoptail(ret, ret); /* back */ - regtail(ret, regnode(BRANCH)); /* or */ - regtail(ret, regnode(NOTHING)); /* null. */ - } else if (op == '+' && (flags&SIMPLE)) - reginsert(PLUS, ret); - else if (op == '+') { - /* Emit x+ as x(&|), where & means "self". */ - next = regnode(BRANCH); /* Either */ - regtail(ret, next); - regtail(regnode(BACK), ret); /* loop back */ - regtail(next, regnode(BRANCH)); /* or */ - regtail(ret, regnode(NOTHING)); /* null. */ - } else if (op == '?') { - /* Emit x? as (x|) */ - reginsert(BRANCH, ret); /* Either x */ - regtail(ret, regnode(BRANCH)); /* or */ - next = regnode(NOTHING); /* null. */ - regtail(ret, next); - regoptail(ret, next); - } - regparse++; - if (ISMULT(*regparse)) - FAIL("nested *?+"); - - return(ret); -} - -/* - - regatom - the lowest level - * - * Optimization: gobbles an entire sequence of ordinary characters so that - * it can turn them into a single node, which is smaller to store and - * faster to run. Backslashed characters are exceptions, each becoming a - * separate node; the code is simpler that way and it's not worth fixing. - */ -static char * -regatom(int* flagp) { - char *ret; - int flags; - - *flagp = WORST; /* Tentatively. */ - - switch (*regparse++) { - case '^': - ret = regnode(BOL); - break; - case '$': - ret = regnode(EOL); - break; - case '.': - ret = regnode(ANY); - *flagp |= HASWIDTH|SIMPLE; - break; - case '[': { - int classbeg; - int classend; - - if (*regparse == '^') { /* Complement of range. */ - ret = regnode(ANYBUT); - regparse++; - } else - ret = regnode(ANYOF); - if (*regparse == ']' || *regparse == '-') - regc(*regparse++); - while (*regparse != '\0' && *regparse != ']') { - if (*regparse == '-') { - regparse++; - if (*regparse == ']' || *regparse == '\0') - regc('-'); - else { - classbeg = UCHARAT(regparse-2)+1; - classend = UCHARAT(regparse); - if (classbeg > classend+1) - FAIL("invalid [] range"); - for (; classbeg <= classend; classbeg++) - regc(classbeg); - regparse++; - } - } else - regc(*regparse++); - } - regc('\0'); - if (*regparse != ']') - FAIL("unmatched []"); - regparse++; - *flagp |= HASWIDTH|SIMPLE; - } - break; - case '\0': - case '|': - FAIL("internal urp"); /* Supposed to be caught earlier. 
*/ - break; - case '?': - case '+': - case '*': - FAIL("?+* follows nothing"); - break; - case '\\': - if (*regparse == '\0') - FAIL("trailing \\"); - if (*regparse == '(') { - regparse++; - ret = reg(1, &flags); - if (ret == nil) - return(nil); - *flagp |= flags&(HASWIDTH|SPSTART); - } else { - ret = regnode(EXACTLY); - regc(*regparse++); - regc('\0'); - *flagp |= HASWIDTH|SIMPLE; - } - break; - default: { - int len; - char ender; - - regparse--; - len = strcspn(regparse, META); - if (len <= 0) - FAIL("internal disaster"); - ender = *(regparse+len); - if (len > 1 && ISMULT(ender)) - len--; /* Back off clear of ?+* operand. */ - *flagp |= HASWIDTH; - if (len == 1) - *flagp |= SIMPLE; - ret = regnode(EXACTLY); - while (len > 0) { - regc(*regparse++); - len--; - } - regc('\0'); - } - break; - } - - return(ret); -} - -/* - - regnode - emit a node - */ -static char * /* Location. */ -regnode(char op) { - char *ret; - char *ptr; - - ret = regcode; - if (ret == ®dummy) { - regsize += 3; - return(ret); - } - - ptr = ret; - *ptr++ = op; - *ptr++ = '\0'; /* Null "next" pointer. */ - *ptr++ = '\0'; - regcode = ptr; - - return(ret); -} - -/* - - regc - emit (if appropriate) a byte of code - */ -static void -regc(char b) { - if (regcode != ®dummy) - *regcode++ = b; - else - regsize++; -} - -/* - - reginsert - insert an operator in front of already-emitted operand - * - * Means relocating the operand. - */ -static void -reginsert(char op, char* opnd) { - char *src; - char *dst; - char *place; - - if (regcode == ®dummy) { - regsize += 3; - return; - } - - src = regcode; - regcode += 3; - dst = regcode; - while (src > opnd) - *--dst = *--src; - - place = opnd; /* Op node, where operand used to be. */ - *place++ = op; - *place++ = '\0'; - *place++ = '\0'; -} - -/* - - regtail - set the next-pointer at the end of a node chain - */ -static void -regtail(char* p, char* val) { - char *scan; - char *temp; - int offset; - - if (p == ®dummy) - return; - - /* Find last node. */ - scan = p; - for (;;) { - temp = regnext(scan); - if (temp == nil) - break; - scan = temp; - } - - if (OP(scan) == BACK) - offset = scan - val; - else - offset = val - scan; - *(scan+1) = (offset>>8)&0377; - *(scan+2) = offset&0377; -} - -/* - - regoptail - regtail on operand of first argument; nop if operandless - */ -static void -regoptail(char* p, char* val) { - /* "Operandless" and "op != BRANCH" are synonymous in practice. */ - if (p == nil || p == ®dummy || OP(p) != BRANCH) - return; - regtail(OPERAND(p), val); -} - -/* - * regexec and friends - */ - -/* - * Global work variables for regexec(). - */ -static char *reginput; /* String-input pointer. */ -static char *regbol; /* Beginning of input, for ^ check. */ -static char **regstartp; /* Pointer to startp array. */ -static char **regendp; /* Ditto for endp. */ - -/* - - regexec - match a regexp against a string - */ -static int -regexec(regexp* prog, char* string) { - char *s; - - /* Be paranoid... */ - if (prog == nil || string == nil) { - regerror("nil parameter"); - return(0); - } - - /* Check validity of program. */ - if (UCHARAT(prog->program) != REGEXP_MAGIC) { - regerror("corrupted program"); - return(0); - } - - /* If there is a "must appear" string, look for it. */ - if (prog->regmust != nil) { - s = string; - while ((s = strchr(s, prog->regmust[0])) != nil) { - if (strncmp(s, prog->regmust, prog->regmlen) == 0) - break; /* Found it. */ - s++; - } - if (s == nil) /* Not present. */ - return(0); - } - - /* Mark beginning of line for ^ . 
*/ - regbol = string; - - /* Simplest case: anchored match need be tried only once. */ - if (prog->reganch) - return(regtry(prog, string)); - - /* Messy cases: unanchored match. */ - s = string; - if (prog->regstart != '\0') - /* We know what char it must start with. */ - while ((s = strchr(s, prog->regstart)) != nil) { - if (regtry(prog, s)) - return(1); - s++; - } - else - /* We don't -- general case. */ - do { - if (regtry(prog, s)) - return(1); - } while (*s++ != '\0'); - - /* Failure. */ - return(0); -} - -/* - - regtry - try match at specific point - */ -static int /* 0 failure, 1 success */ -regtry(regexp* prog, char* string) { - int i; - char **sp; - char **ep; - - reginput = string; - regstartp = prog->startp; - regendp = prog->endp; - - sp = prog->startp; - ep = prog->endp; - for (i = NSUBEXP; i > 0; i--) { - *sp++ = nil; - *ep++ = nil; - } - if (regmatch(prog->program + 1)) { - prog->startp[0] = string; - prog->endp[0] = reginput; - return(1); - } else - return(0); -} - -/* - - regmatch - main matching routine - * - * Conceptually the strategy is simple: check to see whether the current - * node matches, call self recursively to see whether the rest matches, - * and then act accordingly. In practice we make some effort to avoid - * recursion, in particular by going through "ordinary" nodes (that don't - * need to know whether the rest of the match failed) by a loop instead of - * by recursion. - */ -static int /* 0 failure, 1 success */ -regmatch(char* prog) { - char *scan; /* Current node. */ - char *next; /* Next node. */ - - scan = prog; - while (scan != nil) { - next = regnext(scan); - - switch (OP(scan)) { - case BOL: - if (reginput != regbol) - return(0); - break; - case EOL: - if (*reginput != '\0') - return(0); - break; - case ANY: - if (*reginput == '\0') - return(0); - reginput++; - break; - case EXACTLY: { - int len; - char *opnd; - - opnd = OPERAND(scan); - /* Inline the first character, for speed. */ - if (*opnd != *reginput) - return(0); - len = strlen(opnd); - if (len > 1 && strncmp(opnd, reginput, len) != 0) - return(0); - reginput += len; - } - break; - case ANYOF: - if (*reginput == '\0') - return(0); - if (strchr(OPERAND(scan), *reginput) == nil) - return(0); - reginput++; - break; - case ANYBUT: - if (*reginput == '\0') - return(0); - if (strchr(OPERAND(scan), *reginput) != nil) - return(0); - reginput++; - break; - case NOTHING: - break; - case BACK: - break; - case OPEN+1: - case OPEN+2: - case OPEN+3: - case OPEN+4: - case OPEN+5: - case OPEN+6: - case OPEN+7: - case OPEN+8: - case OPEN+9: { - int no; - char *save; - - no = OP(scan) - OPEN; - save = reginput; - - if (regmatch(next)) { - /* - * Don't set startp if some later - * invocation of the same parentheses - * already has. - */ - if (regstartp[no] == nil) - regstartp[no] = save; - return(1); - } else - return(0); - } - break; - case CLOSE+1: - case CLOSE+2: - case CLOSE+3: - case CLOSE+4: - case CLOSE+5: - case CLOSE+6: - case CLOSE+7: - case CLOSE+8: - case CLOSE+9: { - int no; - char *save; - - no = OP(scan) - CLOSE; - save = reginput; - - if (regmatch(next)) { - /* - * Don't set endp if some later - * invocation of the same parentheses - * already has. - */ - if (regendp[no] == nil) - regendp[no] = save; - return(1); - } else - return(0); - } - break; - case BRANCH: { - char *save; - - if (OP(next) != BRANCH) /* No choice. */ - next = OPERAND(scan); /* Avoid recursion. 
*/ - else { - do { - save = reginput; - if (regmatch(OPERAND(scan))) - return(1); - reginput = save; - scan = regnext(scan); - } while (scan != nil && OP(scan) == BRANCH); - return(0); - /* NOTREACHED */ - } - } - break; - case STAR: - case PLUS: { - char nextch; - int no; - char *save; - int min; - - /* - * Lookahead to avoid useless match attempts - * when we know what character comes next. - */ - nextch = '\0'; - if (OP(next) == EXACTLY) - nextch = *OPERAND(next); - min = (OP(scan) == STAR) ? 0 : 1; - save = reginput; - no = regrepeat(OPERAND(scan)); - while (no >= min) { - /* If it could work, try it. */ - if (nextch == '\0' || *reginput == nextch) - if (regmatch(next)) - return(1); - /* Couldn't or didn't -- back up. */ - no--; - reginput = save + no; - } - return(0); - } - break; - case END: - return(1); /* Success! */ - default: - regerror("memory corruption"); - return(0); - } - - scan = next; - } - - /* - * We get here only if there's trouble -- normally "case END" is - * the terminating point. - */ - regerror("corrupted pointers"); - return(0); -} - -/* - - regrepeat - repeatedly match something simple, report how many - */ -static int -regrepeat(char* p) { - int count = 0; - char *scan; - char *opnd; - - scan = reginput; - opnd = OPERAND(p); - switch (OP(p)) { - case ANY: - count = strlen(scan); - scan += count; - break; - case EXACTLY: - while (*opnd == *scan) { - count++; - scan++; - } - break; - case ANYOF: - while (*scan != '\0' && strchr(opnd, *scan) != nil) { - count++; - scan++; - } - break; - case ANYBUT: - while (*scan != '\0' && strchr(opnd, *scan) == nil) { - count++; - scan++; - } - break; - default: /* Oh dear. Called inappropriately. */ - regerror("internal foulup"); - count = 0; /* Best compromise. */ - break; - } - reginput = scan; - - return(count); -} - -/* - - regnext - dig the "next" pointer out of a node - */ -static char * -regnext(char* p) { - int offset; - - if (p == ®dummy) - return(nil); - - offset = NEXT(p); - if (offset == 0) - return(nil); - - if (OP(p) == BACK) - return(p-offset); - else - return(p+offset); -} - -static void -regerror(const char* s) { - std::cerr << "regexp: " << s << "\n"; -} - diff --git a/src/ivos/resource.cpp b/src/ivos/resource.cpp index 29fe48360b..d3718e7ce0 100644 --- a/src/ivos/resource.cpp +++ b/src/ivos/resource.cpp @@ -25,24 +25,19 @@ * OF THIS SOFTWARE. 
*/ -#include -#include +#include -declarePtrList(ResourceList,Resource) -implementPtrList(ResourceList,Resource) +#include class ResourceImpl { friend class Resource; static bool deferred_; - static ResourceList* deletes_; + static std::vector deletes_; }; bool ResourceImpl::deferred_ = false; -ResourceList* ResourceImpl::deletes_; - -Resource::Resource() { refcount_ = 0; } -Resource::~Resource() { } +std::vector ResourceImpl::deletes_; void Resource::ref() const { Resource* r = (Resource*)this; @@ -52,29 +47,26 @@ void Resource::ref() const { void Resource::unref() const { Resource* r = (Resource*)this; if (r->refcount_ != 0) { - r->refcount_ -= 1; + r->refcount_ -= 1; } if (r->refcount_ == 0) { - r->cleanup(); - delete r; + r->cleanup(); + delete r; } } void Resource::unref_deferred() const { Resource* r = (Resource*)this; if (r->refcount_ != 0) { - r->refcount_ -= 1; + r->refcount_ -= 1; } if (r->refcount_ == 0) { - r->cleanup(); - if (ResourceImpl::deferred_) { - if (ResourceImpl::deletes_ == nil) { - ResourceImpl::deletes_ = new ResourceList; - } - ResourceImpl::deletes_->append(r); - } else { - delete r; - } + r->cleanup(); + if (ResourceImpl::deferred_) { + ResourceImpl::deletes_.push_back(r); + } else { + delete r; + } } } @@ -82,41 +74,38 @@ void Resource::cleanup() { } void Resource::ref(const Resource* r) { if (r != nil) { - r->ref(); + r->ref(); } } void Resource::unref(const Resource* r) { if (r != nil) { - r->unref(); + r->unref(); } } void Resource::unref_deferred(const Resource* r) { if (r != nil) { - r->unref_deferred(); + r->unref_deferred(); } } bool Resource::defer(bool b) { bool previous = ResourceImpl::deferred_; if (b != previous) { - flush(); - ResourceImpl::deferred_ = b; + flush(); + ResourceImpl::deferred_ = b; } return previous; } void Resource::flush() { - ResourceList* list = ResourceImpl::deletes_; - if (list != nil) { bool previous = ResourceImpl::deferred_; ResourceImpl::deferred_ = false; - for (ListItr(ResourceList) i(*list); i.more(); i.next()) { - Resource* r = i.cur(); + for (auto& r: ResourceImpl::deletes_) { delete r; } - list->remove_all(); + ResourceImpl::deletes_.clear(); + ResourceImpl::deletes_.shrink_to_fit(); ResourceImpl::deferred_ = previous; - } } diff --git a/src/ivos/string.cpp b/src/ivos/string.cpp deleted file mode 100644 index 090c43e2ee..0000000000 --- a/src/ivos/string.cpp +++ /dev/null @@ -1,405 +0,0 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif -/* - * Copyright (c) 1987, 1988, 1989, 1990, 1991 Stanford University - * Copyright (c) 1991 Silicon Graphics, Inc. - * - * Permission to use, copy, modify, distribute, and sell this software and - * its documentation for any purpose is hereby granted without fee, provided - * that (i) the above copyright notices and this permission notice appear in - * all copies of the software and related documentation, and (ii) the names of - * Stanford and Silicon Graphics may not be used in any advertising or - * publicity relating to the software without the specific, prior written - * permission of Stanford and Silicon Graphics. - * - * THE SOFTWARE IS PROVIDED "AS-IS" AND WITHOUT WARRANTY OF ANY KIND, - * EXPRESS, IMPLIED OR OTHERWISE, INCLUDING WITHOUT LIMITATION, ANY - * WARRANTY OF MERCHANTABILITY OR FITNESS FOR A PARTICULAR PURPOSE. 
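
For the resource.cpp change above (the deferred-delete list moved from the macro-generated ResourceList to std::vector, flushed and shrunk in flush()), here is a minimal sketch of the deferred-unref scheme under the same semantics. The class is reduced to its reference-counting core; cleanup(), the const-qualified methods and the static ref/unref helpers from the real header are omitted, so treat the names here as illustrative rather than as the patched API.

// Stand-alone sketch, not part of the patch.
#include <iostream>
#include <vector>

class Resource {
  public:
    virtual ~Resource() = default;
    void ref() { ++refcount_; }
    void unref_deferred() {
        if (refcount_ != 0) {
            --refcount_;
        }
        if (refcount_ == 0) {
            if (deferred_) {
                deletes_.push_back(this);  // queue for later deletion
            } else {
                delete this;
            }
        }
    }
    static bool defer(bool b) {
        bool previous = deferred_;
        if (b != previous) {
            flush();            // toggling the mode drains the queue first
            deferred_ = b;
        }
        return previous;
    }
    static void flush() {
        bool previous = deferred_;
        deferred_ = false;
        for (auto* r: deletes_) {
            delete r;
        }
        deletes_.clear();
        deletes_.shrink_to_fit();
        deferred_ = previous;
    }
  private:
    unsigned refcount_{};
    static bool deferred_;
    static std::vector<Resource*> deletes_;
};

bool Resource::deferred_ = false;
std::vector<Resource*> Resource::deletes_;

int main() {
    Resource::defer(true);
    auto* r = new Resource;
    r->ref();
    r->unref_deferred();     // refcount hits 0, deletion is queued, not immediate
    std::cout << "queued\n";
    Resource::defer(false);  // leaving deferred mode flushes the queue; r is deleted here
}
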
- * - * IN NO EVENT SHALL STANFORD OR SILICON GRAPHICS BE LIABLE FOR - * ANY SPECIAL, INCIDENTAL, INDIRECT OR CONSEQUENTIAL DAMAGES OF ANY KIND, - * OR ANY DAMAGES WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, - * WHETHER OR NOT ADVISED OF THE POSSIBILITY OF DAMAGE, AND ON ANY THEORY OF - * LIABILITY, ARISING OUT OF OR IN CONNECTION WITH THE USE OR PERFORMANCE - * OF THIS SOFTWARE. - */ - -#include -#include -#include -#include - -/* - * Just to be sure ... - */ - -/* fails on mac osx -extern "C" { -#if !MAC -#ifndef tolower - extern int tolower(int); -#endif -#ifndef toupper - extern int toupper(int); -#endif -#endif - extern long int strtol(const char*, char**, int); - extern double strtod(const char*, char**); -} -*/ - -String::String() { - data_ = nil; - length_ = 0; -} - -String::String(const char* s) { - data_ = s; - length_ = strlen(s); -} - -String::String(const char* s, int n) { - data_ = s; - length_ = n; -} - -String::String(const String& s) { - data_ = s.data_; - length_ = s.length_; -} - -String::~String() { } - -unsigned long String::hash() const { - const char* p; - unsigned long v = 0; - if (length_ == -1) { - for (p = data_; *p != '\0'; p++) { - v = (v << 1) ^ (*p); - } - String* s = (String*)this; - s->length_ = p - data_; - } else { - const char* q = &data_[length_]; - for (p = data_; p < q; p++) { - v = (v << 1) ^ (*p); - } - } - unsigned long t = v >> 10; - t ^= (t >> 10); - return v ^ t; -} - -String& String::operator =(const String& s) { - data_ = s.data_; - length_ = s.length_; - return *this; -} - -String& String::operator =(const char* s) { - data_ = s; - length_ = strlen(s); - return *this; -} - -bool String::operator ==(const String& s) const { - return (length_ == s.length_) && (strncmp(data_, s.data_, length_) == 0); -} - -bool String::operator ==(const char* s) const { - return (strncmp(data_, s, length_) == 0) && (s[length_] == '\0'); -} - -bool String::operator !=(const String& s) const { - return (length_ != s.length_) || (strncmp(data_, s.data_, length_) != 0); -} - -bool String::operator !=(const char* s) const { - return (strncmp(data_, s, length_) != 0) || (s[length_] != '\0'); -} - -bool String::operator >(const String& s) const { - return strncmp(data_, s.data_, length_) > 0; -} - -bool String::operator >(const char* s) const { - return strncmp(data_, s, length_) > 0; -} - -bool String::operator >=(const String& s) const { - return strncmp(data_, s.data_, length_) >= 0; -} - -bool String::operator >=(const char* s) const { - return strncmp(data_, s, length_) >= 0; -} - -bool String::operator <(const String& s) const { - return strncmp(data_, s.data_, length_) < 0; -} - -bool String::operator <(const char* s) const { - return strncmp(data_, s, length_) < 0; -} - -bool String::operator <=(const String& s) const { - return strncmp(data_, s.data_, length_) <= 0; -} - -bool String::operator <=(const char* s) const { - return strncmp(data_, s, length_) <= 0; -} - -bool String::case_insensitive_equal(const String& s) const { - if (length() != s.length()) { - return false; - } - const char* p = string(); - const char* p2 = s.string(); - const char* q = p + length(); - for (; p < q; p++, p2++) { - int c1 = *p; - int c2 = *p2; - if (c1 != c2 && tolower(c1) != tolower(c2)) { - return false; - } - } - return true; -} - -bool String::case_insensitive_equal(const char* s) const { - return case_insensitive_equal(String(s)); -} - -/* - * A negative value for start initializes the position at the end - * of the string before indexing. 
Any negative length makes - * the substring extend to the end of the string. - */ - -String String::substr(int start, int length) const { - if (start >= length_ || start < -length_) { - /* should raise exception */ - return String(""); - } - int pos = (start >= 0) ? start : (length_ + start); - if (pos + length > length_) { - /* should raise exception */ - return String(""); - } - int len = (length >= 0) ? length : (length_ - pos); - return String(data_ + pos, len); -} - -void String::set_to_substr(int start, int length) { - if (start > length_ || start < -length_) { - /* should raise exception */ - return; - } - int pos = (start >= 0) ? start : (length_ + start); - if (pos + length > length_) { - /* should raise exception */ - return; - } - int len = (length >= 0) ? length : (length_ - pos); - data_ += pos; - length_ = len; -} - -bool String::null_terminated() const { return false; } - -void String::set_value(const char* s) { - data_ = s; - length_ = strlen(s); -} - -void String::set_value(const char* s, int len) { - data_ = s; - length_ = len; -} - -/* - * A negative value for start initializes the position to the end - * of the string before indexing and searches right-to-left. - */ - -int String::search(int start, u_char c) const { - if (start >= length_ || start < -length_) { - /* should raise exception */ - return -1; - } - if (start >= 0) { - const char* end = data_ + length_; - for (const char* p = data_ + start; p < end; p++) { - if (*p == c) { - return p - data_; - } - } - } else { - for (const char* p = data_ + length_ + start; p >= data_; p--) { - if (*p == c) { - return p - data_; - } - } - } - return -1; -} - -/* - * Convert a string to binary value. - */ - -bool String::convert(int& value) const { - NullTerminatedString s(*this); - const char* str = s.string(); - char* ptr; - value = (int)strtol(str, &ptr, 0); - return ptr != str; -} - -bool String::convert(long& value) const { - NullTerminatedString s(*this); - const char* str = s.string(); - char* ptr; - value = strtol(str, &ptr, 0); - return ptr != str; -} - -bool String::convert(float& value) const { - NullTerminatedString s(*this); - const char* str = s.string(); - char* ptr; - value = (float)strtod(str, &ptr); - return ptr != str; -} - -bool String::convert(double& value) const { - NullTerminatedString s(*this); - const char* str = s.string(); - char* ptr; - value = strtod(str, &ptr); - return ptr != str; -} - -/* class CopyString */ - -CopyString::CopyString() : String() { } - -CopyString::CopyString(const char* s) : String() { - set_value(s); -} - -CopyString::CopyString(const char* s, int length) : String() { - set_value(s, length); -} - -CopyString::CopyString(const String& s) : String() { - set_value(s.string(), s.length()); -} - -CopyString::CopyString(const CopyString& s) : String() { - set_value(s.string(), s.length()); -} - -CopyString::~CopyString() { - strfree(); -} - -String& CopyString::operator =(const CopyString& s) { - strfree(); - set_value(s.string(), s.length()); - return *this; -} - -String& CopyString::operator =(const String& s) { - strfree(); - set_value(s.string(), s.length()); - return *this; -} - -String& CopyString::operator =(const char* s) { - strfree(); - set_value(s); - return *this; -} - -bool CopyString::null_terminated() const { return true; } - -void CopyString::set_value(const char* s) { - set_value(s, strlen(s)); -} - -/* - * Guarantee null-terminated string for compatibility with printf et al. 
- */ - -void CopyString::set_value(const char* s, int len) { - char* ns = new char[len + 1]; - ns[len] = '\0'; - String::set_value(strncpy(ns, s, len), len); -} - -void CopyString::strfree() { - char* s = (char*)(string()); - delete [] s; -} - -/* - * class NullTerminatedString - */ - -NullTerminatedString::NullTerminatedString() : String() { - allocated_ = false; -} - -NullTerminatedString::NullTerminatedString(const String& s) : String() { - assign(s); -} - -NullTerminatedString::NullTerminatedString( - const NullTerminatedString& s -) : String() { - allocated_ = false; - String::set_value(s.string(), s.length()); -} - -NullTerminatedString::~NullTerminatedString() { - strfree(); -} - -String& NullTerminatedString::operator =(const String& s) { - strfree(); - assign(s); - return *this; -} - -String& NullTerminatedString::operator =(const char* s) { - strfree(); - allocated_ = false; - String::set_value(s, strlen(s)); - return *this; -} - -bool NullTerminatedString::null_terminated() const { return true; } - -void NullTerminatedString::assign(const String& s) { - if (s.null_terminated()) { - allocated_ = false; - String::set_value(s.string(), s.length()); - } else { - allocated_ = true; - int len = s.length(); - char* ns = new char[len + 1]; - ns[len] = '\0'; - String::set_value(strncpy(ns, s.string(), len), len); - } -} - -void NullTerminatedString::strfree() { - if (allocated_) { - char* s = (char*)(string()); - delete [] s; - allocated_ = false; - } -} diff --git a/src/mac/nrn_notarize.sh b/src/mac/nrn_notarize.sh index 7e58fc9649..d99c8ca1d6 100755 --- a/src/mac/nrn_notarize.sh +++ b/src/mac/nrn_notarize.sh @@ -1,64 +1,72 @@ #!/usr/bin/env bash set -e -# App specific password used to request notarization -password_path="$HOME/.ssh/notarization-password" + +# See https://developer.apple.com/documentation/technotes/tn3147-migrating-to-the-latest-notarization-tool + +# App specific password and credentials used to request notarization +password_path="$HOME/.ssh/apple-notarization-password" +apple_id_path="$HOME/.ssh/apple-id" +team_id_path="$HOME/.ssh/apple-team-id" + if test -f "$password_path" ; then app_specific_password=`cat "$password_path"` else echo "\"$password_path\" does not exist" exit 1 fi +if test -f "$apple_id_path" ; then + apple_id=`cat "$apple_id_path"` +else + echo "\"$apple_id_path\" does not exist" + exit 1 +fi +if test -f "$team_id_path" ; then + team_id=`cat "$team_id_path"` +else + echo "\"$team_id_path\" does not exist" + exit 1 +fi pkg="$1" pkgname="$2" echo "Notarize request" -xcrun altool --notarize-app \ - --primary-bundle-id "edu.yale.neuron.pkg.$pkgname" \ - --username "michael.hines@yale.edu" \ +xcrun notarytool submit \ + --wait \ + --apple-id "$apple_id" \ + --team-id "$team_id" \ --password "$app_specific_password" \ - --file "$pkg" + "$pkg" -# you should get -# 2021-01-31 17:43:58.537 altool[3021:3231297] CFURLRequestSetHTTPCookieStorageAcceptPolicy_block_invoke: no longer implemented and should not be called -# No errors uploading '/Users/michaelhines/neuron/notarize/build/src/mac/build/NEURON.pkg'. -# RequestUUID = dc9dd4dd-a942-475a-ab59-4996f938d606 - -# and eventually get an email in just a few minutes saying that -# Your Mac software has been notarized. +# this should end with something like +#Processing complete +# id: 87c2e5e9-f37f-4a95-9941-8c2205fc90bd +# status: Accepted # At this point the software can be installed. 
However it is recommended -# xcrun stapler staple "$pkg" +# to staple the pkg file # so that Gatekeeper will be able to find the whitelist in the file itself # without the need to perform an online check. -# However it is unclear how long the wait will be if you do a wait loop over -# xcrun altool --notarization-info $RequestUUID \ -# --username "michael.hines@yale.edu" \ -# --password "$app_specific_password" -# until -# Status: in progress -# changes to -# Status: success -# at which point it is ok to run the stapler. - -# For now, echo a suggestion about waiting for an email and what stapler -# command to execute manually. +xcrun stapler staple "$pkg" -echo " -After getting an email from Apple stating that: - Your Mac software has been notarized. -manually execute +# Lastly, copy the pkg file to its proper name in $HOME - xcrun stapler staple \"$pkg\" - -so that Gatekeeper will be able to find the whitelist in the file itself -without the need to perform an online check. -" +# I've read that it is a good idea to check for warnings by +# fetching the notary log (using the id generated by the above submit) +# And if there are errors, then you certainly need to fetch the log, +# address the issues it shows and try again. +if false ; then + xcrun notarytool log \ + --apple-id "$apple_id" \ + --team-id "$team_id" \ + --password "$app_specific_password" \ + "$id" +fi -# If the notarization fails. E.g. the message from Apple is -# "Your Mac software was not notarized" -# then review the notarization LogFileURL by obtaining the log url with -# xcrun altool --notarization-info $RequestUUID \ -# --username "michael.hines@yale.edu" \ -# --password "$app_specific_password" -# and address the issues it shows and try again. +# To get a history of notarizations... +if false ; then + xcrun notarytool history \ + --apple-id "$apple_id" \ + --team-id "$team_id" \ + --password "$app_specific_password" +fi diff --git a/src/mesch/.clang-format b/src/mesch/.clang-format deleted file mode 100644 index 9d159247d5..0000000000 --- a/src/mesch/.clang-format +++ /dev/null @@ -1,2 +0,0 @@ -DisableFormat: true -SortIncludes: false diff --git a/src/mesch/arnoldi.c b/src/mesch/arnoldi.c deleted file mode 100755 index 2dc6cdfb28..0000000000 --- a/src/mesch/arnoldi.c +++ /dev/null @@ -1,188 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - -/* - Arnoldi method for finding eigenvalues of large non-symmetric - matrices -*/ -#include -#include -#include "matrix.h" -#include "matrix2.h" -#include "sparse.h" - -static char rcsid[] = "arnoldi.c,v 1.1 1997/12/04 17:55:13 hines Exp"; - -/* arnoldi -- an implementation of the Arnoldi method */ -MAT *arnoldi(A,A_param,x0,m,h_rem,Q,H) -VEC *(*A)(); -void *A_param; -VEC *x0; -int m; -Real *h_rem; -MAT *Q, *H; -{ - static VEC *v=VNULL, *u=VNULL, *r=VNULL, *s=VNULL, *tmp=VNULL; - int i; - Real h_val; - - if ( ! A || ! Q || ! x0 ) - error(E_NULL,"arnoldi"); - if ( m <= 0 ) - error(E_BOUNDS,"arnoldi"); - if ( Q->n != x0->dim || Q->m != m ) - error(E_SIZES,"arnoldi"); - - m_zero(Q); - H = m_resize(H,m,m); - m_zero(H); - u = v_resize(u,x0->dim); - v = v_resize(v,x0->dim); - r = v_resize(r,m); - s = v_resize(s,m); - tmp = v_resize(tmp,x0->dim); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(v,TYPE_VEC); - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(s,TYPE_VEC); - MEM_STAT_REG(tmp,TYPE_VEC); - sv_mlt(1.0/v_norm2(x0),x0,v); - for ( i = 0; i < m; i++ ) - { - set_row(Q,i,v); - u = (*A)(A_param,v,u); - r = mv_mlt(Q,u,r); - tmp = vm_mlt(Q,r,tmp); - v_sub(u,tmp,u); - h_val = v_norm2(u); - /* if u == 0 then we have an exact subspace */ - if ( h_val == 0.0 ) - { - *h_rem = h_val; - return H; - } - /* iterative refinement -- ensures near orthogonality */ - do { - s = mv_mlt(Q,u,s); - tmp = vm_mlt(Q,s,tmp); - v_sub(u,tmp,u); - v_add(r,s,r); - } while ( v_norm2(s) > 0.1*(h_val = v_norm2(u)) ); - /* now that u is nearly orthogonal to Q, update H */ - set_col(H,i,r); - if ( i == m-1 ) - { - *h_rem = h_val; - continue; - } - /* H->me[i+1][i] = h_val; */ - m_set_val(H,i+1,i,h_val); - sv_mlt(1.0/h_val,u,v); - } - - return H; -} - -/* sp_arnoldi -- uses arnoldi() with an explicit representation of A */ -MAT *sp_arnoldi(A,x0,m,h_rem,Q,H) -SPMAT *A; -VEC *x0; -int m; -Real *h_rem; -MAT *Q, *H; -{ return arnoldi(sp_mv_mlt,A,x0,m,h_rem,Q,H); } - -/* gmres -- generalised minimum residual algorithm of Saad & Schultz - SIAM J. Sci. Stat. Comp. v.7, pp.856--869 (1986) - -- y is overwritten with the solution */ -VEC *gmres(A,A_param,m,Q,R,b,tol,x) -VEC *(*A)(); -void *A_param; -VEC *b, *x; -int m; -MAT *Q, *R; -double tol; -{ - static VEC *v=VNULL, *u=VNULL, *r=VNULL, *tmp=VNULL, *rhs=VNULL; - static VEC *diag=VNULL, *beta=VNULL; - int i; - Real h_val, norm_b; - - if ( ! A || ! Q || ! b || ! 
R ) - error(E_NULL,"gmres"); - if ( m <= 0 ) - error(E_BOUNDS,"gmres"); - if ( Q->n != b->dim || Q->m != m ) - error(E_SIZES,"gmres"); - - x = v_copy(b,x); - m_zero(Q); - R = m_resize(R,m+1,m); - m_zero(R); - u = v_resize(u,x->dim); - v = v_resize(v,x->dim); - tmp = v_resize(tmp,x->dim); - rhs = v_resize(rhs,m+1); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(v,TYPE_VEC); - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(tmp,TYPE_VEC); - MEM_STAT_REG(rhs,TYPE_VEC); - norm_b = v_norm2(x); - if ( norm_b == 0.0 ) - error(E_RANGE,"gmres"); - sv_mlt(1.0/norm_b,x,v); - - for ( i = 0; i < m; i++ ) - { - set_row(Q,i,v); - tracecatch(u = (*A)(A_param,v,u),"gmres"); - r = mv_mlt(Q,u,r); - tmp = vm_mlt(Q,r,tmp); - v_sub(u,tmp,u); - h_val = v_norm2(u); - set_col(R,i,r); - R->me[i+1][i] = h_val; - sv_mlt(1.0/h_val,u,v); - } - - /* use i x i submatrix of R */ - R = m_resize(R,i+1,i); - rhs = v_resize(rhs,i+1); - v_zero(rhs); - rhs->ve[0] = norm_b; - tmp = v_resize(tmp,i); - diag = v_resize(diag,i+1); - beta = v_resize(beta,i+1); - MEM_STAT_REG(beta,TYPE_VEC); - MEM_STAT_REG(diag,TYPE_VEC); - QRfactor(R,diag /* ,beta */); - tmp = QRsolve(R,diag, /* beta, */ rhs,tmp); - v_resize(tmp,m); - vm_mlt(Q,tmp,x); - - return x; -} diff --git a/src/mesch/bdfactor.c b/src/mesch/bdfactor.c deleted file mode 100755 index b24090052d..0000000000 --- a/src/mesch/bdfactor.c +++ /dev/null @@ -1,655 +0,0 @@ -#include <../../nrnconf.h> - - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* - Band matrix factorisation routines - */ - -/* bdfactor.c 18/11/93 */ -static char rcsid[] = "$Id: "; - -#include -#include "matrix2.h" -#include - - -/* generate band matrix - for a matrix with n columns, - lb subdiagonals and ub superdiagonals; - - Way of saving a band of a matrix: - first we save subdiagonals (from 0 to lb-1); - then main diagonal (in the lb row) - and then superdiagonals (from lb+1 to lb+ub) - in such a way that the elements which were previously - in one column are now also in one column -*/ - -BAND *bd_get(lb,ub,n) -int lb, ub, n; -{ - BAND *A; - - if (lb < 0 || ub < 0 || n <= 0) - error(E_NEG,"bd_get"); - - if ((A = NEW(BAND)) == (BAND *)NULL) - error(E_MEM,"bd_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_BAND,0,sizeof(BAND)); - mem_numvar(TYPE_BAND,1); - } - - lb = A->lb = min(n-1,lb); - ub = A->ub = min(n-1,ub); - A->mat = m_get(lb+ub+1,n); - return A; -} - -int bd_free(A) -BAND *A; -{ - if ( A == (BAND *)NULL || A->lb < 0 || A->ub < 0 ) - /* don't trust it */ - return (-1); - - if (A->mat) m_free(A->mat); - - if (mem_info_is_on()) { - mem_bytes(TYPE_BAND,sizeof(BAND),0); - mem_numvar(TYPE_BAND,-1); - } - - free((char *)A); - return 0; -} - - -/* resize band matrix */ - -BAND *bd_resize(A,new_lb,new_ub,new_n) -BAND *A; -int new_lb,new_ub,new_n; -{ - int lb,ub,i,j,l,shift,umin; - Real **Av; - - if (new_lb < 0 || new_ub < 0 || new_n <= 0) - error(E_NEG,"bd_resize"); - if ( ! A ) - return bd_get(new_lb,new_ub,new_n); - if ( A->lb+A->ub+1 > A->mat->m ) - error(E_INTERN,"bd_resize"); - - if ( A->lb == new_lb && A->ub == new_ub && A->mat->n == new_n ) - return A; - - lb = A->lb; - ub = A->ub; - Av = A->mat->me; - umin = min(ub,new_ub); - - /* ensure that unused triangles at edges are zero'd */ - - for ( i = 0; i < lb; i++ ) - for ( j = A->mat->n - lb + i; j < A->mat->n; j++ ) - Av[i][j] = 0.0; - for ( i = lb+1,l=1; l <= umin; i++,l++ ) - for ( j = 0; j < l; j++ ) - Av[i][j] = 0.0; - - new_lb = A->lb = min(new_lb,new_n-1); - new_ub = A->ub = min(new_ub,new_n-1); - A->mat = m_resize(A->mat,new_lb+new_ub+1,new_n); - Av = A->mat->me; - - /* if new_lb != lb then move the rows to get the main diag - in the new_lb row */ - - if (new_lb > lb) { - shift = new_lb-lb; - - for (i=lb+umin, l=i+shift; i >= 0; i--,l--) - MEM_COPY(Av[i],Av[l],new_n*sizeof(Real)); - for (l=shift-1; l >= 0; l--) - __zero__(Av[l],new_n); - } - else if (new_lb < lb) { - shift = lb - new_lb; - - for (i=shift, l=0; i <= lb+umin; i++,l++) - MEM_COPY(Av[i],Av[l],new_n*sizeof(Real)); - for (i=lb+umin+1; i <= new_lb+new_ub; i++) - __zero__(Av[i],new_n); - } - - return A; -} - - - -BAND *bd_copy(A,B) -BAND *A,*B; -{ - int lb,ub,i,j,n; - - if ( !A ) - error(E_NULL,"bd_copy"); - - if (A == B) return B; - - n = A->mat->n; - if ( !B ) - B = bd_get(A->lb,A->ub,n); - else if (B->lb != A->lb || B->ub != A->ub || B->mat->n != n ) - B = bd_resize(B,A->lb,A->ub,n); - - if (A->mat == B->mat) return B; - ub = B->ub = A->ub; - lb = B->lb = A->lb; - - for ( i=0, j=n-lb; i <= lb; i++, j++ ) - MEM_COPY(A->mat->me[i],B->mat->me[i],j*sizeof(Real)); - - for ( i=lb+1, j=1; i <= lb+ub; i++, j++ ) - MEM_COPY(A->mat->me[i]+j,B->mat->me[i]+j,(n - j)*sizeof(Real)); - - return B; -} - - -/* copy band matrix to a square matrix */ -MAT *band2mat(bA,A) -BAND *bA; -MAT *A; -{ - int i,j,l,n,n1; - int lb, ub; - Real **bmat; - - if ( !bA || !A) - error(E_NULL,"band2mat"); - if ( bA->mat == A ) - error(E_INSITU,"band2mat"); - - ub = bA->ub; - lb = 
bA->lb; - n = bA->mat->n; - n1 = n-1; - bmat = bA->mat->me; - - A = m_resize(A,n,n); - m_zero(A); - - for (j=0; j < n; j++) - for (i=min(n1,j+lb),l=lb+j-i; i >= max(0,j-ub); i--,l++) - A->me[i][j] = bmat[l][j]; - - return A; -} - -/* copy a square matrix to a band matrix with - lb subdiagonals and ub superdiagonals */ -BAND *mat2band(A,lb,ub,bA) -BAND *bA; -MAT *A; -int lb, ub; -{ - int i, j, l, n1; - Real **bmat; - - if (! A || ! bA) - error(E_NULL,"mat2band"); - if (ub < 0 || lb < 0) - error(E_SIZES,"mat2band"); - if (bA->mat == A) - error(E_INSITU,"mat2band"); - - n1 = A->n-1; - lb = min(n1,lb); - ub = min(n1,ub); - bA = bd_resize(bA,lb,ub,n1+1); - bmat = bA->mat->me; - - for (j=0; j <= n1; j++) - for (i=min(n1,j+lb),l=lb+j-i; i >= max(0,j-ub); i--,l++) - bmat[l][j] = A->me[i][j]; - - return bA; -} - - - -/* transposition of matrix in; - out - matrix after transposition; - can be done in situ -*/ - -BAND *bd_transp(in,out) -BAND *in, *out; -{ - int i, j, jj, l, k, lb, ub, lub, n, n1; - int in_situ; - Real **in_v, **out_v; - - if ( in == (BAND *)NULL || in->mat == (MAT *)NULL ) - error(E_NULL,"bd_transp"); - - lb = in->lb; - ub = in->ub; - lub = lb+ub; - n = in->mat->n; - n1 = n-1; - - in_situ = ( in == out ); - if ( ! in_situ ) - out = bd_resize(out,ub,lb,n); - else - { /* only need to swap lb and ub fields */ - out->lb = ub; - out->ub = lb; - } - - in_v = in->mat->me; - - if (! in_situ) { - int sh_in,sh_out; - - out_v = out->mat->me; - for (i=0, l=lub, k=lb-i; i <= lub; i++,l--,k--) { - sh_in = max(-k,0); - sh_out = max(k,0); - MEM_COPY(&(in_v[i][sh_in]),&(out_v[l][sh_out]), - (n-sh_in-sh_out)*sizeof(Real)); - /********************************** - for (j=n1-sh_out, jj=n1-sh_in; j >= sh_in; j--,jj--) { - out_v[l][jj] = in_v[i][j]; - } - **********************************/ - } - } - else if (ub == lb) { - Real tmp; - - for (i=0, l=lub, k=lb-i; i < lb; i++,l--,k--) { - for (j=n1-k, jj=n1; j >= 0; j--,jj--) { - tmp = in_v[l][jj]; - in_v[l][jj] = in_v[i][j]; - in_v[i][j] = tmp; - } - } - } - else if (ub > lb) { /* hence i-ub <= 0 & l-lb >= 0 */ - int p,pp,lbi; - - for (i=0, l=lub; i < (lub+1)/2; i++,l--) { - lbi = lb-i; - for (j=l-lb, jj=0, p=max(-lbi,0), pp = max(l-ub,0); j <= n1; - j++,jj++,p++,pp++) { - in_v[l][pp] = in_v[i][p]; - in_v[i][jj] = in_v[l][j]; - } - for ( ; p <= n1-max(lbi,0); p++,pp++) - in_v[l][pp] = in_v[i][p]; - } - - if (lub%2 == 0) { /* shift only */ - i = lub/2; - for (j=max(i-lb,0), jj=0; jj <= n1-ub+i; j++,jj++) - in_v[i][jj] = in_v[i][j]; - } - } - else { /* ub < lb, hence ub-l <= 0 & lb-i >= 0 */ - int p,pp,ubi; - - for (i=0, l=lub; i < (lub+1)/2; i++,l--) { - ubi = i-ub; - for (j=n1-max(lb-l,0), jj=n1-max(-ubi,0), p=n1-lb+i, pp=n1; - p >= 0; j--, jj--, pp--, p--) { - in_v[i][jj] = in_v[l][j]; - in_v[l][pp] = in_v[i][p]; - } - for ( ; jj >= max(ubi,0); j--, jj--) - in_v[i][jj] = in_v[l][j]; - } - - if (lub%2 == 0) { /* shift only */ - i = lub/2; - for (j=n1-lb+i, jj=n1-max(ub-i,0); j >= 0; j--, jj--) - in_v[i][jj] = in_v[i][j]; - } - } - - return out; -} - - - -/* bdLUfactor -- gaussian elimination with partial pivoting - -- on entry, the matrix A in band storage with elements - in rows 0 to lb+ub; - The jth column of A is stored in the jth column of - band A (bA) as follows: - bA->mat->me[lb+j-i][j] = A->me[i][j] for - max(0,j-lb) <= i <= min(A->n-1,j+ub); - -- on exit: U is stored as an upper triangular matrix - with lb+ub superdiagonals in rows lb to 2*lb+ub, - and the matrix L is stored in rows 0 to lb-1. - Matrix U is permuted, whereas L is not permuted !!! 
- Therefore we save some memory. - */ -BAND *bdLUfactor(bA,pivot) -BAND *bA; -PERM *pivot; -{ - int i, j, k, l, n, n1, lb, ub, lub, k_end, k_lub; - int i_max, shift; - Real **bA_v; - Real max1, temp; - - if ( bA==(BAND *)NULL || pivot==(PERM *)NULL ) - error(E_NULL,"bdLUfactor"); - - lb = bA->lb; - ub = bA->ub; - lub = lb+ub; - n = bA->mat->n; - n1 = n-1; - lub = lb+ub; - - if ( pivot->size != n ) - error(E_SIZES,"bdLUfactor"); - - - /* initialise pivot with identity permutation */ - for ( i=0; i < n; i++ ) - pivot->pe[i] = i; - - /* extend band matrix */ - /* extended part is filled with zeros */ - bA = bd_resize(bA,lb,min(n1,lub),n); - bA_v = bA->mat->me; - - - /* main loop */ - - for ( k=0; k < n1; k++ ) - { - k_end = max(0,lb+k-n1); - k_lub = min(k+lub,n1); - - /* find the best pivot row */ - - max1 = 0.0; - i_max = -1; - for ( i=lb; i >= k_end; i-- ) { - temp = fabs(bA_v[i][k]); - if ( temp > max1 ) - { max1 = temp; i_max = i; } - } - - /* if no pivot then ignore column k... */ - if ( i_max == -1 ) - continue; - - /* do we pivot ? */ - if ( i_max != lb ) /* yes we do... */ - { - /* save transposition using non-shifted indices */ - shift = lb-i_max; - px_transp(pivot,k+shift,k); - for ( i=lb, j=k; j <= k_lub; i++,j++ ) - { - temp = bA_v[i][j]; - bA_v[i][j] = bA_v[i-shift][j]; - bA_v[i-shift][j] = temp; - } - } - - /* row operations */ - for ( i=lb-1; i >= k_end; i-- ) { - temp = bA_v[i][k] /= bA_v[lb][k]; - shift = lb-i; - for ( j=k+1,l=i+1; j <= k_lub; l++,j++ ) - bA_v[l][j] -= temp*bA_v[l+shift][j]; - } - } - - return bA; -} - - -/* bdLUsolve -- given an LU factorisation in bA, solve bA*x=b */ -/* pivot is changed upon return */ -VEC *bdLUsolve(bA,pivot,b,x) -BAND *bA; -PERM *pivot; -VEC *b,*x; -{ - int i,j,l,n,n1,pi,lb,ub,jmin, maxj; - Real c; - Real **bA_v; - - if ( bA==(BAND *)NULL || b==(VEC *)NULL || pivot==(PERM *)NULL ) - error(E_NULL,"bdLUsolve"); - if ( bA->mat->n != b->dim || bA->mat->n != pivot->size) - error(E_SIZES,"bdLUsolve"); - - lb = bA->lb; - ub = bA->ub; - n = b->dim; - n1 = n-1; - bA_v = bA->mat->me; - - x = v_resize(x,b->dim); - px_vec(pivot,b,x); - - /* solve Lx = b; implicit diagonal = 1 - L is not permuted, therefore it must be permuted now - */ - - px_inv(pivot,pivot); - for (j=0; j < n; j++) { - jmin = j+1; - c = x->ve[j]; - maxj = max(0,j+lb-n1); - for (i=jmin,l=lb-1; l >= maxj; i++,l--) { - if ( (pi = pivot->pe[i]) < jmin) - pi = pivot->pe[i] = pivot->pe[pi]; - x->ve[pi] -= bA_v[l][j]*c; - } - } - - /* solve Ux = b; explicit diagonal */ - - x->ve[n1] /= bA_v[lb][n1]; - for (i=n-2; i >= 0; i--) { - c = x->ve[i]; - for (j=min(n1,i+ub), l=lb+j-i; j > i; j--,l--) - c -= bA_v[l][j]*x->ve[j]; - x->ve[i] = c/bA_v[lb][i]; - } - - return (x); -} - -/* LDLfactor -- L.D.L' factorisation of A in-situ; - A is a band matrix - it works using only lower bandwidth & main diagonal - so it is possible to set A->ub = 0 - */ - -BAND *bdLDLfactor(A) -BAND *A; -{ - int i,j,k,n,n1,lb,ki,jk,ji,lbkm,lbkp; - Real **Av; - Real c, cc; - - if ( ! 
A ) - error(E_NULL,"bdLDLfactor"); - - if (A->lb == 0) return A; - - lb = A->lb; - n = A->mat->n; - n1 = n-1; - Av = A->mat->me; - - for (k=0; k < n; k++) { - lbkm = lb-k; - lbkp = lb+k; - - /* matrix D */ - c = Av[lb][k]; - for (j=max(0,-lbkm), jk=lbkm+j; j < k; j++, jk++) { - cc = Av[jk][j]; - c -= Av[lb][j]*cc*cc; - } - if (c == 0.0) - error(E_SING,"bdLDLfactor"); - Av[lb][k] = c; - - /* matrix L */ - - for (i=min(n1,lbkp), ki=lbkp-i; i > k; i--,ki++) { - c = Av[ki][k]; - for (j=max(0,i-lb), ji=lb+j-i, jk=lbkm+j; j < k; - j++, ji++, jk++) - c -= Av[lb][j]*Av[ji][j]*Av[jk][j]; - Av[ki][k] = c/Av[lb][k]; - } - } - - return A; -} - -/* solve A*x = b, where A is factorized by - Choleski LDL^T factorization */ -VEC *bdLDLsolve(A,b,x) -BAND *A; -VEC *b, *x; -{ - int i,j,l,n,n1,lb,ilb; - Real **Av, *Avlb; - Real c; - - if ( ! A || ! b ) - error(E_NULL,"bdLDLsolve"); - if ( A->mat->n != b->dim ) - error(E_SIZES,"bdLDLsolve"); - - n = A->mat->n; - n1 = n-1; - x = v_resize(x,n); - lb = A->lb; - Av = A->mat->me; - Avlb = Av[lb]; - - /* solve L*y = b */ - x->ve[0] = b->ve[0]; - for (i=1; i < n; i++) { - ilb = i-lb; - c = b->ve[i]; - for (j=max(0,ilb), l=j-ilb; j < i; j++,l++) - c -= Av[l][j]*x->ve[j]; - x->ve[i] = c; - } - - /* solve D*z = y */ - for (i=0; i < n; i++) - x->ve[i] /= Avlb[i]; - - /* solve L^T*x = z */ - for (i=n-2; i >= 0; i--) { - ilb = i+lb; - c = x->ve[i]; - for (j=min(n1,ilb), l=ilb-j; j > i; j--,l++) - c -= Av[l][i]*x->ve[j]; - x->ve[i] = c; - } - - return x; -} - - -/* ****************************************************** - This function is a contribution from Ruediger Franke. - His e-mail addres is: Ruediger.Franke@rz.tu-ilmenau.de - - ****************************************************** -*/ - -/* bd_mv_mlt -- - * computes out = A * x - * may not work in situ (x != out) - */ - -VEC *bd_mv_mlt(A, x, out) -BAND *A; -VEC *x, *out; -{ - int i, j, j_end, k; - int start_idx, end_idx; - int n, m, lb, ub; - Real **A_me; - Real *x_ve; - Real sum; - - if (!A || !x) - error(E_NULL,"bd_mv_mlt"); - if (x->dim != A->mat->n) - error(E_SIZES,"bd_mv_mlt"); - if (!out || out->dim != A->mat->n) - out = v_resize(out, A->mat->n); - if (out == x) - error(E_INSITU,"bd_mv_mlt"); - - n = A->mat->n; - m = A->mat->m; - lb = A->lb; - ub = A->ub; - A_me = A->mat->me; - start_idx = lb; - end_idx = m + n-1 - ub; - for (i=0; ive + k; - sum = 0.0; - for (; j < j_end; j++, k++) - sum += A_me[j][k] * *x_ve++; - out->ve[i] = sum; - } - - return out; -} - - - diff --git a/src/mesch/bkpfacto.c b/src/mesch/bkpfacto.c deleted file mode 100755 index b31d72dd9b..0000000000 --- a/src/mesch/bkpfacto.c +++ /dev/null @@ -1,312 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. -*/ - -static char rcsid[] = "bkpfacto.c,v 1.1 1997/12/04 17:55:14 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - -#define btos(x) ((x) ? "TRUE" : "FALSE") - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -#define alpha 0.6403882032022076 /* = (1+sqrt(17))/8 */ - -/* sqr -- returns square of x -- utility function */ -double sqr(x) -double x; -{ return x*x; } - -/* interchange -- a row/column swap routine */ -static void interchange(A,i,j) -MAT *A; /* assumed != NULL & also SQUARE */ -int i, j; /* assumed in range */ -{ - Real **A_me, tmp; - int k, n; - - A_me = A->me; n = A->n; - if ( i == j ) - return; - if ( i > j ) - { k = i; i = j; j = k; } - for ( k = 0; k < i; k++ ) - { - /* tmp = A_me[k][i]; */ - tmp = m_entry(A,k,i); - /* A_me[k][i] = A_me[k][j]; */ - m_set_val(A,k,i,m_entry(A,k,j)); - /* A_me[k][j] = tmp; */ - m_set_val(A,k,j,tmp); - } - for ( k = j+1; k < n; k++ ) - { - /* tmp = A_me[j][k]; */ - tmp = m_entry(A,j,k); - /* A_me[j][k] = A_me[i][k]; */ - m_set_val(A,j,k,m_entry(A,i,k)); - /* A_me[i][k] = tmp; */ - m_set_val(A,i,k,tmp); - } - for ( k = i+1; k < j; k++ ) - { - /* tmp = A_me[k][j]; */ - tmp = m_entry(A,k,j); - /* A_me[k][j] = A_me[i][k]; */ - m_set_val(A,k,j,m_entry(A,i,k)); - /* A_me[i][k] = tmp; */ - m_set_val(A,i,k,tmp); - } - /* tmp = A_me[i][i]; */ - tmp = m_entry(A,i,i); - /* A_me[i][i] = A_me[j][j]; */ - m_set_val(A,i,i,m_entry(A,j,j)); - /* A_me[j][j] = tmp; */ - m_set_val(A,j,j,tmp); -} - -/* BKPfactor -- Bunch-Kaufman-Parlett factorisation of A in-situ - -- A is factored into the form P'AP = MDM' where - P is a permutation matrix, M lower triangular and D is block - diagonal with blocks of size 1 or 2 - -- P is stored in pivot; blocks[i]==i iff D[i][i] is a block */ -MAT *BKPfactor(A,pivot,blocks) -MAT *A; -PERM *pivot, *blocks; -{ - int i, j, k, n, onebyone, r; - Real **A_me, aii, aip1, aip1i, lambda, sigma, tmp; - Real det, s, t; - - if ( ! A || ! pivot || ! blocks ) - error(E_NULL,"BKPfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"BKPfactor"); - if ( A->m != pivot->size || pivot->size != blocks->size ) - error(E_SIZES,"BKPfactor"); - - n = A->n; - A_me = A->me; - px_ident(pivot); px_ident(blocks); - - for ( i = 0; i < n; i = onebyone ? i+1 : i+2 ) - { - /* printf("# Stage: %d\n",i); */ - aii = fabs(m_entry(A,i,i)); - lambda = 0.0; r = (i+1 < n) ? i+1 : i; - for ( k = i+1; k < n; k++ ) - { - tmp = fabs(m_entry(A,i,k)); - if ( tmp >= lambda ) - { - lambda = tmp; - r = k; - } - } - /* printf("# lambda = %g, r = %d\n", lambda, r); */ - /* printf("# |A[%d][%d]| = %g\n",r,r,fabs(m_entry(A,r,r))); */ - - /* determine if 1x1 or 2x2 block, and do pivoting if needed */ - if ( aii >= alpha*lambda ) - { - onebyone = TRUE; - goto dopivot; - } - /* compute sigma */ - sigma = 0.0; - for ( k = i; k < n; k++ ) - { - if ( k == r ) - continue; - tmp = ( k > r ) ? 
fabs(m_entry(A,r,k)) : - fabs(m_entry(A,k,r)); - if ( tmp > sigma ) - sigma = tmp; - } - if ( aii*sigma >= alpha*sqr(lambda) ) - onebyone = TRUE; - else if ( fabs(m_entry(A,r,r)) >= alpha*sigma ) - { - /* printf("# Swapping rows/cols %d and %d\n",i,r); */ - interchange(A,i,r); - px_transp(pivot,i,r); - onebyone = TRUE; - } - else - { - /* printf("# Swapping rows/cols %d and %d\n",i+1,r); */ - interchange(A,i+1,r); - px_transp(pivot,i+1,r); - px_transp(blocks,i,i+1); - onebyone = FALSE; - } - /* printf("onebyone = %s\n",btos(onebyone)); */ - /* printf("# Matrix so far (@checkpoint A) =\n"); */ - /* m_output(A); */ - /* printf("# pivot =\n"); px_output(pivot); */ - /* printf("# blocks =\n"); px_output(blocks); */ - -dopivot: - if ( onebyone ) - { /* do one by one block */ - if ( m_entry(A,i,i) != 0.0 ) - { - aii = m_entry(A,i,i); - for ( j = i+1; j < n; j++ ) - { - tmp = m_entry(A,i,j)/aii; - for ( k = j; k < n; k++ ) - m_sub_val(A,j,k,tmp*m_entry(A,i,k)); - m_set_val(A,i,j,tmp); - } - } - } - else /* onebyone == FALSE */ - { /* do two by two block */ - det = m_entry(A,i,i)*m_entry(A,i+1,i+1)-sqr(m_entry(A,i,i+1)); - /* Must have det < 0 */ - /* printf("# det = %g\n",det); */ - aip1i = m_entry(A,i,i+1)/det; - aii = m_entry(A,i,i)/det; - aip1 = m_entry(A,i+1,i+1)/det; - for ( j = i+2; j < n; j++ ) - { - s = - aip1i*m_entry(A,i+1,j) + aip1*m_entry(A,i,j); - t = - aip1i*m_entry(A,i,j) + aii*m_entry(A,i+1,j); - for ( k = j; k < n; k++ ) - m_sub_val(A,j,k,m_entry(A,i,k)*s + m_entry(A,i+1,k)*t); - m_set_val(A,i,j,s); - m_set_val(A,i+1,j,t); - } - } - /* printf("# Matrix so far (@checkpoint B) =\n"); */ - /* m_output(A); */ - /* printf("# pivot =\n"); px_output(pivot); */ - /* printf("# blocks =\n"); px_output(blocks); */ - } - - /* set lower triangular half */ - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < i; j++ ) - m_set_val(A,i,j,m_entry(A,j,i)); - - return A; -} - -/* BKPsolve -- solves A.x = b where A has been factored a la BKPfactor() - -- returns x, which is created if NULL */ -VEC *BKPsolve(A,pivot,block,b,x) -MAT *A; -PERM *pivot, *block; -VEC *b, *x; -{ - static VEC *tmp=VNULL; /* dummy storage needed */ - int i, j, n, onebyone; - Real **A_me, a11, a12, a22, b1, b2, det, sum, *tmp_ve, tmp_diag; - - if ( ! A || ! pivot || ! block || ! b ) - error(E_NULL,"BKPsolve"); - if ( A->m != A->n ) - error(E_SQUARE,"BKPsolve"); - n = A->n; - if ( b->dim != n || pivot->size != n || block->size != n ) - error(E_SIZES,"BKPsolve"); - x = v_resize(x,n); - tmp = v_resize(tmp,n); - MEM_STAT_REG(tmp,TYPE_VEC); - - A_me = A->me; tmp_ve = tmp->ve; - - px_vec(pivot,b,tmp); - /* solve for lower triangular part */ - for ( i = 0; i < n; i++ ) - { - sum = v_entry(tmp,i); - if ( block->pe[i] < i ) - for ( j = 0; j < i-1; j++ ) - sum -= m_entry(A,i,j)*v_entry(tmp,j); - else - for ( j = 0; j < i; j++ ) - sum -= m_entry(A,i,j)*v_entry(tmp,j); - v_set_val(tmp,i,sum); - } - /* printf("# BKPsolve: solving L part: tmp =\n"); v_output(tmp); */ - /* solve for diagonal part */ - for ( i = 0; i < n; i = onebyone ? 
i+1 : i+2 ) - { - onebyone = ( block->pe[i] == i ); - if ( onebyone ) - { - tmp_diag = m_entry(A,i,i); - if ( tmp_diag == 0.0 ) - error(E_SING,"BKPsolve"); - /* tmp_ve[i] /= tmp_diag; */ - v_set_val(tmp,i,v_entry(tmp,i) / tmp_diag); - } - else - { - a11 = m_entry(A,i,i); - a22 = m_entry(A,i+1,i+1); - a12 = m_entry(A,i+1,i); - b1 = v_entry(tmp,i); b2 = v_entry(tmp,i+1); - det = a11*a22-a12*a12; /* < 0 : see BKPfactor() */ - if ( det == 0.0 ) - error(E_SING,"BKPsolve"); - det = 1/det; - v_set_val(tmp,i,det*(a22*b1-a12*b2)); - v_set_val(tmp,i+1,det*(a11*b2-a12*b1)); - } - } - /* printf("# BKPsolve: solving D part: tmp =\n"); v_output(tmp); */ - /* solve for transpose of lower traingular part */ - for ( i = n-1; i >= 0; i-- ) - { /* use symmetry of factored form to get stride 1 */ - sum = v_entry(tmp,i); - if ( block->pe[i] > i ) - for ( j = i+2; j < n; j++ ) - sum -= m_entry(A,i,j)*v_entry(tmp,j); - else - for ( j = i+1; j < n; j++ ) - sum -= m_entry(A,i,j)*v_entry(tmp,j); - v_set_val(tmp,i,sum); - } - - /* printf("# BKPsolve: solving L^T part: tmp =\n");v_output(tmp); */ - /* and do final permutation */ - x = pxinv_vec(pivot,tmp,x); - - return x; -} - - - diff --git a/src/mesch/chfactor.c b/src/mesch/chfactor.c deleted file mode 100755 index dd83297155..0000000000 --- a/src/mesch/chfactor.c +++ /dev/null @@ -1,218 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. -*/ - -/* CHfactor.c 1.2 11/25/87 */ -static char rcsid[] = "chfactor.c,v 1.1 1997/12/04 17:55:15 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* CHfactor -- Cholesky L.L' factorisation of A in-situ */ -MAT *CHfactor(A) -MAT *A; -{ - u_int i, j, k, n; - Real **A_ent, *A_piv, *A_row, sum, tmp; - - if ( A==(MAT *)NULL ) - error(E_NULL,"CHfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"CHfactor"); - n = A->n; A_ent = A->me; - - for ( k=0; km != A->n || A->n != b->dim ) - error(E_SIZES,"CHsolve"); - x = v_resize(x,b->dim); - Lsolve(A,b,x,0.0); - Usolve(A,x,x,0.0); - - return (x); -} - -/* LDLfactor -- L.D.L' factorisation of A in-situ */ -MAT *LDLfactor(A) -MAT *A; -{ - u_int i, k, n, p; - Real **A_ent; - Real d, sum; - static VEC *r = VNULL; - - if ( ! 
A ) - error(E_NULL,"LDLfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"LDLfactor"); - n = A->n; A_ent = A->me; - r = v_resize(r,n); - MEM_STAT_REG(r,TYPE_VEC); - - for ( k = 0; k < n; k++ ) - { - sum = 0.0; - for ( p = 0; p < k; p++ ) - { - r->ve[p] = A_ent[p][p]*A_ent[k][p]; - sum += r->ve[p]*A_ent[k][p]; - } - d = A_ent[k][k] -= sum; - - if ( d == 0.0 ) - error(E_SING,"LDLfactor"); - for ( i = k+1; i < n; i++ ) - { - sum = __ip__(A_ent[i],r->ve,(int)k); - /**************************************** - sum = 0.0; - for ( p = 0; p < k; p++ ) - sum += A_ent[i][p]*r->ve[p]; - ****************************************/ - A_ent[i][k] = (A_ent[i][k] - sum)/d; - } - } - - return A; -} - -VEC *LDLsolve(LDL,b,x) -MAT *LDL; -VEC *b, *x; -{ - if ( ! LDL || ! b ) - error(E_NULL,"LDLsolve"); - if ( LDL->m != LDL->n ) - error(E_SQUARE,"LDLsolve"); - if ( LDL->m != b->dim ) - error(E_SIZES,"LDLsolve"); - x = v_resize(x,b->dim); - - Lsolve(LDL,b,x,1.0); - Dsolve(LDL,x,x); - LTsolve(LDL,x,x,1.0); - - return x; -} - -/* MCHfactor -- Modified Cholesky L.L' factorisation of A in-situ */ -MAT *MCHfactor(A,tol) -MAT *A; -double tol; -{ - u_int i, j, k, n; - Real **A_ent, *A_piv, *A_row, sum, tmp; - - if ( A==(MAT *)NULL ) - error(E_NULL,"MCHfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"MCHfactor"); - if ( tol <= 0.0 ) - error(E_RANGE,"MCHfactor"); - n = A->n; A_ent = A->me; - - for ( k=0; k - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Conjugate gradient routines file - Uses sparse matrix input & sparse Cholesky factorisation in pccg(). - - All the following routines use routines to define a matrix - rather than use any explicit representation - (with the exeception of the pccg() pre-conditioner) - The matrix A is defined by - - VEC *(*A)(void *params, VEC *x, VEC *y) - - where y = A.x on exit, and y is returned. The params argument is - intended to make it easier to re-use & modify such routines. - - If we have a sparse matrix data structure - SPMAT *A_mat; - then these can be used by passing sp_mv_mlt as the function, and - A_mat as the param. 
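A sketch (not from the original sources) of a user-supplied operator in the matrix-as-routine convention just described, assuming the declarations from matrix.h are visible. It applies a diagonal matrix whose entries arrive through params; any routine of this shape can be handed to pccg(), cgs() or lsqr() below in place of sp_mv_mlt.

/* Sketch only: matrix-free operator in the VEC *(*A)(void *params, VEC *x, VEC *y) convention. */
static VEC *diag_mlt(void *params, VEC *x, VEC *out) {
    Real *d = (Real *) params;        /* caller-owned array of x->dim diagonal entries */
    out = v_resize(out, x->dim);      /* Meschach convention: result vector is (re)allocated */
    for (unsigned int i = 0; i < x->dim; i++)
        out->ve[i] = d[i] * x->ve[i];
    return out;
}

For a positive diagonal d this operator is symmetric positive definite, so something like pccg(diag_mlt, d, NULL, NULL, b, 1e-8, x) would run plain (unpreconditioned) conjugate gradients against it without ever forming a matrix.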
-*/ - -#include -#include -#include "matrix.h" -#include "sparse.h" -static char rcsid[] = "conjgrad.c,v 1.1 1997/12/04 17:55:16 hines Exp"; - - -/* #define MAX_ITER 10000 */ -static int max_iter = 10000; -int cg_num_iters; - -/* matrix-as-routine type definition */ -/* #ifdef ANSI_C */ -/* typedef VEC *(*MTX_FN)(void *params, VEC *x, VEC *out); */ -/* #else */ -typedef VEC *(*MTX_FN)(); -/* #endif */ -#ifdef ANSI_C -VEC *spCHsolve(SPMAT *,VEC *,VEC *); -#else -VEC *spCHsolve(); -#endif - -/* cg_set_maxiter -- sets maximum number of iterations if numiter > 1 - -- just returns current max_iter otherwise - -- returns old maximum */ -int cg_set_maxiter(numiter) -int numiter; -{ - int temp; - - if ( numiter < 2 ) - return max_iter; - temp = max_iter; - max_iter = numiter; - return temp; -} - - -/* pccg -- solves A.x = b using pre-conditioner M - (assumed factored a la spCHfctr()) - -- results are stored in x (if x != NULL), which is returned */ -VEC *pccg(A,A_params,M_inv,M_params,b,eps,x) -MTX_FN A, M_inv; -VEC *b, *x; -double eps; -void *A_params, *M_params; -{ - VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL; - int k; - Real alpha, beta, ip, old_ip, norm_b; - - if ( ! A || ! b ) - error(E_NULL,"pccg"); - if ( x == b ) - error(E_INSITU,"pccg"); - x = v_resize(x,b->dim); - if ( eps <= 0.0 ) - eps = MACHEPS; - - r = v_get(b->dim); - p = v_get(b->dim); - q = v_get(b->dim); - z = v_get(b->dim); - - norm_b = v_norm2(b); - - v_zero(x); - r = v_copy(b,r); - old_ip = 0.0; - for ( k = 0; ; k++ ) - { - if ( v_norm2(r) < eps*norm_b ) - break; - if ( k > max_iter ) - error(E_ITER,"pccg"); - if ( M_inv ) - (*M_inv)(M_params,r,z); - else - v_copy(r,z); /* M == identity */ - ip = in_prod(z,r); - if ( k ) /* if ( k > 0 ) ... */ - { - beta = ip/old_ip; - p = v_mltadd(z,p,beta,p); - } - else /* if ( k == 0 ) ... */ - { - beta = 0.0; - p = v_copy(z,p); - old_ip = 0.0; - } - q = (*A)(A_params,p,q); - alpha = ip/in_prod(p,q); - x = v_mltadd(x,p,alpha,x); - r = v_mltadd(r,q,-alpha,r); - old_ip = ip; - } - cg_num_iters = k; - - V_FREE(p); - V_FREE(q); - V_FREE(r); - V_FREE(z); - - return x; -} - -/* sp_pccg -- a simple interface to pccg() which uses sparse matrix - data structures - -- assumes that LLT contains the Cholesky factorisation of the - actual pre-conditioner */ -VEC *sp_pccg(A,LLT,b,eps,x) -SPMAT *A, *LLT; -VEC *b, *x; -double eps; -{ return pccg(sp_mv_mlt,A,spCHsolve,LLT,b,eps,x); } - - -/* - Routines for performing the CGS (Conjugate Gradient Squared) - algorithm of P. Sonneveld: - "CGS, a fast Lanczos-type solver for nonsymmetric linear - systems", SIAM J. Sci. & Stat. Comp. v. 10, pp. 36--52 -*/ - -/* cgs -- uses CGS to compute a solution x to A.x=b - -- the matrix A is not passed explicitly, rather a routine - A is passed where A(x,Ax,params) computes - Ax = A.x - -- the computed solution is passed */ -VEC *cgs(A,A_params,b,r0,tol,x) -MTX_FN A; -VEC *x, *b; -VEC *r0; /* tilde r0 parameter -- should be random??? */ -double tol; /* error tolerance used */ -void *A_params; -{ - VEC *p, *q, *r, *u, *v, *tmp1, *tmp2; - Real alpha, beta, norm_b, rho, old_rho, sigma; - int iter; - - if ( ! A || ! x || ! b || ! 
r0 ) - error(E_NULL,"cgs"); - if ( x->dim != b->dim || r0->dim != x->dim ) - error(E_SIZES,"cgs"); - if ( tol <= 0.0 ) - tol = MACHEPS; - - p = v_get(x->dim); - q = v_get(x->dim); - r = v_get(x->dim); - u = v_get(x->dim); - v = v_get(x->dim); - tmp1 = v_get(x->dim); - tmp2 = v_get(x->dim); - - norm_b = v_norm2(b); - (*A)(A_params,x,tmp1); - v_sub(b,tmp1,r); - v_zero(p); v_zero(q); - old_rho = 1.0; - - iter = 0; - while ( v_norm2(r) > tol*norm_b ) - { - if ( ++iter > max_iter ) break; - /* error(E_ITER,"cgs"); */ - rho = in_prod(r0,r); - if ( old_rho == 0.0 ) - error(E_SING,"cgs"); - beta = rho/old_rho; - v_mltadd(r,q,beta,u); - v_mltadd(q,p,beta,tmp1); - v_mltadd(u,tmp1,beta,p); - - (*A)(A_params,p,v); - - sigma = in_prod(r0,v); - if ( sigma == 0.0 ) - error(E_SING,"cgs"); - alpha = rho/sigma; - v_mltadd(u,v,-alpha,q); - v_add(u,q,tmp1); - - (*A)(A_params,tmp1,tmp2); - - v_mltadd(r,tmp2,-alpha,r); - v_mltadd(x,tmp1,alpha,x); - - old_rho = rho; - } - cg_num_iters = iter; - - V_FREE(p); V_FREE(q); V_FREE(r); - V_FREE(u); V_FREE(v); - V_FREE(tmp1); V_FREE(tmp2); - - return x; -} - -/* sp_cgs -- simple interface for SPMAT data structures */ -VEC *sp_cgs(A,b,r0,tol,x) -SPMAT *A; -VEC *b, *r0, *x; -double tol; -{ return cgs(sp_mv_mlt,A,b,r0,tol,x); } - -/* - Routine for performing LSQR -- the least squares QR algorithm - of Paige and Saunders: - "LSQR: an algorithm for sparse linear equations and - sparse least squares", ACM Trans. Math. Soft., v. 8 - pp. 43--71 (1982) -*/ -/* lsqr -- sparse CG-like least squares routine: - -- finds min_x ||A.x-b||_2 using A defined through A & AT - -- returns x (if x != NULL) */ -VEC *lsqr(A,AT,A_params,b,tol,x) -MTX_FN A, AT; /* AT is A transposed */ -VEC *x, *b; -double tol; /* error tolerance used */ -void *A_params; -{ - VEC *u, *v, *w, *tmp; - Real alpha, beta, norm_b, phi, phi_bar, - rho, rho_bar, rho_max, theta; - Real s, c; /* for Givens' rotations */ - int iter, m, n; - - if ( ! b || ! 
x ) - error(E_NULL,"lsqr"); - if ( tol <= 0.0 ) - tol = MACHEPS; - - m = b->dim; n = x->dim; - u = v_get((u_int)m); - v = v_get((u_int)n); - w = v_get((u_int)n); - tmp = v_get((u_int)n); - norm_b = v_norm2(b); - - v_zero(x); - beta = v_norm2(b); - if ( beta == 0.0 ) - return x; - sv_mlt(1.0/beta,b,u); - tracecatch((*AT)(A_params,u,v),"lsqr"); - alpha = v_norm2(v); - if ( alpha == 0.0 ) - return x; - sv_mlt(1.0/alpha,v,v); - v_copy(v,w); - phi_bar = beta; rho_bar = alpha; - - rho_max = 1.0; - iter = 0; - do { - if ( ++iter > max_iter ) - error(E_ITER,"lsqr"); - - tmp = v_resize(tmp,m); - tracecatch((*A) (A_params,v,tmp),"lsqr"); - - v_mltadd(tmp,u,-alpha,u); - beta = v_norm2(u); sv_mlt(1.0/beta,u,u); - - tmp = v_resize(tmp,n); - tracecatch((*AT)(A_params,u,tmp),"lsqr"); - v_mltadd(tmp,v,-beta,v); - alpha = v_norm2(v); sv_mlt(1.0/alpha,v,v); - - rho = sqrt(rho_bar*rho_bar+beta*beta); - if ( rho > rho_max ) - rho_max = rho; - c = rho_bar/rho; - s = beta/rho; - theta = s*alpha; - rho_bar = -c*alpha; - phi = c*phi_bar; - phi_bar = s*phi_bar; - - /* update x & w */ - if ( rho == 0.0 ) - error(E_SING,"lsqr"); - v_mltadd(x,w,phi/rho,x); - v_mltadd(v,w,-theta/rho,w); - } while ( fabs(phi_bar*alpha*c) > tol*norm_b/rho_max ); - - cg_num_iters = iter; - - V_FREE(tmp); V_FREE(u); V_FREE(v); V_FREE(w); - - return x; -} - -/* sp_lsqr -- simple interface for SPMAT data structures */ -VEC *sp_lsqr(A,b,tol,x) -SPMAT *A; -VEC *b, *x; -double tol; -{ return lsqr(sp_mv_mlt,sp_vm_mlt,A,b,tol,x); } - diff --git a/src/mesch/copy.c b/src/mesch/copy.c deleted file mode 100755 index 007b90d3f4..0000000000 --- a/src/mesch/copy.c +++ /dev/null @@ -1,211 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
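A companion sketch (not from the original sources) for lsqr() above: the least-squares routine needs the transposed operator as a second routine, which is why sp_lsqr() passes both sp_mv_mlt and sp_vm_mlt. The pair below implements A and A' for a flat row-major array carried through params; the names and the flat_mat struct are illustrative, not part of Meschach.

/* Sketch only: an operator/transpose pair in the convention lsqr() expects. */
struct flat_mat { Real *a; unsigned int m, n; };            /* row-major m x n array */

static VEC *flat_mv(void *params, VEC *x, VEC *out) {       /* out = A.x,  length m */
    struct flat_mat *A = (struct flat_mat *) params;
    out = v_resize(out, A->m);
    for (unsigned int i = 0; i < A->m; i++) {
        Real s = 0.0;
        for (unsigned int j = 0; j < A->n; j++)
            s += A->a[i * A->n + j] * x->ve[j];
        out->ve[i] = s;
    }
    return out;
}

static VEC *flat_vm(void *params, VEC *x, VEC *out) {       /* out = A'.x, length n */
    struct flat_mat *A = (struct flat_mat *) params;
    out = v_resize(out, A->n);
    for (unsigned int j = 0; j < A->n; j++) {
        Real s = 0.0;
        for (unsigned int i = 0; i < A->m; i++)
            s += A->a[i * A->n + j] * x->ve[i];
        out->ve[j] = s;
    }
    return out;
}

With a flat_mat fm describing the data and x pre-sized to fm.n entries, lsqr(flat_mv, flat_vm, &fm, b, tol, x) then minimises ||A.x - b||_2 without an explicit SPMAT.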
-** -***************************************************************************/ - - -static char rcsid[] = "copy.c,v 1.1 1997/12/04 17:55:17 hines Exp"; -#include -#include "matrix.h" - - - -/* _m_copy -- copies matrix into new area */ -MAT *_m_copy(in,out,i0,j0) -MAT *in,*out; -u_int i0,j0; -{ - u_int i /* ,j */; - - if ( in==MNULL ) - error(E_NULL,"_m_copy"); - if ( in==out ) - return (out); - if ( out==MNULL || out->m < in->m || out->n < in->n ) - out = m_resize(out,in->m,in->n); - - for ( i=i0; i < in->m; i++ ) - MEM_COPY(&(in->me[i][j0]),&(out->me[i][j0]), - (in->n - j0)*sizeof(Real)); - /* for ( j=j0; j < in->n; j++ ) - out->me[i][j] = in->me[i][j]; */ - - return (out); -} - -/* _v_copy -- copies vector into new area */ -VEC *_v_copy(in,out,i0) -VEC *in,*out; -u_int i0; -{ - /* u_int i,j; */ - - if ( in==VNULL ) - error(E_NULL,"_v_copy"); - if ( in==out ) - return (out); - if ( out==VNULL || out->dim < in->dim ) - out = v_resize(out,in->dim); - - MEM_COPY(&(in->ve[i0]),&(out->ve[i0]),(in->dim - i0)*sizeof(Real)); - /* for ( i=i0; i < in->dim; i++ ) - out->ve[i] = in->ve[i]; */ - - return (out); -} - -/* px_copy -- copies permutation 'in' to 'out' */ -PERM *px_copy(in,out) -PERM *in,*out; -{ - /* int i; */ - - if ( in == PNULL ) - error(E_NULL,"px_copy"); - if ( in == out ) - return out; - if ( out == PNULL || out->size != in->size ) - out = px_resize(out,in->size); - - MEM_COPY(in->pe,out->pe,in->size*sizeof(u_int)); - /* for ( i = 0; i < in->size; i++ ) - out->pe[i] = in->pe[i]; */ - - return out; -} - -/* - The .._move() routines are for moving blocks of memory around - within Meschach data structures and for re-arranging matrices, - vectors etc. -*/ - -/* m_move -- copies selected pieces of a matrix - -- moves the m0 x n0 submatrix with top-left cor-ordinates (i0,j0) - to the corresponding submatrix of out with top-left co-ordinates - (i1,j1) - -- out is resized (& created) if necessary */ -MAT *m_move(in,i0,j0,m0,n0,out,i1,j1) -MAT *in, *out; -int i0, j0, m0, n0, i1, j1; -{ - int i; - - if ( ! in ) - error(E_NULL,"m_move"); - if ( i0 < 0 || j0 < 0 || i1 < 0 || j1 < 0 || m0 < 0 || n0 < 0 || - i0+m0 > in->m || j0+n0 > in->n ) - error(E_BOUNDS,"m_move"); - - if ( ! out ) - out = m_resize(out,i1+m0,j1+n0); - else if ( i1+m0 > out->m || j1+n0 > out->n ) - out = m_resize(out,max(out->m,i1+m0),max(out->n,j1+n0)); - - for ( i = 0; i < m0; i++ ) - MEM_COPY(&(in->me[i0+i][j0]),&(out->me[i1+i][j1]), - n0*sizeof(Real)); - - return out; -} - -/* v_move -- copies selected pieces of a vector - -- moves the length dim0 subvector with initial index i0 - to the corresponding subvector of out with initial index i1 - -- out is resized if necessary */ -VEC *v_move(in,i0,dim0,out,i1) -VEC *in, *out; -int i0, dim0, i1; -{ - if ( ! in ) - error(E_NULL,"v_move"); - if ( i0 < 0 || dim0 < 0 || i1 < 0 || - i0+dim0 > in->dim ) - error(E_BOUNDS,"v_move"); - - if ( (! out) || i1+dim0 > out->dim ) - out = v_resize(out,i1+dim0); - - MEM_COPY(&(in->ve[i0]),&(out->ve[i1]),dim0*sizeof(Real)); - - return out; -} - -/* mv_move -- copies selected piece of matrix to a vector - -- moves the m0 x n0 submatrix with top-left co-ordinate (i0,j0) to - the subvector with initial index i1 (and length m0*n0) - -- rows are copied contiguously - -- out is resized if necessary */ -VEC *mv_move(in,i0,j0,m0,n0,out,i1) -MAT *in; -VEC *out; -int i0, j0, m0, n0, i1; -{ - int dim1, i; - - if ( ! 
in ) - error(E_NULL,"mv_move"); - if ( i0 < 0 || j0 < 0 || m0 < 0 || n0 < 0 || i1 < 0 || - i0+m0 > in->m || j0+n0 > in->n ) - error(E_BOUNDS,"mv_move"); - - dim1 = m0*n0; - if ( (! out) || i1+dim1 > out->dim ) - out = v_resize(out,i1+dim1); - - for ( i = 0; i < m0; i++ ) - MEM_COPY(&(in->me[i0+i][j0]),&(out->ve[i1+i*n0]),n0*sizeof(Real)); - - return out; -} - -/* vm_move -- copies selected piece of vector to a matrix - -- moves the subvector with initial index i0 and length m1*n1 to - the m1 x n1 submatrix with top-left co-ordinate (i1,j1) - -- copying is done by rows - -- out is resized if necessary */ -MAT *vm_move(in,i0,out,i1,j1,m1,n1) -VEC *in; -MAT *out; -int i0, i1, j1, m1, n1; -{ - int dim0, i; - - if ( ! in ) - error(E_NULL,"vm_move"); - if ( i0 < 0 || i1 < 0 || j1 < 0 || m1 < 0 || n1 < 0 || - i0+m1*n1 > in->dim ) - error(E_BOUNDS,"vm_move"); - - if ( ! out ) - out = m_resize(out,i1+m1,j1+n1); - else - out = m_resize(out,max(i1+m1,out->m),max(j1+n1,out->n)); - - dim0 = m1*n1; - for ( i = 0; i < m1; i++ ) - MEM_COPY(&(in->ve[i0+i*n1]),&(out->me[i1+i][j1]),n1*sizeof(Real)); - - return out; -} diff --git a/src/mesch/dmacheps.c b/src/mesch/dmacheps.c deleted file mode 100755 index d43c2f3873..0000000000 --- a/src/mesch/dmacheps.c +++ /dev/null @@ -1,48 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -#include - -double dclean(x) -double x; -{ - static double y; - y = x; - return y; /* prevents optimisation */ -} - -int main() -{ - static double deps, deps1, dtmp; - - deps = 1.0; - while ( dclean(1.0+deps) > 1.0 ) - deps = 0.5*deps; - - printf("%g\n", 2.0*deps); - return 0; -} diff --git a/src/mesch/err.c b/src/mesch/err.c deleted file mode 100755 index 81d0a20396..0000000000 --- a/src/mesch/err.c +++ /dev/null @@ -1,351 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. 
All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - File with basic error-handling operations - Based on previous version on Zilog - System 8000 setret() etc. - Ported to Pyramid 9810 late 1987 - */ - -static char rcsid[] = "err.c,v 1.1 1997/12/04 17:55:19 hines Exp"; - -#include -#include -#include -#include "err.h" - -#if defined(__MWERKS__) -#define isascii isprint -#undef SYSV -#endif -#ifdef SYSV -/* AT&T System V */ -#include -#else -/* something else -- assume BSD or ANSI C */ -#include -#endif - - -#ifndef FALSE -#define FALSE 0 -#define TRUE 1 -#endif - -#define EF_EXIT 0 -#define EF_ABORT 1 -#define EF_JUMP 2 -#define EF_SILENT 3 - -/* The only error caught in this file! */ -#define E_SIGNAL 16 - -static char *err_mesg[] = -{ "unknown error", /* 0 */ - "sizes of objects don't match", /* 1 */ - "index out of bounds", /* 2 */ - "can't allocate memory", /* 3 */ - "singular matrix", /* 4 */ - "matrix not positive definite", /* 5 */ - "incorrect format input", /* 6 */ - "bad input file/device", /* 7 */ - "NULL objects passed", /* 8 */ - "matrix not square", /* 9 */ - "object out of range", /* 10 */ - "can't do operation in situ for non-square matrix", /* 11 */ - "can't do operation in situ", /* 12 */ - "excessive number of iterations", /* 13 */ - "convergence criterion failed", /* 14 */ - "bad starting value", /* 15 */ - "floating exception", /* 16 */ - "internal inconsistency (data structure)",/* 17 */ - "unexpected end-of-file", /* 18 */ - "shared vectors (cannot release them)", /* 19 */ - "negative argument", /* 20 */ - "cannot overwrite object", /* 21 */ - "breakdown in iterative method" /* 22 */ - }; - -#define MAXERR (sizeof(err_mesg)/sizeof(char *)) - -static char *warn_mesg[] = { - "unknown warning", /* 0 */ - "wrong type number (use macro TYPE_*)", /* 1 */ - "no corresponding mem_stat_mark", /* 2 */ - "computed norm of a residual is less than 0", /* 3 */ - "resizing a shared vector" /* 4 */ -}; - -#define MAXWARN (sizeof(warn_mesg)/sizeof(char *)) - - - -#define MAX_ERRS 100 - -jmp_buf restart; - - -/* array of pointers to lists of errors */ - -typedef struct { - char **listp; /* pointer to a list of errors */ - unsigned len; /* length of the list */ - unsigned warn; /* =FALSE - errors, =TRUE - warnings */ -} Err_list; - -static Err_list err_list[ERR_LIST_MAX_LEN] = { - {err_mesg,MAXERR,FALSE}, /* basic errors list */ - {warn_mesg,MAXWARN,TRUE} /* basic warnings list */ -}; - - -static int err_list_end = 2; /* number of elements in err_list */ - -/* attach a new list of errors pointed by err_ptr - or change a previous one; - list_len is the number of elements in the list; - list_num is the list number; - warn == FALSE - errors (stop the program), - warn == TRUE - warnings (continue the program); - Note: lists numbered 0 and 1 are attached automatically, - you do not need to do it - */ -int err_list_attach(list_num, list_len,err_ptr,warn) -int list_num, list_len, warn; -char **err_ptr; -{ - if (list_num < 0 || list_len <= 0 || - err_ptr == (char **)NULL) - return -1; - - if (list_num >= ERR_LIST_MAX_LEN) { - fprintf(stderr,"\n file \"%s\": %s %s\n", - "err.c","increase the value of 
ERR_LIST_MAX_LEN", - "in matrix.h and zmatdef.h"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stderr,"\n file \"%s\": %s %s\n", - "err.c","increase the value of ERR_LIST_MAX_LEN", - "in matrix.h and zmatdef.h"); - printf("Exiting program\n"); - exit(0); - } - - if (err_list[list_num].listp != (char **)NULL && - err_list[list_num].listp != err_ptr) - free((char *)err_list[list_num].listp); - err_list[list_num].listp = err_ptr; - err_list[list_num].len = list_len; - err_list[list_num].warn = warn; - err_list_end = list_num+1; - - return list_num; -} - - -/* release the error list numbered list_num */ -int err_list_free(list_num) -int list_num; -{ - if (list_num < 0 || list_num >= err_list_end) return -1; - if (err_list[list_num].listp != (char **)NULL) { - err_list[list_num].listp = (char **)NULL; - err_list[list_num].len = 0; - err_list[list_num].warn = 0; - } - return 0; -} - - -/* check if list_num is attached; - return FALSE if not; - return TRUE if yes - */ -int err_is_list_attached(list_num) -int list_num; -{ - if (list_num < 0 || list_num >= err_list_end) - return FALSE; - - if (err_list[list_num].listp != (char **)NULL) - return TRUE; - - return FALSE; -} - -/* other local variables */ - -static int err_flag = EF_EXIT, num_errs = 0, cnt_errs = 1; - -/* set_err_flag -- sets err_flag -- returns old err_flag */ -int set_err_flag(flag) -int flag; -{ - int tmp; - - tmp = err_flag; - err_flag = flag; - return tmp; -} - -/* count_errs -- sets cnt_errs (TRUE/FALSE) & returns old value */ -int count_errs(flag) -int flag; -{ - int tmp; - - tmp = cnt_errs; - cnt_errs = flag; - return tmp; -} - -/* ev_err -- reports error (err_num) in file "file" at line "line_num" and - returns to user error handler; - list_num is an error list number (0 is the basic list - pointed by err_mesg, 1 is the basic list of warnings) - */ -int ev_err(file,err_num,line_num,fn_name,list_num) -char *file, *fn_name; -int err_num, line_num,list_num; -{ - int num; - - if ( err_num < 0 ) err_num = 0; - - if (list_num < 0 || list_num >= err_list_end || - err_list[list_num].listp == (char **)NULL) { - fprintf(stderr, - "\n Not (properly) attached list of errors: list_num = %d\n", - list_num); - fprintf(stderr," Call \"err_list_attach\" in your program\n"); - if ( ! isatty(fileno(stdout)) ) { - fprintf(stderr, - "\n Not (properly) attached list of errors: list_num = %d\n", - list_num); - fprintf(stderr," Call \"err_list_attach\" in your program\n"); - } - printf("\nExiting program\n"); - exit(0); - } - - num = err_num; - if ( num >= err_list[list_num].len ) num = 0; - - if ( cnt_errs && ++num_errs >= MAX_ERRS ) /* too many errors */ - { - fprintf(stderr,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stdout,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - printf("Sorry, too many errors: %d\n",num_errs); - printf("Exiting program\n"); - exit(0); - } - if ( err_list[list_num].warn ) - switch ( err_flag ) - { - case EF_SILENT: break; - default: - fprintf(stderr,"\n\"%s\", line %d: %s in function %s()\n\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stdout,"\n\"%s\", line %d: %s in function %s()\n\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? 
fn_name : "???"); - break; - } - else - switch ( err_flag ) - { - case EF_SILENT: - longjmp(restart,(err_num==0)? -1 : err_num); - break; - case EF_ABORT: - fprintf(stderr,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stdout,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - abort(); - break; - case EF_JUMP: - fprintf(stderr,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stdout,"\n\"%s\", line %d: %s in function %s()\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - longjmp(restart,(err_num==0)? -1 : err_num); - break; - default: - fprintf(stderr,"\n\"%s\", line %d: %s in function %s()\n\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - if ( ! isatty(fileno(stdout)) ) - fprintf(stdout,"\n\"%s\", line %d: %s in function %s()\n\n", - file,line_num,err_list[list_num].listp[num], - isascii(*fn_name) ? fn_name : "???"); - - break; - } - - /* ensure exit if fall through */ - if ( ! err_list[list_num].warn ) { -#ifdef NEURON - hoc_execerror("meschach library error", (char*)0); -#else - exit(0); -#endif - } - - return 0; -} - -/* float_error -- catches floating arithmetic signals */ -static void float_error(num) -int num; -{ - signal(SIGFPE,float_error); - /* fprintf(stderr,"SIGFPE: signal #%d\n",num); */ - /* fprintf(stderr,"errno = %d\n",errno); */ - ev_err("???.c",E_SIGNAL,0,"???",0); -} - -/* catch_signal -- sets up float_error() to catch SIGFPE's */ -void catch_FPE() -{ - signal(SIGFPE,float_error); -} - - diff --git a/src/mesch/err.h b/src/mesch/err.h deleted file mode 100755 index 7c8521215e..0000000000 --- a/src/mesch/err.h +++ /dev/null @@ -1,183 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* err.h 28/09/1993 */ - -/* RCS id: err.h,v 1.1 1997/11/03 16:15:48 hines Exp */ - - -#ifndef ERRHEADER -#define ERRHEADER - - -#include -#include "machine.h" - -/* Error recovery */ - -extern jmp_buf restart; - - -/* max. 
# of error lists */ -#define ERR_LIST_MAX_LEN 10 - -/* main error functions */ -#ifndef ANSI_C -extern int ev_err(); /* main error handler */ -extern int set_err_flag(); /* for different ways of handling - errors, returns old value */ -extern int count_errs(); /* to avoid "too many errors" */ -extern int err_list_attach(); /* for attaching a list of errors */ -extern int err_is_list_attached(); /* checking if a list is attached */ -extern int err_list_free(); /* freeing a list of errors */ - -#else /* ANSI_C */ - -extern int ev_err(char *,int,int,char *,int); /* main error handler */ -extern int set_err_flag(int flag); /* for different ways of handling - errors, returns old value */ -extern int count_errs(int true_false); /* to avoid "too many errors" */ -extern int err_list_attach(int list_num, int list_len, - char **err_ptr,int warn); /* for attaching a list of errors */ -extern int err_is_list_attached(int list_num); /* checking if a list - is attached */ -extern int err_list_free(int list_num); /* freeing a list of errors */ - -#endif - - -/* error(E_TYPE,"myfunc") raises error type E_TYPE for function my_func() */ -#define error(err_num,fn_name) ev_err(__FILE__,err_num,__LINE__,fn_name,0) - -/* warning(WARN_TYPE,"myfunc") raises warning type WARN_TYPE for - function my_func() */ -#define warning(err_num,fn_name) ev_err(__FILE__,err_num,__LINE__,fn_name,1) - - -/* error flags */ -#define EF_EXIT 0 /* exit on error */ -#define EF_ABORT 1 /* abort (dump core) on error */ -#define EF_JUMP 2 /* jump on error */ -#define EF_SILENT 3 /* jump, but don't print message */ -#define ERREXIT() set_err_flag(EF_EXIT) -#define ERRABORT() set_err_flag(EF_ABORT) -/* don't print message */ -#define SILENTERR() if ( ! setjmp(restart) ) set_err_flag(EF_SILENT) -/* return here on error */ -#define ON_ERROR() if ( ! 
setjmp(restart) ) set_err_flag(EF_JUMP) - - -/* error types */ -#define E_UNKNOWN 0 -#define E_SIZES 1 -#define E_BOUNDS 2 -#define E_MEM 3 -#define E_SING 4 -#define E_POSDEF 5 -#define E_FORMAT 6 -#define E_INPUT 7 -#define E_NULL 8 -#define E_SQUARE 9 -#define E_RANGE 10 -#define E_INSITU2 11 -#define E_INSITU 12 -#define E_ITER 13 -#define E_CONV 14 -#define E_START 15 -#define E_SIGNAL 16 -#define E_INTERN 17 -#define E_EOF 18 -#define E_SHARED_VECS 19 -#define E_NEG 20 -#define E_OVERWRITE 21 -#define E_BREAKDOWN 22 - -/* warning types */ -#define WARN_UNKNOWN 0 -#define WARN_WRONG_TYPE 1 -#define WARN_NO_MARK 2 -#define WARN_RES_LESS_0 3 -#define WARN_SHARED_VEC 4 - - -/* error catching macros */ - -/* execute err_part if error errnum is raised while executing ok_part */ -#define catch(errnum,ok_part,err_part) \ - { jmp_buf _save; int _err_num, _old_flag; \ - _old_flag = set_err_flag(EF_SILENT); \ - MEM_COPY(restart,_save,sizeof(jmp_buf)); \ - if ( (_err_num=setjmp(restart)) == 0 ) \ - { ok_part; \ - set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); } \ - else if ( _err_num == errnum ) \ - { set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); \ - err_part; } \ - else { set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); \ - error(_err_num,"catch"); \ - } \ - } - - -/* execute err_part if any error raised while executing ok_part */ -#define catchall(ok_part,err_part) \ - { jmp_buf _save; int _err_num, _old_flag; \ - _old_flag = set_err_flag(EF_SILENT); \ - MEM_COPY(restart,_save,sizeof(jmp_buf)); \ - if ( (_err_num=setjmp(restart)) == 0 ) \ - { ok_part; \ - set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); } \ - else \ - { set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); \ - err_part; } \ - } - - -/* print message if error raised while executing ok_part, - then re-raise error to trace calls */ -#define tracecatch(ok_part,function) \ - { jmp_buf _save; int _err_num, _old_flag; \ - _old_flag = set_err_flag(EF_JUMP); \ - MEM_COPY(restart,_save,sizeof(jmp_buf)); \ - if ( (_err_num=setjmp(restart)) == 0 ) \ - { ok_part; \ - set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); } \ - else \ - { set_err_flag(_old_flag); \ - MEM_COPY(_save,restart,sizeof(jmp_buf)); \ - error(_err_num,function); } \ - } - - - -#endif /* ERRHEADER */ - diff --git a/src/mesch/extras.c b/src/mesch/extras.c deleted file mode 100755 index 300f6e3007..0000000000 --- a/src/mesch/extras.c +++ /dev/null @@ -1,501 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
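/*
 * Editorial note on the catch()/catchall()/tracecatch() macros above: each one saves
 * the global jmp_buf, arranges for error() to longjmp() instead of exiting, runs the
 * protected code, and restores the previous state on both the success and failure
 * paths.  The sketch below reproduces that save/guard/restore shape with simplified
 * stand-alone names (demo_restart, DEMO_CATCHALL, demo_error); these are
 * illustrative assumptions, not the deleted API.
 */
#include <setjmp.h>
#include <stdio.h>
#include <string.h>

static jmp_buf demo_restart;

static void demo_error(int code) { longjmp(demo_restart, code ? code : -1); }

#define DEMO_CATCHALL(ok_part, err_part)                    \
    do {                                                    \
        jmp_buf save_;                                      \
        memcpy(save_, demo_restart, sizeof(jmp_buf));       \
        if (setjmp(demo_restart) == 0) { ok_part; }         \
        else                           { err_part; }        \
        memcpy(demo_restart, save_, sizeof(jmp_buf));       \
    } while (0)

static void might_fail(int bad) { if (bad) demo_error(7); puts("ok part ran"); }

int main(void)
{
    DEMO_CATCHALL(might_fail(0), puts("error branch (not expected)"));
    DEMO_CATCHALL(might_fail(1), puts("error branch: recovered from code 7"));
    return 0;
}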
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Memory port routines: MEM_COPY and MEM_ZERO -*/ - -/* For BSD 4.[23] environments: using bcopy() and bzero() */ - -#include "machine.h" - -#ifndef MEM_COPY -void MEM_COPY(from,to,len) -char *from, *to; -int len; -{ - int i; - - if ( from < to ) - { - for ( i = 0; i < len; i++ ) - *to++ = *from++; - } - else - { - from += len; to += len; - for ( i = 0; i < len; i++ ) - *(--to) = *(--from); - } -} -#endif - -#ifndef MEM_ZERO -void MEM_ZERO(ptr,len) -char *ptr; -int len; -{ - int i; - - for ( i = 0; i < len; i++ ) - *(ptr++) = '\0'; -} -#endif - -/* - This file contains versions of something approximating the well-known - BLAS routines in C, suitable for Meschach (hence the `m'). - These are "vanilla" implementations, at least with some consideration - of the effects of caching and paging, and maybe some loop unrolling - for register-rich machines -*/ - -/* - Organisation of matrices: it is assumed that matrices are represented - by Real **'s. To keep flexibility, there is also an "initial - column" parameter j0, so that the actual elements used are - A[0][j0], A[0][j0+1], ..., A[0][j0+n-1] - A[1][j0], A[1][j0+1], ..., A[1][j0+n-1] - .. .. ... .. - A[m-1][j0], A[m-1][j0+1], ..., A[m-1][j0+n-1] -*/ - -static char rcsid[] = "$Id: extras.c,v 1.4 1995/06/08 15:13:15 des Exp $"; - -#include - -#define REGISTER_RICH 1 - -/* mblar-1 routines */ - -/* Mscale -- sets x <- alpha.x */ -void Mscale(len,alpha,x) -int len; -double alpha; -Real *x; -{ - register int i; - - for ( i = 0; i < len; i++ ) - x[i] *= alpha; -} - -/* Mswap -- swaps x and y */ -void Mswap(len,x,y) -int len; -Real *x, *y; -{ - register int i; - register Real tmp; - - for ( i = 0; i < len; i++ ) - { - tmp = x[i]; - x[i] = y[i]; - y[i] = tmp; - } -} - -/* Mcopy -- copies x to y */ -void Mcopy(len,x,y) -int len; -Real *x, *y; -{ - register int i; - - for ( i = 0; i < len; i++ ) - y[i] = x[i]; -} - -/* Maxpy -- y <- y + alpha.x */ -void Maxpy(len,alpha,x,y) -int len; -double alpha; -Real *x, *y; -{ - register int i, len4; - - /**************************************** - for ( i = 0; i < len; i++ ) - y[i] += alpha*x[i]; - ****************************************/ - -#ifdef REGISTER_RICH - len4 = len / 4; - len = len % 4; - for ( i = 0; i < len4; i++ ) - { - y[4*i] += alpha*x[4*i]; - y[4*i+1] += alpha*x[4*i+1]; - y[4*i+2] += alpha*x[4*i+2]; - y[4*i+3] += alpha*x[4*i+3]; - } - x += 4*len4; y += 4*len4; -#endif - for ( i = 0; i < len; i++ ) - y[i] += alpha*x[i]; -} - -/* Mdot -- returns x'.y */ -double Mdot(len,x,y) -int len; -Real *x, *y; -{ - register int i, len4; - register Real sum; - -#ifndef REGISTER_RICH - sum = 0.0; -#endif - -#ifdef REGISTER_RICH - register Real sum0, sum1, sum2, sum3; - - sum0 = sum1 = sum2 = sum3 = 0.0; - - len4 = len / 4; - len = len % 4; - - for ( i = 0; i < len4; i++ ) - { - sum0 += x[4*i ]*y[4*i ]; - sum1 += x[4*i+1]*y[4*i+1]; - sum2 += x[4*i+2]*y[4*i+2]; - sum3 += x[4*i+3]*y[4*i+3]; - } - sum = sum0 + sum1 + sum2 + sum3; - x += 4*len4; y += 4*len4; -#endif - - for ( i = 0; i < len; i++ ) - sum += x[i]*y[i]; - - return sum; -} - -#ifndef ABS -#define ABS(x) ((x) >= 0 ? 
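/*
 * Editorial note on Maxpy()/Mdot() above: the deleted "register-rich" paths unroll
 * by four and, for the dot product, keep four independent partial sums so the
 * floating-point additions can overlap.  A minimal stand-alone sketch of that
 * technique follows; demo_axpy/demo_dot are assumed names, not the deleted API.
 */
#include <stdio.h>

/* y <- y + alpha*x, unrolled by 4 with a scalar clean-up loop */
static void demo_axpy(int n, double alpha, const double *x, double *y)
{
    int i, n4 = n / 4;
    for (i = 0; i < n4; i++) {
        y[4*i]   += alpha * x[4*i];
        y[4*i+1] += alpha * x[4*i+1];
        y[4*i+2] += alpha * x[4*i+2];
        y[4*i+3] += alpha * x[4*i+3];
    }
    for (i = 4*n4; i < n; i++)
        y[i] += alpha * x[i];
}

/* x'y with four independent accumulators plus a clean-up loop */
static double demo_dot(int n, const double *x, const double *y)
{
    double s0 = 0, s1 = 0, s2 = 0, s3 = 0, s = 0;
    int i, n4 = n / 4;
    for (i = 0; i < n4; i++) {
        s0 += x[4*i]   * y[4*i];
        s1 += x[4*i+1] * y[4*i+1];
        s2 += x[4*i+2] * y[4*i+2];
        s3 += x[4*i+3] * y[4*i+3];
    }
    for (i = 4*n4; i < n; i++)
        s += x[i] * y[i];
    return s + s0 + s1 + s2 + s3;
}

int main(void)
{
    double x[5] = {1, 2, 3, 4, 5}, y[5] = {1, 1, 1, 1, 1};
    demo_axpy(5, 2.0, x, y);                  /* y = {3,5,7,9,11}             */
    printf("dot = %g\n", demo_dot(5, x, y));  /* 1*3+2*5+3*7+4*9+5*11 = 125   */
    return 0;
}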
(x) : -(x)) -#endif - -/* Mnorminf -- returns ||x||_inf */ -double Mnorminf(len,x) -int len; -Real *x; -{ - register int i; - register Real tmp, max_val; - - max_val = 0.0; - for ( i = 0; i < len; i++ ) - { - tmp = ABS(x[i]); - if ( max_val < tmp ) - max_val = tmp; - } - - return max_val; -} - -/* Mnorm1 -- returns ||x||_1 */ -double Mnorm1(len,x) -int len; -Real *x; -{ - register int i; - register Real sum; - - sum = 0.0; - for ( i = 0; i < len; i++ ) - sum += ABS(x[i]); - - return sum; -} - -/* Mnorm2 -- returns ||x||_2 */ -double Mnorm2(len,x) -int len; -Real *x; -{ - register int i; - register Real norm, invnorm, sum, tmp; - - norm = Mnorminf(len,x); - if ( norm == 0.0 ) - return 0.0; - invnorm = 1.0/norm; - sum = 0.0; - for ( i = 0; i < len; i++ ) - { - tmp = x[i]*invnorm; - sum += tmp*tmp; - } - - return sum/invnorm; -} - -/* mblar-2 routines */ - -/* Mmv -- y <- alpha.A.x + beta.y */ -void Mmv(m,n,alpha,A,j0,x,beta,y) -int m, n, j0; -double alpha, beta; -Real **A, *x, *y; -{ - register int i, j, m4, n4; - register Real sum0, sum1, sum2, sum3, tmp0, tmp1, tmp2, tmp3; - register Real *dp0, *dp1, *dp2, *dp3; - - /**************************************** - for ( i = 0; i < m; i++ ) - y[i] += alpha*Mdot(n,&(A[i][j0]),x); - ****************************************/ - - m4 = n4 = 0; - -#ifdef REGISTER_RICH - m4 = m / 4; - m = m % 4; - n4 = n / 4; - n = n % 4; - - for ( i = 0; i < m4; i++ ) - { - sum0 = sum1 = sum2 = sum3 = 0.0; - dp0 = &(A[4*i ][j0]); - dp1 = &(A[4*i+1][j0]); - dp2 = &(A[4*i+2][j0]); - dp3 = &(A[4*i+3][j0]); - - for ( j = 0; j < n4; j++ ) - { - tmp0 = x[4*j ]; - tmp1 = x[4*j+1]; - tmp2 = x[4*j+2]; - tmp3 = x[4*j+3]; - sum0 = sum0 + dp0[j]*tmp0 + dp0[j+1]*tmp1 + - dp0[j+2]*tmp2 + dp0[j+3]*tmp3; - sum1 = sum1 + dp1[j]*tmp0 + dp1[j+1]*tmp1 + - dp1[j+2]*tmp2 + dp1[j+3]*tmp3; - sum2 = sum2 + dp2[j]*tmp0 + dp2[j+1]*tmp1 + - dp2[j+2]*tmp2 + dp2[j+3]*tmp3; - sum3 = sum3 + dp3[j]*tmp0 + dp3[j+1]*tmp2 + - dp3[j+2]*tmp2 + dp3[j+3]*tmp3; - } - for ( j = 0; j < n; j++ ) - { - sum0 += dp0[4*n4+j]*x[4*n4+j]; - sum1 += dp1[4*n4+j]*x[4*n4+j]; - sum2 += dp2[4*n4+j]*x[4*n4+j]; - sum3 += dp3[4*n4+j]*x[4*n4+j]; - } - y[4*i ] = beta*y[4*i ] + alpha*sum0; - y[4*i+1] = beta*y[4*i+1] + alpha*sum1; - y[4*i+2] = beta*y[4*i+2] + alpha*sum2; - y[4*i+3] = beta*y[4*i+3] + alpha*sum3; - } -#endif - - for ( i = 0; i < m; i++ ) - y[4*m4+i] = beta*y[i] + alpha*Mdot(4*n4+n,&(A[4*m4+i][j0]),x); -} - -/* Mvm -- y <- alpha.A^T.x + beta.y */ -void Mvm(m,n,alpha,A,j0,x,beta,y) -int m, n, j0; -double alpha, beta; -Real **A, *x, *y; -{ - register int i, j, m4, n2; - register Real *Aref; - register Real tmp; - -#ifdef REGISTER_RICH - register Real *Aref0, *Aref1; - register Real tmp0, tmp1; - register Real yval0, yval1, yval2, yval3; -#endif - - if ( beta != 1.0 ) - Mscale(m,beta,y); - /**************************************** - for ( j = 0; j < n; j++ ) - Maxpy(m,alpha*x[j],&(A[j][j0]),y); - ****************************************/ - m4 = n2 = 0; - - m4 = m / 4; - m = m % 4; -#ifdef REGISTER_RICH - n2 = n / 2; - n = n % 2; - - for ( j = 0; j < n2; j++ ) - { - tmp0 = alpha*x[2*j]; - tmp1 = alpha*x[2*j+1]; - Aref0 = &(A[2*j ][j0]); - Aref1 = &(A[2*j+1][j0]); - for ( i = 0; i < m4; i++ ) - { - yval0 = y[4*i ] + tmp0*Aref0[4*i ]; - yval1 = y[4*i+1] + tmp0*Aref0[4*i+1]; - yval2 = y[4*i+2] + tmp0*Aref0[4*i+2]; - yval3 = y[4*i+3] + tmp0*Aref0[4*i+3]; - y[4*i ] = yval0 + tmp1*Aref1[4*i ]; - y[4*i+1] = yval1 + tmp1*Aref1[4*i+1]; - y[4*i+2] = yval2 + tmp1*Aref1[4*i+2]; - y[4*i+3] = yval3 + tmp1*Aref1[4*i+3]; - } - y += 
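/*
 * Editorial note on Mnorm2() above: to avoid overflow and underflow it first scales
 * by the infinity norm m and only then sums squares.  Below is a minimal
 * stand-alone sketch of the textbook form of that idea,
 * ||x||_2 = m * sqrt(sum((x_i/m)^2)); demo_norm2 is an assumed name, not the
 * deleted API.
 */
#include <math.h>
#include <stdio.h>

static double demo_norm2(int n, const double *x)
{
    double m = 0.0, sum = 0.0;
    int i;
    for (i = 0; i < n; i++)              /* m = ||x||_inf */
        if (fabs(x[i]) > m)
            m = fabs(x[i]);
    if (m == 0.0)
        return 0.0;
    for (i = 0; i < n; i++) {            /* sum of squares of the scaled entries */
        double t = x[i] / m;
        sum += t * t;
    }
    return m * sqrt(sum);                /* undo the scaling */
}

int main(void)
{
    /* entries this large would overflow a naive sum of squares */
    double x[3] = {3e200, 4e200, 0.0};
    printf("%g\n", demo_norm2(3, x));    /* prints 5e+200 */
    return 0;
}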
4*m4; Aref0 += 4*m4; Aref1 += 4*m4; - for ( i = 0; i < m; i++ ) - y[i] += tmp0*Aref0[i] + tmp1*Aref1[i]; - } -#endif - - for ( j = 0; j < n; j++ ) - { - tmp = alpha*x[2*n2+j]; - Aref = &(A[2*n2+j][j0]); - for ( i = 0; i < m4; i++ ) - { - y[4*i ] += tmp*Aref[4*i ]; - y[4*i+1] += tmp*Aref[4*i+1]; - y[4*i+2] += tmp*Aref[4*i+2]; - y[4*i+3] += tmp*Aref[4*i+3]; - } - y += 4*m4; Aref += 4*m4; - for ( i = 0; i < m; i++ ) - y[i] += tmp*Aref[i]; - } -} - -/* Mupdate -- A <- A + alpha.x.y^T */ -void Mupdate(m,n,alpha,x,y,A,j0) -int m, n, j0; -double alpha; -Real **A, *x, *y; -{ - register int i, j, n4; - register Real *Aref; - register Real tmp; - - /**************************************** - for ( i = 0; i < m; i++ ) - Maxpy(n,alpha*x[i],y,&(A[i][j0])); - ****************************************/ - - n4 = n / 4; - n = n % 4; - for ( i = 0; i < m; i++ ) - { - tmp = alpha*x[i]; - Aref = &(A[i][j0]); - for ( j = 0; j < n4; j++ ) - { - Aref[4*j ] += tmp*y[4*j ]; - Aref[4*j+1] += tmp*y[4*j+1]; - Aref[4*j+2] += tmp*y[4*j+2]; - Aref[4*j+3] += tmp*y[4*j+3]; - } - Aref += 4*n4; y += 4*n4; - for ( j = 0; j < n; j++ ) - Aref[j] += tmp*y[j]; - } -} - -/* mblar-3 routines */ - -/* Mmm -- C <- C + alpha.A.B */ -void Mmm(m,n,p,alpha,A,Aj0,B,Bj0,C,Cj0) -int m, n, p; /* C is m x n */ -double alpha; -Real **A, **B, **C; -int Aj0, Bj0, Cj0; -{ - register int i, j, k; - /* register Real tmp, sum; */ - - /**************************************** - for ( i = 0; i < m; i++ ) - for ( k = 0; k < p; k++ ) - Maxpy(n,alpha*A[i][Aj0+k],&(B[k][Bj0]),&(C[i][Cj0])); - ****************************************/ - for ( i = 0; i < m; i++ ) - Mvm(p,n,alpha,B,Bj0,&(A[i][Aj0]),1.0,&(C[i][Cj0])); -} - -/* Mmtrm -- C <- C + alpha.A^T.B */ -void Mmtrm(m,n,p,alpha,A,Aj0,B,Bj0,C,Cj0) -int m, n, p; /* C is m x n */ -double alpha; -Real **A, **B, **C; -int Aj0, Bj0, Cj0; -{ - register int i, j, k; - - /**************************************** - for ( i = 0; i < m; i++ ) - for ( k = 0; k < p; k++ ) - Maxpy(n,alpha*A[k][Aj0+i],&(B[k][Bj0]),&(C[i][Cj0])); - ****************************************/ - for ( k = 0; k < p; k++ ) - Mupdate(m,n,alpha,&(A[k][Aj0]),&(B[k][Bj0]),C,Cj0); -} - -/* Mmmtr -- C <- C + alpha.A.B^T */ -void Mmmtr(m,n,p,alpha,A,Aj0,B,Bj0,C,Cj0) -int m, n, p; /* C is m x n */ -double alpha; -Real **A, **B, **C; -int Aj0, Bj0, Cj0; -{ - register int i, j, k; - - /**************************************** - for ( i = 0; i < m; i++ ) - for ( j = 0; j < n; j++ ) - C[i][Cj0+j] += alpha*Mdot(p,&(A[i][Aj0]),&(B[j][Bj0])); - ****************************************/ - for ( i = 0; i < m; i++ ) - Mmv(n,p,alpha,B,Bj0,&(A[i][Aj0]),1.0,&(C[i][Cj0])); -} - -/* Mmtrmtr -- C <- C + alpha.A^T.B^T */ -void Mmtrmtr(m,n,p,alpha,A,Aj0,B,Bj0,C,Cj0) -int m, n, p; /* C is m x n */ -double alpha; -Real **A, **B, **C; -int Aj0, Bj0, Cj0; -{ - register int i, j, k; - - for ( i = 0; i < m; i++ ) - for ( j = 0; j < n; j++ ) - for ( k = 0; k < p; k++ ) - C[i][Cj0+j] += A[i][Aj0+k]*B[k][Bj0+j]; -} - diff --git a/src/mesch/fft.c b/src/mesch/fft.c deleted file mode 100755 index a2875e24ec..0000000000 --- a/src/mesch/fft.c +++ /dev/null @@ -1,145 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. 
-** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Fast Fourier Transform routine - Loosely based on the Fortran routine in Rabiner & Gold's - "Digital Signal Processing" -*/ - -static char rcsid[] = "fft.c,v 1.1 1997/12/04 17:55:20 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -/* fft -- d.i.t. fast Fourier transform - -- radix-2 FFT only - -- vector extended to a power of 2 */ -void fft(x_re,x_im) -VEC *x_re, *x_im; -{ - int i, ip, j, k, li, n, length; - Real *xr, *xi; - Real theta, pi = 3.1415926535897932384; - Real w_re, w_im, u_re, u_im, t_re, t_im; - Real tmp, tmpr, tmpi; - - if ( ! x_re || ! x_im ) - error(E_NULL,"fft"); - if ( x_re->dim != x_im->dim ) - error(E_SIZES,"fft"); - - n = 1; - while ( x_re->dim > n ) - n *= 2; - x_re = v_resize(x_re,n); - x_im = v_resize(x_im,n); - printf("# fft: x_re =\n"); v_output(x_re); - printf("# fft: x_im =\n"); v_output(x_im); - xr = x_re->ve; - xi = x_im->ve; - - /* Decimation in time (DIT) algorithm */ - j = 0; - for ( i = 0; i < n-1; i++ ) - { - if ( i < j ) - { - tmp = xr[i]; - xr[i] = xr[j]; - xr[j] = tmp; - tmp = xi[i]; - xi[i] = xi[j]; - xi[j] = tmp; - } - k = n / 2; - while ( k <= j ) - { - j -= k; - k /= 2; - } - j += k; - } - - /* Actual FFT */ - for ( li = 1; li < n; li *= 2 ) - { - length = 2*li; - theta = pi/li; - u_re = 1.0; - u_im = 0.0; - if ( li == 1 ) - { - w_re = -1.0; - w_im = 0.0; - } - else if ( li == 2 ) - { - w_re = 0.0; - w_im = 1.0; - } - else - { - w_re = cos(theta); - w_im = sin(theta); - } - for ( j = 0; j < li; j++ ) - { - for ( i = j; i < n; i += length ) - { - ip = i + li; - /* step 1 */ - t_re = xr[ip]*u_re - xi[ip]*u_im; - t_im = xr[ip]*u_im + xi[ip]*u_re; - /* step 2 */ - xr[ip] = xr[i] - t_re; - xi[ip] = xi[i] - t_im; - /* step 3 */ - xr[i] += t_re; - xi[i] += t_im; - } - tmpr = u_re*w_re - u_im*w_im; - tmpi = u_im*w_re + u_re*w_im; - u_re = tmpr; - u_im = tmpi; - } - } -} - -/* ifft -- inverse FFT using the same interface as fft() */ -void ifft(x_re,x_im) -VEC *x_re, *x_im; -{ - /* we just use complex conjugates */ - - sv_mlt(-1.0,x_im,x_im); - fft(x_re,x_im); - sv_mlt( 1.0/((double)(x_re->dim)),x_re,x_re); -} diff --git a/src/mesch/givens.c b/src/mesch/givens.c deleted file mode 100755 index e9057ba0cd..0000000000 --- a/src/mesch/givens.c +++ /dev/null @@ -1,141 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. 
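/*
 * Editorial note on fft()/ifft() above: the deleted routine is a radix-2
 * decimation-in-time FFT (bit-reversal permutation followed by butterfly stages),
 * and ifft() reuses the forward routine via conjugation and a 1/n rescaling.  Below
 * is a minimal stand-alone sketch of the same structure using C99 complex
 * arithmetic; demo_fft is an assumed name, n must be a power of two, and the
 * inverse is done here with the opposite twiddle sign plus a full 1/n scaling.
 */
#include <complex.h>
#include <math.h>
#include <stdio.h>

static void demo_fft(double complex *x, int n, int inverse)
{
    const double pi = 3.141592653589793;
    int i, j, k, len;

    /* bit-reversal permutation (same j/k bookkeeping as the deleted code) */
    for (i = 0, j = 0; i < n - 1; i++) {
        if (i < j) { double complex t = x[i]; x[i] = x[j]; x[j] = t; }
        k = n / 2;
        while (k <= j) { j -= k; k /= 2; }
        j += k;
    }

    /* butterfly stages */
    for (len = 2; len <= n; len *= 2) {
        double ang = (inverse ? 2.0 : -2.0) * pi / len;
        double complex w = cos(ang) + sin(ang) * I;
        for (i = 0; i < n; i += len) {
            double complex u = 1.0;
            for (j = 0; j < len / 2; j++) {
                double complex a = x[i + j];
                double complex b = x[i + j + len / 2] * u;
                x[i + j]           = a + b;
                x[i + j + len / 2] = a - b;
                u *= w;
            }
        }
    }

    if (inverse)                          /* the 1/n scaling step of ifft() */
        for (i = 0; i < n; i++)
            x[i] /= n;
}

int main(void)
{
    double complex x[4] = {1, 2, 3, 4};
    demo_fft(x, 4, 0);                    /* forward: X[0] = 10          */
    demo_fft(x, 4, 1);                    /* inverse recovers {1,2,3,4}  */
    printf("%.1f %.1f %.1f %.1f\n",
           creal(x[0]), creal(x[1]), creal(x[2]), creal(x[3]));
    return 0;
}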
-** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - - -/* - Files for matrix computations - - Givens operations file. Contains routines for calculating and - applying givens rotations for/to vectors and also to matrices by - row and by column. -*/ - -/* givens.c 1.2 11/25/87 */ -static char rcsid[] = "givens.c,v 1.1 1997/12/04 17:55:22 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - -/* givens -- returns c,s parameters for Givens rotation to - eliminate y in the vector [ x y ]' */ -void givens(x,y,c,s) -double x,y; -Real *c,*s; -{ - Real norm; - - norm = sqrt(x*x+y*y); - if ( norm == 0.0 ) - { *c = 1.0; *s = 0.0; } /* identity */ - else - { *c = x/norm; *s = y/norm; } -} - -/* rot_vec -- apply Givens rotation to x's i & k components */ -VEC *rot_vec(x,i,k,c,s,out) -VEC *x,*out; -u_int i,k; -double c,s; -{ - Real temp; - - if ( x==VNULL ) - error(E_NULL,"rot_vec"); - if ( i >= x->dim || k >= x->dim ) - error(E_RANGE,"rot_vec"); - out = v_copy(x,out); - - /* temp = c*out->ve[i] + s*out->ve[k]; */ - temp = c*v_entry(out,i) + s*v_entry(out,k); - /* out->ve[k] = -s*out->ve[i] + c*out->ve[k]; */ - v_set_val(out,k,-s*v_entry(out,i)+c*v_entry(out,k)); - /* out->ve[i] = temp; */ - v_set_val(out,i,temp); - - return (out); -} - -/* rot_rows -- premultiply mat by givens rotation described by c,s */ -MAT *rot_rows(mat,i,k,c,s,out) -MAT *mat,*out; -u_int i,k; -double c,s; -{ - u_int j; - Real temp; - - if ( mat==(MAT *)NULL ) - error(E_NULL,"rot_rows"); - if ( i >= mat->m || k >= mat->m ) - error(E_RANGE,"rot_rows"); - if ( mat != out ) - out = m_copy(mat,m_resize(out,mat->m,mat->n)); - - for ( j=0; jn; j++ ) - { - /* temp = c*out->me[i][j] + s*out->me[k][j]; */ - temp = c*m_entry(out,i,j) + s*m_entry(out,k,j); - /* out->me[k][j] = -s*out->me[i][j] + c*out->me[k][j]; */ - m_set_val(out,k,j, -s*m_entry(out,i,j) + c*m_entry(out,k,j)); - /* out->me[i][j] = temp; */ - m_set_val(out,i,j, temp); - } - - return (out); -} - -/* rot_cols -- postmultiply mat by givens rotation described by c,s */ -MAT *rot_cols(mat,i,k,c,s,out) -MAT *mat,*out; -u_int i,k; -double c,s; -{ - u_int j; - Real temp; - - if ( mat==(MAT *)NULL ) - error(E_NULL,"rot_cols"); - if ( i >= mat->n || k >= mat->n ) - error(E_RANGE,"rot_cols"); - if ( mat != out ) - out = m_copy(mat,m_resize(out,mat->m,mat->n)); - - for ( j=0; jm; j++ ) - { - /* temp = c*out->me[j][i] + s*out->me[j][k]; */ - temp = c*m_entry(out,j,i) + s*m_entry(out,j,k); - /* out->me[j][k] = -s*out->me[j][i] + c*out->me[j][k]; */ - m_set_val(out,j,k, -s*m_entry(out,j,i) + c*m_entry(out,j,k)); - /* out->me[j][i] = temp; */ - m_set_val(out,j,i,temp); - } - - return (out); -} - diff --git a/src/mesch/hessen.c b/src/mesch/hessen.c deleted file mode 100755 index 6ba432c2b4..0000000000 --- a/src/mesch/hessen.c +++ /dev/null @@ 
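/*
 * Editorial note on givens()/rot_vec() above: c and s are chosen so that applying
 * [ c  s; -s  c ] to the pair (x, y) zeroes the second component and leaves the
 * norm in the first.  A stand-alone sketch (demo_givens is an assumed name):
 */
#include <math.h>
#include <stdio.h>

static void demo_givens(double x, double y, double *c, double *s)
{
    double norm = sqrt(x * x + y * y);
    if (norm == 0.0) { *c = 1.0;      *s = 0.0; }   /* identity rotation */
    else             { *c = x / norm; *s = y / norm; }
}

int main(void)
{
    double c, s, x = 3.0, y = 4.0;
    demo_givens(x, y, &c, &s);
    /* apply the rotation exactly as rot_vec() does for components i and k */
    double xr =  c * x + s * y;     /* becomes the norm, 5 */
    double yr = -s * x + c * y;     /* becomes 0           */
    printf("c=%g s=%g -> (%g, %g)\n", c, s, xr, yr);
    return 0;
}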
-1,153 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - - -/* - File containing routines for determining Hessenberg - factorisations. -*/ - -static char rcsid[] = "hessen.c,v 1.1 1997/12/04 17:55:23 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" - - - -/* Hfactor -- compute Hessenberg factorisation in compact form. - -- factorisation performed in situ - -- for details of the compact form see QRfactor.c and matrix2.doc */ -MAT *Hfactor(A, diag, beta) -MAT *A; -VEC *diag, *beta; -{ - static VEC *tmp1 = VNULL; - int k, limit; - - if ( ! A || ! diag || ! beta ) - error(E_NULL,"Hfactor"); - if ( diag->dim < A->m - 1 || beta->dim < A->m - 1 ) - error(E_SIZES,"Hfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"Hfactor"); - limit = A->m - 1; - - tmp1 = v_resize(tmp1,A->m); - MEM_STAT_REG(tmp1,TYPE_VEC); - - for ( k = 0; k < limit; k++ ) - { - get_col(A,(u_int)k,tmp1); - /* printf("the %d'th column = "); v_output(tmp1); */ - hhvec(tmp1,k+1,&beta->ve[k],tmp1,&A->me[k+1][k]); - /* diag->ve[k] = tmp1->ve[k+1]; */ - v_set_val(diag,k,v_entry(tmp1,k+1)); - /* printf("H/h vector = "); v_output(tmp1); */ - /* printf("from the %d'th entry\n",k+1); */ - /* printf("beta = %g\n",beta->ve[k]); */ - - /* hhtrcols(A,k+1,k+1,tmp1,beta->ve[k]); */ - /* hhtrrows(A,0 ,k+1,tmp1,beta->ve[k]); */ - hhtrcols(A,k+1,k+1,tmp1,v_entry(beta,k)); - hhtrrows(A,0 ,k+1,tmp1,v_entry(beta,k)); - /* printf("A = "); m_output(A); */ - } - - return (A); -} - -/* makeHQ -- construct the Hessenberg orthogonalising matrix Q; - -- i.e. 
Hess M = Q.M.Q' */ -MAT *makeHQ(H, diag, beta, Qout) -MAT *H, *Qout; -VEC *diag, *beta; -{ - int i, j, limit; - static VEC *tmp1 = VNULL, *tmp2 = VNULL; - - if ( H==(MAT *)NULL || diag==(VEC *)NULL || beta==(VEC *)NULL ) - error(E_NULL,"makeHQ"); - limit = H->m - 1; - if ( diag->dim < limit || beta->dim < limit ) - error(E_SIZES,"makeHQ"); - if ( H->m != H->n ) - error(E_SQUARE,"makeHQ"); - Qout = m_resize(Qout,H->m,H->m); - - tmp1 = v_resize(tmp1,H->m); - tmp2 = v_resize(tmp2,H->m); - MEM_STAT_REG(tmp1,TYPE_VEC); - MEM_STAT_REG(tmp2,TYPE_VEC); - - for ( i = 0; i < H->m; i++ ) - { - /* tmp1 = i'th basis vector */ - for ( j = 0; j < H->m; j++ ) - /* tmp1->ve[j] = 0.0; */ - v_set_val(tmp1,j,0.0); - /* tmp1->ve[i] = 1.0; */ - v_set_val(tmp1,i,1.0); - - /* apply H/h transforms in reverse order */ - for ( j = limit-1; j >= 0; j-- ) - { - get_col(H,(u_int)j,tmp2); - /* tmp2->ve[j+1] = diag->ve[j]; */ - v_set_val(tmp2,j+1,v_entry(diag,j)); - hhtrvec(tmp2,beta->ve[j],j+1,tmp1,tmp1); - } - - /* insert into Qout */ - set_col(Qout,(u_int)i,tmp1); - } - - return (Qout); -} - -/* makeH -- construct actual Hessenberg matrix */ -MAT *makeH(H,Hout) -MAT *H, *Hout; -{ - int i, j, limit; - - if ( H==(MAT *)NULL ) - error(E_NULL,"makeH"); - if ( H->m != H->n ) - error(E_SQUARE,"makeH"); - Hout = m_resize(Hout,H->m,H->m); - Hout = m_copy(H,Hout); - - limit = H->m; - for ( i = 1; i < limit; i++ ) - for ( j = 0; j < i-1; j++ ) - /* Hout->me[i][j] = 0.0;*/ - m_set_val(Hout,i,j,0.0); - - return (Hout); -} - diff --git a/src/mesch/hsehldr.c b/src/mesch/hsehldr.c deleted file mode 100755 index 81302dc4be..0000000000 --- a/src/mesch/hsehldr.c +++ /dev/null @@ -1,179 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Files for matrix computations - - Householder transformation file. Contains routines for calculating - householder transformations, applying them to vectors and matrices - by both row & column. -*/ - -/* hsehldr.c 1.3 10/8/87 */ -static char rcsid[] = "hsehldr.c,v 1.1 1997/12/04 17:55:24 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -/* hhvec -- calulates Householder vector to eliminate all entries after the - i0 entry of the vector vec. It is returned as out. 
May be in-situ */ -VEC *hhvec(vec,i0,beta,out,newval) -VEC *vec,*out; -u_int i0; -Real *beta,*newval; -{ - Real norm; - - out = _v_copy(vec,out,i0); - norm = sqrt(_in_prod(out,out,i0)); - if ( norm <= 0.0 ) - { - *beta = 0.0; - return (out); - } - *beta = 1.0/(norm * (norm+fabs(out->ve[i0]))); - if ( out->ve[i0] > 0.0 ) - *newval = -norm; - else - *newval = norm; - out->ve[i0] -= *newval; - - return (out); -} - -/* hhtrvec -- apply Householder transformation to vector -- may be in-situ */ -VEC *hhtrvec(hh,beta,i0,in,out) -VEC *hh,*in,*out; /* hh = Householder vector */ -u_int i0; -double beta; -{ - Real scale; - /* u_int i; */ - - if ( hh==(VEC *)NULL || in==(VEC *)NULL ) - error(E_NULL,"hhtrvec"); - if ( in->dim != hh->dim ) - error(E_SIZES,"hhtrvec"); - if ( i0 > in->dim ) - error(E_BOUNDS,"hhtrvec"); - - scale = beta*_in_prod(hh,in,i0); - out = v_copy(in,out); - __mltadd__(&(out->ve[i0]),&(hh->ve[i0]),-scale,(int)(in->dim-i0)); - /************************************************************ - for ( i=i0; idim; i++ ) - out->ve[i] = in->ve[i] - scale*hh->ve[i]; - ************************************************************/ - - return (out); -} - -/* hhtrrows -- transform a matrix by a Householder vector by rows - starting at row i0 from column j0 -- in-situ */ -MAT *hhtrrows(M,i0,j0,hh,beta) -MAT *M; -u_int i0, j0; -VEC *hh; -double beta; -{ - Real ip, scale; - int i /*, j */; - - if ( M==(MAT *)NULL || hh==(VEC *)NULL ) - error(E_NULL,"hhtrrows"); - if ( M->n != hh->dim ) - error(E_RANGE,"hhtrrows"); - if ( i0 > M->m || j0 > M->n ) - error(E_BOUNDS,"hhtrrows"); - - if ( beta == 0.0 ) return (M); - - /* for each row ... */ - for ( i = i0; i < M->m; i++ ) - { /* compute inner product */ - ip = __ip__(&(M->me[i][j0]),&(hh->ve[j0]),(int)(M->n-j0)); - /************************************************** - ip = 0.0; - for ( j = j0; j < M->n; j++ ) - ip += M->me[i][j]*hh->ve[j]; - **************************************************/ - scale = beta*ip; - if ( scale == 0.0 ) - continue; - - /* do operation */ - __mltadd__(&(M->me[i][j0]),&(hh->ve[j0]),-scale, - (int)(M->n-j0)); - /************************************************** - for ( j = j0; j < M->n; j++ ) - M->me[i][j] -= scale*hh->ve[j]; - **************************************************/ - } - - return (M); -} - - -/* hhtrcols -- transform a matrix by a Householder vector by columns - starting at row i0 from column j0 -- in-situ */ -MAT *hhtrcols(M,i0,j0,hh,beta) -MAT *M; -u_int i0, j0; -VEC *hh; -double beta; -{ - /* Real ip, scale; */ - int i /*, k */; - static VEC *w = VNULL; - - if ( M==(MAT *)NULL || hh==(VEC *)NULL ) - error(E_NULL,"hhtrcols"); - if ( M->m != hh->dim ) - error(E_SIZES,"hhtrcols"); - if ( i0 > M->m || j0 > M->n ) - error(E_BOUNDS,"hhtrcols"); - - if ( beta == 0.0 ) return (M); - - w = v_resize(w,M->n); - MEM_STAT_REG(w,TYPE_VEC); - v_zero(w); - - for ( i = i0; i < M->m; i++ ) - if ( hh->ve[i] != 0.0 ) - __mltadd__(&(w->ve[j0]),&(M->me[i][j0]),hh->ve[i], - (int)(M->n-j0)); - for ( i = i0; i < M->m; i++ ) - if ( hh->ve[i] != 0.0 ) - __mltadd__(&(M->me[i][j0]),&(w->ve[j0]),-beta*hh->ve[i], - (int)(M->n-j0)); - return (M); -} - diff --git a/src/mesch/init.c b/src/mesch/init.c deleted file mode 100755 index 9000eb0fd0..0000000000 --- a/src/mesch/init.c +++ /dev/null @@ -1,299 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. 
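/*
 * Editorial note on hhvec()/hhtrvec() above: the Householder vector v and scalar
 * beta are built so that x - beta*(v'x)*v is zero below its leading entry.  Below
 * is a minimal stand-alone sketch for the i0 = 0 case, following the same sign
 * convention as the deleted code (newval = -sign(x[0])*||x||); the demo_* names
 * are assumptions, not the deleted API.
 */
#include <math.h>
#include <stdio.h>

/* Build v (a modified copy of x) and beta; return the new leading value. */
static double demo_hhvec(int n, const double *x, double *v, double *beta)
{
    double norm = 0.0, newval;
    int i;
    for (i = 0; i < n; i++) { v[i] = x[i]; norm += x[i] * x[i]; }
    norm = sqrt(norm);
    if (norm == 0.0) { *beta = 0.0; return 0.0; }
    *beta = 1.0 / (norm * (norm + fabs(v[0])));
    newval = (v[0] > 0.0) ? -norm : norm;
    v[0] -= newval;
    return newval;
}

/* y <- x - beta*(v'x)*v, i.e. apply the Householder reflection to x */
static void demo_hhapply(int n, const double *v, double beta,
                         const double *x, double *y)
{
    double scale = 0.0;
    int i;
    for (i = 0; i < n; i++) scale += v[i] * x[i];
    scale *= beta;
    for (i = 0; i < n; i++) y[i] = x[i] - scale * v[i];
}

int main(void)
{
    double x[3] = {3.0, 0.0, 4.0}, v[3], y[3], beta;
    double newval = demo_hhvec(3, x, v, &beta);
    demo_hhapply(3, v, beta, x, y);
    /* y should be (newval, 0, 0) with newval = -5 for this x */
    printf("newval=%g  y = (%g, %g, %g)\n", newval, y[0], y[1], y[2]);
    return 0;
}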
-** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This is a file of routines for zero-ing, and initialising - vectors, matrices and permutations. - This is to be included in the matrix.a library -*/ - -static char rcsid[] = "init.c,v 1.1 1997/12/04 17:55:25 hines Exp"; - -#include -#include "matrix.h" - -/* v_zero -- zero the vector x */ -VEC *v_zero(x) -VEC *x; -{ - if ( x == VNULL ) - error(E_NULL,"v_zero"); - - __zero__(x->ve,x->dim); - /* for ( i = 0; i < x->dim; i++ ) - x->ve[i] = 0.0; */ - - return x; -} - - -/* iv_zero -- zero the vector ix */ -IVEC *iv_zero(ix) -IVEC *ix; -{ - int i; - - if ( ix == IVNULL ) - error(E_NULL,"iv_zero"); - - for ( i = 0; i < ix->dim; i++ ) - ix->ive[i] = 0; - - return ix; -} - - -/* m_zero -- zero the matrix A */ -MAT *m_zero(A) -MAT *A; -{ - int i, A_m, A_n; - Real **A_me; - - if ( A == MNULL ) - error(E_NULL,"m_zero"); - - A_m = A->m; A_n = A->n; A_me = A->me; - for ( i = 0; i < A_m; i++ ) - __zero__(A_me[i],A_n); - /* for ( j = 0; j < A_n; j++ ) - A_me[i][j] = 0.0; */ - - return A; -} - -/* mat_id -- set A to being closest to identity matrix as possible - -- i.e. A[i][j] == 1 if i == j and 0 otherwise */ -MAT *m_ident(A) -MAT *A; -{ - int i, size; - - if ( A == MNULL ) - error(E_NULL,"m_ident"); - - m_zero(A); - size = min(A->m,A->n); - for ( i = 0; i < size; i++ ) - A->me[i][i] = 1.0; - - return A; -} - -/* px_ident -- set px to identity permutation */ -PERM *px_ident(px) -PERM *px; -{ - int i, px_size; - u_int *px_pe; - - if ( px == PNULL ) - error(E_NULL,"px_ident"); - - px_size = px->size; px_pe = px->pe; - for ( i = 0; i < px_size; i++ ) - px_pe[i] = i; - - return px; -} - -/* Pseudo random number generator data structures */ -/* Knuth's lagged Fibonacci-based generator: See "Seminumerical Algorithms: - The Art of Computer Programming" sections 3.2-3.3 */ - -#ifdef ANSI_C -#ifndef LONG_MAX -#include -#endif -#endif - -#ifdef LONG_MAX -#define MODULUS LONG_MAX -#else -#define MODULUS 1000000000L /* assuming long's at least 32 bits long */ -#endif -#define MZ 0L - -static long mrand_list[56]; -static int started = FALSE; -static int inext = 0, inextp = 31; - - -/* mrand -- pseudo-random number generator */ -#ifdef ANSI_C -double mrand(void) -#else -double mrand() -#endif -{ - long lval; - static Real factor = 1.0/((Real)MODULUS); - - if ( ! started ) - smrand(3127); - - inext = (inext >= 54) ? 0 : inext+1; - inextp = (inextp >= 54) ? 
0 : inextp+1; - - lval = mrand_list[inext]-mrand_list[inextp]; - if ( lval < 0L ) - lval += MODULUS; - mrand_list[inext] = lval; - - return (double)lval*factor; -} - -/* mrandlist -- fills the array a[] with len random numbers */ -void mrandlist(a, len) -Real a[]; -int len; -{ - int i; - long lval; - static Real factor = 1.0/((Real)MODULUS); - - if ( ! started ) - smrand(3127); - - for ( i = 0; i < len; i++ ) - { - inext = (inext >= 54) ? 0 : inext+1; - inextp = (inextp >= 54) ? 0 : inextp+1; - - lval = mrand_list[inext]-mrand_list[inextp]; - if ( lval < 0L ) - lval += MODULUS; - mrand_list[inext] = lval; - - a[i] = (Real)lval*factor; - } -} - - -/* smrand -- set seed for mrand() */ -void smrand(seed) -int seed; -{ - int i; - - mrand_list[0] = (123413*seed) % MODULUS; - for ( i = 1; i < 55; i++ ) - mrand_list[i] = (123413*mrand_list[i-1]) % MODULUS; - - started = TRUE; - - /* run mrand() through the list sufficient times to - thoroughly randomise the array */ - for ( i = 0; i < 55*55; i++ ) - mrand(); -} -#undef MODULUS -#undef MZ -#undef FAC - -/* v_rand -- initialises x to be a random vector, components - independently & uniformly ditributed between 0 and 1 */ -VEC *v_rand(x) -VEC *x; -{ - /* int i; */ - - if ( ! x ) - error(E_NULL,"v_rand"); - - /* for ( i = 0; i < x->dim; i++ ) */ - /* x->ve[i] = rand()/((Real)MAX_RAND); */ - /* x->ve[i] = mrand(); */ - mrandlist(x->ve,x->dim); - - return x; -} - -/* m_rand -- initialises A to be a random vector, components - independently & uniformly distributed between 0 and 1 */ -MAT *m_rand(A) -MAT *A; -{ - int i /* , j */; - - if ( ! A ) - error(E_NULL,"m_rand"); - - for ( i = 0; i < A->m; i++ ) - /* for ( j = 0; j < A->n; j++ ) */ - /* A->me[i][j] = rand()/((Real)MAX_RAND); */ - /* A->me[i][j] = mrand(); */ - mrandlist(A->me[i],A->n); - - return A; -} - -/* v_ones -- fills x with one's */ -VEC *v_ones(x) -VEC *x; -{ - int i; - - if ( ! x ) - error(E_NULL,"v_ones"); - - for ( i = 0; i < x->dim; i++ ) - x->ve[i] = 1.0; - - return x; -} - -/* m_ones -- fills matrix with one's */ -MAT *m_ones(A) -MAT *A; -{ - int i, j; - - if ( ! A ) - error(E_NULL,"m_ones"); - - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < A->n; j++ ) - A->me[i][j] = 1.0; - - return A; -} - -/* v_count -- initialises x so that x->ve[i] == i */ -VEC *v_count(x) -VEC *x; -{ - int i; - - if ( ! x ) - error(E_NULL,"v_count"); - - for ( i = 0; i < x->dim; i++ ) - x->ve[i] = (Real)i; - - return x; -} diff --git a/src/mesch/iter.h b/src/mesch/iter.h deleted file mode 100755 index b86d049326..0000000000 --- a/src/mesch/iter.h +++ /dev/null @@ -1,248 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
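/*
 * Editorial note on mrand()/smrand() above: this is a subtractive lagged-Fibonacci
 * generator in the style of Knuth's ran3 (lags 55 and 24, realised by the two
 * cursors inext/inextp).  A compact stand-alone sketch of the same scheme follows.
 * The seeding multiplier and the 55*55 warm-up mirror the deleted code; the demo_*
 * names are assumptions, and a fixed 10^9 modulus (the deleted code's fallback when
 * LONG_MAX is unavailable) is used with 64-bit intermediates so the seeding
 * arithmetic cannot overflow.
 */
#include <stdio.h>

#define DEMO_MOD 1000000000L

static long demo_state[55];
static int demo_inext = 0, demo_inextp = 31;

static double demo_mrand(void)
{
    long lval;
    demo_inext  = (demo_inext  >= 54) ? 0 : demo_inext + 1;
    demo_inextp = (demo_inextp >= 54) ? 0 : demo_inextp + 1;
    lval = demo_state[demo_inext] - demo_state[demo_inextp];
    if (lval < 0L)
        lval += DEMO_MOD;
    demo_state[demo_inext] = lval;            /* feed the difference back in */
    return (double)lval / (double)DEMO_MOD;   /* uniform in [0, 1)           */
}

static void demo_smrand(int seed)
{
    int i;
    demo_state[0] = (long)((123413LL * seed) % DEMO_MOD);
    for (i = 1; i < 55; i++)
        demo_state[i] = (long)((123413LL * demo_state[i - 1]) % DEMO_MOD);
    for (i = 0; i < 55 * 55; i++)             /* warm up, as smrand() does */
        (void)demo_mrand();
}

int main(void)
{
    demo_smrand(3127);
    printf("%f %f %f\n", demo_mrand(), demo_mrand(), demo_mrand());
    return 0;
}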
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* iter.h 14/09/93 */ - -/* - - Structures for iterative methods - -*/ - -#ifndef ITERHH - -#define ITERHH - -/* RCS id: iter.h,v 1.1 1997/11/03 16:15:49 hines Exp */ - - -#include "sparse.h" - - -/* basic structure for iterative methods */ - -/* type Fun_Ax for functions to get y = A*x */ -#ifdef ANSI_C -typedef VEC *(*Fun_Ax)(void *,VEC *,VEC *); -#else -typedef VEC *(*Fun_Ax)(); -#endif - - -/* type ITER */ -typedef struct Iter_data { - int shared_x; /* if TRUE then x is shared and it will not be free'd */ - int shared_b; /* if TRUE then b is shared and it will not be free'd */ - unsigned k; /* no. of direction (search) vectors; =0 - none */ - int limit; /* upper bound on the no. of iter. steps */ - int steps; /* no. of iter. steps done */ - Real eps; /* accuracy required */ - - VEC *x; /* input: initial guess; - output: approximate solution */ - VEC *b; /* right hand side of the equation A*x = b */ - - Fun_Ax Ax; /* function computing y = A*x */ - void *A_par; /* parameters for Ax */ - - Fun_Ax ATx; /* function computing y = A^T*x; - T = transpose */ - void *AT_par; /* parameters for ATx */ - - Fun_Ax Bx; /* function computing y = B*x; B - preconditioner */ - void *B_par; /* parameters for Bx */ - -#ifdef ANSI_C - -#ifdef PROTOTYPES_IN_STRUCT - void (*info)(struct Iter_data *, double, VEC *,VEC *); - /* function giving some information for a user; - nres - a norm of a residual res */ - - int (*stop_crit)(struct Iter_data *, double, VEC *,VEC *); - /* stopping criterion: - nres - a norm of res; - res - residual; - if returned value == TRUE then stop; - if returned value == FALSE then continue; */ -#else - void (*info)(); - int (*stop_crit)(); -#endif /* PROTOTYPES_IN_STRUCT */ - -#else - - void (*info)(); - /* function giving some information for a user */ - - int (*stop_crit)(); - /* stopping criterion: - if returned value == TRUE then stop; - if returned value == FALSE then continue; */ - -#endif /* ANSI_C */ - - Real init_res; /* the norm of the initial residual */ - -} ITER; - - -#define INULL (ITER *)NULL - -/* type Fun_info */ -#ifdef ANSI_C -typedef void (*Fun_info)(ITER *, double, VEC *,VEC *); -#else -typedef void (*Fun_info)(); -#endif - -/* type Fun_stp_crt */ -#ifdef ANSI_C -typedef int (*Fun_stp_crt)(ITER *, double, VEC *,VEC *); -#else -typedef int (*Fun_stp_crt)(); -#endif - - - -/* macros */ -/* default values */ - -#define ITER_LIMIT_DEF 1000 -#define ITER_EPS_DEF 1e-6 - -/* other macros */ - -/* set ip->Ax=fun and ip->A_par=fun_par */ -#define iter_Ax(ip,fun,fun_par) \ - (ip->Ax=(Fun_Ax)(fun),ip->A_par=(void *)(fun_par),0) -#define iter_ATx(ip,fun,fun_par) \ - (ip->ATx=(Fun_Ax)(fun),ip->AT_par=(void *)(fun_par),0) -#define iter_Bx(ip,fun,fun_par) \ - (ip->Bx=(Fun_Ax)(fun),ip->B_par=(void *)(fun_par),0) - -/* save free macro */ -#define ITER_FREE(ip) (iter_free(ip), (ip)=(ITER *)NULL) - - -/* prototypes from iter0.c */ - -#ifdef ANSI_C -/* standard information */ -void iter_std_info(ITER *ip,double nres,VEC *res,VEC *Bres); -/* standard stopping criterion */ -int iter_std_stop_crit(ITER *ip, double nres, VEC *res,VEC *Bres); - -/* get, resize and free ITER variable */ -ITER *iter_get(int lenb, int lenx); -ITER *iter_resize(ITER *ip,int lenb,int lenx); -int iter_free(ITER *ip); - -void iter_dump(FILE *fp,ITER *ip); 
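/*
 * Editorial note on the ITER structure above: the solvers are matrix-free in the
 * sense that they only ever call ip->Ax(ip->A_par, x, y), so anything that can
 * apply A to a vector plugs in through the function-pointer plus opaque-parameter
 * pair.  The stand-alone sketch below shows that pattern with simplified types;
 * demo_op, demo_vec and the 2x2 example matrix are assumptions, not the deleted
 * API.
 */
#include <stdio.h>

typedef struct { int n; double *v; } demo_vec;
typedef demo_vec *(*demo_fun_ax)(void *par, demo_vec *x, demo_vec *y);

typedef struct {
    demo_fun_ax Ax;   /* how to apply the operator         */
    void       *par;  /* whatever state the operator needs */
} demo_op;

/* dense mat-vec: y = A*x, with A stored row-major in par */
static demo_vec *dense_mv(void *par, demo_vec *x, demo_vec *y)
{
    const double *A = (const double *)par;
    int i, j;
    for (i = 0; i < y->n; i++) {
        y->v[i] = 0.0;
        for (j = 0; j < x->n; j++)
            y->v[i] += A[i * x->n + j] * x->v[j];
    }
    return y;
}

int main(void)
{
    double A[4] = {2, 1, 1, 3};
    double xb[2] = {1, 1}, yb[2];
    demo_vec x = {2, xb}, y = {2, yb};
    demo_op op = {dense_mv, A};          /* wire operator + parameters      */
    op.Ax(op.par, &x, &y);               /* what a solver's inner loop does */
    printf("A*x = (%g, %g)\n", y.v[0], y.v[1]);   /* (3, 4) */
    return 0;
}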
- -/* copy ip1 to ip2 copying also elements of x and b */ -ITER *iter_copy(ITER *ip1, ITER *ip2); -/* copy ip1 to ip2 without copying elements of x and b */ -ITER *iter_copy2(ITER *ip1,ITER *ip2); - -/* functions for generating sparse matrices with random elements */ -SPMAT *iter_gen_sym(int n, int nrow); -SPMAT *iter_gen_nonsym(int m,int n,int nrow,double diag); -SPMAT *iter_gen_nonsym_posdef(int n,int nrow); - -#else - -void iter_std_info(); -int iter_std_stop_crit(); -ITER *iter_get(); -int iter_free(); -ITER *iter_resize(); -void iter_dump(); -ITER *iter_copy(); -ITER *iter_copy2(); -SPMAT *iter_gen_sym(); -SPMAT *iter_gen_nonsym(); -SPMAT *iter_gen_nonsym_posdef(); - -#endif - -/* prototypes from iter.c */ - -/* different iterative procedures */ -#ifdef ANSI_C -VEC *iter_cg(ITER *ip); -VEC *iter_cg1(ITER *ip); -VEC *iter_spcg(SPMAT *A,SPMAT *LLT,VEC *b,double eps,VEC *x,int limit, - int *steps); -VEC *iter_cgs(ITER *ip,VEC *r0); -VEC *iter_spcgs(SPMAT *A,SPMAT *B,VEC *b,VEC *r0,double eps,VEC *x, - int limit, int *steps); -VEC *iter_lsqr(ITER *ip); -VEC *iter_splsqr(SPMAT *A,VEC *b,double tol,VEC *x, - int limit,int *steps); -VEC *iter_gmres(ITER *ip); -VEC *iter_spgmres(SPMAT *A,SPMAT *B,VEC *b,double tol,VEC *x,int k, - int limit, int *steps); -MAT *iter_arnoldi_iref(ITER *ip,Real *h,MAT *Q,MAT *H); -MAT *iter_arnoldi(ITER *ip,Real *h,MAT *Q,MAT *H); -MAT *iter_sparnoldi(SPMAT *A,VEC *x0,int k,Real *h,MAT *Q,MAT *H); -VEC *iter_mgcr(ITER *ip); -VEC *iter_spmgcr(SPMAT *A,SPMAT *B,VEC *b,double tol,VEC *x,int k, - int limit, int *steps); -void iter_lanczos(ITER *ip,VEC *a,VEC *b,Real *beta2,MAT *Q); -void iter_splanczos(SPMAT *A,int m,VEC *x0,VEC *a,VEC *b,Real *beta2, - MAT *Q); -VEC *iter_lanczos2(ITER *ip,VEC *evals,VEC *err_est); -VEC *iter_splanczos2(SPMAT *A,int m,VEC *x0,VEC *evals,VEC *err_est); -VEC *iter_cgne(ITER *ip); -VEC *iter_spcgne(SPMAT *A,SPMAT *B,VEC *b,double eps,VEC *x, - int limit,int *steps); -#else -VEC *iter_cg(); -VEC *iter_cg1(); -VEC *iter_spcg(); -VEC *iter_cgs(); -VEC *iter_spcgs(); -VEC *iter_lsqr(); -VEC *iter_splsqr(); -VEC *iter_gmres(); -VEC *iter_spgmres(); -MAT *iter_arnoldi_iref(); -MAT *iter_arnoldi(); -MAT *iter_sparnoldi(); -VEC *iter_mgcr(); -VEC *iter_spmgcr(); -void iter_lanczos(); -void iter_splanczos(); -VEC *iter_lanczos2(); -VEC *iter_splanczos2(); -VEC *iter_cgne(); -VEC *iter_spcgne(); - -#endif - - -#endif /* ITERHH */ diff --git a/src/mesch/iter0.c b/src/mesch/iter0.c deleted file mode 100755 index cf0521b002..0000000000 --- a/src/mesch/iter0.c +++ /dev/null @@ -1,382 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* iter0.c 14/09/93 */ - -/* ITERATIVE METHODS - service functions */ - -/* functions for creating and releasing ITER structures; - for memory information; - for getting some values from an ITER variable; - for changing values in an ITER variable; - see also iter.c -*/ - -#include -#include "iter.h" -#include - - -static char rcsid[] = "iter0.c,v 1.1 1997/12/04 17:55:26 hines Exp"; - - -/* standard functions */ - -/* standard information */ -void iter_std_info(ip,nres,res,Bres) -ITER *ip; -double nres; -VEC *res, *Bres; -{ - if (nres >= 0.0) - printf(" %d. residual = %g\n",ip->steps,nres); - else - printf(" %d. residual = %g (WARNING !!! should be >= 0) \n", - ip->steps,nres); -} - -/* standard stopping criterion */ -int iter_std_stop_crit(ip, nres, res, Bres) -ITER *ip; -double nres; -VEC *res, *Bres; -{ - /* standard stopping criterium */ - if (nres <= ip->init_res*ip->eps) return TRUE; - return FALSE; -} - - -/* iter_get - create a new structure pointing to ITER */ - -ITER *iter_get(lenb, lenx) -int lenb, lenx; -{ - ITER *ip; - - if ((ip = NEW(ITER)) == (ITER *) NULL) - error(E_MEM,"iter_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ITER,0,sizeof(ITER)); - mem_numvar(TYPE_ITER,1); - } - - /* default values */ - - ip->shared_x = FALSE; - ip->shared_b = FALSE; - ip->k = 0; - ip->limit = ITER_LIMIT_DEF; - ip->eps = ITER_EPS_DEF; - ip->steps = 0; - - if (lenb > 0) ip->b = v_get(lenb); - else ip->b = (VEC *)NULL; - - if (lenx > 0) ip->x = v_get(lenx); - else ip->x = (VEC *)NULL; - - ip->Ax = (Fun_Ax) NULL; - ip->A_par = NULL; - ip->ATx = (Fun_Ax) NULL; - ip->AT_par = NULL; - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - ip->info = iter_std_info; - ip->stop_crit = iter_std_stop_crit; - ip->init_res = 0.0; - - return ip; -} - - -/* iter_free - release memory */ -int iter_free(ip) -ITER *ip; -{ - if (ip == (ITER *)NULL) return -1; - - if (mem_info_is_on()) { - mem_bytes(TYPE_ITER,sizeof(ITER),0); - mem_numvar(TYPE_ITER,-1); - } - - if ( !ip->shared_x && ip->x != NULL ) v_free(ip->x); - if ( !ip->shared_b && ip->b != NULL ) v_free(ip->b); - - free((char *)ip); - - return 0; -} - -ITER *iter_resize(ip,new_lenb,new_lenx) -ITER *ip; -int new_lenb, new_lenx; -{ - VEC *old; - - if ( ip == (ITER *) NULL) - error(E_NULL,"iter_resize"); - - old = ip->x; - ip->x = v_resize(ip->x,new_lenx); - if ( ip->shared_x && old != ip->x ) - warning(WARN_SHARED_VEC,"iter_resize"); - old = ip->b; - ip->b = v_resize(ip->b,new_lenb); - if ( ip->shared_b && old != ip->b ) - warning(WARN_SHARED_VEC,"iter_resize"); - - return ip; -} - - -/* print out ip structure - for diagnostic purposes mainly */ -void iter_dump(fp,ip) -ITER *ip; -FILE *fp; -{ - if (ip == NULL) { - fprintf(fp," ITER structure: NULL\n"); - return; - } - - fprintf(fp,"\n ITER structure:\n"); - fprintf(fp," ip->shared_x = %s, ip->shared_b = %s\n", - (ip->shared_x ? "TRUE" : "FALSE"), - (ip->shared_b ? 
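/*
 * Editorial note on iter_std_stop_crit() above: the default convergence test is
 * relative, stopping once the current residual norm drops below eps times the
 * residual norm recorded at step 0.  A tiny stand-alone sketch of that test
 * (demo_stop is an assumed name):
 */
#include <stdio.h>

static int demo_stop(double nres, double init_res, double eps)
{
    return nres <= init_res * eps;   /* nonzero -> stop, zero -> keep iterating */
}

int main(void)
{
    double init_res = 10.0, eps = 1e-6;
    printf("%d\n", demo_stop(1.0e-3, init_res, eps));   /* 0: keep going */
    printf("%d\n", demo_stop(5.0e-6, init_res, eps));   /* 1: converged  */
    return 0;
}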
"TRUE" : "FALSE") ); - fprintf(fp," ip->k = %d, ip->limit = %d, ip->steps = %d, ip->eps = %g\n", - ip->k,ip->limit,ip->steps,ip->eps); - fprintf(fp," ip->x = 0x%p, ip->b = 0x%p\n",ip->x,ip->b); - fprintf(fp," ip->Ax = 0x%p, ip->A_par = 0x%p\n",ip->Ax,ip->A_par); - fprintf(fp," ip->ATx = 0x%p, ip->AT_par = 0x%p\n",ip->ATx,ip->AT_par); - fprintf(fp," ip->Bx = 0x%p, ip->B_par = 0x%p\n",ip->Bx,ip->B_par); - fprintf(fp," ip->info = 0x%p, ip->stop_crit = 0x%p, ip->init_res = %g\n", - ip->info,ip->stop_crit,ip->init_res); - fprintf(fp,"\n"); - -} - - -/* copy the structure ip1 to ip2 preserving vectors x and b of ip2 - (vectors x and b in ip2 are the same before and after iter_copy2) - if ip2 == NULL then a new structure is created with x and b being NULL - and other members are taken from ip1 -*/ -ITER *iter_copy2(ip1,ip2) -ITER *ip1, *ip2; -{ - VEC *x, *b; - int shx, shb; - - if (ip1 == (ITER *)NULL) - error(E_NULL,"iter_copy2"); - - if (ip2 == (ITER *)NULL) { - if ((ip2 = NEW(ITER)) == (ITER *) NULL) - error(E_MEM,"iter_copy2"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ITER,0,sizeof(ITER)); - mem_numvar(TYPE_ITER,1); - } - ip2->x = ip2->b = NULL; - ip2->shared_x = ip2->shared_x = FALSE; - } - - x = ip2->x; - b = ip2->b; - shb = ip2->shared_b; - shx = ip2->shared_x; - MEM_COPY(ip1,ip2,sizeof(ITER)); - ip2->x = x; - ip2->b = b; - ip2->shared_x = shx; - ip2->shared_b = shb; - - return ip2; -} - - -/* copy the structure ip1 to ip2 copying also the vectors x and b */ -ITER *iter_copy(ip1,ip2) -ITER *ip1, *ip2; -{ - VEC *x, *b; - - if (ip1 == (ITER *)NULL) - error(E_NULL,"iter_copy"); - - if (ip2 == (ITER *)NULL) { - if ((ip2 = NEW(ITER)) == (ITER *) NULL) - error(E_MEM,"iter_copy2"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ITER,0,sizeof(ITER)); - mem_numvar(TYPE_ITER,1); - } - } - - x = ip2->x; - b = ip2->b; - - MEM_COPY(ip1,ip2,sizeof(ITER)); - if (ip1->x) - ip2->x = v_copy(ip1->x,x); - if (ip1->b) - ip2->b = v_copy(ip1->b,b); - - ip2->shared_x = ip2->shared_b = FALSE; - - return ip2; -} - - -/*** functions to generate sparse matrices with random entries ***/ - - -/* iter_gen_sym -- generate symmetric positive definite - n x n matrix, - nrow - number of nonzero entries in a row - */ -SPMAT *iter_gen_sym(n,nrow) -int n, nrow; -{ - SPMAT *A; - VEC *u; - Real s1; - int i, j, k, k_max; - - if (nrow <= 1) nrow = 2; - /* nrow should be even */ - if ((nrow & 1)) nrow -= 1; - A = sp_get(n,n,nrow); - u = v_get(A->m); - v_zero(u); - for ( i = 0; i < A->m; i++ ) - { - k_max = ((rand() >> 8) % (nrow/2)); - for ( k = 0; k <= k_max; k++ ) - { - j = (rand() >> 8) % A->n; - s1 = mrand(); - sp_set_val(A,i,j,s1); - sp_set_val(A,j,i,s1); - u->ve[i] += fabs(s1); - u->ve[j] += fabs(s1); - } - } - /* ensure that A is positive definite */ - for ( i = 0; i < A->m; i++ ) - sp_set_val(A,i,i,u->ve[i] + 1.0); - - V_FREE(u); - return A; -} - - -/* iter_gen_nonsym -- generate non-symmetric m x n sparse matrix, m >= n - nrow - number of entries in a row; - diag - number which is put in diagonal entries and then permuted - (if diag is zero then 1.0 is there) -*/ -SPMAT *iter_gen_nonsym(m,n,nrow,diag) -int m, n, nrow; -double diag; -{ - SPMAT *A; - PERM *px; - int i, j, k, k_max; - Real s1; - - if (nrow <= 1) nrow = 2; - if (diag == 0.0) diag = 1.0; - A = sp_get(m,n,nrow); - px = px_get(n); - for ( i = 0; i < A->m; i++ ) - { - k_max = (rand() >> 8) % (nrow-1); - for ( k = 0; k <= k_max; k++ ) - { - j = (rand() >> 8) % A->n; - s1 = mrand(); - sp_set_val(A,i,j,-s1); - } - } - /* to make it likely that A is 
nonsingular, use pivot... */ - for ( i = 0; i < 2*A->n; i++ ) - { - j = (rand() >> 8) % A->n; - k = (rand() >> 8) % A->n; - px_transp(px,j,k); - } - for ( i = 0; i < A->n; i++ ) - sp_set_val(A,i,px->pe[i],diag); - - PX_FREE(px); - return A; -} - - -/* iter_gen_nonsym -- generate non-symmetric positive definite - n x n sparse matrix; - nrow - number of entries in a row -*/ -SPMAT *iter_gen_nonsym_posdef(n,nrow) -int n, nrow; -{ - SPMAT *A; - PERM *px; - VEC *u; - int i, j, k, k_max; - Real s1; - - if (nrow <= 1) nrow = 2; - A = sp_get(n,n,nrow); - px = px_get(n); - u = v_get(A->m); - v_zero(u); - for ( i = 0; i < A->m; i++ ) - { - k_max = (rand() >> 8) % (nrow-1); - for ( k = 0; k <= k_max; k++ ) - { - j = (rand() >> 8) % A->n; - s1 = mrand(); - sp_set_val(A,i,j,-s1); - u->ve[i] += fabs(s1); - } - } - /* ensure that A is positive definite */ - for ( i = 0; i < A->m; i++ ) - sp_set_val(A,i,i,u->ve[i] + 1.0); - - PX_FREE(px); - V_FREE(u); - return A; -} - - - diff --git a/src/mesch/iternsym.c b/src/mesch/iternsym.c deleted file mode 100755 index 0fa5d3e4ab..0000000000 --- a/src/mesch/iternsym.c +++ /dev/null @@ -1,1288 +0,0 @@ -#include <../../nrnconf.h> - - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
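/*
 * Editorial note on iter_gen_sym() above: positive definiteness of the random test
 * matrix is forced by making it symmetric and strictly diagonally dominant (each
 * diagonal entry is set to the sum of the absolute off-diagonal row entries plus
 * one).  A dense stand-alone sketch of that trick follows; the demo_* names, the
 * matrix size and the use of rand() are assumptions, not the deleted sparse API.
 */
#include <math.h>
#include <stdio.h>
#include <stdlib.h>

#define N 4

static void demo_gen_spd(double A[N][N])
{
    int i, j;
    for (i = 0; i < N; i++)
        for (j = 0; j < i; j++)                    /* random symmetric part */
            A[i][j] = A[j][i] = (double)rand() / RAND_MAX - 0.5;
    for (i = 0; i < N; i++) {
        double rowsum = 0.0;
        for (j = 0; j < N; j++)
            if (j != i)
                rowsum += fabs(A[i][j]);
        A[i][i] = rowsum + 1.0;                    /* strict diagonal dominance */
    }
}

int main(void)
{
    double A[N][N];
    int i, j;
    srand(3127);
    demo_gen_spd(A);
    for (i = 0; i < N; i++) {
        for (j = 0; j < N; j++)
            printf("%8.4f ", A[i][j]);
        putchar('\n');
    }
    return 0;
}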
-** -***************************************************************************/ - - -/* iter.c 17/09/93 */ - -/* - ITERATIVE METHODS - implementation of several iterative methods; - see also iter0.c -*/ - -#include -#include "matrix.h" -#include "matrix2.h" -#include "sparse.h" -#include "iter.h" -#include - -static char rcsid[] = "iternsym.c,v 1.1 1997/12/04 17:55:27 hines Exp"; - - -#ifdef ANSI_C -VEC *spCHsolve(SPMAT *,VEC *,VEC *); -#else -VEC *spCHsolve(); -#endif - - -/* - iter_cgs -- uses CGS to compute a solution x to A.x=b -*/ - -VEC *iter_cgs(ip,r0) -ITER *ip; -VEC *r0; -{ - static VEC *p = VNULL, *q = VNULL, *r = VNULL, *u = VNULL; - static VEC *v = VNULL, *z = VNULL; - VEC *tmp; - Real alpha, beta, nres, rho, old_rho, sigma, inner; - - if (ip == INULL) - error(E_NULL,"iter_cgs"); - if (!ip->Ax || !ip->b || !r0) - error(E_NULL,"iter_cgs"); - if ( ip->x == ip->b ) - error(E_INSITU,"iter_cgs"); - if (!ip->stop_crit) - error(E_NULL,"iter_cgs"); - if ( r0->dim != ip->b->dim ) - error(E_SIZES,"iter_cgs"); - - if ( ip->eps <= 0.0 ) ip->eps = MACHEPS; - - p = v_resize(p,ip->b->dim); - q = v_resize(q,ip->b->dim); - r = v_resize(r,ip->b->dim); - u = v_resize(u,ip->b->dim); - v = v_resize(v,ip->b->dim); - - MEM_STAT_REG(p,TYPE_VEC); - MEM_STAT_REG(q,TYPE_VEC); - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(v,TYPE_VEC); - - if (ip->Bx) { - z = v_resize(z,ip->b->dim); - MEM_STAT_REG(z,TYPE_VEC); - } - - if (ip->x != VNULL) { - if (ip->x->dim != ip->b->dim) - error(E_SIZES,"iter_cgs"); - ip->Ax(ip->A_par,ip->x,v); /* v = A*x */ - if (ip->Bx) { - v_sub(ip->b,v,v); /* v = b - A*x */ - (ip->Bx)(ip->B_par,v,r); /* r = B*(b-A*x) */ - } - else v_sub(ip->b,v,r); /* r = b-A*x */ - } - else { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); /* x == 0 */ - ip->shared_x = FALSE; - if (ip->Bx) (ip->Bx)(ip->B_par,ip->b,r); /* r = B*b */ - else v_copy(ip->b,r); /* r = b */ - } - - v_zero(p); - v_zero(q); - old_rho = 1.0; - - for (ip->steps = 0; ip->steps <= ip->limit; ip->steps++) { - - inner = in_prod(r,r); - nres = sqrt(fabs(inner)); - if (ip->steps == 0) ip->init_res = nres; - - if (ip->info) ip->info(ip,nres,r,VNULL); - if ( ip->stop_crit(ip,nres,r,VNULL) ) break; - - rho = in_prod(r0,r); - if ( old_rho == 0.0 ) - error(E_BREAKDOWN,"iter_cgs"); - beta = rho/old_rho; - v_mltadd(r,q,beta,u); - v_mltadd(q,p,beta,v); - v_mltadd(u,v,beta,p); - - (ip->Ax)(ip->A_par,p,q); - if (ip->Bx) { - (ip->Bx)(ip->B_par,q,z); - tmp = z; - } - else tmp = q; - - sigma = in_prod(r0,tmp); - if ( sigma == 0.0 ) - error(E_BREAKDOWN,"iter_cgs"); - alpha = rho/sigma; - v_mltadd(u,tmp,-alpha,q); - v_add(u,q,v); - - (ip->Ax)(ip->A_par,v,u); - if (ip->Bx) { - (ip->Bx)(ip->B_par,u,z); - tmp = z; - } - else tmp = u; - - v_mltadd(r,tmp,-alpha,r); - v_mltadd(ip->x,v,alpha,ip->x); - - old_rho = rho; - } - - return ip->x; -} - - - -/* iter_spcgs -- simple interface for SPMAT data structures - use always as follows: - x = iter_spcgs(A,B,b,r0,tol,x,limit,steps); - or - x = iter_spcgs(A,B,b,r0,tol,VNULL,limit,steps); - In the second case the solution vector is created. - If B is not NULL then it is a preconditioner. 
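As a concrete illustration of the calling convention just described, a minimal usage sketch (the tridiagonal test system, tolerance and iteration limit are arbitrary example values; passing b as r0 is the natural shadow residual for a zero initial guess, and VNULL asks the routine to allocate the solution vector):

#include <stdio.h>
#include "matrix.h"
#include "sparse.h"
#include "iter.h"

int main(void)
{
    int i, n = 5, steps = 0;
    SPMAT *A = sp_get(n, n, 3);        /* n x n sparse matrix, ~3 entries per row */
    VEC *b = v_get(n), *x;

    /* simple diagonally dominant tridiagonal test system */
    for (i = 0; i < n; i++) {
        sp_set_val(A, i, i, 4.0);
        if (i > 0)     sp_set_val(A, i, i - 1, -1.0);
        if (i < n - 1) sp_set_val(A, i, i + 1, -1.0);
        b->ve[i] = 1.0;
    }

    /* no preconditioner (B == NULL) */
    x = iter_spcgs(A, (SPMAT *) NULL, b, b, 1e-10, VNULL, 100, &steps);

    printf("CGS took %d steps, x[0] = %g\n", steps, x->ve[0]);

    V_FREE(x); V_FREE(b); sp_free(A);
    return 0;
}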
-*/ -VEC *iter_spcgs(A,B,b,r0,tol,x,limit,steps) -SPMAT *A, *B; -VEC *b, *r0, *x; -double tol; -int *steps,limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - if (B) { - ip->Bx = (Fun_Ax) sp_mv_mlt; - ip->B_par = (void *) B; - } - else { - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - } - ip->info = (Fun_info) NULL; - ip->limit = limit; - ip->b = b; - ip->eps = tol; - ip->x = x; - iter_cgs(ip,r0); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; - -} - -/* - Routine for performing LSQR -- the least squares QR algorithm - of Paige and Saunders: - "LSQR: an algorithm for sparse linear equations and - sparse least squares", ACM Trans. Math. Soft., v. 8 - pp. 43--71 (1982) - */ -/* lsqr -- sparse CG-like least squares routine: - -- finds min_x ||A.x-b||_2 using A defined through A & AT - -- returns x (if x != NULL) */ -VEC *iter_lsqr(ip) -ITER *ip; -{ - static VEC *u = VNULL, *v = VNULL, *w = VNULL, *tmp = VNULL; - Real alpha, beta, phi, phi_bar; - Real rho, rho_bar, rho_max, theta, nres; - Real s, c; /* for Givens' rotations */ - int m, n; - - if ( ! ip || ! ip->b || !ip->Ax || !ip->ATx ) - error(E_NULL,"iter_lsqr"); - if ( ip->x == ip->b ) - error(E_INSITU,"iter_lsqr"); - if (!ip->stop_crit || !ip->x) - error(E_NULL,"iter_lsqr"); - - if ( ip->eps <= 0.0 ) ip->eps = MACHEPS; - - m = ip->b->dim; - n = ip->x->dim; - - u = v_resize(u,(u_int)m); - v = v_resize(v,(u_int)n); - w = v_resize(w,(u_int)n); - tmp = v_resize(tmp,(u_int)n); - - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(v,TYPE_VEC); - MEM_STAT_REG(w,TYPE_VEC); - MEM_STAT_REG(tmp,TYPE_VEC); - - if (ip->x != VNULL) { - ip->Ax(ip->A_par,ip->x,u); /* u = A*x */ - v_sub(ip->b,u,u); /* u = b-A*x */ - } - else { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - v_copy(ip->b,u); /* u = b */ - } - - beta = v_norm2(u); - if ( beta == 0.0 ) return ip->x; - - sv_mlt(1.0/beta,u,u); - (ip->ATx)(ip->AT_par,u,v); - alpha = v_norm2(v); - if ( alpha == 0.0 ) return ip->x; - - sv_mlt(1.0/alpha,v,v); - v_copy(v,w); - phi_bar = beta; - rho_bar = alpha; - - rho_max = 1.0; - for (ip->steps = 0; ip->steps <= ip->limit; ip->steps++) { - - tmp = v_resize(tmp,m); - (ip->Ax)(ip->A_par,v,tmp); - - v_mltadd(tmp,u,-alpha,u); - beta = v_norm2(u); - sv_mlt(1.0/beta,u,u); - - tmp = v_resize(tmp,n); - (ip->ATx)(ip->AT_par,u,tmp); - v_mltadd(tmp,v,-beta,v); - alpha = v_norm2(v); - sv_mlt(1.0/alpha,v,v); - - rho = sqrt(rho_bar*rho_bar+beta*beta); - if ( rho > rho_max ) - rho_max = rho; - c = rho_bar/rho; - s = beta/rho; - theta = s*alpha; - rho_bar = -c*alpha; - phi = c*phi_bar; - phi_bar = s*phi_bar; - - /* update ip->x & w */ - if ( rho == 0.0 ) - error(E_BREAKDOWN,"iter_lsqr"); - v_mltadd(ip->x,w,phi/rho,ip->x); - v_mltadd(v,w,-theta/rho,w); - - nres = fabs(phi_bar*alpha*c)*rho_max; - - if (ip->info) ip->info(ip,nres,w,VNULL); - if (ip->steps == 0) ip->init_res = nres; - if ( ip->stop_crit(ip,nres,w,VNULL) ) break; - } - - return ip->x; -} - -/* iter_splsqr -- simple interface for SPMAT data structures */ -VEC *iter_splsqr(A,b,tol,x,limit,steps) -SPMAT *A; -VEC *b, *x; -double tol; -int *steps,limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - ip->ATx = (Fun_Ax) sp_vm_mlt; - ip->AT_par = (void *) A; - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - - ip->info = (Fun_info) NULL; - ip->limit = limit; - ip->b = b; - ip->eps = tol; - ip->x = x; - 
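iter_lsqr above eliminates the subdiagonal of the bidiagonal factor with plane (Givens) rotations, c = rho_bar/rho and s = beta/rho. For reference, a standalone sketch of that primitive in plain C (plane_rotation/plane_rotate are illustrative names, distinct from the library's own givens()/rot_vec() used by iter_gmres below):

#include <math.h>

/* Compute a plane rotation (c, s) with  c*a + s*b = r  and  -s*a + c*b = 0,
   where r = sqrt(a*a + b*b); LSQR applies it with a = rho_bar, b = beta.
   hypot() avoids overflow when forming the norm. */
void plane_rotation(double a, double b, double *c, double *s)
{
    double r = hypot(a, b);
    if (r == 0.0) { *c = 1.0; *s = 0.0; return; }
    *c = a / r;
    *s = b / r;
}

/* Apply the rotation to the pair (x, y) in place. */
void plane_rotate(double c, double s, double *x, double *y)
{
    double t = c * *x + s * *y;
    *y = -s * *x + c * *y;
    *x = t;
}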
iter_lsqr(ip); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; -} - - - -/* iter_arnoldi -- an implementation of the Arnoldi method; - iterative refinement is applied. -*/ -MAT *iter_arnoldi_iref(ip,h_rem,Q,H) -ITER *ip; -Real *h_rem; -MAT *Q, *H; -{ - static VEC *u=VNULL, *r=VNULL, *s=VNULL, *tmp=VNULL; - VEC v; /* auxiliary vector */ - int i,j; - Real h_val, c; - - if (ip == INULL) - error(E_NULL,"iter_arnoldi_iref"); - if ( ! ip->Ax || ! Q || ! ip->x ) - error(E_NULL,"iter_arnoldi_iref"); - if ( ip->k <= 0 ) - error(E_BOUNDS,"iter_arnoldi_iref"); - if ( Q->n != ip->x->dim || Q->m != ip->k ) - error(E_SIZES,"iter_arnoldi_iref"); - - m_zero(Q); - H = m_resize(H,ip->k,ip->k); - m_zero(H); - - u = v_resize(u,ip->x->dim); - r = v_resize(r,ip->k); - s = v_resize(s,ip->k); - tmp = v_resize(tmp,ip->x->dim); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(s,TYPE_VEC); - MEM_STAT_REG(tmp,TYPE_VEC); - - v.dim = v.max_dim = ip->x->dim; - - c = v_norm2(ip->x); - if ( c <= 0.0) - return H; - else { - v.ve = Q->me[0]; - sv_mlt(1.0/c,ip->x,&v); - } - - v_zero(r); - v_zero(s); - for ( i = 0; i < ip->k; i++ ) - { - v.ve = Q->me[i]; - u = (ip->Ax)(ip->A_par,&v,u); - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - /* modified Gram-Schmidt */ - r->ve[j] = in_prod(&v,u); - v_mltadd(u,&v,-r->ve[j],u); - } - h_val = v_norm2(u); - /* if u == 0 then we have an exact subspace */ - if ( h_val <= 0.0 ) - { - *h_rem = h_val; - return H; - } - /* iterative refinement -- ensures near orthogonality */ - do { - v_zero(tmp); - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - s->ve[j] = in_prod(&v,u); - v_mltadd(tmp,&v,s->ve[j],tmp); - } - v_sub(u,tmp,u); - v_add(r,s,r); - } while ( v_norm2(s) > 0.1*(h_val = v_norm2(u)) ); - /* now that u is nearly orthogonal to Q, update H */ - set_col(H,i,r); - /* check once again if h_val is zero */ - if ( h_val <= 0.0 ) - { - *h_rem = h_val; - return H; - } - if ( i == ip->k-1 ) - { - *h_rem = h_val; - continue; - } - /* H->me[i+1][i] = h_val; */ - m_set_val(H,i+1,i,h_val); - v.ve = Q->me[i+1]; - sv_mlt(1.0/h_val,u,&v); - } - - return H; -} - -/* iter_arnoldi -- an implementation of the Arnoldi method; - modified Gram-Schmidt algorithm -*/ -MAT *iter_arnoldi(ip,h_rem,Q,H) -ITER *ip; -Real *h_rem; -MAT *Q, *H; -{ - static VEC *u=VNULL, *r=VNULL; - VEC v; /* auxiliary vector */ - int i,j; - Real h_val, c; - - if (ip == INULL) - error(E_NULL,"iter_arnoldi"); - if ( ! ip->Ax || ! Q || ! 
ip->x ) - error(E_NULL,"iter_arnoldi"); - if ( ip->k <= 0 ) - error(E_BOUNDS,"iter_arnoldi"); - if ( Q->n != ip->x->dim || Q->m != ip->k ) - error(E_SIZES,"iter_arnoldi"); - - m_zero(Q); - H = m_resize(H,ip->k,ip->k); - m_zero(H); - - u = v_resize(u,ip->x->dim); - r = v_resize(r,ip->k); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(r,TYPE_VEC); - - v.dim = v.max_dim = ip->x->dim; - - c = v_norm2(ip->x); - if ( c <= 0.0) - return H; - else { - v.ve = Q->me[0]; - sv_mlt(1.0/c,ip->x,&v); - } - - v_zero(r); - for ( i = 0; i < ip->k; i++ ) - { - v.ve = Q->me[i]; - u = (ip->Ax)(ip->A_par,&v,u); - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - /* modified Gram-Schmidt */ - r->ve[j] = in_prod(&v,u); - v_mltadd(u,&v,-r->ve[j],u); - } - h_val = v_norm2(u); - /* if u == 0 then we have an exact subspace */ - if ( h_val <= 0.0 ) - { - *h_rem = h_val; - return H; - } - set_col(H,i,r); - if ( i == ip->k-1 ) - { - *h_rem = h_val; - continue; - } - /* H->me[i+1][i] = h_val; */ - m_set_val(H,i+1,i,h_val); - v.ve = Q->me[i+1]; - sv_mlt(1.0/h_val,u,&v); - } - - return H; -} - - - -/* iter_sparnoldi -- uses arnoldi() with an explicit representation of A */ -MAT *iter_sparnoldi(A,x0,m,h_rem,Q,H) -SPMAT *A; -VEC *x0; -int m; -Real *h_rem; -MAT *Q, *H; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - ip->x = x0; - ip->k = m; - iter_arnoldi_iref(ip,h_rem,Q,H); - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return H; -} - - -/* for testing gmres */ -static void test_gmres(ip,i,Q,R,givc,givs,h_val) -ITER *ip; -int i; -MAT *Q, *R; -VEC *givc, *givs; -double h_val; -{ - VEC vt, vt1; - static MAT *Q1, *R1; - int j; - - /* test Q*A*Q^T = R */ - - Q = m_resize(Q,i+1,ip->b->dim); - Q1 = m_resize(Q1,i+1,ip->b->dim); - R1 = m_resize(R1,i+1,i+1); - MEM_STAT_REG(Q1,TYPE_MAT); - MEM_STAT_REG(R1,TYPE_MAT); - - vt.dim = vt.max_dim = ip->b->dim; - vt1.dim = vt1.max_dim = ip->b->dim; - for (j=0; j <= i; j++) { - vt.ve = Q->me[j]; - vt1.ve = Q1->me[j]; - ip->Ax(ip->A_par,&vt,&vt1); - } - - mmtr_mlt(Q,Q1,R1); - R1 = m_resize(R1,i+2,i+1); - for (j=0; j < i; j++) - R1->me[i+1][j] = 0.0; - R1->me[i+1][i] = h_val; - - for (j = 0; j <= i; j++) { - rot_rows(R1,j,j+1,givc->ve[j],givs->ve[j],R1); - } - - R1 = m_resize(R1,i+1,i+1); - m_sub(R,R1,R1); - /* if (m_norm_inf(R1) > MACHEPS*ip->b->dim) */ - printf(" %d. ||Q*A*Q^T - H|| = %g [cf. MACHEPS = %g]\n", - ip->steps,m_norm_inf(R1),MACHEPS); - - /* check Q*Q^T = I */ - - Q = m_resize(Q,i+1,ip->b->dim); - mmtr_mlt(Q,Q,R1); - for (j=0; j <= i; j++) - R1->me[j][j] -= 1.0; - if (m_norm_inf(R1) > MACHEPS*ip->b->dim) - printf(" ! m_norm_inf(Q*Q^T) = %g\n",m_norm_inf(R1)); - -} - - -/* gmres -- generalised minimum residual algorithm of Saad & Schultz - SIAM J. Sci. Stat. Comp. v.7, pp.856--869 (1986) -*/ -VEC *iter_gmres(ip) -ITER *ip; -{ - static VEC *u=VNULL, *r=VNULL, *rhs = VNULL; - static VEC *givs=VNULL, *givc=VNULL, *z = VNULL; - static MAT *Q = MNULL, *R = MNULL; - VEC *rr, v, v1; /* additional pointers (not real vectors) */ - int i,j, done; - Real nres; -/* Real last_h; */ - - if (ip == INULL) - error(E_NULL,"iter_gmres"); - if ( ! ip->Ax || ! ip->b ) - error(E_NULL,"iter_gmres"); - if ( ! 
ip->stop_crit ) - error(E_NULL,"iter_gmres"); - if ( ip->k <= 0 ) - error(E_BOUNDS,"iter_gmres"); - if (ip->x != VNULL && ip->x->dim != ip->b->dim) - error(E_SIZES,"iter_gmres"); - if (ip->eps <= 0.0) ip->eps = MACHEPS; - - r = v_resize(r,ip->k+1); - u = v_resize(u,ip->b->dim); - rhs = v_resize(rhs,ip->k+1); - givs = v_resize(givs,ip->k); /* Givens rotations */ - givc = v_resize(givc,ip->k); - - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(u,TYPE_VEC); - MEM_STAT_REG(rhs,TYPE_VEC); - MEM_STAT_REG(givs,TYPE_VEC); - MEM_STAT_REG(givc,TYPE_VEC); - - R = m_resize(R,ip->k+1,ip->k); - Q = m_resize(Q,ip->k,ip->b->dim); - MEM_STAT_REG(R,TYPE_MAT); - MEM_STAT_REG(Q,TYPE_MAT); - - if (ip->x == VNULL) { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - } - - v.dim = v.max_dim = ip->b->dim; /* v and v1 are pointers to rows */ - v1.dim = v1.max_dim = ip->b->dim; /* of matrix Q */ - - if (ip->Bx != (Fun_Ax)NULL) { /* if precondition is defined */ - z = v_resize(z,ip->b->dim); - MEM_STAT_REG(z,TYPE_VEC); - } - - done = FALSE; - for (ip->steps = 0; ip->steps < ip->limit; ) { - - /* restart */ - - ip->Ax(ip->A_par,ip->x,u); /* u = A*x */ - v_sub(ip->b,u,u); /* u = b - A*x */ - rr = u; /* rr is a pointer only */ - - if (ip->Bx) { - (ip->Bx)(ip->B_par,u,z); /* tmp = B*(b-A*x) */ - rr = z; - } - - nres = v_norm2(rr); - if (ip->steps == 0) { - if (ip->info) ip->info(ip,nres,VNULL,VNULL); - ip->init_res = nres; - } - - if ( nres == 0.0 ) { - done = TRUE; - break; - } - - v.ve = Q->me[0]; - sv_mlt(1.0/nres,rr,&v); - - v_zero(r); - v_zero(rhs); - rhs->ve[0] = nres; - - for ( i = 0; i < ip->k && ip->steps < ip->limit; i++ ) { - ip->steps++; - v.ve = Q->me[i]; - (ip->Ax)(ip->A_par,&v,u); - rr = u; - if (ip->Bx) { - (ip->Bx)(ip->B_par,u,z); - rr = z; - } - - if (i < ip->k - 1) { - v1.ve = Q->me[i+1]; - v_copy(rr,&v1); - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - /* r->ve[j] = in_prod(&v,rr); */ - /* modified Gram-Schmidt algorithm */ - r->ve[j] = in_prod(&v,&v1); - v_mltadd(&v1,&v,-r->ve[j],&v1); - } - - r->ve[i+1] = nres = v_norm2(&v1); - if (nres <= MACHEPS*ip->init_res) { - for (j = 0; j < i; j++) - rot_vec(r,j,j+1,givc->ve[j],givs->ve[j],r); - set_col(R,i,r); - done = TRUE; - break; - } - sv_mlt(1.0/nres,&v1,&v1); - } - else { /* i == ip->k - 1 */ - /* Q->me[ip->k] need not be computed */ - - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - r->ve[j] = in_prod(&v,rr); - } - - nres = in_prod(rr,rr) - in_prod(r,r); - if (sqrt(fabs(nres)) <= MACHEPS*ip->init_res) { - for (j = 0; j < i; j++) - rot_vec(r,j,j+1,givc->ve[j],givs->ve[j],r); - set_col(R,i,r); - done = TRUE; - break; - } - if (nres < 0.0) { /* do restart */ - i--; - ip->steps--; - break; - } - r->ve[i+1] = sqrt(nres); - } - - /* QR update */ - - /* last_h = r->ve[i+1]; */ /* for test only */ - for (j = 0; j < i; j++) - rot_vec(r,j,j+1,givc->ve[j],givs->ve[j],r); - givens(r->ve[i],r->ve[i+1],&givc->ve[i],&givs->ve[i]); - rot_vec(r,i,i+1,givc->ve[i],givs->ve[i],r); - rot_vec(rhs,i,i+1,givc->ve[i],givs->ve[i],rhs); - - set_col(R,i,r); - - nres = fabs((double) rhs->ve[i+1]); - if (ip->info) ip->info(ip,nres,VNULL,VNULL); - if ( ip->stop_crit(ip,nres,VNULL,VNULL) ) { - done = TRUE; - break; - } - } - - /* use ixi submatrix of R */ - - if (i >= ip->k) i = ip->k - 1; - - R = m_resize(R,i+1,i+1); - rhs = v_resize(rhs,i+1); - - /* test only */ - /* test_gmres(ip,i,Q,R,givc,givs,last_h); */ - - Usolve(R,rhs,rhs,0.0); /* solve a system: R*x = rhs */ - - /* new approximation */ - - for (j = 0; j <= i; j++) { - v.ve = Q->me[j]; - 
v_mltadd(ip->x,&v,rhs->ve[j],ip->x); - } - - if (done) break; - - /* back to old dimensions */ - - rhs = v_resize(rhs,ip->k+1); - R = m_resize(R,ip->k+1,ip->k); - - } - - return ip->x; -} - -/* iter_spgmres - a simple interface to iter_gmres */ - -VEC *iter_spgmres(A,B,b,tol,x,k,limit,steps) -SPMAT *A, *B; -VEC *b, *x; -double tol; -int *steps,k,limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - if (B) { - ip->Bx = (Fun_Ax) sp_mv_mlt; - ip->B_par = (void *) B; - } - else { - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - } - ip->k = k; - ip->limit = limit; - ip->info = (Fun_info) NULL; - ip->b = b; - ip->eps = tol; - ip->x = x; - iter_gmres(ip); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; -} - - -/* for testing mgcr */ -static void test_mgcr(ip,i,Q,R) -ITER *ip; -int i; -MAT *Q, *R; -{ - VEC vt, vt1; - static MAT *R1; - static VEC *r, *r1; - VEC *rr; - int k,j; - Real sm; - - - /* check Q*Q^T = I */ - vt.dim = vt.max_dim = ip->b->dim; - vt1.dim = vt1.max_dim = ip->b->dim; - - Q = m_resize(Q,i+1,ip->b->dim); - R1 = m_resize(R1,i+1,i+1); - r = v_resize(r,ip->b->dim); - r1 = v_resize(r1,ip->b->dim); - MEM_STAT_REG(R1,TYPE_MAT); - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(r1,TYPE_VEC); - - m_zero(R1); - for (k=1; k <= i; k++) - for (j=1; j <= i; j++) { - vt.ve = Q->me[k]; - vt1.ve = Q->me[j]; - R1->me[k][j] = in_prod(&vt,&vt1); - } - for (j=1; j <= i; j++) - R1->me[j][j] -= 1.0; - if (m_norm_inf(R1) > MACHEPS*ip->b->dim) - printf(" ! (mgcr:) m_norm_inf(Q*Q^T) = %g\n",m_norm_inf(R1)); - - /* check (r_i,Ap_j) = 0 for j <= i */ - - ip->Ax(ip->A_par,ip->x,r); - v_sub(ip->b,r,r); - rr = r; - if (ip->Bx) { - ip->Bx(ip->B_par,r,r1); - rr = r1; - } - - printf(" ||r|| = %g\n",v_norm2(rr)); - sm = 0.0; - for (j = 1; j <= i; j++) { - vt.ve = Q->me[j]; - sm = max(sm,in_prod(&vt,rr)); - } - if (sm >= MACHEPS*ip->b->dim) - printf(" ! (mgcr:) max_j (r,Ap_j) = %g\n",sm); - -} - - - - -/* - iter_mgcr -- modified generalized conjugate residual algorithm; - fast version of GCR; -*/ -VEC *iter_mgcr(ip) -ITER *ip; -{ - static VEC *As, *beta, *alpha, *z; - static MAT *N, *H; - - VEC *rr, v, s; /* additional pointer and structures */ - Real nres; /* norm of a residual */ - Real dd; /* coefficient d_i */ - int i,j; - int done; /* if TRUE then stop the iterative process */ - int dim; /* dimension of the problem */ - - /* ip cannot be NULL */ - if (ip == INULL) error(E_NULL,"mgcr"); - /* Ax, b and stopping criterion must be given */ - if (! ip->Ax || ! ip->b || ! ip->stop_crit) - error(E_NULL,"mgcr"); - /* at least one direction vector must exist */ - if ( ip->k <= 0) error(E_BOUNDS,"mgcr"); - /* if the vector x is given then b and x must have the same dimension */ - if ( ip->x && ip->x->dim != ip->b->dim) - error(E_SIZES,"mgcr"); - if (ip->eps <= 0.0) ip->eps = MACHEPS; - - dim = ip->b->dim; - As = v_resize(As,dim); - alpha = v_resize(alpha,ip->k); - beta = v_resize(beta,ip->k); - - MEM_STAT_REG(As,TYPE_VEC); - MEM_STAT_REG(alpha,TYPE_VEC); - MEM_STAT_REG(beta,TYPE_VEC); - - H = m_resize(H,ip->k,ip->k); - N = m_resize(N,ip->k,dim); - - MEM_STAT_REG(H,TYPE_MAT); - MEM_STAT_REG(N,TYPE_MAT); - - /* if a preconditioner is defined */ - if (ip->Bx) { - z = v_resize(z,dim); - MEM_STAT_REG(z,TYPE_VEC); - } - - /* if x is NULL then it is assumed that x has - entries with value zero */ - if ( ! 
ip->x ) { - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - } - - /* v and s are additional pointers to rows of N */ - /* they must have the same dimension as rows of N */ - v.dim = v.max_dim = s.dim = s.max_dim = dim; - - - done = FALSE; - for (ip->steps = 0; ip->steps < ip->limit; ) { - (*ip->Ax)(ip->A_par,ip->x,As); /* As = A*x */ - v_sub(ip->b,As,As); /* As = b - A*x */ - rr = As; /* rr is an additional pointer */ - - /* if a preconditioner is defined */ - if (ip->Bx) { - (*ip->Bx)(ip->B_par,As,z); /* z = B*(b-A*x) */ - rr = z; - } - - /* norm of the residual */ - nres = v_norm2(rr); - dd = nres; /* dd = ||r_i|| */ - - /* check if the norm of the residual is zero */ - if (ip->steps == 0) { - /* information for a user */ - if (ip->info) (*ip->info)(ip,nres,As,rr); - ip->init_res = fabs(nres); - } - - if (nres == 0.0) { - /* iterative process is finished */ - done = TRUE; - break; - } - - /* save this residual in the first row of N */ - v.ve = N->me[0]; - v_copy(rr,&v); - - for (i = 0; i < ip->k && ip->steps < ip->limit; i++) { - ip->steps++; - v.ve = N->me[i]; /* pointer to a row of N (=s_i) */ - /* note that we must use here &v, not v */ - (*ip->Ax)(ip->A_par,&v,As); - rr = As; /* As = A*s_i */ - if (ip->Bx) { - (*ip->Bx)(ip->B_par,As,z); /* z = B*A*s_i */ - rr = z; - } - - if (i < ip->k - 1) { - s.ve = N->me[i+1]; /* pointer to a row of N (=s_{i+1}) */ - v_copy(rr,&s); /* s_{i+1} = B*A*s_i */ - for (j = 0; j <= i-1; j++) { - v.ve = N->me[j+1]; /* pointer to a row of N (=s_{j+1}) */ - /* beta->ve[j] = in_prod(&v,rr); */ /* beta_{j,i} */ - /* modified Gram-Schmidt algorithm */ - beta->ve[j] = in_prod(&v,&s); /* beta_{j,i} */ - /* s_{i+1} -= beta_{j,i}*s_{j+1} */ - v_mltadd(&s,&v,- beta->ve[j],&s); - } - - /* beta_{i,i} = ||s_{i+1}||_2 */ - beta->ve[i] = nres = v_norm2(&s); - if ( nres <= MACHEPS*ip->init_res) { - /* s_{i+1} == 0 */ - i--; - done = TRUE; - break; - } - sv_mlt(1.0/nres,&s,&s); /* normalize s_{i+1} */ - - v.ve = N->me[0]; - alpha->ve[i] = in_prod(&v,&s); /* alpha_i = (s_0 , s_{i+1}) */ - - } - else { - for (j = 0; j <= i-1; j++) { - v.ve = N->me[j+1]; /* pointer to a row of N (=s_{j+1}) */ - beta->ve[j] = in_prod(&v,rr); /* beta_{j,i} */ - } - - nres = in_prod(rr,rr); /* rr = B*A*s_{k-1} */ - for (j = 0; j <= i-1; j++) - nres -= beta->ve[j]*beta->ve[j]; - - if (sqrt(fabs(nres)) <= MACHEPS*ip->init_res) { - /* s_k is zero */ - i--; - done = TRUE; - break; - } - if (nres < 0.0) { /* do restart */ - i--; - ip->steps--; - break; - } - beta->ve[i] = sqrt(nres); /* beta_{k-1,k-1} */ - - v.ve = N->me[0]; - alpha->ve[i] = in_prod(&v,rr); - for (j = 0; j <= i-1; j++) - alpha->ve[i] -= beta->ve[j]*alpha->ve[j]; - alpha->ve[i] /= beta->ve[i]; /* alpha_{k-1} */ - - } - - set_col(H,i,beta); - - /* other method of computing dd */ - /* if (fabs((double)alpha->ve[i]) > dd) { - nres = - dd*dd + alpha->ve[i]*alpha->ve[i]; - nres = sqrt((double) nres); - if (ip->info) (*ip->info)(ip,-nres,VNULL,VNULL); - break; - } */ - /* to avoid overflow/underflow in computing dd */ - /* dd *= cos(asin((double)(alpha->ve[i]/dd))); */ - - nres = alpha->ve[i]/dd; - if (fabs(nres-1.0) <= MACHEPS*ip->init_res) - dd = 0.0; - else { - nres = 1.0 - nres*nres; - if (nres < 0.0) { - nres = sqrt((double) -nres); - if (ip->info) (*ip->info)(ip,-dd*nres,VNULL,VNULL); - break; - } - dd *= sqrt((double) nres); - } - - if (ip->info) (*ip->info)(ip,dd,VNULL,VNULL); - if ( ip->stop_crit(ip,dd,VNULL,VNULL) ) { - /* stopping criterion is satisfied */ - done = TRUE; - break; - } - - } /* end of for */ - - if (i >= 
ip->k) i = ip->k - 1; - - /* use (i+1) by (i+1) submatrix of H */ - H = m_resize(H,i+1,i+1); - alpha = v_resize(alpha,i+1); - Usolve(H,alpha,alpha,0.0); /* c_i is saved in alpha */ - - for (j = 0; j <= i; j++) { - v.ve = N->me[j]; - v_mltadd(ip->x,&v,alpha->ve[j],ip->x); - } - - - if (done) break; /* stop the iterative process */ - alpha = v_resize(alpha,ip->k); - H = m_resize(H,ip->k,ip->k); - - } /* end of while */ - - return ip->x; /* return the solution */ -} - - - -/* iter_spmgcr - a simple interface to iter_mgcr */ -/* no preconditioner */ -VEC *iter_spmgcr(A,B,b,tol,x,k,limit,steps) -SPMAT *A, *B; -VEC *b, *x; -double tol; -int *steps,k,limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - if (B) { - ip->Bx = (Fun_Ax) sp_mv_mlt; - ip->B_par = (void *) B; - } - else { - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - } - - ip->k = k; - ip->limit = limit; - ip->info = (Fun_info) NULL; - ip->b = b; - ip->eps = tol; - ip->x = x; - iter_mgcr(ip); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; -} - - - -/* - Conjugate gradients method for a normal equation - a preconditioner B must be symmetric !! -*/ -VEC *iter_cgne(ip) -ITER *ip; -{ - static VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL; - Real alpha, beta, inner, old_inner, nres; - VEC *rr1; /* pointer only */ - - if (ip == INULL) - error(E_NULL,"iter_cgne"); - if (!ip->Ax || ! ip->ATx || !ip->b) - error(E_NULL,"iter_cgne"); - if ( ip->x == ip->b ) - error(E_INSITU,"iter_cgne"); - if (!ip->stop_crit) - error(E_NULL,"iter_cgne"); - - if ( ip->eps <= 0.0 ) ip->eps = MACHEPS; - - r = v_resize(r,ip->b->dim); - p = v_resize(p,ip->b->dim); - q = v_resize(q,ip->b->dim); - - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(p,TYPE_VEC); - MEM_STAT_REG(q,TYPE_VEC); - - z = v_resize(z,ip->b->dim); - MEM_STAT_REG(z,TYPE_VEC); - - if (ip->x) { - if (ip->x->dim != ip->b->dim) - error(E_SIZES,"iter_cgne"); - ip->Ax(ip->A_par,ip->x,p); /* p = A*x */ - v_sub(ip->b,p,z); /* z = b - A*x */ - } - else { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - v_copy(ip->b,z); - } - rr1 = z; - if (ip->Bx) { - (ip->Bx)(ip->B_par,rr1,p); - rr1 = p; - } - (ip->ATx)(ip->AT_par,rr1,r); /* r = A^T*B*(b-A*x) */ - - - old_inner = 0.0; - for ( ip->steps = 0; ip->steps <= ip->limit; ip->steps++ ) - { - rr1 = r; - if ( ip->Bx ) { - (ip->Bx)(ip->B_par,r,z); /* rr = B*r */ - rr1 = z; - } - - inner = in_prod(r,rr1); - nres = sqrt(fabs(inner)); - if (ip->info) ip->info(ip,nres,r,rr1); - if (ip->steps == 0) ip->init_res = nres; - if ( ip->stop_crit(ip,nres,r,rr1) ) break; - - if ( ip->steps ) /* if ( ip->steps > 0 ) ... */ - { - beta = inner/old_inner; - p = v_mltadd(rr1,p,beta,p); - } - else /* if ( ip->steps == 0 ) ... 
*/ - { - beta = 0.0; - p = v_copy(rr1,p); - old_inner = 0.0; - } - (ip->Ax)(ip->A_par,p,q); /* q = A*p */ - if (ip->Bx) { - (ip->Bx)(ip->B_par,q,z); - (ip->ATx)(ip->AT_par,z,q); - rr1 = q; /* q = A^T*B*A*p */ - } - else { - (ip->ATx)(ip->AT_par,q,z); /* z = A^T*A*p */ - rr1 = z; - } - - alpha = inner/in_prod(rr1,p); - v_mltadd(ip->x,p,alpha,ip->x); - v_mltadd(r,rr1,-alpha,r); - old_inner = inner; - } - - return ip->x; -} - -/* iter_spcgne -- a simple interface to iter_cgne() which - uses sparse matrix data structures - -- assumes that B contains an actual preconditioner (or NULL) - use always as follows: - x = iter_spcgne(A,B,b,eps,x,limit,steps); - or - x = iter_spcgne(A,B,b,eps,VNULL,limit,steps); - In the second case the solution vector is created. -*/ -VEC *iter_spcgne(A,B,b,eps,x,limit,steps) -SPMAT *A, *B; -VEC *b, *x; -double eps; -int *steps, limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *)A; - ip->ATx = (Fun_Ax) sp_vm_mlt; - ip->AT_par = (void *)A; - if (B) { - ip->Bx = (Fun_Ax) sp_mv_mlt; - ip->B_par = (void *)B; - } - else { - ip->Bx = (Fun_Ax) NULL; - ip->B_par = NULL; - } - ip->info = (Fun_info) NULL; - ip->b = b; - ip->eps = eps; - ip->limit = limit; - ip->x = x; - iter_cgne(ip); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; -} - - - diff --git a/src/mesch/itersym.c b/src/mesch/itersym.c deleted file mode 100755 index ca8c05cbc4..0000000000 --- a/src/mesch/itersym.c +++ /dev/null @@ -1,591 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* itersym.c 17/09/93 */ - - -/* - ITERATIVE METHODS - implementation of several iterative methods; - see also iter0.c - */ - -#include -#include "matrix.h" -#include "matrix2.h" -#include "sparse.h" -#include "iter.h" -#include - -static char rcsid[] = "itersym.c,v 1.1 1997/12/04 17:55:29 hines Exp"; - - -#ifdef ANSI_C -VEC *spCHsolve(SPMAT *,VEC *,VEC *); -VEC *trieig(VEC *,VEC *,MAT *); -#else -VEC *spCHsolve(); -VEC *trieig(); -#endif - - - -/* iter_spcg -- a simple interface to iter_cg() which uses sparse matrix - data structures - -- assumes that LLT contains the Cholesky factorisation of the - actual preconditioner; - use always as follows: - x = iter_spcg(A,LLT,b,eps,x,limit,steps); - or - x = iter_spcg(A,LLT,b,eps,VNULL,limit,steps); - In the second case the solution vector is created. 
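iter_cg, which follows, implements the classical conjugate gradient recurrence, optionally preconditioned (iter_spcg wires in spCHsolve with a Cholesky factor LLT). As a compact point of reference, the unpreconditioned version of the same recurrence on a dense SPD matrix in plain C (cg_dense is an illustrative name; iter_cg below is this loop with the extra step rr = B*r when a preconditioner is supplied):

#include <stdlib.h>
#include <math.h>

/* Unpreconditioned conjugate gradients for a dense SPD system A*x = b.
   A is n x n row-major; x holds the initial guess on entry and the
   approximate solution on exit.  Returns the number of iterations taken. */
int cg_dense(const double *A, const double *b, double *x,
             int n, double tol, int limit)
{
    double *r = malloc(n * sizeof *r);
    double *p = malloc(n * sizeof *p);
    double *q = malloc(n * sizeof *q);
    double inner, old_inner = 0.0;
    int i, j, k;

    /* r = b - A*x; p starts at zero */
    for (i = 0; i < n; i++) {
        double Ax_i = 0.0;
        for (j = 0; j < n; j++) Ax_i += A[i*n + j] * x[j];
        r[i] = b[i] - Ax_i;
        p[i] = 0.0;
    }

    for (k = 0; k < limit; k++) {
        inner = 0.0;
        for (i = 0; i < n; i++) inner += r[i] * r[i];
        if (sqrt(inner) <= tol) break;

        /* p = r + beta*p   (beta = 0 on the first pass) */
        double beta = (k == 0) ? 0.0 : inner / old_inner;
        for (i = 0; i < n; i++) p[i] = r[i] + beta * p[i];

        /* q = A*p ; alpha = (r,r)/(p,q) */
        double pq = 0.0;
        for (i = 0; i < n; i++) {
            q[i] = 0.0;
            for (j = 0; j < n; j++) q[i] += A[i*n + j] * p[j];
            pq += p[i] * q[i];
        }
        double alpha = inner / pq;

        /* x += alpha*p ; r -= alpha*q */
        for (i = 0; i < n; i++) { x[i] += alpha * p[i]; r[i] -= alpha * q[i]; }
        old_inner = inner;
    }

    free(r); free(p); free(q);
    return k;
}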
- */ -VEC *iter_spcg(A,LLT,b,eps,x,limit,steps) -SPMAT *A, *LLT; -VEC *b, *x; -double eps; -int *steps, limit; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *)A; - ip->Bx = (Fun_Ax) spCHsolve; - ip->B_par = (void *)LLT; - ip->info = (Fun_info) NULL; - ip->b = b; - ip->eps = eps; - ip->limit = limit; - ip->x = x; - iter_cg(ip); - x = ip->x; - if (steps) *steps = ip->steps; - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return x; -} - -/* - Conjugate gradients method; - */ -VEC *iter_cg(ip) -ITER *ip; -{ - static VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL; - Real alpha, beta, inner, old_inner, nres; - VEC *rr; /* rr == r or rr == z */ - - if (ip == INULL) - error(E_NULL,"iter_cg"); - if (!ip->Ax || !ip->b) - error(E_NULL,"iter_cg"); - if ( ip->x == ip->b ) - error(E_INSITU,"iter_cg"); - if (!ip->stop_crit) - error(E_NULL,"iter_cg"); - - if ( ip->eps <= 0.0 ) - ip->eps = MACHEPS; - - r = v_resize(r,ip->b->dim); - p = v_resize(p,ip->b->dim); - q = v_resize(q,ip->b->dim); - - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(p,TYPE_VEC); - MEM_STAT_REG(q,TYPE_VEC); - - if (ip->Bx != (Fun_Ax)NULL) { - z = v_resize(z,ip->b->dim); - MEM_STAT_REG(z,TYPE_VEC); - rr = z; - } - else rr = r; - - if (ip->x != VNULL) { - if (ip->x->dim != ip->b->dim) - error(E_SIZES,"iter_cg"); - ip->Ax(ip->A_par,ip->x,p); /* p = A*x */ - v_sub(ip->b,p,r); /* r = b - A*x */ - } - else { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - v_copy(ip->b,r); - } - - old_inner = 0.0; - for ( ip->steps = 0; ip->steps <= ip->limit; ip->steps++ ) - { - if ( ip->Bx ) - (ip->Bx)(ip->B_par,r,rr); /* rr = B*r */ - - inner = in_prod(rr,r); - nres = sqrt(fabs(inner)); - if (ip->info) ip->info(ip,nres,r,rr); - if (ip->steps == 0) ip->init_res = nres; - if ( ip->stop_crit(ip,nres,r,rr) ) break; - - if ( ip->steps ) /* if ( ip->steps > 0 ) ... */ - { - beta = inner/old_inner; - p = v_mltadd(rr,p,beta,p); - } - else /* if ( ip->steps == 0 ) ... */ - { - beta = 0.0; - p = v_copy(rr,p); - old_inner = 0.0; - } - (ip->Ax)(ip->A_par,p,q); /* q = A*p */ - alpha = in_prod(p,q); - if (sqrt(fabs(alpha)) <= MACHEPS*ip->init_res) - error(E_BREAKDOWN,"iter_cg"); - alpha = inner/alpha; - v_mltadd(ip->x,p,alpha,ip->x); - v_mltadd(r,q,-alpha,r); - old_inner = inner; - } - - return ip->x; -} - - - -/* iter_lanczos -- raw lanczos algorithm -- no re-orthogonalisation - -- creates T matrix of size == m, - but no larger than before beta_k == 0 - -- uses passed routine to do matrix-vector multiplies */ -void iter_lanczos(ip,a,b,beta2,Q) -ITER *ip; -VEC *a, *b; -Real *beta2; -MAT *Q; -{ - int j; - static VEC *v = VNULL, *w = VNULL, *tmp = VNULL; - Real alpha, beta, c; - - if ( ! ip ) - error(E_NULL,"iter_lanczos"); - if ( ! ip->Ax || ! ip->x || ! a || ! 
b ) - error(E_NULL,"iter_lanczos"); - if ( ip->k <= 0 ) - error(E_BOUNDS,"iter_lanczos"); - if ( Q && ( Q->n < ip->x->dim || Q->m < ip->k ) ) - error(E_SIZES,"iter_lanczos"); - - a = v_resize(a,(u_int)ip->k); - b = v_resize(b,(u_int)(ip->k-1)); - v = v_resize(v,ip->x->dim); - w = v_resize(w,ip->x->dim); - tmp = v_resize(tmp,ip->x->dim); - MEM_STAT_REG(v,TYPE_VEC); - MEM_STAT_REG(w,TYPE_VEC); - MEM_STAT_REG(tmp,TYPE_VEC); - - beta = 1.0; - v_zero(a); - v_zero(b); - if (Q) m_zero(Q); - - /* normalise x as w */ - c = v_norm2(ip->x); - if (c <= MACHEPS) { /* ip->x == 0 */ - *beta2 = 0.0; - return; - } - else - sv_mlt(1.0/c,ip->x,w); - - (ip->Ax)(ip->A_par,w,v); - - for ( j = 0; j < ip->k; j++ ) - { - /* store w in Q if Q not NULL */ - if ( Q ) set_row(Q,j,w); - - alpha = in_prod(w,v); - a->ve[j] = alpha; - v_mltadd(v,w,-alpha,v); - beta = v_norm2(v); - if ( beta == 0.0 ) - { - *beta2 = 0.0; - return; - } - - if ( j < ip->k-1 ) - b->ve[j] = beta; - v_copy(w,tmp); - sv_mlt(1/beta,v,w); - sv_mlt(-beta,tmp,v); - (ip->Ax)(ip->A_par,w,tmp); - v_add(v,tmp,v); - } - *beta2 = beta; - -} - -/* iter_splanczos -- version that uses sparse matrix data structure */ -void iter_splanczos(A,m,x0,a,b,beta2,Q) -SPMAT *A; -int m; -VEC *x0, *a, *b; -Real *beta2; -MAT *Q; -{ - ITER *ip; - - ip = iter_get(0,0); - ip->shared_x = ip->shared_b = TRUE; - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - ip->x = x0; - ip->k = m; - iter_lanczos(ip,a,b,beta2,Q); - iter_free(ip); /* release only ITER structure */ -} - - -#ifndef MAC -extern double frexp(), ldexp(); -#endif - -/* product -- returns the product of a long list of numbers - -- answer stored in mant (mantissa) and expt (exponent) */ -static double product(a,offset,expt) -VEC *a; -double offset; -int *expt; -{ - Real mant, tmp_fctr; - int i, tmp_expt; - - if ( ! a ) - error(E_NULL,"product"); - - mant = 1.0; - *expt = 0; - if ( offset == 0.0 ) - for ( i = 0; i < a->dim; i++ ) - { - mant *= frexp(a->ve[i],&tmp_expt); - *expt += tmp_expt; - if ( ! (i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - else - for ( i = 0; i < a->dim; i++ ) - { - tmp_fctr = a->ve[i] - offset; - tmp_fctr += (tmp_fctr > 0.0 ) ? -MACHEPS*offset : - MACHEPS*offset; - mant *= frexp(tmp_fctr,&tmp_expt); - *expt += tmp_expt; - if ( ! (i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - - return mant; -} - -/* product2 -- returns the product of a long list of numbers - -- answer stored in mant (mantissa) and expt (exponent) */ -static double product2(a,k,expt) -VEC *a; -int k; /* entry of a to leave out */ -int *expt; -{ - Real mant, mu, tmp_fctr; - int i, tmp_expt; - - if ( ! a ) - error(E_NULL,"product2"); - if ( k < 0 || k >= a->dim ) - error(E_BOUNDS,"product2"); - - mant = 1.0; - *expt = 0; - mu = a->ve[k]; - for ( i = 0; i < a->dim; i++ ) - { - if ( i == k ) - continue; - tmp_fctr = a->ve[i] - mu; - tmp_fctr += ( tmp_fctr > 0.0 ) ? -MACHEPS*mu : MACHEPS*mu; - mant *= frexp(tmp_fctr,&tmp_expt); - *expt += tmp_expt; - if ( ! (i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - - return mant; -} - -/* dbl_cmp -- comparison function to pass to qsort() */ -static int dbl_cmp(x,y) -Real *x, *y; -{ - Real tmp; - - tmp = *x - *y; - return (tmp > 0 ? 1 : tmp < 0 ? -1: 0); -} - -/* iter_lanczos2 -- lanczos + error estimate for every e-val - -- uses Cullum & Willoughby approach, Sparse Matrix Proc. 
1978 - -- returns multiple e-vals where multiple e-vals may not exist - -- returns evals vector */ -VEC *iter_lanczos2(ip,evals,err_est) -ITER *ip; /* ITER structure */ -VEC *evals; /* eigenvalue vector */ -VEC *err_est; /* error estimates of eigenvalues */ -{ - VEC *a; - static VEC *b=VNULL, *a2=VNULL, *b2=VNULL; - Real beta, pb_mant, det_mant, det_mant1, det_mant2; - int i, pb_expt, det_expt, det_expt1, det_expt2; - - if ( ! ip ) - error(E_NULL,"iter_lanczos2"); - if ( ! ip->Ax || ! ip->x ) - error(E_NULL,"iter_lanczos2"); - if ( ip->k <= 0 ) - error(E_RANGE,"iter_lanczos2"); - - a = evals; - a = v_resize(a,(u_int)ip->k); - b = v_resize(b,(u_int)(ip->k-1)); - MEM_STAT_REG(b,TYPE_VEC); - - iter_lanczos(ip,a,b,&beta,MNULL); - - /* printf("# beta =%g\n",beta); */ - pb_mant = 0.0; - if ( err_est ) - { - pb_mant = product(b,(double)0.0,&pb_expt); - /* printf("# pb_mant = %g, pb_expt = %d\n",pb_mant, pb_expt); */ - } - - /* printf("# diags =\n"); v_output(a); */ - /* printf("# off diags =\n"); v_output(b); */ - a2 = v_resize(a2,a->dim - 1); - b2 = v_resize(b2,b->dim - 1); - MEM_STAT_REG(a2,TYPE_VEC); - MEM_STAT_REG(b2,TYPE_VEC); - for ( i = 0; i < a2->dim - 1; i++ ) - { - a2->ve[i] = a->ve[i+1]; - b2->ve[i] = b->ve[i+1]; - } - a2->ve[a2->dim-1] = a->ve[a2->dim]; - - trieig(a,b,MNULL); - - /* sort evals as a courtesy */ - qsort((void *)(a->ve),(int)(a->dim),sizeof(Real),(int (*)())dbl_cmp); - - /* error estimates */ - if ( err_est ) - { - err_est = v_resize(err_est,(u_int)ip->k); - - trieig(a2,b2,MNULL); - /* printf("# a =\n"); v_output(a); */ - /* printf("# a2 =\n"); v_output(a2); */ - - for ( i = 0; i < a->dim; i++ ) - { - det_mant1 = product2(a,i,&det_expt1); - det_mant2 = product(a2,(double)a->ve[i],&det_expt2); - /* printf("# det_mant1=%g, det_expt1=%d\n", - det_mant1,det_expt1); */ - /* printf("# det_mant2=%g, det_expt2=%d\n", - det_mant2,det_expt2); */ - if ( det_mant1 == 0.0 ) - { /* multiple e-val of T */ - err_est->ve[i] = 0.0; - continue; - } - else if ( det_mant2 == 0.0 ) - { - err_est->ve[i] = HUGE; - continue; - } - if ( (det_expt1 + det_expt2) % 2 ) - /* if odd... */ - det_mant = sqrt(2.0*fabs(det_mant1*det_mant2)); - else /* if even... 
*/ - det_mant = sqrt(fabs(det_mant1*det_mant2)); - det_expt = (det_expt1+det_expt2)/2; - err_est->ve[i] = fabs(beta* - ldexp(pb_mant/det_mant,pb_expt-det_expt)); - } - } - - return a; -} - -/* iter_splanczos2 -- version of iter_lanczos2() that uses sparse matrix data - structure */ - -VEC *iter_splanczos2(A,m,x0,evals,err_est) -SPMAT *A; -int m; -VEC *x0; /* initial vector */ -VEC *evals; /* eigenvalue vector */ -VEC *err_est; /* error estimates of eigenvalues */ -{ - ITER *ip; - VEC *a; - - ip = iter_get(0,0); - ip->Ax = (Fun_Ax) sp_mv_mlt; - ip->A_par = (void *) A; - ip->x = x0; - ip->k = m; - a = iter_lanczos2(ip,evals,err_est); - ip->shared_x = ip->shared_b = TRUE; - iter_free(ip); /* release only ITER structure */ - return a; -} - - - - -/* - Conjugate gradient method - Another variant - mainly for testing - */ - -VEC *iter_cg1(ip) -ITER *ip; -{ - static VEC *r = VNULL, *p = VNULL, *q = VNULL, *z = VNULL; - Real alpha; - double inner,nres; - VEC *rr; /* rr == r or rr == z */ - - if (ip == INULL) - error(E_NULL,"iter_cg"); - if (!ip->Ax || !ip->b) - error(E_NULL,"iter_cg"); - if ( ip->x == ip->b ) - error(E_INSITU,"iter_cg"); - if (!ip->stop_crit) - error(E_NULL,"iter_cg"); - - if ( ip->eps <= 0.0 ) - ip->eps = MACHEPS; - - r = v_resize(r,ip->b->dim); - p = v_resize(p,ip->b->dim); - q = v_resize(q,ip->b->dim); - - MEM_STAT_REG(r,TYPE_VEC); - MEM_STAT_REG(p,TYPE_VEC); - MEM_STAT_REG(q,TYPE_VEC); - - if (ip->Bx != (Fun_Ax)NULL) { - z = v_resize(z,ip->b->dim); - MEM_STAT_REG(z,TYPE_VEC); - rr = z; - } - else rr = r; - - if (ip->x != VNULL) { - if (ip->x->dim != ip->b->dim) - error(E_SIZES,"iter_cg"); - ip->Ax(ip->A_par,ip->x,p); /* p = A*x */ - v_sub(ip->b,p,r); /* r = b - A*x */ - } - else { /* ip->x == 0 */ - ip->x = v_get(ip->b->dim); - ip->shared_x = FALSE; - v_copy(ip->b,r); - } - - if (ip->Bx) (ip->Bx)(ip->B_par,r,p); - else v_copy(r,p); - - inner = in_prod(p,r); - nres = sqrt(fabs(inner)); - if (ip->info) ip->info(ip,nres,r,p); - if ( nres == 0.0) return ip->x; - - for ( ip->steps = 0; ip->steps <= ip->limit; ip->steps++ ) - { - ip->Ax(ip->A_par,p,q); - inner = in_prod(q,p); - if (sqrt(fabs(inner)) <= MACHEPS*ip->init_res) - error(E_BREAKDOWN,"iter_cg1"); - - alpha = in_prod(p,r)/inner; - v_mltadd(ip->x,p,alpha,ip->x); - v_mltadd(r,q,-alpha,r); - - rr = r; - if (ip->Bx) { - ip->Bx(ip->B_par,r,z); - rr = z; - } - - nres = in_prod(r,rr); - if (nres < 0.0) { - warning(WARN_RES_LESS_0,"iter_cg"); - break; - } - nres = sqrt(fabs(nres)); - if (ip->info) ip->info(ip,nres,r,z); - if (ip->steps == 0) ip->init_res = nres; - if ( ip->stop_crit(ip,nres,r,z) ) break; - - alpha = -in_prod(rr,q)/inner; - v_mltadd(rr,p,alpha,p); - - } - - return ip->x; -} - - diff --git a/src/mesch/ivecop.c b/src/mesch/ivecop.c deleted file mode 100755 index 36d6b4ee1d..0000000000 --- a/src/mesch/ivecop.c +++ /dev/null @@ -1,436 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. 
All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* ivecop.c */ - -#include -#include "matrix.h" - -static char rcsid[] = "ivecop.c,v 1.1 1997/12/04 17:55:30 hines Exp"; - -static char line[MAXLINE]; - - - -/* iv_get -- get integer vector -- see also memory.c */ -IVEC *iv_get(dim) -int dim; -{ - IVEC *iv; - /* u_int i; */ - - if (dim < 0) - error(E_NEG,"iv_get"); - - if ((iv=NEW(IVEC)) == IVNULL ) - error(E_MEM,"iv_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_IVEC,0,sizeof(IVEC)); - mem_numvar(TYPE_IVEC,1); - } - - iv->dim = iv->max_dim = dim; - if ((iv->ive = NEW_A(dim,int)) == (int *)NULL ) - error(E_MEM,"iv_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_IVEC,0,dim*sizeof(int)); - } - - return (iv); -} - -/* iv_free -- returns iv & asoociated memory back to memory heap */ -int iv_free(iv) -IVEC *iv; -{ - if ( iv==IVNULL || iv->dim > MAXDIM ) - /* don't trust it */ - return (-1); - - if ( iv->ive == (int *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_IVEC,sizeof(IVEC),0); - mem_numvar(TYPE_IVEC,-1); - } - free((char *)iv); - } - else - { - if (mem_info_is_on()) { - mem_bytes(TYPE_IVEC,sizeof(IVEC)+iv->max_dim*sizeof(int),0); - mem_numvar(TYPE_IVEC,-1); - } - free((char *)iv->ive); - free((char *)iv); - } - - return (0); -} - -/* iv_resize -- returns the IVEC with dimension new_dim - -- iv is set to the zero vector */ -IVEC *iv_resize(iv,new_dim) -IVEC *iv; -int new_dim; -{ - int i; - - if (new_dim < 0) - error(E_NEG,"iv_resize"); - - if ( ! iv ) - return iv_get(new_dim); - - if (new_dim == iv->dim) - return iv; - - if ( new_dim > iv->max_dim ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_IVEC,iv->max_dim*sizeof(int), - new_dim*sizeof(int)); - } - iv->ive = RENEW(iv->ive,new_dim,int); - if ( ! iv->ive ) - error(E_MEM,"iv_resize"); - iv->max_dim = new_dim; - } - if ( iv->dim <= new_dim ) - for ( i = iv->dim; i < new_dim; i++ ) - iv->ive[i] = 0; - iv->dim = new_dim; - - return iv; -} - -/* iv_copy -- copy integer vector in to out - -- out created/resized if necessary */ -IVEC *iv_copy(in,out) -IVEC *in, *out; -{ - int i; - - if ( ! in ) - error(E_NULL,"iv_copy"); - out = iv_resize(out,in->dim); - for ( i = 0; i < in->dim; i++ ) - out->ive[i] = in->ive[i]; - - return out; -} - -/* iv_move -- move selected pieces of an IVEC - -- moves the length dim0 subvector with initial index i0 - to the corresponding subvector of out with initial index i1 - -- out is resized if necessary */ -IVEC *iv_move(in,i0,dim0,out,i1) -IVEC *in, *out; -int i0, dim0, i1; -{ - if ( ! in ) - error(E_NULL,"iv_move"); - if ( i0 < 0 || dim0 < 0 || i1 < 0 || - i0+dim0 > in->dim ) - error(E_BOUNDS,"iv_move"); - - if ( (! 
out) || i1+dim0 > out->dim ) - out = iv_resize(out,i1+dim0); - - MEM_COPY(&(in->ive[i0]),&(out->ive[i1]),dim0*sizeof(int)); - - return out; -} - -/* iv_add -- integer vector addition -- may be in-situ */ -IVEC *iv_add(iv1,iv2,out) -IVEC *iv1,*iv2,*out; -{ - u_int i; - int *out_ive, *iv1_ive, *iv2_ive; - - if ( iv1==IVNULL || iv2==IVNULL ) - error(E_NULL,"iv_add"); - if ( iv1->dim != iv2->dim ) - error(E_SIZES,"iv_add"); - if ( out==IVNULL || out->dim != iv1->dim ) - out = iv_resize(out,iv1->dim); - - out_ive = out->ive; - iv1_ive = iv1->ive; - iv2_ive = iv2->ive; - - for ( i = 0; i < iv1->dim; i++ ) - out_ive[i] = iv1_ive[i] + iv2_ive[i]; - - return (out); -} - - - -/* iv_sub -- integer vector addition -- may be in-situ */ -IVEC *iv_sub(iv1,iv2,out) -IVEC *iv1,*iv2,*out; -{ - u_int i; - int *out_ive, *iv1_ive, *iv2_ive; - - if ( iv1==IVNULL || iv2==IVNULL ) - error(E_NULL,"iv_sub"); - if ( iv1->dim != iv2->dim ) - error(E_SIZES,"iv_sub"); - if ( out==IVNULL || out->dim != iv1->dim ) - out = iv_resize(out,iv1->dim); - - out_ive = out->ive; - iv1_ive = iv1->ive; - iv2_ive = iv2->ive; - - for ( i = 0; i < iv1->dim; i++ ) - out_ive[i] = iv1_ive[i] - iv2_ive[i]; - - return (out); -} - -/* iv_foutput -- print a representation of iv on stream fp */ -void iv_foutput(fp,iv) -FILE *fp; -IVEC *iv; -{ - int i; - - fprintf(fp,"IntVector: "); - if ( iv == IVNULL ) - { - fprintf(fp,"**** NULL ****\n"); - return; - } - fprintf(fp,"dim: %d\n",iv->dim); - for ( i = 0; i < iv->dim; i++ ) - { - if ( (i+1) % 8 ) - fprintf(fp,"%8d ",iv->ive[i]); - else - fprintf(fp,"%8d\n",iv->ive[i]); - } - if ( i % 8 ) - fprintf(fp,"\n"); -} - - -/* iv_finput -- input integer vector from stream fp */ -IVEC *iv_finput(fp,x) -FILE *fp; -IVEC *x; -{ - IVEC *iiv_finput(),*biv_finput(); - - if ( isatty(fileno(fp)) ) - return iiv_finput(fp,x); - else - return biv_finput(fp,x); -} - -/* iiv_finput -- interactive input of IVEC iv */ -IVEC *iiv_finput(fp,iv) -FILE *fp; -IVEC *iv; -{ - u_int i,dim,dynamic; /* dynamic set if memory allocated here */ - - /* get dimension */ - if ( iv != (IVEC *)NULL && iv->dimdim; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"IntVector: dim: "); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"iiv_finput"); - } while ( sscanf(line,"%u",&dim)<1 || dim>MAXDIM ); - iv = iv_get(dim); - } - - /* input elements */ - for ( i=0; iive[i]); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"iiv_finput"); - if ( (*line == 'b' || *line == 'B') && i > 0 ) - { i--; dynamic = FALSE; goto redo; } - if ( (*line == 'f' || *line == 'F') && i < dim-1 ) - { i++; dynamic = FALSE; goto redo; } - } while ( *line=='\0' || sscanf(line,"%d",&iv->ive[i]) < 1 ); - - return (iv); -} - -/* biv_finput -- batch-file input of IVEC iv */ -IVEC *biv_finput(fp,iv) -FILE *fp; -IVEC *iv; -{ - u_int i,dim; - int io_code; - - /* get dimension */ - skipjunk(fp); - if ((io_code=fscanf(fp," IntVector: dim:%u",&dim)) < 1 || - dim>MAXDIM ) - error(io_code==EOF ? 7 : 6,"biv_finput"); - - /* allocate memory if necessary */ - if ( iv==(IVEC *)NULL || iv->dimive[i])) < 1 ) - error(io_code==EOF ? 7 : 6,"biv_finput"); - - return (iv); -} - -/* iv_dump -- dumps all the contents of IVEC iv onto stream fp */ -void iv_dump(fp,iv) -FILE*fp; -IVEC*iv; -{ - int i; - - fprintf(fp,"IntVector: "); - if ( ! 
iv ) - { - fprintf(fp,"**** NULL ****\n"); - return; - } - fprintf(fp,"dim: %d, max_dim: %d\n",iv->dim,iv->max_dim); - fprintf(fp,"ive @ 0x%p\n", iv->ive); - for ( i = 0; i < iv->max_dim; i++ ) - { - if ( (i+1) % 8 ) - fprintf(fp,"%8d ",iv->ive[i]); - else - fprintf(fp,"%8d\n",iv->ive[i]); - } - if ( i % 8 ) - fprintf(fp,"\n"); -} - -#define MAX_STACK 60 - - -/* iv_sort -- sorts vector x, and generates permutation that gives the order - of the components; x = [1.3, 3.7, 0.5] -> [0.5, 1.3, 3.7] and - the permutation is order = [2, 0, 1]. - -- if order is NULL on entry then it is ignored - -- the sorted vector x is returned */ -IVEC *iv_sort(x, order) -IVEC *x; -PERM *order; -{ - int *x_ive, tmp, v; - /* int *order_pe; */ - int dim, i, j, l, r, tmp_i; - int stack[MAX_STACK], sp; - - if ( ! x ) - error(E_NULL,"v_sort"); - if ( order != PNULL && order->size != x->dim ) - order = px_resize(order, x->dim); - - x_ive = x->ive; - dim = x->dim; - if ( order != PNULL ) - px_ident(order); - - if ( dim <= 1 ) - return x; - - /* using quicksort algorithm in Sedgewick, - "Algorithms in C", Ch. 9, pp. 118--122 (1990) */ - sp = 0; - l = 0; r = dim-1; v = x_ive[0]; - for ( ; ; ) - { - while ( r > l ) - { - /* "i = partition(x_ive,l,r);" */ - v = x_ive[r]; - i = l-1; - j = r; - for ( ; ; ) - { - while ( x_ive[++i] < v ) - ; - --j; - while ( x_ive[j] > v && j != 0 ) - --j; - if ( i >= j ) break; - - tmp = x_ive[i]; - x_ive[i] = x_ive[j]; - x_ive[j] = tmp; - if ( order != PNULL ) - { - tmp_i = order->pe[i]; - order->pe[i] = order->pe[j]; - order->pe[j] = tmp_i; - } - } - tmp = x_ive[i]; - x_ive[i] = x_ive[r]; - x_ive[r] = tmp; - if ( order != PNULL ) - { - tmp_i = order->pe[i]; - order->pe[i] = order->pe[r]; - order->pe[r] = tmp_i; - } - - if ( i-l > r-i ) - { stack[sp++] = l; stack[sp++] = i-1; l = i+1; } - else - { stack[sp++] = i+1; stack[sp++] = r; r = i-1; } - } - - /* recursion elimination */ - if ( sp == 0 ) - break; - r = stack[--sp]; - l = stack[--sp]; - } - - return x; -} diff --git a/src/mesch/lanczos.c b/src/mesch/lanczos.c deleted file mode 100755 index f6b77bf1da..0000000000 --- a/src/mesch/lanczos.c +++ /dev/null @@ -1,324 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
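iv_sort in ivecop.c above sorts in place while building the permutation that maps sorted positions back to original indices, by applying every exchange to both the data and the permutation. A short recursive sketch of that contract in plain C (iv_sort itself avoids recursion with an explicit stack; sort_with_order is an illustrative name):

#include <stdio.h>

/* Sort x[0..hi] in place while recording the permutation: after the call,
   order[i] is the original index of the element now at position i,
   the same contract as iv_sort above. */
void sort_with_order(int *x, int *order, int lo, int hi)
{
    if (lo >= hi) return;
    int pivot = x[hi], i = lo - 1;
    for (int j = lo; j < hi; j++) {
        if (x[j] < pivot) {
            i++;
            int t = x[i]; x[i] = x[j]; x[j] = t;                /* swap data  */
            t = order[i]; order[i] = order[j]; order[j] = t;    /* and order  */
        }
    }
    int t = x[i+1]; x[i+1] = x[hi]; x[hi] = t;
    t = order[i+1]; order[i+1] = order[hi]; order[hi] = t;
    sort_with_order(x, order, lo, i);
    sort_with_order(x, order, i + 2, hi);
}

int main(void)
{
    int x[] = {13, 37, 5};
    int order[] = {0, 1, 2};
    sort_with_order(x, order, 0, 2);
    for (int i = 0; i < 3; i++)
        printf("%d (was index %d)\n", x[i], order[i]);  /* 5(2) 13(0) 37(1) */
    return 0;
}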
-** -***************************************************************************/ - - -/* - File containing Lanczos type routines for finding eigenvalues - of large, sparse, symmetic matrices -*/ - -#include -#include -#include "matrix.h" -#include "sparse.h" - -static char rcsid[] = "lanczos.c,v 1.1 1997/12/04 17:55:31 hines Exp"; - -#ifdef ANSI_C -extern VEC *trieig(VEC *,VEC *,MAT *); -#else -extern VEC *trieig(); -#endif - -/* lanczos -- raw lanczos algorithm -- no re-orthogonalisation - -- creates T matrix of size == m, - but no larger than before beta_k == 0 - -- uses passed routine to do matrix-vector multiplies */ -void lanczos(A_fn,A_params,m,x0,a,b,beta2,Q) -VEC *(*A_fn)(); /* VEC *(*A_fn)(void *A_params,VEC *in, VEC *out) */ -void *A_params; -int m; -VEC *x0, *a, *b; -Real *beta2; -MAT *Q; -{ - int j; - VEC *v, *w, *tmp; - Real alpha, beta; - - if ( ! A_fn || ! x0 || ! a || ! b ) - error(E_NULL,"lanczos"); - if ( m <= 0 ) - error(E_BOUNDS,"lanczos"); - if ( Q && ( Q->m < x0->dim || Q->n < m ) ) - error(E_SIZES,"lanczos"); - - a = v_resize(a,(u_int)m); b = v_resize(b,(u_int)(m-1)); - v = v_get(x0->dim); - w = v_get(x0->dim); - tmp = v_get(x0->dim); - - beta = 1.0; - /* normalise x0 as w */ - sv_mlt(1.0/v_norm2(x0),x0,w); - - (*A_fn)(A_params,w,v); - - for ( j = 0; j < m; j++ ) - { - /* store w in Q if Q not NULL */ - if ( Q ) - set_col(Q,j,w); - - alpha = in_prod(w,v); - a->ve[j] = alpha; - v_mltadd(v,w,-alpha,v); - beta = v_norm2(v); - if ( beta == 0.0 ) - { - v_resize(a,(u_int)j+1); - v_resize(b,(u_int)j); - *beta2 = 0.0; - if ( Q ) - Q = m_resize(Q,Q->m,j+1); - return; - } - if ( j < m-1 ) - b->ve[j] = beta; - v_copy(w,tmp); - sv_mlt(1/beta,v,w); - sv_mlt(-beta,tmp,v); - (*A_fn)(A_params,w,tmp); - v_add(v,tmp,v); - } - *beta2 = beta; - - - V_FREE(v); V_FREE(w); V_FREE(tmp); -} -#ifndef MAC -extern double frexp(), ldexp(); -#endif -/* product -- returns the product of a long list of numbers - -- answer stored in mant (mantissa) and expt (exponent) */ -static double product(a,offset,expt) -VEC *a; -double offset; -int *expt; -{ - Real mant, tmp_fctr; - int i, tmp_expt; - - if ( ! a ) - error(E_NULL,"product"); - - mant = 1.0; - *expt = 0; - if ( offset == 0.0 ) - for ( i = 0; i < a->dim; i++ ) - { - mant *= frexp(a->ve[i],&tmp_expt); - *expt += tmp_expt; - if ( ! (i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - else - for ( i = 0; i < a->dim; i++ ) - { - tmp_fctr = a->ve[i] - offset; - tmp_fctr += (tmp_fctr > 0.0 ) ? -MACHEPS*offset : - MACHEPS*offset; - mant *= frexp(tmp_fctr,&tmp_expt); - *expt += tmp_expt; - if ( ! (i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - - return mant; -} - -/* product2 -- returns the product of a long list of numbers - -- answer stored in mant (mantissa) and expt (exponent) */ -static double product2(a,k,expt) -VEC *a; -int k; /* entry of a to leave out */ -int *expt; -{ - Real mant, mu, tmp_fctr; - int i, tmp_expt; - - if ( ! a ) - error(E_NULL,"product2"); - if ( k < 0 || k >= a->dim ) - error(E_BOUNDS,"product2"); - - mant = 1.0; - *expt = 0; - mu = a->ve[k]; - for ( i = 0; i < a->dim; i++ ) - { - if ( i == k ) - continue; - tmp_fctr = a->ve[i] - mu; - tmp_fctr += ( tmp_fctr > 0.0 ) ? -MACHEPS*mu : MACHEPS*mu; - mant *= frexp(tmp_fctr,&tmp_expt); - *expt += tmp_expt; - if ( ! 
(i % 10) ) - { - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - } - } - mant = frexp(mant,&tmp_expt); - *expt += tmp_expt; - - return mant; -} - -/* dbl_cmp -- comparison function to pass to qsort() */ -static int dbl_cmp(x,y) -Real *x, *y; -{ - Real tmp; - - tmp = *x - *y; - return (tmp > 0 ? 1 : tmp < 0 ? -1: 0); -} - -/* lanczos2 -- lanczos + error estimate for every e-val - -- uses Cullum & Willoughby approach, Sparse Matrix Proc. 1978 - -- returns multiple e-vals where multiple e-vals may not exist - -- returns evals vector */ -VEC *lanczos2(A_fn,A_params,m,x0,evals,err_est) -VEC *(*A_fn)(); -void *A_params; -int m; -VEC *x0; /* initial vector */ -VEC *evals; /* eigenvalue vector */ -VEC *err_est; /* error estimates of eigenvalues */ -{ - VEC *a; - static VEC *b=VNULL, *a2=VNULL, *b2=VNULL; - Real beta, pb_mant, det_mant, det_mant1, det_mant2; - int i, pb_expt, det_expt, det_expt1, det_expt2; - - if ( ! A_fn || ! x0 ) - error(E_NULL,"lanczos2"); - if ( m <= 0 ) - error(E_RANGE,"lanczos2"); - - a = evals; - a = v_resize(a,(u_int)m); - b = v_resize(b,(u_int)(m-1)); - MEM_STAT_REG(b,TYPE_VEC); - - lanczos(A_fn,A_params,m,x0,a,b,&beta,MNULL); - - /* printf("# beta =%g\n",beta); */ - pb_mant = 0.0; - if ( err_est ) - { - pb_mant = product(b,(double)0.0,&pb_expt); - /* printf("# pb_mant = %g, pb_expt = %d\n",pb_mant, pb_expt); */ - } - - /* printf("# diags =\n"); out_vec(a); */ - /* printf("# off diags =\n"); out_vec(b); */ - a2 = v_resize(a2,a->dim - 1); - b2 = v_resize(b2,b->dim - 1); - MEM_STAT_REG(a2,TYPE_VEC); - MEM_STAT_REG(b2,TYPE_VEC); - for ( i = 0; i < a2->dim - 1; i++ ) - { - a2->ve[i] = a->ve[i+1]; - b2->ve[i] = b->ve[i+1]; - } - a2->ve[a2->dim-1] = a->ve[a2->dim]; - - trieig(a,b,MNULL); - - /* sort evals as a courtesy */ - qsort((void *)(a->ve),(int)(a->dim),sizeof(Real),(int (*)())dbl_cmp); - - /* error estimates */ - if ( err_est ) - { - err_est = v_resize(err_est,(u_int)m); - - trieig(a2,b2,MNULL); - /* printf("# a =\n"); out_vec(a); */ - /* printf("# a2 =\n"); out_vec(a2); */ - - for ( i = 0; i < a->dim; i++ ) - { - det_mant1 = product2(a,i,&det_expt1); - det_mant2 = product(a2,(double)a->ve[i],&det_expt2); - /* printf("# det_mant1=%g, det_expt1=%d\n", - det_mant1,det_expt1); */ - /* printf("# det_mant2=%g, det_expt2=%d\n", - det_mant2,det_expt2); */ - if ( det_mant1 == 0.0 ) - { /* multiple e-val of T */ - err_est->ve[i] = 0.0; - continue; - } - else if ( det_mant2 == 0.0 ) - { - err_est->ve[i] = HUGE; - continue; - } - if ( (det_expt1 + det_expt2) % 2 ) - /* if odd... */ - det_mant = sqrt(2.0*fabs(det_mant1*det_mant2)); - else /* if even... 
*/ - det_mant = sqrt(fabs(det_mant1*det_mant2)); - det_expt = (det_expt1+det_expt2)/2; - err_est->ve[i] = fabs(beta* - ldexp(pb_mant/det_mant,pb_expt-det_expt)); - } - } - - return a; -} - -/* sp_lanczos -- version that uses sparse matrix data structure */ -void sp_lanczos(A,m,x0,a,b,beta2,Q) -SPMAT *A; -int m; -VEC *x0, *a, *b; -Real *beta2; -MAT *Q; -{ lanczos(sp_mv_mlt,A,m,x0,a,b,beta2,Q); } - -/* sp_lanczos2 -- version of lanczos2() that uses sparse matrix data - structure */ -VEC *sp_lanczos2(A,m,x0,evals,err_est) -SPMAT *A; -int m; -VEC *x0; /* initial vector */ -VEC *evals; /* eigenvalue vector */ -VEC *err_est; /* error estimates of eigenvalues */ -{ return lanczos2(sp_mv_mlt,A,m,x0,evals,err_est); } - diff --git a/src/mesch/lufactor.c b/src/mesch/lufactor.c deleted file mode 100755 index 2fe31f24e3..0000000000 --- a/src/mesch/lufactor.c +++ /dev/null @@ -1,282 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. -*/ - -/* LUfactor.c 1.5 11/25/87 */ -static char rcsid[] = "lufactor.c,v 1.1 1997/12/04 17:55:32 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* LUfactor -- gaussian elimination with scaled partial pivoting - -- Note: returns LU matrix which is A */ -MAT *LUfactor(A,pivot) -MAT *A; -PERM *pivot; -{ - u_int i, j, k, k_max, m, n; - int i_max; - Real **A_v, *A_piv, *A_row; - Real max1, temp, tiny; - static VEC *scale = VNULL; - - if ( A==(MAT *)NULL || pivot==(PERM *)NULL ) - error(E_NULL,"LUfactor"); - if ( pivot->size != A->m ) - error(E_SIZES,"LUfactor"); - m = A->m; n = A->n; - scale = v_resize(scale,A->m); - MEM_STAT_REG(scale,TYPE_VEC); - A_v = A->me; - - tiny = 10.0/HUGE_VAL; - - /* initialise pivot with identity permutation */ - for ( i=0; ipe[i] = i; - - /* set scale parameters */ - for ( i=0; ive[i] = max1; - } - - /* main loop */ - k_max = min(m,n)-1; - for ( k=0; kve[i]) >= tiny*fabs(A_v[i][k]) ) - { - temp = fabs(A_v[i][k])/scale->ve[i]; - if ( temp > max1 ) - { max1 = temp; i_max = i; } - } - - /* if no pivot then ignore column k... */ - if ( i_max == -1 ) - { - /* set pivot entry A[k][k] exactly to zero, - rather than just "small" */ - A_v[k][k] = 0.0; - continue; - } - - /* do we pivot ? */ - if ( i_max != k ) /* yes we do... 
*/ - { - px_transp(pivot,i_max,k); - for ( j=0; jm != A->n || A->n != b->dim ) - error(E_SIZES,"LUsolve"); - - x = v_resize(x,b->dim); - px_vec(pivot,b,x); /* x := P.b */ - Lsolve(A,x,x,1.0); /* implicit diagonal = 1 */ - Usolve(A,x,x,0.0); /* explicit diagonal */ - - return (x); -} - -/* LUTsolve -- given an LU factorisation in A, solve A^T.x=b */ -VEC *LUTsolve(LU,pivot,b,x) -MAT *LU; -PERM *pivot; -VEC *b,*x; -{ - if ( ! LU || ! b || ! pivot ) - error(E_NULL,"LUTsolve"); - if ( LU->m != LU->n || LU->n != b->dim ) - error(E_SIZES,"LUTsolve"); - - x = v_copy(b,x); - UTsolve(LU,x,x,0.0); /* explicit diagonal */ - LTsolve(LU,x,x,1.0); /* implicit diagonal = 1 */ - pxinv_vec(pivot,x,x); /* x := P^T.tmp */ - - return (x); -} - -/* m_inverse -- returns inverse of A, provided A is not too rank deficient - -- uses LU factorisation */ -MAT *m_inverse(A,out) -MAT *A, *out; -{ - int i; - static VEC *tmp = VNULL, *tmp2 = VNULL; - static MAT *A_cp = MNULL; - static PERM *pivot = PNULL; - - if ( ! A ) - error(E_NULL,"m_inverse"); - if ( A->m != A->n ) - error(E_SQUARE,"m_inverse"); - if ( ! out || out->m < A->m || out->n < A->n ) - out = m_resize(out,A->m,A->n); - - A_cp = m_resize(A_cp,A->m,A->n); - A_cp = m_copy(A,A_cp); - tmp = v_resize(tmp,A->m); - tmp2 = v_resize(tmp2,A->m); - pivot = px_resize(pivot,A->m); - MEM_STAT_REG(A_cp,TYPE_MAT); - MEM_STAT_REG(tmp, TYPE_VEC); - MEM_STAT_REG(tmp2,TYPE_VEC); - MEM_STAT_REG(pivot,TYPE_PERM); - tracecatch(LUfactor(A_cp,pivot),"m_inverse"); - for ( i = 0; i < A->n; i++ ) - { - v_zero(tmp); - tmp->ve[i] = 1.0; - tracecatch(LUsolve(A_cp,pivot,tmp,tmp2),"m_inverse"); - set_col(out,i,tmp2); - } - - return out; -} - -/* LUcondest -- returns an estimate of the condition number of LU given the - LU factorisation in compact form */ -double LUcondest(LU,pivot) -MAT *LU; -PERM *pivot; -{ - static VEC *y = VNULL, *z = VNULL; - Real cond_est=0.0, L_norm, U_norm, sum, tiny; - int i, j, n; - - if ( ! LU || ! pivot ) - error(E_NULL,"LUcondest"); - if ( LU->m != LU->n ) - error(E_SQUARE,"LUcondest"); - if ( LU->n != pivot->size ) - error(E_SIZES,"LUcondest"); - - tiny = 10.0/HUGE_VAL; - - n = LU->n; - y = v_resize(y,n); - z = v_resize(z,n); - MEM_STAT_REG(y,TYPE_VEC); - MEM_STAT_REG(z,TYPE_VEC); - - for ( i = 0; i < n; i++ ) - { - sum = 0.0; - for ( j = 0; j < i; j++ ) - sum -= LU->me[j][i]*y->ve[j]; - sum -= (sum < 0.0) ? 1.0 : -1.0; - if ( fabs(LU->me[i][i]) <= tiny*fabs(sum) ) - return HUGE_VAL; - y->ve[i] = sum / LU->me[i][i]; - } - - catch(E_SING, - LTsolve(LU,y,y,1.0); - LUsolve(LU,pivot,y,z); - , - return HUGE_VAL); - - /* now estimate norm of A (even though it is not directly available) */ - /* actually computes ||L||_inf.||U||_inf */ - U_norm = 0.0; - for ( i = 0; i < n; i++ ) - { - sum = 0.0; - for ( j = i; j < n; j++ ) - sum += fabs(LU->me[i][j]); - if ( sum > U_norm ) - U_norm = sum; - } - L_norm = 0.0; - for ( i = 0; i < n; i++ ) - { - sum = 1.0; - for ( j = 0; j < i; j++ ) - sum += fabs(LU->me[i][j]); - if ( sum > L_norm ) - L_norm = sum; - } - - tracecatch(cond_est = U_norm*L_norm*v_norm_inf(z)/v_norm_inf(y), - "LUcondest"); - - return cond_est; -} diff --git a/src/mesch/machine.c b/src/mesch/machine.c deleted file mode 100755 index ab7166ac8b..0000000000 --- a/src/mesch/machine.c +++ /dev/null @@ -1,147 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. 
-** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - This file contains basic routines which are used by the functions - in meschach.a etc. - These are the routines that should be modified in order to take - full advantage of specialised architectures (pipelining, vector - processors etc). - */ - -static char *rcsid = "machine.c,v 1.1 1997/12/04 17:55:33 hines Exp"; - -#include "machine.h" - -/* __ip__ -- inner product */ -double __ip__(dp1,dp2,len) -register Real *dp1, *dp2; -int len; -{ -#ifdef VUNROLL - register int len4; - register Real sum1, sum2, sum3; -#endif - register int i; - register Real sum; - - sum = 0.0; -#ifdef VUNROLL - sum1 = sum2 = sum3 = 0.0; - - len4 = len / 4; - len = len % 4; - - for ( i = 0; i < len4; i++ ) - { - sum += dp1[4*i]*dp2[4*i]; - sum1 += dp1[4*i+1]*dp2[4*i+1]; - sum2 += dp1[4*i+2]*dp2[4*i+2]; - sum3 += dp1[4*i+3]*dp2[4*i+3]; - } - sum += sum1 + sum2 + sum3; - dp1 += 4*len4; dp2 += 4*len4; -#endif - - for ( i = 0; i < len; i++ ) - sum += dp1[i]*dp2[i]; - - return sum; -} - -/* __mltadd__ -- scalar multiply and add c.f. v_mltadd() */ -void __mltadd__(dp1,dp2,s,len) -register Real *dp1, *dp2; -register double s; -register int len; -{ - register int i; -#ifdef VUNROLL - register int len4; - - len4 = len / 4; - len = len % 4; - for ( i = 0; i < len4; i++ ) - { - dp1[4*i] += s*dp2[4*i]; - dp1[4*i+1] += s*dp2[4*i+1]; - dp1[4*i+2] += s*dp2[4*i+2]; - dp1[4*i+3] += s*dp2[4*i+3]; - } - dp1 += 4*len4; dp2 += 4*len4; -#endif - - for ( i = 0; i < len; i++ ) - dp1[i] += s*dp2[i]; -} - -/* __smlt__ scalar multiply array c.f. sv_mlt() */ -void __smlt__(dp,s,out,len) -register Real *dp, *out; -register double s; -register int len; -{ - register int i; - for ( i = 0; i < len; i++ ) - out[i] = s*dp[i]; -} - -/* __add__ -- add arrays c.f. v_add() */ -void __add__(dp1,dp2,out,len) -register Real *dp1, *dp2, *out; -register int len; -{ - register int i; - for ( i = 0; i < len; i++ ) - out[i] = dp1[i] + dp2[i]; -} - -/* __sub__ -- subtract arrays c.f. 
v_sub() */ -void __sub__(dp1,dp2,out,len) -register Real *dp1, *dp2, *out; -register int len; -{ - register int i; - for ( i = 0; i < len; i++ ) - out[i] = dp1[i] - dp2[i]; -} - -/* __zero__ -- zeros an array of floating point numbers */ -void __zero__(dp,len) -register Real *dp; -register int len; -{ -#ifdef CHAR0ISDBL0 - /* if a floating point zero is equivalent to a string of nulls */ - MEM_ZERO((char *)dp,len*sizeof(Real)); -#else - /* else, need to zero the array entry by entry */ - int i; - for ( i = 0; i < len; i++ ) - dp[i] = 0.0; -#endif -} - diff --git a/src/mesch/machine.h b/src/mesch/machine.h deleted file mode 100755 index c8eb495fb4..0000000000 --- a/src/mesch/machine.h +++ /dev/null @@ -1,256 +0,0 @@ -/* - * This file has been hacked from the machine.h files supplied from the - * original meschach distribution. It's now a generic file that works on - * all machines. - * - * This file used to define a bunch of HAVE_xyz macros. This is all handled - * now in config.h. - */ -/* machine.h. Generated automatically by configure. */ -/* Any machine specific stuff goes here */ -/* Add details necessary for your own installation here! */ - -/* RCS id: machine.h,v 1.3 1998/08/31 19:47:38 hines Exp */ - -/* This is for use with "configure" -- if you are not using configure - then use machine.van for the "vanilla" version of machine.h */ - -/* Note special macros: ANSI_C (ANSI C syntax) - SEGMENTED (segmented memory machine e.g. MS-DOS) - MALLOCDECL (declared if malloc() etc have - been declared) */ - -#ifndef _MACHINE_H -#define _MACHINE_H 1 - -#include - -#include <../../nrnconf.h> - -#if defined(HAVE_STDINT_H) -#include -#endif - -#if !defined(HUGE) && defined(HUGE_VAL) -#define HUGE HUGE_VAL -#endif - -typedef uint32_t u_int; - -/* #undef const */ - -/* #undef MALLOCDECL */ -#define NOT_SEGMENTED 1 -#define CHAR0ISDBL0 1 -#undef HAVE_PROTOTYPES -#define HAVE_PROTOTYPES 1 -/* #undef HAVE_PROTOTYPES_IN_STRUCT */ - -/* for inclusion into C++ files */ -#ifdef __cplusplus -#define ANSI_C 1 -#ifndef HAVE_PROTOTYPES -#define HAVE_PROTOTYPES 1 -#endif -#ifndef HAVE_PROTOTYPES_IN_STRUCT -#define HAVE_PROTOTYPES_IN_STRUCT 1 -#endif -#endif /* __cplusplus */ - -/* example usage: VEC *PROTO(v_get,(int dim)); */ -#ifdef HAVE_PROTOTYPES -#define PROTO(name,args) name args -#else -#define PROTO(name,args) name() -#endif /* HAVE_PROTOTYPES */ -#ifdef HAVE_PROTOTYPES_IN_STRUCT -/* PROTO_() is to be used instead of PROTO() in struct's and typedef's */ -#define PROTO_(name,args) name args -#else -#define PROTO_(name,args) name() -#endif /* HAVE_PROTOTYPES_IN_STRUCT */ - -/* for basic or larger versions */ -#define COMPLEX 1 -#define SPARSE 1 - -/* for loop unrolling */ -/* #undef VUNROLL */ -/* #undef MUNROLL */ - -/* for segmented memory */ -#ifndef NOT_SEGMENTED -#define SEGMENTED -#endif - -/* An AIX machine had incompatible prototypes between -malloc.h and stdlib.h so prefer stdlib.h if it exists -*/ -#ifdef HAVE_STDLIB_H -#include -#else -/* if the system has malloc.h */ -#ifdef HAVE_MALLOC_H -#define MALLOCDECL 1 -#include -#endif -#endif - -/* any compiler should have this header */ -/* if not, change it */ -#include - - -/* Check for ANSI C memmove and memset */ -#if defined(STDC_HEADERS) || defined(WIN32) -/* standard copy & zero functions */ -#define MEM_COPY(from,to,size) memmove((to),(from),(size)) -#define MEM_ZERO(where,size) memset((where),'\0',(size)) - -#ifndef ANSI_C -#define ANSI_C 1 -#endif - -#endif - -/* standard headers */ -#ifdef ANSI_C -#include -#include -#include -#include 
-#endif - - -/* if have bcopy & bzero and no alternatives yet known, use them */ -#ifdef HAVE_BCOPY -#ifndef MEM_COPY -/* nonstandard copy function */ -#define MEM_COPY(from,to,size) bcopy((char *)(from),(char *)(to),(int)(size)) -#endif -#endif - -#ifdef HAVE_BZERO -#ifndef MEM_ZERO -/* nonstandard zero function */ -#define MEM_ZERO(where,size) bzero((char *)(where),(int)(size)) -#endif -#endif - -/* if the system has complex.h */ -#if 0 -#ifdef HAVE_COMPLEX_H -#include -#endif -/* - I've commented this out because it causes problems when run through a - C++ compiler. complex.h is part of the C++ standard library but does - something completely different. -*/ -#endif - -/* If prototypes are available & ANSI_C not yet defined, then define it, - but don't include any header files as the proper ANSI C headers - aren't here */ -#ifdef HAVE_PROTOTYPES -#ifndef ANSI_C -#define ANSI_C 1 -#endif -#endif - -/* floating point precision */ - -/* you can choose single, double or long double (if available) precision */ - -#define FLOAT 1 -#define DOUBLE 2 -#define LONG_DOUBLE 3 - -/* #undef REAL_FLT */ -/* #undef REAL_DBL */ - -/* if nothing is defined, choose double precision */ -#ifndef REAL_DBL -#ifndef REAL_FLT -#define REAL_DBL 1 -#endif -#endif - -/* single precision */ -#ifdef REAL_FLT -#define Real float -#define LongReal float -#define REAL FLOAT -#define LONGREAL FLOAT -#endif - -/* double precision */ -#ifdef REAL_DBL -#define Real double -#define LongReal double -#define REAL DOUBLE -#define LONGREAL DOUBLE -#endif - - -/* machine epsilon or unit roundoff error */ -/* This is correct on most IEEE Real precision systems */ -#ifdef DBL_EPSILON -#if REAL == DOUBLE -#define MACHEPS DBL_EPSILON -#elif REAL == FLOAT -#define MACHEPS FLT_EPSILON -#elif REAL == LONGDOUBLE -#define MACHEPS LDBL_EPSILON -#endif -#endif - -#define F_MACHEPS 1.19209e-07 -#define D_MACHEPS 2.22045e-16 - -#ifndef MACHEPS -#if REAL == DOUBLE -#define MACHEPS D_MACHEPS -#elif REAL == FLOAT -#define MACHEPS F_MACHEPS -#elif REAL == LONGDOUBLE -#define MACHEPS D_MACHEPS -#endif -#endif - -/* #undef M_MACHEPS */ - -/******************** -#ifdef DBL_EPSILON -#define MACHEPS DBL_EPSILON -#endif -#ifdef M_MACHEPS -#ifndef MACHEPS -#define MACHEPS M_MACHEPS -#endif -#endif -********************/ - -#define M_MAX_INT 2147483647 -#ifdef M_MAX_INT -#ifndef MAX_RAND -#define MAX_RAND ((double)(M_MAX_INT)) -/* This isn't true on a lot of older unix systems. */ -#endif -#endif - -/* for non-ANSI systems */ -#ifndef HUGE_VAL -#define HUGE_VAL HUGE -#else -#ifndef HUGE -#define HUGE HUGE_VAL -#endif -#endif - - -#if HAVE_UNISTD_H -#include -#endif - -#endif diff --git a/src/mesch/matlab.c b/src/mesch/matlab.c deleted file mode 100755 index 0f5ca9c224..0000000000 --- a/src/mesch/matlab.c +++ /dev/null @@ -1,212 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. 
All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This file contains routines for import/exporting data to/from - MATLAB. The main routines are: - MAT *m_save(FILE *fp,MAT *A,char *name) - VEC *v_save(FILE *fp,VEC *x,char *name) - MAT *m_load(FILE *fp,char **name) -*/ - -#include -#include "matrix.h" -#include "matlab.h" - -static char rcsid[] = "matlab.c,v 1.1 1997/12/04 17:55:34 hines Exp"; - -/* m_save -- save matrix in ".mat" file for MATLAB - -- returns matrix to be saved */ -MAT *m_save(fp,A,name) -FILE *fp; -MAT *A; -char *name; -{ - int i; - matlab mat; - - if ( ! A ) - error(E_NULL,"m_save"); - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = A->m; - mat.n = A->n; - mat.imag = FALSE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ -#if ORDER == ROW_ORDER - for ( i = 0; i < A->m; i++ ) - fwrite(A->me[i],sizeof(Real),(int)(A->n),fp); -#else /* column major order: ORDER == COL_ORDER */ - for ( j = 0; j < A->n; j++ ) - for ( i = 0; i < A->m; i++ ) - fwrite(&(A->me[i][j]),sizeof(Real),1,fp); -#endif - - return A; -} - - -/* v_save -- save vector in ".mat" file for MATLAB - -- saves it as a row vector - -- returns vector to be saved */ -VEC *v_save(fp,x,name) -FILE *fp; -VEC *x; -char *name; -{ - matlab mat; - - if ( ! x ) - error(E_NULL,"v_save"); - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = x->dim; - mat.n = 1; - mat.imag = FALSE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ - fwrite(x->ve,sizeof(Real),(int)(x->dim),fp); - - return x; -} - -/* d_save -- save double in ".mat" file for MATLAB - -- saves it as a row vector - -- returns vector to be saved */ -double d_save(fp,x,name) -FILE *fp; -double x; -char *name; -{ - matlab mat; - Real x1 = x; - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = 1; - mat.n = 1; - mat.imag = FALSE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ - fwrite(&x1,sizeof(Real),1,fp); - - return x; -} - -/* m_load -- loads in a ".mat" file variable as produced by MATLAB - -- matrix returned; imaginary parts ignored */ -MAT *m_load(fp,name) -FILE *fp; -char **name; -{ - MAT *A; - int i; - int m_flag, o_flag, p_flag, t_flag; - float f_temp; - Real d_temp; - matlab mat; - - if ( fread(&mat,sizeof(matlab),1,fp) != 1 ) - error(E_FORMAT,"m_load"); - if ( mat.type >= 10000 ) /* don't load a sparse matrix! 
*/ - error(E_FORMAT,"m_load"); - m_flag = (mat.type/1000) % 10; - o_flag = (mat.type/100) % 10; - p_flag = (mat.type/10) % 10; - t_flag = (mat.type) % 10; - if ( m_flag != MACH_ID ) - error(E_FORMAT,"m_load"); - if ( t_flag != 0 ) - error(E_FORMAT,"m_load"); - if ( p_flag != DOUBLE_PREC && p_flag != SINGLE_PREC ) - error(E_FORMAT,"m_load"); - *name = (char *)malloc((unsigned)(mat.namlen)+1); - if ( fread(*name,sizeof(char),(unsigned)(mat.namlen),fp) == 0 ) - error(E_FORMAT,"m_load"); - A = m_get((unsigned)(mat.m),(unsigned)(mat.n)); - for ( i = 0; i < A->m*A->n; i++ ) - { - if ( p_flag == DOUBLE_PREC ) { - if (fread(&d_temp,sizeof(double),1,fp) != 1) { - error(E_INPUT, "m_load"); - } - } else { - if (fread(&f_temp,sizeof(float),1,fp) != 1) { - error(E_INPUT, "m_load"); - } - d_temp = f_temp; - } - if ( o_flag == ROW_ORDER ) { - A->me[i / A->n][i % A->n] = d_temp; - } else if ( o_flag == COL_ORDER ) { - A->me[i % A->m][i / A->m] = d_temp; - } else { - error(E_FORMAT,"m_load"); - } - } - - if ( mat.imag ) /* skip imaginary part */ - for ( i = 0; i < A->m*A->n; i++ ) - { - if ( p_flag == DOUBLE_PREC ) { - if (fread(&d_temp,sizeof(double),1,fp) != 1) { - error(E_INPUT, "m_load"); - } - } else { - if (fread(&f_temp,sizeof(float),1,fp) != 1) { - error(E_INPUT, "m_load"); - } - } - } - - return A; -} - diff --git a/src/mesch/matlab.h b/src/mesch/matlab.h deleted file mode 100755 index f48396ff53..0000000000 --- a/src/mesch/matlab.h +++ /dev/null @@ -1,114 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* matlab.h -- Header file for matlab.c, spmatlab.c and zmatlab.c - for save/load formats */ - -#ifndef MATLAB_DEF - -#define MATLAB_DEF - -/* structure required by MATLAB */ -typedef struct { - long type; /* matrix type */ - long m; /* # rows */ - long n; /* # cols */ - long imag; /* is complex? 
*/ - long namlen; /* length of variable name */ - } matlab; - -/* macros for matrix storage type */ -#define INTEL 0 /* for 80x87 format */ -#define PC INTEL -#define MOTOROLA 1 /* 6888x format */ -#define SUN MOTOROLA -#define APOLLO MOTOROLA -#undef MAC -#define MAC MOTOROLA -#define VAX_D 2 -#define VAX_G 3 - -#define COL_ORDER 0 -#define ROW_ORDER 1 - -#define DOUBLE_PREC 0 /* double precision */ -#define SINGLE_PREC 1 /* single precision */ -#define INT_32 2 /* 32 bit integers (signed) */ -#define INT_16 3 /* 16 bit integers (signed) */ -#define INT_16u 4 /* 16 bit integers (unsigned) */ -/* end of macros for matrix storage type */ - -#ifndef MACH_ID -#define MACH_ID MOTOROLA -#endif - -#define ORDER ROW_ORDER - -#if REAL == DOUBLE -#define PRECISION DOUBLE_PREC -#elif REAL == FLOAT -#define PRECISION SINGLE_PREC -#endif - - -/* prototypes */ - -#ifdef ANSI_C - -MAT *m_save(FILE *,MAT *,char *); -MAT *m_load(FILE *,char **); -VEC *v_save(FILE *,VEC *,char *); -double d_save(FILE *,double,char *); - -#else - -extern MAT *m_save(), *m_load(); -extern VEC *v_save(); -extern double d_save(); -#endif - -/* complex variant */ -#ifdef COMPLEX -#include "zmatrix.h" - -#ifdef ANSI_C -extern ZMAT *zm_save(FILE *fp,ZMAT *A,char *name); -extern ZVEC *zv_save(FILE *fp,ZVEC *x,char *name); -extern complex z_save(FILE *fp,complex z,char *name); -extern ZMAT *zm_load(FILE *fp,char **name); - -#else - -extern ZMAT *zm_save(); -extern ZVEC *zv_save(); -extern complex z_save(); -extern ZMAT *zm_load(); - -#endif - -#endif - -#endif diff --git a/src/mesch/matop.c b/src/mesch/matop.c deleted file mode 100755 index 381f6da067..0000000000 --- a/src/mesch/matop.c +++ /dev/null @@ -1,499 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* matop.c 1.3 11/25/87 */ - - -#include -#include "matrix.h" - -static char rcsid[] = "matop.c,v 1.1 1997/12/04 17:55:35 hines Exp"; - - -/* m_add -- matrix addition -- may be in-situ */ -MAT *m_add(mat1,mat2,out) -MAT *mat1,*mat2,*out; -{ - u_int m,n,i; - - if ( mat1==(MAT *)NULL || mat2==(MAT *)NULL ) - error(E_NULL,"m_add"); - if ( mat1->m != mat2->m || mat1->n != mat2->n ) - error(E_SIZES,"m_add"); - if ( out==(MAT *)NULL || out->m != mat1->m || out->n != mat1->n ) - out = m_resize(out,mat1->m,mat1->n); - m = mat1->m; n = mat1->n; - for ( i=0; ime[i],mat2->me[i],out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = mat1->me[i][j]+mat2->me[i][j]; - **************************************************/ - } - - return (out); -} - -/* m_sub -- matrix subtraction -- may be in-situ */ -MAT *m_sub(mat1,mat2,out) -MAT *mat1,*mat2,*out; -{ - u_int m,n,i; - - if ( mat1==(MAT *)NULL || mat2==(MAT *)NULL ) - error(E_NULL,"m_sub"); - if ( mat1->m != mat2->m || mat1->n != mat2->n ) - error(E_SIZES,"m_sub"); - if ( out==(MAT *)NULL || out->m != mat1->m || out->n != mat1->n ) - out = m_resize(out,mat1->m,mat1->n); - m = mat1->m; n = mat1->n; - for ( i=0; ime[i],mat2->me[i],out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = mat1->me[i][j]-mat2->me[i][j]; - **************************************************/ - } - - return (out); -} - -/* m_mlt -- matrix-matrix multiplication */ -MAT *m_mlt(A,B,OUT) -MAT *A,*B,*OUT; -{ - u_int i, /* j, */ k, m, n, p; - Real **A_v, **B_v /*, *B_row, *OUT_row, sum, tmp */; - - if ( A==(MAT *)NULL || B==(MAT *)NULL ) - error(E_NULL,"m_mlt"); - if ( A->n != B->m ) - error(E_SIZES,"m_mlt"); - if ( A == OUT || B == OUT ) - error(E_INSITU,"m_mlt"); - m = A->m; n = A->n; p = B->n; - A_v = A->me; B_v = B->me; - - if ( OUT==(MAT *)NULL || OUT->m != A->m || OUT->n != B->n ) - OUT = m_resize(OUT,A->m,B->n); - -/**************************************************************** - for ( i=0; ime[i][j] = sum; - } -****************************************************************/ - m_zero(OUT); - for ( i=0; ime[i],B_v[k],A_v[i][k],(int)p); - /************************************************** - B_row = B_v[k]; OUT_row = OUT->me[i]; - for ( j=0; jn != B->n ) - error(E_SIZES,"mmtr_mlt"); - if ( ! OUT || OUT->m != A->m || OUT->n != B->m ) - OUT = m_resize(OUT,A->m,B->m); - - limit = A->n; - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < B->m; j++ ) - { - OUT->me[i][j] = __ip__(A->me[i],B->me[j],(int)limit); - /************************************************** - sum = 0.0; - A_row = A->me[i]; - B_row = B->me[j]; - for ( k = 0; k < limit; k++ ) - sum += (*A_row++)*(*B_row++); - OUT->me[i][j] = sum; - **************************************************/ - } - - return OUT; -} - -/* mtrm_mlt -- matrix transposed-matrix multiplication - -- A^T.B is returned, result stored in OUT */ -MAT *mtrm_mlt(A,B,OUT) -MAT *A, *B, *OUT; -{ - int i, k, limit; - /* Real *B_row, *OUT_row, multiplier; */ - - if ( ! A || ! B ) - error(E_NULL,"mmtr_mlt"); - if ( A == OUT || B == OUT ) - error(E_INSITU,"mtrm_mlt"); - if ( A->m != B->m ) - error(E_SIZES,"mmtr_mlt"); - if ( ! 
OUT || OUT->m != A->n || OUT->n != B->n ) - OUT = m_resize(OUT,A->n,B->n); - - limit = B->n; - m_zero(OUT); - for ( k = 0; k < A->m; k++ ) - for ( i = 0; i < A->n; i++ ) - { - if ( A->me[k][i] != 0.0 ) - __mltadd__(OUT->me[i],B->me[k],A->me[k][i],(int)limit); - /************************************************** - multiplier = A->me[k][i]; - OUT_row = OUT->me[i]; - B_row = B->me[k]; - for ( j = 0; j < limit; j++ ) - *(OUT_row++) += multiplier*(*B_row++); - **************************************************/ - } - - return OUT; -} - -/* mv_mlt -- matrix-vector multiplication - -- Note: b is treated as a column vector */ -VEC *mv_mlt(A,b,out) -MAT *A; -VEC *b,*out; -{ - u_int i, m, n; - Real **A_v, *b_v /*, *A_row */; - /* register Real sum; */ - - if ( A==(MAT *)NULL || b==(VEC *)NULL ) - error(E_NULL,"mv_mlt"); - if ( A->n != b->dim ) - error(E_SIZES,"mv_mlt"); - if ( b == out ) - error(E_INSITU,"mv_mlt"); - if ( out == (VEC *)NULL || out->dim != A->m ) - out = v_resize(out,A->m); - - m = A->m; n = A->n; - A_v = A->me; b_v = b->ve; - for ( i=0; ive[i] = __ip__(A_v[i],b_v,(int)n); - /************************************************** - A_row = A_v[i]; b_v = b->ve; - for ( j=0; jve[i] = sum; - **************************************************/ - } - - return out; -} - -/* sm_mlt -- scalar-matrix multiply -- may be in-situ */ -MAT *sm_mlt(scalar,matrix,out) -double scalar; -MAT *matrix,*out; -{ - u_int m,n,i; - - if ( matrix==(MAT *)NULL ) - error(E_NULL,"sm_mlt"); - if ( out==(MAT *)NULL || out->m != matrix->m || out->n != matrix->n ) - out = m_resize(out,matrix->m,matrix->n); - m = matrix->m; n = matrix->n; - for ( i=0; ime[i],(double)scalar,out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = scalar*matrix->me[i][j]; - **************************************************/ - return (out); -} - -/* vm_mlt -- vector-matrix multiplication - -- Note: b is treated as a row vector */ -VEC *vm_mlt(A,b,out) -MAT *A; -VEC *b,*out; -{ - u_int j,m,n; - /* Real sum,**A_v,*b_v; */ - - if ( A==(MAT *)NULL || b==(VEC *)NULL ) - error(E_NULL,"vm_mlt"); - if ( A->m != b->dim ) - error(E_SIZES,"vm_mlt"); - if ( b == out ) - error(E_INSITU,"vm_mlt"); - if ( out == (VEC *)NULL || out->dim != A->n ) - out = v_resize(out,A->n); - - m = A->m; n = A->n; - - v_zero(out); - for ( j = 0; j < m; j++ ) - if ( b->ve[j] != 0.0 ) - __mltadd__(out->ve,A->me[j],b->ve[j],(int)n); - /************************************************** - A_v = A->me; b_v = b->ve; - for ( j=0; jve[j] = sum; - } - **************************************************/ - - return out; -} - -/* m_transp -- transpose matrix */ -MAT *m_transp(in,out) -MAT *in, *out; -{ - int i, j; - int in_situ; - Real tmp; - - if ( in == (MAT *)NULL ) - error(E_NULL,"m_transp"); - if ( in == out && in->n != in->m ) - error(E_INSITU2,"m_transp"); - in_situ = ( in == out ); - if ( out == (MAT *)NULL || out->m != in->n || out->n != in->m ) - out = m_resize(out,in->n,in->m); - - if ( ! in_situ ) - for ( i = 0; i < in->m; i++ ) - for ( j = 0; j < in->n; j++ ) - out->me[j][i] = in->me[i][j]; - else - for ( i = 1; i < in->m; i++ ) - for ( j = 0; j < i; j++ ) - { tmp = in->me[i][j]; - in->me[i][j] = in->me[j][i]; - in->me[j][i] = tmp; - } - - return out; -} - -/* swap_rows -- swaps rows i and j of matrix A upto column lim */ -MAT *swap_rows(A,i,j,lo,hi) -MAT *A; -int i, j, lo, hi; -{ - int k; - Real **A_me, tmp; - - if ( ! 
A ) - error(E_NULL,"swap_rows"); - if ( i < 0 || j < 0 || i >= A->m || j >= A->m ) - error(E_SIZES,"swap_rows"); - lo = max(0,lo); - hi = min(hi,A->n-1); - A_me = A->me; - - for ( k = lo; k <= hi; k++ ) - { - tmp = A_me[k][i]; - A_me[k][i] = A_me[k][j]; - A_me[k][j] = tmp; - } - return A; -} - -/* swap_cols -- swap columns i and j of matrix A upto row lim */ -MAT *swap_cols(A,i,j,lo,hi) -MAT *A; -int i, j, lo, hi; -{ - int k; - Real **A_me, tmp; - - if ( ! A ) - error(E_NULL,"swap_cols"); - if ( i < 0 || j < 0 || i >= A->n || j >= A->n ) - error(E_SIZES,"swap_cols"); - lo = max(0,lo); - hi = min(hi,A->m-1); - A_me = A->me; - - for ( k = lo; k <= hi; k++ ) - { - tmp = A_me[i][k]; - A_me[i][k] = A_me[j][k]; - A_me[j][k] = tmp; - } - return A; -} - -/* ms_mltadd -- matrix-scalar multiply and add - -- may be in situ - -- returns out == A1 + s*A2 */ -MAT *ms_mltadd(A1,A2,s,out) -MAT *A1, *A2, *out; -double s; -{ - /* register Real *A1_e, *A2_e, *out_e; */ - /* register int j; */ - int i, m, n; - - if ( ! A1 || ! A2 ) - error(E_NULL,"ms_mltadd"); - if ( A1->m != A2->m || A1->n != A2->n ) - error(E_SIZES,"ms_mltadd"); - - if ( out != A1 && out != A2 ) - out = m_resize(out,A1->m,A1->n); - - if ( s == 0.0 ) - return m_copy(A1,out); - if ( s == 1.0 ) - return m_add(A1,A2,out); - - tracecatch(out = m_copy(A1,out),"ms_mltadd"); - - m = A1->m; n = A1->n; - for ( i = 0; i < m; i++ ) - { - __mltadd__(out->me[i],A2->me[i],s,(int)n); - /************************************************** - A1_e = A1->me[i]; - A2_e = A2->me[i]; - out_e = out->me[i]; - for ( j = 0; j < n; j++ ) - out_e[j] = A1_e[j] + s*A2_e[j]; - **************************************************/ - } - - return out; -} - -/* mv_mltadd -- matrix-vector multiply and add - -- may not be in situ - -- returns out == v1 + alpha*A*v2 */ -VEC *mv_mltadd(v1,v2,A,alpha,out) -VEC *v1, *v2, *out; -MAT *A; -double alpha; -{ - /* register int j; */ - int i, m, n; - Real *v2_ve, *out_ve; - - if ( ! v1 || ! v2 || ! A ) - error(E_NULL,"mv_mltadd"); - if ( out == v2 ) - error(E_INSITU,"mv_mltadd"); - if ( v1->dim != A->m || v2->dim != A->n ) - error(E_SIZES,"mv_mltadd"); - - tracecatch(out = v_copy(v1,out),"mv_mltadd"); - - v2_ve = v2->ve; out_ve = out->ve; - m = A->m; n = A->n; - - if ( alpha == 0.0 ) - return out; - - for ( i = 0; i < m; i++ ) - { - out_ve[i] += alpha*__ip__(A->me[i],v2_ve,(int)n); - /************************************************** - A_e = A->me[i]; - sum = 0.0; - for ( j = 0; j < n; j++ ) - sum += A_e[j]*v2_ve[j]; - out_ve[i] = v1->ve[i] + alpha*sum; - **************************************************/ - } - - return out; -} - -/* vm_mltadd -- vector-matrix multiply and add - -- may not be in situ - -- returns out' == v1' + v2'*A */ -VEC *vm_mltadd(v1,v2,A,alpha,out) -VEC *v1, *v2, *out; -MAT *A; -double alpha; -{ - int /* i, */ j, m, n; - Real tmp, /* *A_e, */ *out_ve; - - if ( ! v1 || ! v2 || ! 
A ) - error(E_NULL,"vm_mltadd"); - if ( v2 == out ) - error(E_INSITU,"vm_mltadd"); - if ( v1->dim != A->n || A->m != v2->dim ) - error(E_SIZES,"vm_mltadd"); - - tracecatch(out = v_copy(v1,out),"vm_mltadd"); - - out_ve = out->ve; m = A->m; n = A->n; - for ( j = 0; j < m; j++ ) - { - tmp = v2->ve[j]*alpha; - if ( tmp != 0.0 ) - __mltadd__(out_ve,A->me[j],tmp,(int)n); - /************************************************** - A_e = A->me[j]; - for ( i = 0; i < n; i++ ) - out_ve[i] += A_e[i]*tmp; - **************************************************/ - } - - return out; -} - diff --git a/src/mesch/matrix.h b/src/mesch/matrix.h deleted file mode 100755 index 34965ec04c..0000000000 --- a/src/mesch/matrix.h +++ /dev/null @@ -1,691 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -*************************************************************************** - -Date Author Modification -18 Feb 2000 Gary Holt Removed definition of u_int since it's now - handled in config.h. Was causing compilation - warnings unnecessarily. -*/ - -/* - Type definitions for general purpose maths package -*/ - -#ifndef MATRIXH - -/* RCS id: $Id: matrix.h 616 2004-04-24 21:28:33Z hines $ */ - -#define MATRIXH - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "machine.h" -#include "err.h" -#include "meminfo.h" - -#define m_move mesch_m_move -#define OUT mesch_out - -#if defined(__MWERKS__) && !defined(_MSC_VER) -#include -#else -#include -#endif -/* unsigned integer type */ -/* This is no longer needed; it's defined in config.h if the compiler hasn't - * defined it already. 
*/ -/* #ifndef U_INT_DEF */ -/* typedef unsigned int u_int; */ -/* #define U_INT_DEF */ -/* #endif */ - -/* vector definition */ -typedef struct { - u_int dim, max_dim; - Real *ve; - } VEC; - -/* matrix definition */ -typedef struct { - u_int m, n; - u_int max_m, max_n, max_size; - Real **me,*base; /* base is base of alloc'd mem */ - } MAT; - -/* band matrix definition */ -typedef struct { - MAT *mat; /* matrix */ - int lb,ub; /* lower and upper bandwidth */ - } BAND; - - -/* permutation definition */ -typedef struct { - u_int size, max_size, *pe; - } PERM; - -/* integer vector definition */ -typedef struct { - u_int dim, max_dim; - int *ive; - } IVEC; - - -#if 1 -#include -#else -#ifndef MALLOCDECL -#ifndef ANSI_C -extern char *malloc(), *calloc(), *realloc(); -#else -extern void *malloc(size_t), - *calloc(size_t,size_t), - *realloc(void *,size_t); -#endif -#endif -#endif - -#ifndef ANSI_C -extern void m_version(); -#else -void m_version( void ); -#endif - -#ifndef ANSI_C -/* allocate one object of given type */ -#define NEW(type) ((type *)calloc(1,sizeof(type))) - -/* allocate num objects of given type */ -#define NEW_A(num,type) ((type *)calloc((unsigned)(num),sizeof(type))) - - /* re-allocate arry to have num objects of the given type */ -#define RENEW(var,num,type) \ - ((var)=(type *)((var) ? \ - realloc((char *)(var),(unsigned)(num)*sizeof(type)) : \ - calloc((unsigned)(num),sizeof(type)))) - -#define MEMCOPY(from,to,n_items,type) \ - MEM_COPY((char *)(from),(char *)(to),(unsigned)(n_items)*sizeof(type)) - -#else -/* allocate one object of given type */ -#define NEW(type) ((type *)calloc((size_t)1,(size_t)sizeof(type))) - -/* allocate num objects of given type */ -#define NEW_A(num,type) ((type *)calloc((size_t)(num),(size_t)sizeof(type))) - - /* re-allocate arry to have num objects of the given type */ -#define RENEW(var,num,type) \ - ((var)=(type *)((var) ? \ - realloc((char *)(var),(size_t)((num)*sizeof(type))) : \ - calloc((size_t)(num),(size_t)sizeof(type)))) - -#define MEMCOPY(from,to,n_items,type) \ - MEM_COPY((char *)(from),(char *)(to),(unsigned)(n_items)*sizeof(type)) - -#endif - -/* type independent min and max operations */ -#ifndef max -#define max(a,b) ((a) > (b) ? (a) : (b)) -#endif -#ifndef min -#define min(a,b) ((a) > (b) ? (b) : (a)) -#endif - - -#undef TRUE -#define TRUE 1 -#undef FALSE -#define FALSE 0 - - -/* for input routines */ -#define MAXLINE 81 - - -/* Dynamic memory allocation */ - -/* Should use M_FREE/V_FREE/PX_FREE in programs instead of m/v/px_free() - as this is considerably safer -- also provides a simple type check ! 
*/ - -#ifndef ANSI_C - -extern VEC *v_get(), *v_resize(); -extern MAT *m_get(), *m_resize(); -extern PERM *px_get(), *px_resize(); -extern IVEC *iv_get(), *iv_resize(); -extern int m_free(),v_free(); -extern int px_free(); -extern int iv_free(); -extern BAND *bd_get(), *bd_resize(); -extern int bd_free(); - -#else - -/* get/resize vector to given dimension */ -extern VEC *v_get(int), *v_resize(VEC *,int); -/* get/resize matrix to be m x n */ -extern MAT *m_get(int,int), *m_resize(MAT *,int,int); -/* get/resize permutation to have the given size */ -extern PERM *px_get(int), *px_resize(PERM *,int); -/* get/resize an integer vector to given dimension */ -extern IVEC *iv_get(int), *iv_resize(IVEC *,int); -/* get/resize a band matrix to given dimension */ -extern BAND *bd_get(int,int,int), *bd_resize(BAND *,int,int,int); - -/* free (de-allocate) (band) matrices, vectors, permutations and - integer vectors */ -extern int iv_free(IVEC *); -extern int m_free(MAT *),v_free(VEC *),px_free(PERM *); -extern int bd_free(BAND *); - -#endif - - -/* MACROS */ - -/* macros that also check types and sets pointers to NULL */ -#define M_FREE(mat) ( m_free(mat), (mat)=(MAT *)NULL ) -#define V_FREE(vec) ( v_free(vec), (vec)=(VEC *)NULL ) -#define PX_FREE(px) ( px_free(px), (px)=(PERM *)NULL ) -#define IV_FREE(iv) ( iv_free(iv), (iv)=(IVEC *)NULL ) - -#define MAXDIM 2001 - - -/* Entry level access to data structures */ -#ifdef DEBUG - -/* returns x[i] */ -#define v_entry(x,i) (((i) < 0 || (i) >= (x)->dim) ? \ - error(E_BOUNDS,"v_entry"), 0.0 : (x)->ve[i] ) - -/* x[i] <- val */ -#define v_set_val(x,i,val) ((x)->ve[i] = ((i) < 0 || (i) >= (x)->dim) ? \ - error(E_BOUNDS,"v_set_val"), 0.0 : (val)) - -/* x[i] <- x[i] + val */ -#define v_add_val(x,i,val) ((x)->ve[i] += ((i) < 0 || (i) >= (x)->dim) ? \ - error(E_BOUNDS,"v_add_val"), 0.0 : (val)) - -/* x[i] <- x[i] - val */ -#define v_sub_val(x,i,val) ((x)->ve[i] -= ((i) < 0 || (i) >= (x)->dim) ? \ - error(E_BOUNDS,"v_sub_val"), 0.0 : (val)) - -/* returns A[i][j] */ -#define m_entry(A,i,j) (((i) < 0 || (i) >= (A)->m || \ - (j) < 0 || (j) >= (A)->n) ? \ - error(E_BOUNDS,"m_entry"), 0.0 : (A)->me[i][j] ) - -/* A[i][j] <- val */ -#define m_set_val(A,i,j,val) ((A)->me[i][j] = ((i) < 0 || (i) >= (A)->m || \ - (j) < 0 || (j) >= (A)->n) ? \ - error(E_BOUNDS,"m_set_val"), 0.0 : (val) ) - -/* A[i][j] <- A[i][j] + val */ -#define m_add_val(A,i,j,val) ((A)->me[i][j] += ((i) < 0 || (i) >= (A)->m || \ - (j) < 0 || (j) >= (A)->n) ? \ - error(E_BOUNDS,"m_add_val"), 0.0 : (val) ) - -/* A[i][j] <- A[i][j] - val */ -#define m_sub_val(A,i,j,val) ((A)->me[i][j] -= ((i) < 0 || (i) >= (A)->m || \ - (j) < 0 || (j) >= (A)->n) ? 
\ - error(E_BOUNDS,"m_sub_val"), 0.0 : (val) ) -#else - -/* returns x[i] */ -#define v_entry(x,i) ((x)->ve[i]) - -/* x[i] <- val */ -#define v_set_val(x,i,val) ((x)->ve[i] = (val)) - -/* x[i] <- x[i] + val */ -#define v_add_val(x,i,val) ((x)->ve[i] += (val)) - - /* x[i] <- x[i] - val */ -#define v_sub_val(x,i,val) ((x)->ve[i] -= (val)) - -/* returns A[i][j] */ -#define m_entry(A,i,j) ((A)->me[i][j]) - -/* A[i][j] <- val */ -#define m_set_val(A,i,j,val) ((A)->me[i][j] = (val) ) - -/* A[i][j] <- A[i][j] + val */ -#define m_add_val(A,i,j,val) ((A)->me[i][j] += (val) ) - -/* A[i][j] <- A[i][j] - val */ -#define m_sub_val(A,i,j,val) ((A)->me[i][j] -= (val) ) - -#endif - - -/* I/O routines */ -#ifndef ANSI_C - -extern void v_foutput(),m_foutput(),px_foutput(); -extern void iv_foutput(); -extern VEC *v_finput(); -extern MAT *m_finput(); -extern PERM *px_finput(); -extern IVEC *iv_finput(); -extern int fy_or_n(), fin_int(), yn_dflt(), skipjunk(); -extern double fin_double(); - -#else - -/* print x on file fp */ -void v_foutput(FILE *fp,VEC *x), - /* print A on file fp */ - m_foutput(FILE *fp,MAT *A), - /* print px on file fp */ - px_foutput(FILE *fp,PERM *px); -/* print ix on file fp */ -void iv_foutput(FILE *fp,IVEC *ix); - -/* Note: if out is NULL, then returned object is newly allocated; - Also: if out is not NULL, then that size is assumed */ - -/* read in vector from fp */ -VEC *v_finput(FILE *fp,VEC *out); -/* read in matrix from fp */ -MAT *m_finput(FILE *fp,MAT *out); -/* read in permutation from fp */ -PERM *px_finput(FILE *fp,PERM *out); -/* read in int vector from fp */ -IVEC *iv_finput(FILE *fp,IVEC *out); - -/* fy_or_n -- yes-or-no to question in string s - -- question written to stderr, input from fp - -- if fp is NOT a tty then return y_n_dflt */ -int fy_or_n(FILE *fp,char *s); - -/* yn_dflt -- sets the value of y_n_dflt to val */ -int yn_dflt(int val); - -/* fin_int -- return integer read from file/stream fp - -- prompt s on stderr if fp is a tty - -- check that x lies between low and high: re-prompt if - fp is a tty, error exit otherwise - -- ignore check if low > high */ -int fin_int(FILE *fp,char *s,int low,int high); - -/* fin_double -- return double read from file/stream fp - -- prompt s on stderr if fp is a tty - -- check that x lies between low and high: re-prompt if - fp is a tty, error exit otherwise - -- ignore check if low > high */ -double fin_double(FILE *fp,char *s,double low,double high); - -/* it skips white spaces and strings of the form #....\n - Here .... is a comment string */ -int skipjunk(FILE *fp); - -#endif - - -/* MACROS */ - -/* macros to use stdout and stdin instead of explicit fp */ -#define v_output(vec) v_foutput(stdout,vec) -#define v_input(vec) v_finput(stdin,vec) -#define m_output(mat) m_foutput(stdout,mat) -#define m_input(mat) m_finput(stdin,mat) -#define px_output(px) px_foutput(stdout,px) -#define px_input(px) px_finput(stdin,px) -#define iv_output(iv) iv_foutput(stdout,iv) -#define iv_input(iv) iv_finput(stdin,iv) - -/* general purpose input routine; skips comments # ... \n */ -#define finput(fp,prompt,fmt,var) \ - ( ( isatty(fileno(fp)) ? fprintf(stderr,prompt) : skipjunk(fp) ), \ - fscanf(fp,fmt,var) ) -#define input(prompt,fmt,var) finput(stdin,prompt,fmt,var) -#define fprompter(fp,prompt) \ - ( isatty(fileno(fp)) ? 
fprintf(stderr,prompt) : skipjunk(fp) ) -#define prompter(prompt) fprompter(stdin,prompt) -#define y_or_n(s) fy_or_n(stdin,s) -#define in_int(s,lo,hi) fin_int(stdin,s,lo,hi) -#define in_double(s,lo,hi) fin_double(stdin,s,lo,hi) - -/* Copying routines */ -#ifndef ANSI_C -extern MAT *_m_copy(), *m_move(), *vm_move(); -extern VEC *_v_copy(), *v_move(), *mv_move(); -extern PERM *px_copy(); -extern IVEC *iv_copy(), *iv_move(); -extern BAND *bd_copy(); - -#else - -/* copy in to out starting at out[i0][j0] */ -extern MAT *_m_copy(MAT *in,MAT *out,u_int i0,u_int j0), - * m_move(MAT *in, int, int, int, int, MAT *out, int, int), - *vm_move(VEC *in, int, MAT *out, int, int, int, int); -/* copy in to out starting at out[i0] */ -extern VEC *_v_copy(VEC *in,VEC *out,u_int i0), - * v_move(VEC *in, int, int, VEC *out, int), - *mv_move(MAT *in, int, int, int, int, VEC *out, int); -extern PERM *px_copy(PERM *in,PERM *out); -extern IVEC *iv_copy(IVEC *in,IVEC *out), - *iv_move(IVEC *in, int, int, IVEC *out, int); -extern BAND *bd_copy(BAND *in,BAND *out); - -#endif - - -/* MACROS */ -#define m_copy(in,out) _m_copy(in,out,0,0) -#define v_copy(in,out) _v_copy(in,out,0) - - -/* Initialisation routines -- to be zero, ones, random or identity */ -#ifndef ANSI_C -extern VEC *v_zero(), *v_rand(), *v_ones(); -extern MAT *m_zero(), *m_ident(), *m_rand(), *m_ones(); -extern PERM *px_ident(); -extern IVEC *iv_zero(); -#else -extern VEC *v_zero(VEC *), *v_rand(VEC *), *v_ones(VEC *); -extern MAT *m_zero(MAT *), *m_ident(MAT *), *m_rand(MAT *), - *m_ones(MAT *); -extern PERM *px_ident(PERM *); -extern IVEC *iv_zero(IVEC *); -#endif - -/* Basic vector operations */ -#ifndef ANSI_C -extern VEC *sv_mlt(), *mv_mlt(), *vm_mlt(), *v_add(), *v_sub(), - *px_vec(), *pxinv_vec(), *v_mltadd(), *v_map(), *_v_map(), - *v_lincomb(), *v_linlist(); -extern double v_min(), v_max(), v_sum(); -extern VEC *v_star(), *v_slash(), *v_sort(); -extern double _in_prod(), __ip__(); -extern void __mltadd__(), __add__(), __sub__(), - __smlt__(), __zero__(); -#else - -extern VEC *sv_mlt(double,VEC *,VEC *), /* out <- s.x */ - *mv_mlt(MAT *,VEC *,VEC *), /* out <- A.x */ - *vm_mlt(MAT *,VEC *,VEC *), /* out^T <- x^T.A */ - *v_add(VEC *,VEC *,VEC *), /* out <- x + y */ - *v_sub(VEC *,VEC *,VEC *), /* out <- x - y */ - *px_vec(PERM *,VEC *,VEC *), /* out <- P.x */ - *pxinv_vec(PERM *,VEC *,VEC *), /* out <- P^{-1}.x */ - *v_mltadd(VEC *,VEC *,double,VEC *), /* out <- x + s.y */ -#ifdef PROTOTYPES_IN_STRUCT - *v_map(double (*f)(double),VEC *,VEC *), - /* out[i] <- f(x[i]) */ - *_v_map(double (*f)(void *,double),void *,VEC *,VEC *), -#else - *v_map(double (*f)(),VEC *,VEC *), /* out[i] <- f(x[i]) */ - *_v_map(double (*f)(),void *,VEC *,VEC *), -#endif - *v_lincomb(int,VEC **,Real *,VEC *), - /* out <- sum_i s[i].x[i] */ - *v_linlist(VEC *out,VEC *v1,double a1,...); - /* out <- s1.x1 + s2.x2 + ... 
*/ - -/* returns min_j x[j] (== x[i]) */ -extern double v_min(VEC *, int *), - /* returns max_j x[j] (== x[i]) */ - v_max(VEC *, int *), - /* returns sum_i x[i] */ - v_sum(VEC *); - -/* Hadamard product: out[i] <- x[i].y[i] */ -extern VEC *v_star(VEC *, VEC *, VEC *), - /* out[i] <- x[i] / y[i] */ - *v_slash(VEC *, VEC *, VEC *), - /* sorts x, and sets order so that sorted x[i] = x[order[i]] */ - *v_sort(VEC *, PERM *); - -/* returns inner product starting at component i0 */ -extern double _in_prod(VEC *x,VEC *y,u_int i0), - /* returns sum_{i=0}^{len-1} x[i].y[i] */ - __ip__(Real *,Real *,int); - -/* see v_mltadd(), v_add(), v_sub() and v_zero() */ -extern void __mltadd__(Real *,Real *,double,int), - __add__(Real *,Real *,Real *,int), - __sub__(Real *,Real *,Real *,int), - __smlt__(Real *,double,Real *,int), - __zero__(Real *,int); - -#endif - - -/* MACRO */ -/* usual way of computing the inner product */ -#define in_prod(a,b) _in_prod(a,b,0) - -/* Norms */ -/* scaled vector norms -- scale == NULL implies unscaled */ -#ifndef ANSI_C - -extern double _v_norm1(), _v_norm2(), _v_norm_inf(), - m_norm1(), m_norm_inf(), m_norm_frob(); - -#else - /* returns sum_i |x[i]/scale[i]| */ -extern double _v_norm1(VEC *x,VEC *scale), - /* returns (scaled) Euclidean norm */ - _v_norm2(VEC *x,VEC *scale), - /* returns max_i |x[i]/scale[i]| */ - _v_norm_inf(VEC *x,VEC *scale); - -/* unscaled matrix norms */ -extern double m_norm1(MAT *A), m_norm_inf(MAT *A), m_norm_frob(MAT *A); - -#endif - - -/* MACROS */ -/* unscaled vector norms */ -#define v_norm1(x) _v_norm1(x,VNULL) -#define v_norm2(x) _v_norm2(x,VNULL) -#define v_norm_inf(x) _v_norm_inf(x,VNULL) - -/* Basic matrix operations */ -#ifndef ANSI_C - -extern MAT *sm_mlt(), *m_mlt(), *mmtr_mlt(), *mtrm_mlt(), *m_add(), *m_sub(), - *sub_mat(), *m_transp(), *ms_mltadd(); - -extern BAND *bd_transp(); -extern MAT *px_rows(), *px_cols(), *swap_rows(), *swap_cols(), - *_set_row(), *_set_col(); -extern VEC *get_row(), *get_col(), *sub_vec(), - *mv_mltadd(), *vm_mltadd(); - -#else - -extern MAT *sm_mlt(double s,MAT *A,MAT *out), /* out <- s.A */ - *m_mlt(MAT *A,MAT *B,MAT *out), /* out <- A.B */ - *mmtr_mlt(MAT *A,MAT *B,MAT *out), /* out <- A.B^T */ - *mtrm_mlt(MAT *A,MAT *B,MAT *out), /* out <- A^T.B */ - *m_add(MAT *A,MAT *B,MAT *out), /* out <- A + B */ - *m_sub(MAT *A,MAT *B,MAT *out), /* out <- A - B */ - *sub_mat(MAT *A,u_int,u_int,u_int,u_int,MAT *out), - *m_transp(MAT *A,MAT *out), /* out <- A^T */ - /* out <- A + s.B */ - *ms_mltadd(MAT *A,MAT *B,double s,MAT *out); - - -extern BAND *bd_transp(BAND *in, BAND *out); /* out <- A^T */ -extern MAT *px_rows(PERM *px,MAT *A,MAT *out), /* out <- P.A */ - *px_cols(PERM *px,MAT *A,MAT *out), /* out <- A.P^T */ - *swap_rows(MAT *,int,int,int,int), - *swap_cols(MAT *,int,int,int,int), - /* A[i][j] <- out[j], j >= j0 */ - *_set_col(MAT *A,u_int i,VEC *out,u_int j0), - /* A[i][j] <- out[i], i >= i0 */ - *_set_row(MAT *A,u_int j,VEC *out,u_int i0); - -extern VEC *get_row(MAT *,u_int,VEC *), - *get_col(MAT *,u_int,VEC *), - *sub_vec(VEC *,int,int,VEC *), - /* out <- x + s.A.y */ - *mv_mltadd(VEC *x,VEC *y,MAT *A,double s,VEC *out), - /* out^T <- x^T + s.y^T.A */ - *vm_mltadd(VEC *x,VEC *y,MAT *A,double s,VEC *out); -#endif - - -/* MACROS */ -/* row i of A <- vec */ -#define set_row(mat,row,vec) _set_row(mat,row,vec,0) -/* col j of A <- vec */ -#define set_col(mat,col,vec) _set_col(mat,col,vec,0) - - -/* Basic permutation operations */ -#ifndef ANSI_C - -extern PERM *px_mlt(), *px_inv(), *px_transp(); -extern int 
px_sign(); - -#else - -extern PERM *px_mlt(PERM *px1,PERM *px2,PERM *out), /* out <- px1.px2 */ - *px_inv(PERM *px,PERM *out), /* out <- px^{-1} */ - /* swap px[i] and px[j] */ - *px_transp(PERM *px,u_int i,u_int j); - - /* returns sign(px) = +1 if px product of even # transpositions - -1 if ps product of odd # transpositions */ -extern int px_sign(PERM *); - -#endif - - -/* Basic integer vector operations */ -#ifndef ANSI_C - -extern IVEC *iv_add(), *iv_sub(), *iv_sort(); - -#else - -extern IVEC *iv_add(IVEC *ix,IVEC *iy,IVEC *out), /* out <- ix + iy */ - *iv_sub(IVEC *ix,IVEC *iy,IVEC *out), /* out <- ix - iy */ - /* sorts ix & sets order so that sorted ix[i] = old ix[order[i]] */ - *iv_sort(IVEC *ix, PERM *order); - -#endif - - -/* miscellaneous functions */ - -#ifndef ANSI_C - -extern double square(), cube(), mrand(); -extern void smrand(), mrandlist(); -extern void m_dump(), px_dump(), v_dump(), iv_dump(); -extern MAT *band2mat(); -extern BAND *mat2band(); - -#else - -double square(double x), /* returns x^2 */ - cube(double x), /* returns x^3 */ - mrand(void); /* returns random # in [0,1) */ - -void smrand(int seed), /* seeds mrand() */ - mrandlist(Real *x, int len); /* generates len random numbers */ - -void m_dump(FILE *fp,MAT *a), px_dump(FILE *,PERM *px), - v_dump(FILE *fp,VEC *x), iv_dump(FILE *fp, IVEC *ix); - -MAT *band2mat(BAND *bA, MAT *A); -BAND *mat2band(MAT *A, int lb,int ub, BAND *bA); - -#endif - - -/* miscellaneous constants */ -#define VNULL ((VEC *)NULL) -#define MNULL ((MAT *)NULL) -#define PNULL ((PERM *)NULL) -#define IVNULL ((IVEC *)NULL) -#define BDNULL ((BAND *)NULL) - - - -/* varying number of arguments */ - -#ifdef ANSI_C -#include - -/* prototypes */ - -int v_get_vars(int dim,...); -int iv_get_vars(int dim,...); -int m_get_vars(int m,int n,...); -int px_get_vars(int dim,...); - -int v_resize_vars(int new_dim,...); -int iv_resize_vars(int new_dim,...); -int m_resize_vars(int m,int n,...); -int px_resize_vars(int new_dim,...); - -int v_free_vars(VEC **,...); -int iv_free_vars(IVEC **,...); -int px_free_vars(PERM **,...); -int m_free_vars(MAT **,...); - -#elif VARARGS -/* old varargs is used */ - -#include - -/* prototypes */ - -int v_get_vars(); -int iv_get_vars(); -int m_get_vars(); -int px_get_vars(); - -int v_resize_vars(); -int iv_resize_vars(); -int m_resize_vars(); -int px_resize_vars(); - -int v_free_vars(); -int iv_free_vars(); -int px_free_vars(); -int m_free_vars(); - -#endif - -#if defined(__cplusplus) -} -#endif - -#endif - - diff --git a/src/mesch/matrix2.h b/src/mesch/matrix2.h deleted file mode 100755 index ba12fa9fb0..0000000000 --- a/src/mesch/matrix2.h +++ /dev/null @@ -1,236 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Header file for ``matrix2.a'' library file -*/ - - -#ifndef MATRIX2H -#define MATRIX2H - -#if defined(__cplusplus) -extern "C" { -#endif - -#include "matrix.h" - -/* Unless otherwise specified, factorisation routines overwrite the - matrix that is being factorised */ - -#ifndef ANSI_C - -extern MAT *BKPfactor(), *CHfactor(), *LUfactor(), *QRfactor(), - *QRCPfactor(), *LDLfactor(), *Hfactor(), *MCHfactor(), - *m_inverse(); -extern double LUcondest(), QRcondest(); -extern MAT *makeQ(), *makeR(), *makeHQ(), *makeH(); -extern MAT *LDLupdate(), *QRupdate(); - -extern VEC *BKPsolve(), *CHsolve(), *LUsolve(), *_Qsolve(), *QRsolve(), - *LDLsolve(), *Usolve(), *Lsolve(), *Dsolve(), *LTsolve(), - *UTsolve(), *LUTsolve(), *QRCPsolve(); - -extern BAND *bdLUfactor(), *bdLDLfactor(); -extern VEC *bdLUsolve(), *bdLDLsolve(); - -extern VEC *hhvec(); -extern VEC *hhtrvec(); -extern MAT *hhtrrows(); -extern MAT *hhtrcols(); - -extern void givens(); -extern VEC *rot_vec(); /* in situ */ -extern MAT *rot_rows(); /* in situ */ -extern MAT *rot_cols(); /* in situ */ - - -/* eigenvalue routines */ -extern VEC *trieig(), *symmeig(); -extern MAT *schur(); -extern void schur_evals(); -extern MAT *schur_vecs(); - -/* singular value decomposition */ -extern VEC *bisvd(), *svd(); - -/* matrix powers and exponent */ -MAT *_m_pow(); -MAT *m_pow(); -MAT *m_exp(), *_m_exp(); -MAT *m_poly(); - -/* FFT */ -void fft(); -void ifft(); - - -#else - - /* forms Bunch-Kaufman-Parlett factorisation for - symmetric indefinite matrices */ -extern MAT *BKPfactor(MAT *A,PERM *pivot,PERM *blocks), - /* Cholesky factorisation of A - (symmetric, positive definite) */ - *CHfactor(MAT *A), - /* LU factorisation of A (with partial pivoting) */ - *LUfactor(MAT *A,PERM *pivot), - /* QR factorisation of A; need dim(diag) >= # rows of A */ - *QRfactor(MAT *A,VEC *diag), - /* QR factorisation of A with column pivoting */ - *QRCPfactor(MAT *A,VEC *diag,PERM *pivot), - /* L.D.L^T factorisation of A */ - *LDLfactor(MAT *A), - /* Hessenberg factorisation of A -- for schur() */ - *Hfactor(MAT *A,VEC *diag1,VEC *diag2), - /* modified Cholesky factorisation of A; - actually factors A+D, D diagonal with no - diagonal entry in the factor < sqrt(tol) */ - *MCHfactor(MAT *A,double tol), - *m_inverse(MAT *A,MAT *out); - - /* returns condition estimate for A after LUfactor() */ -extern double LUcondest(MAT *A,PERM *pivot), - /* returns condition estimate for Q after QRfactor() */ - QRcondest(MAT *A); - -/* Note: The make..() and ..update() routines assume that the factorisation - has already been carried out */ - - /* Qout is the "Q" (orthongonal) matrix from QR factorisation */ -extern MAT *makeQ(MAT *A,VEC *diag,MAT *Qout), - /* Rout is the "R" (upper triangular) matrix - from QR factorisation */ - *makeR(MAT *A,MAT *Rout), - /* Qout is orthogonal matrix in Hessenberg factorisation */ - *makeHQ(MAT *A,VEC *diag1,VEC *diag2,MAT *Qout), - /* Hout is the Hessenberg matrix in Hessenberg factorisation */ - *makeH(MAT *A,MAT *Hout); - - /* updates L.D.L^T factorisation for A <- A + alpha.u.u^T */ -extern MAT *LDLupdate(MAT *A,VEC *u,double alpha), - /* updates QR factorisation for QR <- Q.(R+u.v^T) - Note: we need explicit Q & R matrices, - from makeQ() and makeR() */ - *QRupdate(MAT *Q,MAT *R,VEC 
*u,VEC *v); - -/* Solve routines assume that the corresponding factorisation routine - has already been applied to the matrix along with auxiliary - objects (such as pivot permutations) - - These solve the system A.x = b, - except for LUTsolve and QRTsolve which solve the transposed system - A^T.x. = b. - If x is NULL on entry, then it is created. -*/ - -extern VEC *BKPsolve(MAT *A,PERM *pivot,PERM *blocks,VEC *b,VEC *x), - *CHsolve(MAT *A,VEC *b,VEC *x), - *LDLsolve(MAT *A,VEC *b,VEC *x), - *LUsolve(MAT *A,PERM *pivot,VEC *b,VEC *x), - *_Qsolve(MAT *A,VEC *,VEC *,VEC *, VEC *), - *QRsolve(MAT *A,VEC *,VEC *b,VEC *x), - *QRTsolve(MAT *A,VEC *,VEC *b,VEC *x), - - - /* Triangular equations solve routines; - U for upper triangular, L for lower traingular, D for diagonal - if diag_val == 0.0 use that values in the matrix */ - - *Usolve(MAT *A,VEC *b,VEC *x,double diag_val), - *Lsolve(MAT *A,VEC *b,VEC *x,double diag_val), - *Dsolve(MAT *A,VEC *b,VEC *x), - *LTsolve(MAT *A,VEC *b,VEC *x,double diag_val), - *UTsolve(MAT *A,VEC *b,VEC *x,double diag_val), - *LUTsolve(MAT *A,PERM *,VEC *,VEC *), - *QRCPsolve(MAT *QR,VEC *diag,PERM *pivot,VEC *b,VEC *x); - -extern BAND *bdLUfactor(BAND *A,PERM *pivot), - *bdLDLfactor(BAND *A); -extern VEC *bdLUsolve(BAND *A,PERM *pivot,VEC *b,VEC *x), - *bdLDLsolve(BAND *A,VEC *b,VEC *x); - - - -extern VEC *hhvec(VEC *,u_int,Real *,VEC *,Real *); -extern VEC *hhtrvec(VEC *,double,u_int,VEC *,VEC *); -extern MAT *hhtrrows(MAT *,u_int,u_int,VEC *,double); -extern MAT *hhtrcols(MAT *,u_int,u_int,VEC *,double); - -extern void givens(double,double,Real *,Real *); -extern VEC *rot_vec(VEC *,u_int,u_int,double,double,VEC *); /* in situ */ -extern MAT *rot_rows(MAT *,u_int,u_int,double,double,MAT *); /* in situ */ -extern MAT *rot_cols(MAT *,u_int,u_int,double,double,MAT *); /* in situ */ - - -/* eigenvalue routines */ - - /* compute eigenvalues of tridiagonal matrix - with diagonal entries a[i], super & sub diagonal entries - b[i]; eigenvectors stored in Q (if not NULL) */ -extern VEC *trieig(VEC *a,VEC *b,MAT *Q), - /* sets out to be vector of eigenvectors; eigenvectors - stored in Q (if not NULL). 
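/* A minimal usage sketch of the factorise-then-solve pattern documented above
   (illustrative only; solve_square_system() is a hypothetical helper, not part
   of Meschach). LUfactor() overwrites A with its LU factors and records the
   pivoting in a PERM, after which LUsolve() solves A.x = b. */

#include "matrix.h"
#include "matrix2.h"

static VEC *solve_square_system(MAT *A, VEC *b)
{
    PERM *pivot = px_get(A->m);   /* pivot permutation for partial pivoting */
    VEC  *x     = VNULL;          /* LUsolve() creates x when it is passed as NULL */

    LUfactor(A, pivot);           /* A is overwritten by its LU factorisation */
    x = LUsolve(A, pivot, b, x);  /* forward/back substitution with the factors */

    px_free(pivot);
    return x;
}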
A is unchanged */ - *symmeig(MAT *A,MAT *Q,VEC *out); - - /* computes real Schur form = Q^T.A.Q */ -extern MAT *schur(MAT *A,MAT *Q); - /* computes real and imaginary parts of the eigenvalues - of A after schur() */ -extern void schur_evals(MAT *A,VEC *re_part,VEC *im_part); - /* computes real and imaginary parts of the eigenvectors - of A after schur() */ -extern MAT *schur_vecs(MAT *T,MAT *Q,MAT *X_re,MAT *X_im); - - -/* singular value decomposition */ - - /* computes singular values of bi-diagonal matrix with - diagonal entries a[i] and superdiagonal entries b[i]; - singular vectors stored in U and V (if not NULL) */ -VEC *bisvd(VEC *a,VEC *b,MAT *U,MAT *V), - /* sets out to be vector of singular values; - singular vectors stored in U and V */ - *svd(MAT *A,MAT *U,MAT *V,VEC *out); - -/* matrix powers and exponent */ -MAT *_m_pow(MAT *,int,MAT *,MAT *); -MAT *m_pow(MAT *,int, MAT *); -MAT *m_exp(MAT *,double,MAT *); -MAT *_m_exp(MAT *,double,MAT *,int *,int *); -MAT *m_poly(MAT *,VEC *,MAT *); - -/* FFT */ -void fft(VEC *,VEC *); -void ifft(VEC *,VEC *); - -#endif - -#if defined(__cplusplus) -} -#endif - -#endif diff --git a/src/mesch/matrixio.c b/src/mesch/matrixio.c deleted file mode 100755 index 19d9f5f6d6..0000000000 --- a/src/mesch/matrixio.c +++ /dev/null @@ -1,525 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* 1.6 matrixio.c 11/25/87 */ - - -#include -#include -#include "matrix.h" - -static char rcsid[] = "matrixio.c,v 1.1 1997/12/04 17:55:35 hines Exp"; - - -/* local variables */ -static char line[MAXLINE]; - - -/************************************************************************** - Input routines - **************************************************************************/ -/* skipjunk -- skips white spaces and strings of the form #....\n - Here .... is a comment string */ -int skipjunk(fp) -FILE *fp; -{ - int c; - - for ( ; ; ) /* forever do... 
*/ - { - /* skip blanks */ - do - c = getc(fp); - while ( isspace(c) ); - - /* skip comments (if any) */ - if ( c == '#' ) - /* yes it is a comment (line) */ - while ( (c=getc(fp)) != '\n' ) - ; - else - { - ungetc(c,fp); - break; - } - } - return 0; -} - -MAT *m_finput(fp,a) -FILE *fp; -MAT *a; -{ - MAT *im_finput(),*bm_finput(); - - if ( isatty(fileno(fp)) ) - return im_finput(fp,a); - else - return bm_finput(fp,a); -} - -/* im_finput -- interactive input of matrix */ -MAT *im_finput(fp,mat) -FILE *fp; -MAT *mat; -{ - char c; - u_int i, j, m, n, dynamic; - /* dynamic set to TRUE if memory allocated here */ - - /* get matrix size */ - if ( mat != (MAT *)NULL && mat->mnm; n = mat->n; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"Matrix: rows cols:"); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"im_finput"); - } while ( sscanf(line,"%u%u",&m,&n)<2 || m>MAXDIM || n>MAXDIM ); - mat = m_get(m,n); - } - - /* input elements */ - for ( i=0; ime[i][j]); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"im_finput"); - if ( (*line == 'b' || *line == 'B') && j > 0 ) - { j--; dynamic = FALSE; goto redo2; } - if ( (*line == 'f' || *line == 'F') && j < n-1 ) - { j++; dynamic = FALSE; goto redo2; } -#if REAL == DOUBLE - } while ( *line=='\0' || sscanf(line,"%lf",&mat->me[i][j])<1 ); -#elif REAL == FLOAT - } while ( *line=='\0' || sscanf(line,"%f",&mat->me[i][j])<1 ); -#endif - fprintf(stderr,"Continue: "); - if(fscanf(fp,"%c",&c) != 1) { - error(E_INPUT, "im_finput"); - } - if ( c == 'n' || c == 'N' ) - { dynamic = FALSE; goto redo; } - if ( (c == 'b' || c == 'B') /* && i > 0 */ ) - { if ( i > 0 ) - i--; - dynamic = FALSE; goto redo; - } - } - - return (mat); -} - -/* bm_finput -- batch-file input of matrix */ -MAT *bm_finput(fp,mat) -FILE *fp; -MAT *mat; -{ - u_int i,j,m,n,dummy; - int io_code; - - /* get dimension */ - skipjunk(fp); - if ((io_code=fscanf(fp," Matrix: %u by %u",&m,&n)) < 2 || - m>MAXDIM || n>MAXDIM ) - error(io_code==EOF ? E_EOF : E_FORMAT,"bm_finput"); - - /* allocate memory if necessary */ - if ( mat==(MAT *)NULL ) - mat = m_resize(mat,m,n); - - /* get entries */ - for ( i=0; ime[i][j])) < 1 ) -#elif REAL == FLOAT - if ((io_code=fscanf(fp,"%f",&mat->me[i][j])) < 1 ) -#endif - error(io_code==EOF ? 
7 : 6,"bm_finput"); - } - - return (mat); -} - -PERM *px_finput(fp,px) -FILE *fp; -PERM *px; -{ - PERM *ipx_finput(),*bpx_finput(); - - if ( isatty(fileno(fp)) ) - return ipx_finput(fp,px); - else - return bpx_finput(fp,px); -} - - -/* ipx_finput -- interactive input of permutation */ -PERM *ipx_finput(fp,px) -FILE *fp; -PERM *px; -{ - u_int i,j,size,dynamic; /* dynamic set if memory allocated here */ - u_int entry,ok; - - /* get permutation size */ - if ( px!=(PERM *)NULL && px->sizesize; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"Permutation: size: "); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"ipx_finput"); - } while ( sscanf(line,"%u",&size)<1 || size>MAXDIM ); - px = px_get(size); - } - - /* get entries */ - i = 0; - while ( i%u new: ", - i,px->pe[i]); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"ipx_finput"); - if ( (*line == 'b' || *line == 'B') && i > 0 ) - { i--; dynamic = FALSE; goto redo; } - } while ( *line=='\0' || sscanf(line,"%u",&entry) < 1 ); - /* check entry */ - ok = (entry < size); - for ( j=0; jpe[j]); - if ( ok ) - { - px->pe[i] = entry; - i++; - } - } - - return (px); -} - -/* bpx_finput -- batch-file input of permutation */ -PERM *bpx_finput(fp,px) -FILE *fp; -PERM *px; -{ - u_int i,j,size,entry,ok; - int io_code; - - /* get size of permutation */ - skipjunk(fp); - if ((io_code=fscanf(fp," Permutation: size:%u",&size)) < 1 || - size>MAXDIM ) - error(io_code==EOF ? 7 : 6,"bpx_finput"); - - /* allocate memory if necessary */ - if ( px==(PERM *)NULL || px->size %u",&entry)) < 1 ) - error(io_code==EOF ? 7 : 6,"bpx_finput"); - /* check entry */ - ok = (entry < size); - for ( j=0; jpe[j]); - if ( ok ) - { - px->pe[i] = entry; - i++; - } - else - error(E_BOUNDS,"bpx_finput"); - } - - return (px); -} - - -VEC *v_finput(fp,x) -FILE *fp; -VEC *x; -{ - VEC *ifin_vec(),*bfin_vec(); - - if ( isatty(fileno(fp)) ) - return ifin_vec(fp,x); - else - return bfin_vec(fp,x); -} - -/* ifin_vec -- interactive input of vector */ -VEC *ifin_vec(fp,vec) -FILE *fp; -VEC *vec; -{ - u_int i,dim,dynamic; /* dynamic set if memory allocated here */ - - /* get vector dimension */ - if ( vec != (VEC *)NULL && vec->dimdim; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"Vector: dim: "); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"ifin_vec"); - } while ( sscanf(line,"%u",&dim)<1 || dim>MAXDIM ); - vec = v_get(dim); - } - - /* input elements */ - for ( i=0; ive[i]); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"ifin_vec"); - if ( (*line == 'b' || *line == 'B') && i > 0 ) - { i--; dynamic = FALSE; goto redo; } - if ( (*line == 'f' || *line == 'F') && i < dim-1 ) - { i++; dynamic = FALSE; goto redo; } -#if REAL == DOUBLE - } while ( *line=='\0' || sscanf(line,"%lf",&vec->ve[i]) < 1 ); -#elif REAL == FLOAT - } while ( *line=='\0' || sscanf(line,"%f",&vec->ve[i]) < 1 ); -#endif - - return (vec); -} - -/* bfin_vec -- batch-file input of vector */ -VEC *bfin_vec(fp,vec) -FILE *fp; -VEC *vec; -{ - u_int i,dim; - int io_code; - - /* get dimension */ - skipjunk(fp); - if ((io_code=fscanf(fp," Vector: dim:%u",&dim)) < 1 || - dim>MAXDIM ) - error(io_code==EOF ? 7 : 6,"bfin_vec"); - - /* allocate memory if necessary */ - if ( vec==(VEC *)NULL ) - vec = v_resize(vec,dim); - - /* get entries */ - skipjunk(fp); - for ( i=0; ive[i])) < 1 ) -#elif REAL == FLOAT - if ((io_code=fscanf(fp,"%f",&vec->ve[i])) < 1 ) -#endif - error(io_code==EOF ? 
7 : 6,"bfin_vec"); - - return (vec); -} - -/************************************************************************** - Output routines - **************************************************************************/ -static char *format = "%14.9g "; - -char *setformat(f_string) -char *f_string; -{ - char *old_f_string; - old_f_string = format; - if ( f_string != (char *)NULL && *f_string != '\0' ) - format = f_string; - - return old_f_string; -} - -void m_foutput(fp,a) -FILE *fp; -MAT *a; -{ - u_int i, j, tmp; - - if ( a == (MAT *)NULL ) - { fprintf(fp,"Matrix: NULL\n"); return; } - fprintf(fp,"Matrix: %d by %d\n",a->m,a->n); - if ( a->me == (Real **)NULL ) - { fprintf(fp,"NULL\n"); return; } - for ( i=0; im; i++ ) /* for each row... */ - { - fprintf(fp,"row %u: ",i); - for ( j=0, tmp=2; jn; j++, tmp++ ) - { /* for each col in row... */ - fprintf(fp,format,a->me[i][j]); - if ( ! (tmp % 5) ) putc('\n',fp); - } - if ( tmp % 5 != 1 ) putc('\n',fp); - } -} - -void px_foutput(fp,px) -FILE *fp; -PERM *px; -{ - u_int i; - - if ( px == (PERM *)NULL ) - { fprintf(fp,"Permutation: NULL\n"); return; } - fprintf(fp,"Permutation: size: %u\n",px->size); - if ( px->pe == (u_int *)NULL ) - { fprintf(fp,"NULL\n"); return; } - for ( i=0; isize; i++ ) - if ( ! (i % 8) && i != 0 ) - fprintf(fp,"\n %u->%u ",i,px->pe[i]); - else - fprintf(fp,"%u->%u ",i,px->pe[i]); - fprintf(fp,"\n"); -} - -void v_foutput(fp,x) -FILE *fp; -VEC *x; -{ - u_int i, tmp; - - if ( x == (VEC *)NULL ) - { fprintf(fp,"Vector: NULL\n"); return; } - fprintf(fp,"Vector: dim: %d\n",x->dim); - if ( x->ve == (Real *)NULL ) - { fprintf(fp,"NULL\n"); return; } - for ( i=0, tmp=0; idim; i++, tmp++ ) - { - fprintf(fp,format,x->ve[i]); - if ( tmp % 5 == 4 ) putc('\n',fp); - } - if ( tmp % 5 != 0 ) putc('\n',fp); -} - - -void m_dump(fp,a) -FILE *fp; -MAT *a; -{ - u_int i, j, tmp; - - if ( a == (MAT *)NULL ) - { fprintf(fp,"Matrix: NULL\n"); return; } - fprintf(fp,"Matrix: %d by %d @ 0x%p\n",a->m,a->n,a); - fprintf(fp,"\tmax_m = %d, max_n = %d, max_size = %d\n", - a->max_m, a->max_n, a->max_size); - if ( a->me == (Real **)NULL ) - { fprintf(fp,"NULL\n"); return; } - fprintf(fp,"a->me @ 0x%p\n",(a->me)); - fprintf(fp,"a->base @ 0x%p\n",(a->base)); - for ( i=0; im; i++ ) /* for each row... */ - { - fprintf(fp,"row %u: @ 0x%p ",i,(a->me[i])); - for ( j=0, tmp=2; jn; j++, tmp++ ) - { /* for each col in row... */ - fprintf(fp,format,a->me[i][j]); - if ( ! (tmp % 5) ) putc('\n',fp); - } - if ( tmp % 5 != 1 ) putc('\n',fp); - } -} - -void px_dump(fp,px) -FILE *fp; -PERM *px; -{ - u_int i; - - if ( ! px ) - { fprintf(fp,"Permutation: NULL\n"); return; } - fprintf(fp,"Permutation: size: %u @ 0x%p\n",px->size,(px)); - if ( ! px->pe ) - { fprintf(fp,"NULL\n"); return; } - fprintf(fp,"px->pe @ 0x%p\n",(px->pe)); - for ( i=0; isize; i++ ) - fprintf(fp,"%u->%u ",i,px->pe[i]); - fprintf(fp,"\n"); -} - - -void v_dump(fp,x) -FILE *fp; -VEC *x; -{ - u_int i, tmp; - - if ( ! x ) - { fprintf(fp,"Vector: NULL\n"); return; } - fprintf(fp,"Vector: dim: %d @ 0x%p\n",x->dim,(x)); - if ( ! 
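/* A minimal sketch of the matrix I/O entry points above (illustrative only;
   echo_matrix() and the file path are assumptions). setformat() swaps the
   element format string used by the *_foutput() routines, m_finput() reads a
   matrix from a file (or interactively from a terminal), and m_foutput()
   writes it back out. */

#include <stdio.h>
#include "matrix.h"

static void echo_matrix(const char *path)
{
    FILE *fp = fopen(path, "r");
    MAT  *A  = MNULL;

    if (fp == NULL)
        return;

    A = m_finput(fp, A);      /* batch input; allocates A when it is NULL */
    fclose(fp);

    setformat("%10.4g ");     /* element format used by m_foutput() */
    m_foutput(stdout, A);     /* "Matrix: m by n" header followed by the rows */

    m_free(A);
}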
x->ve ) - { fprintf(fp,"NULL\n"); return; } - fprintf(fp,"x->ve @ 0x%p\n",(x->ve)); - for ( i=0, tmp=0; idim; i++, tmp++ ) - { - fprintf(fp,format,x->ve[i]); - if ( tmp % 5 == 4 ) putc('\n',fp); - } - if ( tmp % 5 != 0 ) putc('\n',fp); -} - diff --git a/src/mesch/meminfo.c b/src/mesch/meminfo.c deleted file mode 100755 index fc7b6beb85..0000000000 --- a/src/mesch/meminfo.c +++ /dev/null @@ -1,392 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* meminfo.c revised 22/11/93 */ - -/* - contains basic functions, types and arrays - to keep track of memory allocation/deallocation -*/ - -#include -#include "matrix.h" -#include "meminfo.h" -#ifdef COMPLEX -#include "zmatrix.h" -#endif -#ifdef SPARSE -#include "sparse.h" -#include "iter.h" -#endif - -static char rcsid[] = "meminfo.c,v 1.1 1997/12/04 17:55:37 hines Exp"; - -/* this array is defined further in this file */ -extern MEM_CONNECT mem_connect[MEM_CONNECT_MAX_LISTS]; - - -/* names of types */ -static char *mem_type_names[] = { - "MAT", - "BAND", - "PERM", - "VEC", - "IVEC" -#ifdef SPARSE - ,"ITER", - "SPROW", - "SPMAT" -#endif -#ifdef COMPLEX - ,"ZVEC", - "ZMAT" -#endif - }; - - -#define MEM_NUM_STD_TYPES (sizeof(mem_type_names)/sizeof(mem_type_names[0])) - - -/* local array for keeping track of memory */ -static MEM_ARRAY mem_info_sum[MEM_NUM_STD_TYPES]; - - -/* for freeing various types */ -static int (*mem_free_funcs[MEM_NUM_STD_TYPES])() = { - m_free, - bd_free, - px_free, - v_free, - iv_free -#ifdef SPARSE - ,iter_free, - sprow_free, - sp_free -#endif -#ifdef COMPLEX - ,zv_free, - zm_free -#endif - }; - - - -/* it is a global variable for passing - pointers to local arrays defined here */ -MEM_CONNECT mem_connect[MEM_CONNECT_MAX_LISTS] = { - { mem_type_names, mem_free_funcs, MEM_NUM_STD_TYPES, - mem_info_sum } -}; - - -/* attach a new list of types */ - -int mem_attach_list(list, ntypes, type_names, free_funcs, info_sum) -int list,ntypes; /* number of a list and number of types there */ -char *type_names[]; /* list of names of types */ -int (*free_funcs[])(); /* list of releasing functions */ -MEM_ARRAY info_sum[]; /* local table */ -{ - if (list < 0 || list >= MEM_CONNECT_MAX_LISTS) - return -1; - - if (type_names == NULL || free_funcs == NULL - || info_sum == NULL || ntypes < 0) - return -1; - - /* if a list exists do not overwrite */ - if ( mem_connect[list].ntypes != 0 ) - error(E_OVERWRITE,"mem_attach_list"); - - mem_connect[list].ntypes = ntypes; - 
mem_connect[list].type_names = type_names; - mem_connect[list].free_funcs = free_funcs; - mem_connect[list].info_sum = info_sum; - return 0; -} - - -/* release a list of types */ -int mem_free_vars(list) -int list; -{ - if (list < 0 || list >= MEM_CONNECT_MAX_LISTS) - return -1; - - mem_connect[list].ntypes = 0; - mem_connect[list].type_names = NULL; - mem_connect[list].free_funcs = NULL; - mem_connect[list].info_sum = NULL; - - return 0; -} - - - -/* check if list is attached */ - -int mem_is_list_attached(list) -int list; -{ - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return FALSE; - - if ( mem_connect[list].type_names != NULL && - mem_connect[list].free_funcs != NULL && - mem_connect[list].info_sum != NULL) - return TRUE; - else return FALSE; -} - -/* to print out the contents of mem_connect[list] */ - -void mem_dump_list(fp,list) -FILE *fp; -int list; -{ - int i; - MEM_CONNECT *mlist; - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return; - - mlist = &mem_connect[list]; - fprintf(fp," %15s[%d]:\n","CONTENTS OF mem_connect",list); - fprintf(fp," %-7s %-12s %-9s %s\n", - "name of", - "alloc.", "# alloc.", - "address" - ); - fprintf(fp," %-7s %-12s %-9s %s\n", - " type", - "bytes", "variables", - "of *_free()" - ); - - for (i=0; i < mlist->ntypes; i++) - fprintf(fp," %-7s %-12ld %-9d %p\n", - mlist->type_names[i], mlist->info_sum[i].bytes, - mlist->info_sum[i].numvar, mlist->free_funcs[i] - ); - - fprintf(fp,"\n"); -} - - - -/*=============================================================*/ - - -/* local variables */ - -static int mem_switched_on = MEM_SWITCH_ON_DEF; /* on/off */ - - -/* switch on/off memory info */ - -int mem_info_on(sw) -int sw; -{ - int old = mem_switched_on; - - mem_switched_on = sw; - return old; -} - -#ifdef ANSI_C -int mem_info_is_on(void) -#else -int mem_info_is_on() -#endif -{ - return mem_switched_on; -} - - -/* information about allocated memory */ - -/* return the number of allocated bytes for type 'type' */ - -long mem_info_bytes(type,list) -int type,list; -{ - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return 0l; - if ( !mem_switched_on || type < 0 - || type >= mem_connect[list].ntypes - || mem_connect[list].free_funcs[type] == NULL ) - return 0l; - - return mem_connect[list].info_sum[type].bytes; -} - -/* return the number of allocated variables for type 'type' */ -int mem_info_numvar(type,list) -int type,list; -{ - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return 0l; - if ( !mem_switched_on || type < 0 - || type >= mem_connect[list].ntypes - || mem_connect[list].free_funcs[type] == NULL ) - return 0l; - - return mem_connect[list].info_sum[type].numvar; -} - - - -/* print out memory info to the file fp */ -void mem_info_file(fp,list) -FILE *fp; -int list; -{ - unsigned int type; - long t = 0l, d; - int n = 0, nt = 0; - MEM_CONNECT *mlist; - - if (!mem_switched_on) return; - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return; - - if (list == 0) - fprintf(fp," MEMORY INFORMATION (standard types):\n"); - else - fprintf(fp," MEMORY INFORMATION (list no. %d):\n",list); - - mlist = &mem_connect[list]; - - for (type=0; type < mlist->ntypes; type++) { - if (mlist->type_names[type] == NULL ) continue; - d = mlist->info_sum[type].bytes; - t += d; - n = mlist->info_sum[type].numvar; - nt += n; - fprintf(fp," type %-7s %10ld alloc. byte%c %6d alloc. variable%c\n", - mlist->type_names[type], d, (d!=1 ? 's' : ' '), - n, (n!=1 ? 's' : ' ')); - } - - fprintf(fp," %-12s %10ld alloc. byte%c %6d alloc. 
variable%c\n\n", - "total:",t, (t!=1 ? 's' : ' '), - nt, (nt!=1 ? 's' : ' ')); -} - - -/* function for memory information */ - - -/* mem_bytes_list - - Arguments: - type - the number of type; - old_size - old size of allocated memory (in bytes); - new_size - new size of allocated memory (in bytes); - list - list of types - */ - - -void mem_bytes_list(type,old_size,new_size,list) -int type,list; -int old_size,new_size; -{ - MEM_CONNECT *mlist; - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return; - - mlist = &mem_connect[list]; - if ( type < 0 || type >= mlist->ntypes - || mlist->free_funcs[type] == NULL ) - return; - - if ( old_size < 0 || new_size < 0 ) - error(E_NEG,"mem_bytes_list"); - - mlist->info_sum[type].bytes += new_size - old_size; - - /* check if the number of bytes is non-negative */ - if ( old_size > 0 ) { - - if (mlist->info_sum[type].bytes < 0) - { - fprintf(stderr, - "\n WARNING !! memory info: allocated memory is less than 0\n"); - fprintf(stderr,"\t TYPE %s \n\n", mlist->type_names[type]); - - if ( !isatty(fileno(stdout)) ) { - fprintf(stdout, - "\n WARNING !! memory info: allocated memory is less than 0\n"); - fprintf(stdout,"\t TYPE %s \n\n", mlist->type_names[type]); - } - } - } -} - - -/* mem_numvar_list - - Arguments: - type - the number of type; - num - # of variables allocated (> 0) or deallocated ( < 0) - list - list of types - */ - - -void mem_numvar_list(type,num,list) -int type,list,num; -{ - MEM_CONNECT *mlist; - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return; - - mlist = &mem_connect[list]; - if ( type < 0 || type >= mlist->ntypes - || mlist->free_funcs[type] == NULL ) - return; - - mlist->info_sum[type].numvar += num; - - /* check if the number of variables is non-negative */ - if ( num < 0 ) { - - if (mlist->info_sum[type].numvar < 0) - { - fprintf(stderr, - "\n WARNING !! memory info: allocated # of variables is less than 0\n"); - fprintf(stderr,"\t TYPE %s \n\n", mlist->type_names[type]); - if ( !isatty(fileno(stdout)) ) { - fprintf(stdout, - "\n WARNING !! memory info: allocated # of variables is less than 0\n"); - fprintf(stdout,"\t TYPE %s \n\n", mlist->type_names[type]); - } - } - } -} - diff --git a/src/mesch/meminfo.h b/src/mesch/meminfo.h deleted file mode 100755 index e28ddb01e6..0000000000 --- a/src/mesch/meminfo.h +++ /dev/null @@ -1,155 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* meminfo.h 26/08/93 */ -/* changed 11/12/93 */ - - -#ifndef MEM_INFOH -#define MEM_INFOH - - - -/* for hash table in mem_stat.c */ -/* Note: the hash size should be a prime, or at very least odd */ -#define MEM_HASHSIZE 509 -#define MEM_HASHSIZE_FILE "meminfo.h" - - -/* default: memory information is off */ -/* set it to 1 if you want it all the time */ -#define MEM_SWITCH_ON_DEF 0 - - -/* available standard types */ -#define TYPE_NULL (-1) -#define TYPE_MAT 0 -#define TYPE_BAND 1 -#define TYPE_PERM 2 -#define TYPE_VEC 3 -#define TYPE_IVEC 4 - -#ifdef SPARSE -#define TYPE_ITER 5 -#define TYPE_SPROW 6 -#define TYPE_SPMAT 7 -#endif - -#ifdef COMPLEX -#ifdef SPARSE -#define TYPE_ZVEC 8 -#define TYPE_ZMAT 9 -#else -#define TYPE_ZVEC 5 -#define TYPE_ZMAT 6 -#endif -#endif - -/* structure for memory information */ -typedef struct { - long bytes; /* # of allocated bytes for each type (summary) */ - int numvar; /* # of allocated variables for each type */ -} MEM_ARRAY; - - - -#ifdef ANSI_C - -int mem_info_is_on(void); -int mem_info_on(int sw); - -long mem_info_bytes(int type,int list); -int mem_info_numvar(int type,int list); -void mem_info_file(FILE * fp,int list); - -void mem_bytes_list(int type,int old_size,int new_size, - int list); -void mem_numvar_list(int type, int num, int list); - -int mem_stat_reg_list(void **var,int type,int list); -int mem_stat_mark(int mark); -int mem_stat_free_list(int mark,int list); -int mem_stat_show_mark(void); -void mem_stat_dump(FILE *fp,int list); -int mem_attach_list(int list,int ntypes,char *type_names[], - int (*free_funcs[])(), MEM_ARRAY info_sum[]); -int mem_free_vars(int list); -int mem_is_list_attached(int list); -void mem_dump_list(FILE *fp,int list); -int mem_stat_reg_vars(int list,int type,...); - -#else -int mem_info_is_on(); -int mem_info_on(); - -long mem_info_bytes(); -int mem_info_numvar(); -void mem_info_file(); - -void mem_bytes_list(); -void mem_numvar_list(); - -int mem_stat_reg_list(); -int mem_stat_mark(); -int mem_stat_free_list(); -int mem_stat_show_mark(); -void mem_stat_dump(); -int mem_attach_list(); -int mem_free_vars(); -int mem_is_list_attached(); -void mem_dump_list(); -int mem_stat_reg_vars(); - -#endif - -/* macros */ - -#define mem_info() mem_info_file(stdout,0) - -#define mem_stat_reg(var,type) mem_stat_reg_list((void **)var,type,0) -#define MEM_STAT_REG(var,type) mem_stat_reg_list((void **)&(var),type,0) -#define mem_stat_free(mark) mem_stat_free_list(mark,0) - -#define mem_bytes(type,old_size,new_size) \ - mem_bytes_list(type,old_size,new_size,0) - -#define mem_numvar(type,num) mem_numvar_list(type,num,0) - - -/* internal type */ - -typedef struct { - char **type_names; /* array of names of types (strings) */ - int (**free_funcs)(); /* array of functions for releasing types */ - unsigned ntypes; /* max number of types */ - MEM_ARRAY *info_sum; /* local array for keeping track of memory */ -} MEM_CONNECT; - -/* max number of lists of types */ -#define MEM_CONNECT_MAX_LISTS 5 - - -#endif diff --git a/src/mesch/memory.c b/src/mesch/memory.c deleted file mode 100755 index 7199a4d892..0000000000 --- a/src/mesch/memory.c +++ /dev/null @@ -1,1004 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. 
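/* An illustrative sketch of the bookkeeping interface declared in meminfo.h
   above (work_copy() and the 3x3 size are assumptions, not Meschach code).
   mem_info_on() enables tracking, MEM_STAT_REG() registers a routine's static
   workspace under the mark set by mem_stat_mark(), mem_stat_free() releases
   everything registered under that mark, and mem_info() prints the totals. */

#include <stdio.h>
#include "matrix.h"
#include "meminfo.h"

static MAT *work_copy(MAT *A)
{
    static MAT *wkspace = MNULL;

    wkspace = m_resize(wkspace, A->m, A->n);   /* reuse the static buffer across calls */
    MEM_STAT_REG(wkspace, TYPE_MAT);           /* register it under the current mark */
    return m_copy(A, wkspace);
}

int main(void)
{
    MAT *A = m_get(3, 3), *B;

    mem_info_on(TRUE);     /* enable allocation tracking */
    mem_stat_mark(1);      /* statics registered from here on get mark 1 */
    B = work_copy(A);
    (void) B;
    mem_info();            /* per-type totals; macro for mem_info_file(stdout,0) */
    mem_stat_free(1);      /* free all statics registered under mark 1 */
    m_free(A);
    return 0;
}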
-** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* memory.c 1.3 11/25/87 */ - -#include "matrix.h" - - -static char rcsid[] = "memory.c,v 1.1 1997/12/04 17:55:38 hines Exp"; - -/* m_get -- gets an mxn matrix (in MAT form) by dynamic memory allocation */ -MAT *m_get(m,n) -int m,n; -{ - MAT *matrix; - int i; - - if (m < 0 || n < 0) - error(E_NEG,"m_get"); - - if ((matrix=NEW(MAT)) == (MAT *)NULL ) - error(E_MEM,"m_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,sizeof(MAT)); - mem_numvar(TYPE_MAT,1); - } - - matrix->m = m; matrix->n = matrix->max_n = n; - matrix->max_m = m; matrix->max_size = m*n; -#ifndef SEGMENTED - if ((matrix->base = NEW_A(m*n,Real)) == (Real *)NULL ) - { - free(matrix); - error(E_MEM,"m_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,m*n*sizeof(Real)); - } -#else - matrix->base = (Real *)NULL; -#endif - if ((matrix->me = (Real **)calloc(m,sizeof(Real *))) == - (Real **)NULL ) - { free(matrix->base); free(matrix); - error(E_MEM,"m_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,m*sizeof(Real *)); - } - -#ifndef SEGMENTED - /* set up pointers */ - for ( i=0; ime[i] = &(matrix->base[i*n]); -#else - for ( i = 0; i < m; i++ ) - if ( (matrix->me[i]=NEW_A(n,Real)) == (Real *)NULL ) - error(E_MEM,"m_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,n*sizeof(Real)); - } -#endif - - return (matrix); -} - - -/* px_get -- gets a PERM of given 'size' by dynamic memory allocation - -- Note: initialized to the identity permutation */ -PERM *px_get(size) -int size; -{ - PERM *permute; - int i; - - if (size < 0) - error(E_NEG,"px_get"); - - if ((permute=NEW(PERM)) == (PERM *)NULL ) - error(E_MEM,"px_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_PERM,0,sizeof(PERM)); - mem_numvar(TYPE_PERM,1); - } - - permute->size = permute->max_size = size; - if ((permute->pe = NEW_A(size,u_int)) == (u_int *)NULL ) - error(E_MEM,"px_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_PERM,0,size*sizeof(u_int)); - } - - for ( i=0; ipe[i] = i; - - return (permute); -} - -/* v_get -- gets a VEC of dimension 'dim' - -- Note: initialized to zero */ -VEC *v_get(size) -int size; -{ - VEC *vector; - - if (size < 0) - error(E_NEG,"v_get"); - - if ((vector=NEW(VEC)) == (VEC *)NULL ) - error(E_MEM,"v_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,0,sizeof(VEC)); - mem_numvar(TYPE_VEC,1); - } - - vector->dim = vector->max_dim = size; - if ((vector->ve=NEW_A(size,Real)) == (Real *)NULL ) - { - free(vector); - error(E_MEM,"v_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,0,size*sizeof(Real)); - } - - return (vector); -} - -/* m_free -- 
returns MAT & asoociated memory back to memory heap */ -int m_free(mat) -MAT *mat; -{ -#ifdef SEGMENTED - int i; -#endif - - if ( mat==(MAT *)NULL || (int)(mat->m) < 0 || - (int)(mat->n) < 0 ) - /* don't trust it */ - return (-1); - -#ifndef SEGMENTED - if ( mat->base != (Real *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,mat->max_m*mat->max_n*sizeof(Real),0); - } - free((char *)(mat->base)); - } -#else - for ( i = 0; i < mat->max_m; i++ ) - if ( mat->me[i] != (Real *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,mat->max_n*sizeof(Real),0); - } - free((char *)(mat->me[i])); - } -#endif - if ( mat->me != (Real **)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,mat->max_m*sizeof(Real *),0); - } - free((char *)(mat->me)); - } - - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,sizeof(MAT),0); - mem_numvar(TYPE_MAT,-1); - } - free((char *)mat); - - return (0); -} - - - -/* px_free -- returns PERM & asoociated memory back to memory heap */ -int px_free(px) -PERM *px; -{ - if ( px==(PERM *)NULL || (int)(px->size) < 0 ) - /* don't trust it */ - return (-1); - - if ( px->pe == (u_int *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_PERM,sizeof(PERM),0); - mem_numvar(TYPE_PERM,-1); - } - free((char *)px); - } - else - { - if (mem_info_is_on()) { - mem_bytes(TYPE_PERM,sizeof(PERM)+px->max_size*sizeof(u_int),0); - mem_numvar(TYPE_PERM,-1); - } - free((char *)px->pe); - free((char *)px); - } - - return (0); -} - - - -/* v_free -- returns VEC & asoociated memory back to memory heap */ -int v_free(vec) -VEC *vec; -{ - if ( vec==(VEC *)NULL || (int)(vec->dim) < 0 ) - /* don't trust it */ - return (-1); - - if ( vec->ve == (Real *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,sizeof(VEC),0); - mem_numvar(TYPE_VEC,-1); - } - free((char *)vec); - } - else - { - if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,sizeof(VEC)+vec->max_dim*sizeof(Real),0); - mem_numvar(TYPE_VEC,-1); - } - free((char *)vec->ve); - free((char *)vec); - } - - return (0); -} - - - -/* m_resize -- returns the matrix A of size new_m x new_n; A is zeroed - -- if A == NULL on entry then the effect is equivalent to m_get() */ -MAT *m_resize(A,new_m,new_n) -MAT *A; -int new_m, new_n; -{ - int i; - int new_max_m, new_max_n, new_size, old_m, old_n; - - if (new_m < 0 || new_n < 0) - error(E_NEG,"m_resize"); - - if ( ! A ) - return m_get(new_m,new_n); - - /* nothing was changed */ - if (new_m == A->m && new_n == A->n) - return A; - - old_m = A->m; old_n = A->n; - if ( new_m > A->max_m ) - { /* re-allocate A->me */ - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,A->max_m*sizeof(Real *), - new_m*sizeof(Real *)); - } - - A->me = RENEW(A->me,new_m,Real *); - if ( ! A->me ) - error(E_MEM,"m_resize"); - } - new_max_m = max(new_m,A->max_m); - new_max_n = max(new_n,A->max_n); - -#ifndef SEGMENTED - new_size = new_max_m*new_max_n; - if ( new_size > A->max_size ) - { /* re-allocate A->base */ - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,A->max_m*A->max_n*sizeof(Real), - new_size*sizeof(Real)); - } - - A->base = RENEW(A->base,new_size,Real); - if ( ! 
A->base ) - error(E_MEM,"m_resize"); - A->max_size = new_size; - } - - /* now set up A->me[i] */ - for ( i = 0; i < new_m; i++ ) - A->me[i] = &(A->base[i*new_n]); - - /* now shift data in matrix */ - if ( old_n > new_n ) - { - for ( i = 1; i < min(old_m,new_m); i++ ) - MEM_COPY((char *)&(A->base[i*old_n]), - (char *)&(A->base[i*new_n]), - sizeof(Real)*new_n); - } - else if ( old_n < new_n ) - { - for ( i = (int)(min(old_m,new_m))-1; i > 0; i-- ) - { /* copy & then zero extra space */ - MEM_COPY((char *)&(A->base[i*old_n]), - (char *)&(A->base[i*new_n]), - sizeof(Real)*old_n); - __zero__(&(A->base[i*new_n+old_n]),(new_n-old_n)); - } - __zero__(&(A->base[old_n]),(new_n-old_n)); - A->max_n = new_n; - } - /* zero out the new rows.. */ - for ( i = old_m; i < new_m; i++ ) - __zero__(&(A->base[i*new_n]),new_n); -#else - if ( A->max_n < new_n ) - { - Real *tmp; - - for ( i = 0; i < A->max_m; i++ ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,A->max_n*sizeof(Real), - new_max_n*sizeof(Real)); - } - - if ( (tmp = RENEW(A->me[i],new_max_n,Real)) == NULL ) - error(E_MEM,"m_resize"); - else { - A->me[i] = tmp; - } - } - for ( i = A->max_m; i < new_max_m; i++ ) - { - if ( (tmp = NEW_A(new_max_n,Real)) == NULL ) - error(E_MEM,"m_resize"); - else { - A->me[i] = tmp; - - if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,new_max_n*sizeof(Real)); - } - } - } - } - else if ( A->max_m < new_m ) - { - for ( i = A->max_m; i < new_m; i++ ) - if ( (A->me[i] = NEW_A(new_max_n,Real)) == NULL ) - error(E_MEM,"m_resize"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,new_max_n*sizeof(Real)); - } - - } - - if ( old_n < new_n ) - { - for ( i = 0; i < old_m; i++ ) - __zero__(&(A->me[i][old_n]),new_n-old_n); - } - - /* zero out the new rows.. */ - for ( i = old_m; i < new_m; i++ ) - __zero__(A->me[i],new_n); -#endif - - A->max_m = new_max_m; - A->max_n = new_max_n; - A->max_size = A->max_m*A->max_n; - A->m = new_m; A->n = new_n; - - return A; -} - -/* px_resize -- returns the permutation px with size new_size - -- px is set to the identity permutation */ -PERM *px_resize(px,new_size) -PERM *px; -int new_size; -{ - int i; - - if (new_size < 0) - error(E_NEG,"px_resize"); - - if ( ! px ) - return px_get(new_size); - - /* nothing is changed */ - if (new_size == px->size) - return px; - - if ( new_size > px->max_size ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_PERM,px->max_size*sizeof(u_int), - new_size*sizeof(u_int)); - } - px->pe = RENEW(px->pe,new_size,u_int); - if ( ! px->pe ) - error(E_MEM,"px_resize"); - px->max_size = new_size; - } - if ( px->size <= new_size ) - /* extend permutation */ - for ( i = px->size; i < new_size; i++ ) - px->pe[i] = i; - else - for ( i = 0; i < new_size; i++ ) - px->pe[i] = i; - - px->size = new_size; - - return px; -} - -/* v_resize -- returns the vector x with dim new_dim - -- x is set to the zero vector */ -VEC *v_resize(x,new_dim) -VEC *x; -int new_dim; -{ - - if (new_dim < 0) - error(E_NEG,"v_resize"); - - if ( ! x ) - return v_get(new_dim); - - /* nothing is changed */ - if (new_dim == x->dim) - return x; - - if ( x->max_dim == 0 ) /* assume that it's from sub_vec */ - return v_get(new_dim); - - if ( new_dim > x->max_dim ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,x->max_dim*sizeof(Real), - new_dim*sizeof(Real)); - } - - x->ve = RENEW(x->ve,new_dim,Real); - if ( ! 
x->ve ) - error(E_MEM,"v_resize"); - x->max_dim = new_dim; - } - - if ( new_dim > x->dim ) - __zero__(&(x->ve[x->dim]),new_dim - x->dim); - x->dim = new_dim; - - return x; -} - - - - -/* Varying number of arguments */ -/* other functions of this type are in sparse.c and zmemory.c */ - - - -#ifdef ANSI_C - - -/* To allocate memory to many arguments. - The function should be called: - v_get_vars(dim,&x,&y,&z,...,NULL); - where - int dim; - VEC *x, *y, *z,...; - The last argument should be NULL ! - dim is the length of vectors x,y,z,... - returned value is equal to the number of allocated variables - Other gec_... functions are similar. -*/ - -int v_get_vars(int dim,...) -{ - va_list ap; - int i=0; - VEC **par; - - va_start(ap, dim); - while ((par = va_arg(ap,VEC **))) { /* NULL ends the list*/ - *par = v_get(dim); - i++; - } - - va_end(ap); - return i; -} - - -int iv_get_vars(int dim,...) -{ - va_list ap; - int i=0; - IVEC **par; - - va_start(ap, dim); - while ((par = va_arg(ap,IVEC **))) { /* NULL ends the list*/ - *par = iv_get(dim); - i++; - } - - va_end(ap); - return i; -} - -int m_get_vars(int m,int n,...) -{ - va_list ap; - int i=0; - MAT **par; - - va_start(ap, n); - while ((par = va_arg(ap,MAT **))) { /* NULL ends the list*/ - *par = m_get(m,n); - i++; - } - - va_end(ap); - return i; -} - -int px_get_vars(int dim,...) -{ - va_list ap; - int i=0; - PERM **par; - - va_start(ap, dim); - while ((par = va_arg(ap,PERM **))) { /* NULL ends the list*/ - *par = px_get(dim); - i++; - } - - va_end(ap); - return i; -} - - - -/* To resize memory for many arguments. - The function should be called: - v_resize_vars(new_dim,&x,&y,&z,...,NULL); - where - int new_dim; - VEC *x, *y, *z,...; - The last argument should be NULL ! - rdim is the resized length of vectors x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. - Other *_resize_list() functions are similar. -*/ - -int v_resize_vars(int new_dim,...) -{ - va_list ap; - int i=0; - VEC **par; - - va_start(ap, new_dim); - while ((par = va_arg(ap,VEC **))) { /* NULL ends the list*/ - *par = v_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - - - -int iv_resize_vars(int new_dim,...) -{ - va_list ap; - int i=0; - IVEC **par; - - va_start(ap, new_dim); - while ((par = va_arg(ap,IVEC **))) { /* NULL ends the list*/ - *par = iv_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - -int m_resize_vars(int m,int n,...) -{ - va_list ap; - int i=0; - MAT **par; - - va_start(ap, n); - while ((par = va_arg(ap,MAT **))) { /* NULL ends the list*/ - *par = m_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - - -int px_resize_vars(int new_dim,...) -{ - va_list ap; - int i=0; - PERM **par; - - va_start(ap, new_dim); - while ((par = va_arg(ap,PERM **))) { /* NULL ends the list*/ - *par = px_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - -/* To deallocate memory for many arguments. - The function should be called: - v_free_vars(&x,&y,&z,...,NULL); - where - VEC *x, *y, *z,...; - The last argument should be NULL ! - There must be at least one not NULL argument. - returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. - Other *_free_list() functions are similar. -*/ - - -int v_free_vars(VEC **pv,...) 
-{ - va_list ap; - int i=1; - VEC **par; - - v_free(*pv); - *pv = VNULL; - va_start(ap, pv); - while ((par = va_arg(ap,VEC **))) { /* NULL ends the list*/ - v_free(*par); - *par = VNULL; - i++; - } - - va_end(ap); - return i; -} - - -int iv_free_vars(IVEC **ipv,...) -{ - va_list ap; - int i=1; - IVEC **par; - - iv_free(*ipv); - *ipv = IVNULL; - va_start(ap, ipv); - while ((par = va_arg(ap,IVEC **))) { /* NULL ends the list*/ - iv_free(*par); - *par = IVNULL; - i++; - } - - va_end(ap); - return i; -} - - -int px_free_vars(PERM **vpx,...) -{ - va_list ap; - int i=1; - PERM **par; - - px_free(*vpx); - *vpx = PNULL; - va_start(ap, vpx); - while ((par = va_arg(ap,PERM **))) { /* NULL ends the list*/ - px_free(*par); - *par = PNULL; - i++; - } - - va_end(ap); - return i; -} - -int m_free_vars(MAT **va,...) -{ - va_list ap; - int i=1; - MAT **par; - - m_free(*va); - *va = MNULL; - va_start(ap, va); - while ((par = va_arg(ap,MAT **))) { /* NULL ends the list*/ - m_free(*par); - *par = MNULL; - i++; - } - - va_end(ap); - return i; -} - - -#elif VARARGS -/* old varargs is used */ - - - -/* To allocate memory to many arguments. - The function should be called: - v_get_vars(dim,&x,&y,&z,...,VNULL); - where - int dim; - VEC *x, *y, *z,...; - The last argument should be VNULL ! - dim is the length of vectors x,y,z,... -*/ - -int v_get_vars(va_alist) va_dcl -{ - va_list ap; - int dim,i=0; - VEC **par; - - va_start(ap); - dim = va_arg(ap,int); - while (par = va_arg(ap,VEC **)) { /* NULL ends the list*/ - *par = v_get(dim); - i++; - } - - va_end(ap); - return i; -} - - -int iv_get_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, dim; - IVEC **par; - - va_start(ap); - dim = va_arg(ap,int); - while (par = va_arg(ap,IVEC **)) { /* NULL ends the list*/ - *par = iv_get(dim); - i++; - } - - va_end(ap); - return i; -} - -int m_get_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, n, m; - MAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - while (par = va_arg(ap,MAT **)) { /* NULL ends the list*/ - *par = m_get(m,n); - i++; - } - - va_end(ap); - return i; -} - - - -int px_get_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, dim; - PERM **par; - - va_start(ap); - dim = va_arg(ap,int); - while (par = va_arg(ap,PERM **)) { /* NULL ends the list*/ - *par = px_get(dim); - i++; - } - - va_end(ap); - return i; -} - - - -/* To resize memory for many arguments. - The function should be called: - v_resize_vars(new_dim,&x,&y,&z,...,NULL); - where - int new_dim; - VEC *x, *y, *z,...; - The last argument should be NULL ! - rdim is the resized length of vectors x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. - Other *_resize_list() functions are similar. 
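/* A minimal sketch of the NULL-terminated variadic helpers documented above
   (illustrative only; vars_example() is a hypothetical helper). One call
   allocates several vectors of the same dimension, one call resizes them,
   and one call frees them all and resets the pointers to VNULL. */

#include "matrix.h"

static void vars_example(void)
{
    VEC *x = VNULL, *y = VNULL, *z = VNULL;

    v_get_vars(10, &x, &y, &z, NULL);      /* allocate three vectors of dimension 10 */
    v_resize_vars(20, &x, &y, &z, NULL);   /* grow all three to dimension 20, new entries zeroed */

    /* ... use x, y, z ... */

    v_free_vars(&x, &y, &z, NULL);         /* free all three; pointers are reset to VNULL */
}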
-*/ - -int v_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, new_dim; - VEC **par; - - va_start(ap); - new_dim = va_arg(ap,int); - while (par = va_arg(ap,VEC **)) { /* NULL ends the list*/ - *par = v_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - - - -int iv_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, new_dim; - IVEC **par; - - va_start(ap); - new_dim = va_arg(ap,int); - while (par = va_arg(ap,IVEC **)) { /* NULL ends the list*/ - *par = iv_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - -int m_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, m, n; - MAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - while (par = va_arg(ap,MAT **)) { /* NULL ends the list*/ - *par = m_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - -int px_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, new_dim; - PERM **par; - - va_start(ap); - new_dim = va_arg(ap,int); - while (par = va_arg(ap,PERM **)) { /* NULL ends the list*/ - *par = px_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - - -/* To deallocate memory for many arguments. - The function should be called: - v_free_vars(&x,&y,&z,...,NULL); - where - VEC *x, *y, *z,...; - The last argument should be NULL ! - returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. - Other *_free_list() functions are similar. -*/ - - -int v_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - VEC **par; - - va_start(ap); - while (par = va_arg(ap,VEC **)) { /* NULL ends the list*/ - v_free(*par); - *par = VNULL; - i++; - } - - va_end(ap); - return i; -} - - - -int iv_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - IVEC **par; - - va_start(ap); - while (par = va_arg(ap,IVEC **)) { /* NULL ends the list*/ - iv_free(*par); - *par = IVNULL; - i++; - } - - va_end(ap); - return i; -} - - -int px_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - PERM **par; - - va_start(ap); - while (par = va_arg(ap,PERM **)) { /* NULL ends the list*/ - px_free(*par); - *par = PNULL; - i++; - } - - va_end(ap); - return i; -} - -int m_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - MAT **par; - - va_start(ap); - while (par = va_arg(ap,MAT **)) { /* NULL ends the list*/ - m_free(*par); - *par = MNULL; - i++; - } - - va_end(ap); - return i; -} - - - -#endif /* VARARGS */ - - diff --git a/src/mesch/memstat.c b/src/mesch/memstat.c deleted file mode 100755 index c216e6e620..0000000000 --- a/src/mesch/memstat.c +++ /dev/null @@ -1,384 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* mem_stat.c 6/09/93 */ - -/* Deallocation of static arrays */ - - -#include -#include "matrix.h" -#include "meminfo.h" -#ifdef COMPLEX -#include "zmatrix.h" -#endif -#ifdef SPARSE -#include "sparse.h" -#include "iter.h" -#endif - -static char rcsid[] = "memstat.c,v 1.1 1997/12/04 17:55:39 hines Exp"; - -/* global variable */ - -extern MEM_CONNECT mem_connect[MEM_CONNECT_MAX_LISTS]; - - -/* local type */ - -typedef struct { - void **var; /* for &A, where A is a pointer */ - int type; /* type of A */ - int mark; /* what mark is chosen */ -} MEM_STAT_STRUCT; - - -/* local variables */ - -/* how many marks are used */ -static int mem_stat_mark_many = 0; - -/* current mark */ -static int mem_stat_mark_curr = 0; - - -static MEM_STAT_STRUCT mem_stat_var[MEM_HASHSIZE]; - -/* array of indices (+1) to mem_stat_var */ -static unsigned int mem_hash_idx[MEM_HASHSIZE]; - -/* points to the first unused element in mem_hash_idx */ -static unsigned int mem_hash_idx_end = 0; - - - -/* hashing function */ - -static unsigned int mem_hash(ptr) -void **ptr; -{ - unsigned long lp = (size_t)ptr; - - return (lp % MEM_HASHSIZE); -} - - -/* look for a place in mem_stat_var */ -static int mem_lookup(var) -void **var; -{ - int k, j; - - k = mem_hash(var); - - if (mem_stat_var[k].var == var) { - return -1; - } - else if (mem_stat_var[k].var == NULL) { - return k; - } - else { /* look for an empty place */ - j = k; - while (mem_stat_var[j].var != var && j < MEM_HASHSIZE - && mem_stat_var[j].var != NULL) - j++; - - if (mem_stat_var[j].var == NULL) return j; - else if (mem_stat_var[j].var == var) return -1; - else { /* if (j == MEM_HASHSIZE) */ - j = 0; - while (mem_stat_var[j].var != var && j < k - && mem_stat_var[j].var != NULL) - j++; - if (mem_stat_var[j].var == NULL) return j; - else if (mem_stat_var[j].var == var) return -1; - else { /* if (j == k) */ - fprintf(stderr, - "\n WARNING !!! static memory: mem_stat_var is too small\n"); - fprintf(stderr, - " Increase MEM_HASHSIZE in file: %s (currently = %d)\n\n", - MEM_HASHSIZE_FILE, MEM_HASHSIZE); - if ( !isatty(fileno(stdout)) ) { - fprintf(stdout, - "\n WARNING !!! 
static memory: mem_stat_var is too small\n"); - fprintf(stdout, - " Increase MEM_HASHSIZE in file: %s (currently = %d)\n\n", - MEM_HASHSIZE_FILE, MEM_HASHSIZE); - } - error(E_MEM,"mem_lookup"); - } - } - } - - return -1; -} - - -/* register static variables; - Input arguments: - var - variable to be registered, - type - type of this variable; - list - list of types - - returned value < 0 --> error, - returned value == 0 --> not registered, - returned value >= 0 --> registered with this mark; -*/ - -int mem_stat_reg_list(var,type,list) -void **var; -int type,list; -{ - int n; - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS ) - return -1; - - if (mem_stat_mark_curr == 0) return 0; /* not registered */ - if (var == NULL) return -1; /* error */ - - if ( type < 0 || type >= mem_connect[list].ntypes || - mem_connect[list].free_funcs[type] == NULL ) - { - warning(WARN_WRONG_TYPE,"mem_stat_reg_list"); - return -1; - } - - if ((n = mem_lookup(var)) >= 0) { - mem_stat_var[n].var = var; - mem_stat_var[n].mark = mem_stat_mark_curr; - mem_stat_var[n].type = type; - /* save n+1, not n */ - mem_hash_idx[mem_hash_idx_end++] = n+1; - } - - return mem_stat_mark_curr; -} - - -/* set a mark; - Input argument: - mark - positive number denoting a mark; - returned: - mark if mark > 0, - 0 if mark == 0, - -1 if mark is negative. -*/ - -int mem_stat_mark(mark) -int mark; -{ - if (mark < 0) { - mem_stat_mark_curr = 0; - return -1; /* error */ - } - else if (mark == 0) { - mem_stat_mark_curr = 0; - return 0; - } - - mem_stat_mark_curr = mark; - mem_stat_mark_many++; - - return mark; -} - - - -/* deallocate static variables; - Input argument: - mark - a positive number denoting the mark; - - Returned: - -1 if mark < 0 (error); - 0 if mark == 0; -*/ - -int mem_stat_free_list(mark,list) -int mark,list; -{ - u_int i,j; - int (*free_fn)(); - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS - || mem_connect[list].free_funcs == NULL ) - return -1; - - if (mark < 0) { - mem_stat_mark_curr = 0; - return -1; - } - else if (mark == 0) { - mem_stat_mark_curr = 0; - return 0; - } - - if (mem_stat_mark_many <= 0) { - warning(WARN_NO_MARK,"mem_stat_free"); - return -1; - } - - /* deallocate the marked variables */ - for (i=0; i < mem_hash_idx_end; i++) { - j = mem_hash_idx[i]; - if (j == 0) continue; - else { - j--; - if (mem_stat_var[j].mark == mark) { - free_fn = mem_connect[list].free_funcs[mem_stat_var[j].type]; - if ( free_fn != NULL ) - (*free_fn)(*mem_stat_var[j].var); - else - warning(WARN_WRONG_TYPE,"mem_stat_free"); - - *(mem_stat_var[j].var) = NULL; - mem_stat_var[j].var = NULL; - mem_stat_var[j].mark = 0; - mem_hash_idx[i] = 0; - } - } - } - - while (mem_hash_idx_end > 0 && mem_hash_idx[mem_hash_idx_end-1] == 0) - mem_hash_idx_end--; - - mem_stat_mark_curr = 0; - mem_stat_mark_many--; - return 0; -} - - -/* only for diagnostic purposes */ - -void mem_stat_dump(fp,list) -FILE *fp; -int list; -{ - u_int i,j,k=1; - - if ( list < 0 || list >= MEM_CONNECT_MAX_LISTS - || mem_connect[list].free_funcs == NULL ) - return; - - fprintf(fp," Array mem_stat_var (list no. %d):\n",list); - for (i=0; i < mem_hash_idx_end; i++) { - j = mem_hash_idx[i]; - if (j == 0) continue; - else { - j--; - fprintf(fp," %d. var = 0x%p, type = %s, mark = %d\n", - k,mem_stat_var[j].var, - mem_stat_var[j].type < mem_connect[list].ntypes && - mem_connect[list].free_funcs[mem_stat_var[j].type] != NULL ? 
- mem_connect[list].type_names[(int)mem_stat_var[j].type] : - "???", - mem_stat_var[j].mark); - k++; - } - } - - fprintf(fp,"\n"); -} - - -/* query function about the current mark */ -#ifdef ANSI_C -int mem_stat_show_mark(void) -#else -int mem_stat_show_mark() -#endif -{ - return mem_stat_mark_curr; -} - - -/* Varying number of arguments */ - - -#ifdef ANSI_C - -/* To allocate memory to many arguments. - The function should be called: - mem_stat_vars(list,type,&v1,&v2,&v3,...,VNULL); - where - int list,type; - void **v1, **v2, **v3,...; - The last argument should be VNULL ! - type is the type of variables v1,v2,v3,... - (of course they must be of the same type) -*/ - -int mem_stat_reg_vars(int list,int type,...) -{ - va_list ap; - int i=0; - void **par; - - va_start(ap, type); - while ((par = va_arg(ap,void **))) { /* NULL ends the list*/ - mem_stat_reg_list(par,type,list); - i++; - } - - va_end(ap); - return i; -} - -#elif VARARGS -/* old varargs is used */ - -/* To allocate memory to many arguments. - The function should be called: - mem_stat_vars(list,type,&v1,&v2,&v3,...,VNULL); - where - int list,type; - void **v1, **v2, **v3,...; - The last argument should be VNULL ! - type is the type of variables v1,v2,v3,... - (of course they must be of the same type) -*/ - -int mem_stat_reg_vars(va_alist) va_dcl -{ - va_list ap; - int type,list,i=0; - void **par; - - va_start(ap); - list = va_arg(ap,int); - type = va_arg(ap,int); - while ((par = va_arg(ap,void **))) { /* NULL ends the list*/ - mem_stat_reg_list(par,type,list); - i++; - } - - va_end(ap); - return i; -} - - -#endif diff --git a/src/mesch/mfunc.c b/src/mesch/mfunc.c deleted file mode 100755 index f170f45510..0000000000 --- a/src/mesch/mfunc.c +++ /dev/null @@ -1,399 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This file contains routines for computing functions of matrices - especially polynomials and exponential functions - Copyright (C) Teresa Leyk and David Stewart, 1993 - */ - -#include -#include "matrix.h" -#include "matrix2.h" -#include - -static char rcsid[] = "mfunc.c,v 1.1 1997/12/04 17:55:41 hines Exp"; - - - -/* _m_pow -- computes integer powers of a square matrix A, A^p - -- uses tmp as temporary workspace */ -MAT *_m_pow(A, p, tmp, out) -MAT *A, *tmp, *out; -int p; -{ - int it_cnt, k, max_bit; - - /* - File containing routines for evaluating matrix functions - esp. the exponential function - */ - -#define Z(k) (((k) & 1) ? 
tmp : out) - - if ( ! A ) - error(E_NULL,"_m_pow"); - if ( A->m != A->n ) - error(E_SQUARE,"_m_pow"); - if ( p < 0 ) - error(E_NEG,"_m_pow"); - out = m_resize(out,A->m,A->n); - tmp = m_resize(tmp,A->m,A->n); - - if ( p == 0 ) - m_ident(out); - else if ( p > 0 ) - { - it_cnt = 1; - for ( max_bit = 0; ; max_bit++ ) - if ( (p >> (max_bit+1)) == 0 ) - break; - tmp = m_copy(A,tmp); - - for ( k = 0; k < max_bit; k++ ) - { - m_mlt(Z(it_cnt),Z(it_cnt),Z(it_cnt+1)); - it_cnt++; - if ( p & (1 << (max_bit-1)) ) - { - m_mlt(A,Z(it_cnt),Z(it_cnt+1)); - /* m_copy(Z(it_cnt),out); */ - it_cnt++; - } - p <<= 1; - } - if (it_cnt & 1) - out = m_copy(Z(it_cnt),out); - } - - return out; - -#undef Z -} - -/* m_pow -- computes integer powers of a square matrix A, A^p */ -MAT *m_pow(A, p, out) -MAT *A, *out; -int p; -{ - static MAT *wkspace, *tmp; - - if ( ! A ) - error(E_NULL,"m_pow"); - if ( A->m != A->n ) - error(E_SQUARE,"m_pow"); - - wkspace = m_resize(wkspace,A->m,A->n); - MEM_STAT_REG(wkspace,TYPE_MAT); - if ( p < 0 ) - { - tmp = m_resize(tmp,A->m,A->n); - MEM_STAT_REG(tmp,TYPE_MAT); - tracecatch(m_inverse(A,tmp),"m_pow"); - return _m_pow(tmp, -p, wkspace, out); - } - else - return _m_pow(A, p, wkspace, out); - -} - -/**************************************************/ - -/* _m_exp -- compute matrix exponential of A and save it in out - -- uses Pade approximation followed by repeated squaring - -- eps is the tolerance used for the Pade approximation - -- A is not changed - -- q_out - degree of the Pade approximation (q_out,q_out) - -- j_out - the power of 2 for scaling the matrix A - such that ||A/2^j_out|| <= 0.5 -*/ -MAT *_m_exp(A,eps,out,q_out,j_out) -MAT *A,*out; -double eps; -int *q_out, *j_out; -{ - static MAT *D = MNULL, *Apow = MNULL, *N = MNULL, *Y = MNULL; - static VEC *c1 = VNULL, *tmp = VNULL; - VEC y0, y1; /* additional structures */ - static PERM *pivot = PNULL; - int j, k, l, q, r, s, j2max, t; - double inf_norm, eqq, power2, c, sign; - - if ( ! 
A ) - error(E_SIZES,"_m_exp"); - if ( A->m != A->n ) - error(E_SIZES,"_m_exp"); - if ( A == out ) - error(E_INSITU,"_m_exp"); - if ( eps < 0.0 ) - error(E_RANGE,"_m_exp"); - else if (eps == 0.0) - eps = MACHEPS; - - N = m_resize(N,A->m,A->n); - D = m_resize(D,A->m,A->n); - Apow = m_resize(Apow,A->m,A->n); - out = m_resize(out,A->m,A->n); - - MEM_STAT_REG(N,TYPE_MAT); - MEM_STAT_REG(D,TYPE_MAT); - MEM_STAT_REG(Apow,TYPE_MAT); - - /* normalise A to have ||A||_inf <= 1 */ - inf_norm = m_norm_inf(A); - if (inf_norm <= 0.0) { - m_ident(out); - *q_out = -1; - *j_out = 0; - return out; - } - else { - j2max = floor(1+log(inf_norm)/log(2.0)); - j2max = max(0, j2max); - } - - power2 = 1.0; - for ( k = 1; k <= j2max; k++ ) - power2 *= 2; - power2 = 1.0/power2; - if ( j2max > 0 ) - sm_mlt(power2,A,A); - - /* compute order for polynomial approximation */ - eqq = 1.0/6.0; - for ( q = 1; eqq > eps; q++ ) - eqq /= 16.0*(2.0*q+1.0)*(2.0*q+3.0); - - /* construct vector of coefficients */ - c1 = v_resize(c1,q+1); - MEM_STAT_REG(c1,TYPE_VEC); - c1->ve[0] = 1.0; - for ( k = 1; k <= q; k++ ) - c1->ve[k] = c1->ve[k-1]*(q-k+1)/((2*q-k+1)*(double)k); - - tmp = v_resize(tmp,A->n); - MEM_STAT_REG(tmp,TYPE_VEC); - - s = (int)floor(sqrt((double)q/2.0)); - if ( s <= 0 ) s = 1; - _m_pow(A,s,out,Apow); - r = q/s; - - Y = m_resize(Y,s,A->n); - MEM_STAT_REG(Y,TYPE_MAT); - /* y0 and y1 are pointers to rows of Y, N and D */ - y0.dim = y0.max_dim = A->n; - y1.dim = y1.max_dim = A->n; - - m_zero(Y); - m_zero(N); - m_zero(D); - - for( j = 0; j < A->n; j++ ) - { - if (j > 0) - Y->me[0][j-1] = 0.0; - y0.ve = Y->me[0]; - y0.ve[j] = 1.0; - for ( k = 0; k < s-1; k++ ) - { - y1.ve = Y->me[k+1]; - mv_mlt(A,&y0,&y1); - y0.ve = y1.ve; - } - - y0.ve = N->me[j]; - y1.ve = D->me[j]; - t = s*r; - for ( l = 0; l <= q-t; l++ ) - { - c = c1->ve[t+l]; - sign = ((t+l) & 1) ? -1.0 : 1.0; - __mltadd__(y0.ve,Y->me[l],c, Y->n); - __mltadd__(y1.ve,Y->me[l],c*sign,Y->n); - } - - for (k=1; k <= r; k++) - { - v_copy(mv_mlt(Apow,&y0,tmp),&y0); - v_copy(mv_mlt(Apow,&y1,tmp),&y1); - t = s*(r-k); - for (l=0; l < s; l++) - { - c = c1->ve[t+l]; - sign = ((t+l) & 1) ? -1.0 : 1.0; - __mltadd__(y0.ve,Y->me[l],c, Y->n); - __mltadd__(y1.ve,Y->me[l],c*sign,Y->n); - } - } - } - - pivot = px_resize(pivot,A->m); - MEM_STAT_REG(pivot,TYPE_PERM); - - /* note that N and D are transposed, - therefore we use LUTsolve; - out is saved row-wise, and must be transposed - after this */ - - LUfactor(D,pivot); - for (k=0; k < A->n; k++) - { - y0.ve = N->me[k]; - y1.ve = out->me[k]; - LUTsolve(D,pivot,&y0,&y1); - } - m_transp(out,out); - - - /* Use recursive squaring to turn the normalised exponential to the - true exponential */ - -#define Z(k) ((k) & 1 ? Apow : out) - - for( k = 1; k <= j2max; k++) - m_mlt(Z(k-1),Z(k-1),Z(k)); - - if (Z(k) == out) - m_copy(Apow,out); - - /* output parameters */ - *j_out = j2max; - *q_out = q; - - /* restore the matrix A */ - sm_mlt(1.0/power2,A,A); - return out; - -#undef Z -} - - -/* simple interface for _m_exp */ -MAT *m_exp(A,eps,out) -MAT *A,*out; -double eps; -{ - int q_out, j_out; - - return _m_exp(A,eps,out,&q_out,&j_out); -} - - -/*--------------------------------*/ - -/* m_poly -- computes sum_i a[i].A^i, where i=0,1,...dim(a); - -- uses C. Van Loan's fast and memory efficient method */ -MAT *m_poly(A,a,out) -MAT *A,*out; -VEC *a; -{ - static MAT *Apow = MNULL, *Y = MNULL; - static VEC *tmp; - VEC y0, y1; /* additional vectors */ - int j, k, l, q, r, s, t; - - if ( ! A || ! 
a ) - error(E_NULL,"m_poly"); - if ( A->m != A->n ) - error(E_SIZES,"m_poly"); - if ( A == out ) - error(E_INSITU,"m_poly"); - - out = m_resize(out,A->m,A->n); - Apow = m_resize(Apow,A->m,A->n); - MEM_STAT_REG(Apow,TYPE_MAT); - tmp = v_resize(tmp,A->n); - MEM_STAT_REG(tmp,TYPE_VEC); - - q = a->dim - 1; - if ( q == 0 ) { - m_zero(out); - for (j=0; j < out->n; j++) - out->me[j][j] = a->ve[0]; - return out; - } - else if ( q == 1) { - sm_mlt(a->ve[1],A,out); - for (j=0; j < out->n; j++) - out->me[j][j] += a->ve[0]; - return out; - } - - s = (int)floor(sqrt((double)q/2.0)); - if ( s <= 0 ) s = 1; - _m_pow(A,s,out,Apow); - r = q/s; - - Y = m_resize(Y,s,A->n); - MEM_STAT_REG(Y,TYPE_MAT); - /* pointers to rows of Y */ - y0.dim = y0.max_dim = A->n; - y1.dim = y1.max_dim = A->n; - - m_zero(Y); - m_zero(out); - -#define Z(k) ((k) & 1 ? tmp : &y0) -#define ZZ(k) ((k) & 1 ? tmp->ve : y0.ve) - - for( j = 0; j < A->n; j++) - { - if( j > 0 ) - Y->me[0][j-1] = 0.0; - Y->me[0][j] = 1.0; - - y0.ve = Y->me[0]; - for (k = 0; k < s-1; k++) - { - y1.ve = Y->me[k+1]; - mv_mlt(A,&y0,&y1); - y0.ve = y1.ve; - } - - y0.ve = out->me[j]; - - t = s*r; - for ( l = 0; l <= q-t; l++ ) - __mltadd__(y0.ve,Y->me[l],a->ve[t+l],Y->n); - - for (k=1; k <= r; k++) - { - mv_mlt(Apow,Z(k-1),Z(k)); - t = s*(r-k); - for (l=0; l < s; l++) - __mltadd__(ZZ(k),Y->me[l],a->ve[t+l],Y->n); - } - if (Z(k) == &y0) v_copy(tmp,&y0); - } - - m_transp(out,out); - - return out; -} - - diff --git a/src/mesch/norm.c b/src/mesch/norm.c deleted file mode 100755 index 013794f974..0000000000 --- a/src/mesch/norm.c +++ /dev/null @@ -1,199 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - A collection of functions for computing norms: scaled and unscaled -*/ -static char rcsid[] = "norm.c,v 1.1 1997/12/04 17:55:43 hines Exp"; - -#include -#include "matrix.h" -#include - - -/* _v_norm1 -- computes (scaled) 1-norms of vectors */ -double _v_norm1(x,scale) -VEC *x, *scale; -{ - int i, dim; - Real s, sum; - - if ( x == (VEC *)NULL ) - error(E_NULL,"_v_norm1"); - dim = x->dim; - - sum = 0.0; - if ( scale == (VEC *)NULL ) - for ( i = 0; i < dim; i++ ) - sum += fabs(x->ve[i]); - else if ( scale->dim < dim ) - error(E_SIZES,"_v_norm1"); - else - for ( i = 0; i < dim; i++ ) - { s = scale->ve[i]; - sum += ( s== 0.0 ) ? 
fabs(x->ve[i]) : fabs(x->ve[i]/s); - } - - return sum; -} - -/* square -- returns x^2 */ -double square(x) -double x; -{ return x*x; } - -/* cube -- returns x^3 */ -double cube(x) -double x; -{ return x*x*x; } - -/* _v_norm2 -- computes (scaled) 2-norm (Euclidean norm) of vectors */ -double _v_norm2(x,scale) -VEC *x, *scale; -{ - int i, dim; - Real s, sum; - - if ( x == (VEC *)NULL ) - error(E_NULL,"_v_norm2"); - dim = x->dim; - - sum = 0.0; - if ( scale == (VEC *)NULL ) - for ( i = 0; i < dim; i++ ) - sum += square(x->ve[i]); - else if ( scale->dim < dim ) - error(E_SIZES,"_v_norm2"); - else - for ( i = 0; i < dim; i++ ) - { s = scale->ve[i]; - sum += ( s== 0.0 ) ? square(x->ve[i]) : - square(x->ve[i]/s); - } - - return sqrt(sum); -} - -#define max(a,b) ((a) > (b) ? (a) : (b)) - -/* _v_norm_inf -- computes (scaled) infinity-norm (supremum norm) of vectors */ -double _v_norm_inf(x,scale) -VEC *x, *scale; -{ - int i, dim; - Real s, maxval, tmp; - - if ( x == (VEC *)NULL ) - error(E_NULL,"_v_norm_inf"); - dim = x->dim; - - maxval = 0.0; - if ( scale == (VEC *)NULL ) - for ( i = 0; i < dim; i++ ) - { tmp = fabs(x->ve[i]); - maxval = max(maxval,tmp); - } - else if ( scale->dim < dim ) - error(E_SIZES,"_v_norm_inf"); - else - for ( i = 0; i < dim; i++ ) - { s = scale->ve[i]; - tmp = ( s== 0.0 ) ? fabs(x->ve[i]) : fabs(x->ve[i]/s); - maxval = max(maxval,tmp); - } - - return maxval; -} - -/* m_norm1 -- compute matrix 1-norm -- unscaled */ -double m_norm1(A) -MAT *A; -{ - int i, j, m, n; - Real maxval, sum; - - if ( A == (MAT *)NULL ) - error(E_NULL,"m_norm1"); - - m = A->m; n = A->n; - maxval = 0.0; - - for ( j = 0; j < n; j++ ) - { - sum = 0.0; - for ( i = 0; i < m; i ++ ) - sum += fabs(A->me[i][j]); - maxval = max(maxval,sum); - } - - return maxval; -} - -/* m_norm_inf -- compute matrix infinity-norm -- unscaled */ -double m_norm_inf(A) -MAT *A; -{ - int i, j, m, n; - Real maxval, sum; - - if ( A == (MAT *)NULL ) - error(E_NULL,"m_norm_inf"); - - m = A->m; n = A->n; - maxval = 0.0; - - for ( i = 0; i < m; i++ ) - { - sum = 0.0; - for ( j = 0; j < n; j ++ ) - sum += fabs(A->me[i][j]); - maxval = max(maxval,sum); - } - - return maxval; -} - -/* m_norm_frob -- compute matrix frobenius-norm -- unscaled */ -double m_norm_frob(A) -MAT *A; -{ - int i, j, m, n; - Real sum; - - if ( A == (MAT *)NULL ) - error(E_NULL,"m_norm_frob"); - - m = A->m; n = A->n; - sum = 0.0; - - for ( i = 0; i < m; i++ ) - for ( j = 0; j < n; j ++ ) - sum += square(A->me[i][j]); - - return sqrt(sum); -} - diff --git a/src/mesch/oldnames.h b/src/mesch/oldnames.h deleted file mode 100755 index d014d5332c..0000000000 --- a/src/mesch/oldnames.h +++ /dev/null @@ -1,150 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. 
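/* A minimal, self-contained sketch of the square-and-multiply idea behind the
 * _m_pow()/m_pow() routines in the mfunc.c file removed above: A^p is built
 * from repeated squarings of A, folding the current square into an accumulator
 * whenever the corresponding bit of p is set.  Plain fixed-size C arrays stand
 * in for the Meschach MAT type; the helper names and the 2x2 Fibonacci test
 * matrix are illustrative only, not a line-for-line port of the deleted code. */
#include <stdio.h>
#include <string.h>

#define N 2

static void mat_mlt(double A[N][N], double B[N][N], double C[N][N])
{
    int i, j, k;
    for (i = 0; i < N; i++)
        for (j = 0; j < N; j++) {
            double s = 0.0;
            for (k = 0; k < N; k++)
                s += A[i][k] * B[k][j];
            C[i][j] = s;
        }
}

/* out = A^p for p >= 0, using O(log p) matrix multiplications */
static void mat_pow(double A[N][N], unsigned p, double out[N][N])
{
    double base[N][N], tmp[N][N];
    int i, j;

    for (i = 0; i < N; i++)                 /* out = identity */
        for (j = 0; j < N; j++)
            out[i][j] = (i == j) ? 1.0 : 0.0;
    memcpy(base, A, sizeof(base));          /* base = A */

    while (p > 0) {
        if (p & 1u) {                       /* fold in the current power of A */
            mat_mlt(out, base, tmp);
            memcpy(out, tmp, sizeof(tmp));
        }
        mat_mlt(base, base, tmp);           /* square for the next bit */
        memcpy(base, tmp, sizeof(base));
        p >>= 1;
    }
}

int main(void)
{
    double A[N][N] = {{1.0, 1.0}, {1.0, 0.0}};  /* Fibonacci companion matrix */
    double P[N][N];

    mat_pow(A, 10, P);
    printf("A^10[0][0] = %g\n", P[0][0]);       /* prints 89 = F(11) */
    return 0;
}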
-** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* macros for names used in versions 1.0 and 1.1 */ -/* 8/11/93 */ - - -#ifndef OLDNAMESH -#define OLDNAMESH - - -/* type IVEC */ - -#define get_ivec iv_get -#define freeivec IV_FREE -#define cp_ivec iv_copy -#define fout_ivec iv_foutput -#define out_ivec iv_output -#define fin_ivec iv_finput -#define in_ivec iv_input -#define dump_ivec iv_dump - - -/* type ZVEC */ - -#define get_zvec zv_get -#define freezvec ZV_FREE -#define cp_zvec zv_copy -#define fout_zvec zv_foutput -#define out_zvec zv_output -#define fin_zvec zv_finput -#define in_zvec zv_input -#define zero_zvec zv_zero -#define rand_zvec zv_rand -#define dump_zvec zv_dump - -/* type ZMAT */ - -#define get_zmat zm_get -#define freezmat ZM_FREE -#define cp_zmat zm_copy -#define fout_zmat zm_foutput -#define out_zmat zm_output -#define fin_zmat zm_finput -#define in_zmat zm_input -#define zero_zmat zm_zero -#define rand_zmat zm_rand -#define dump_zmat zm_dump - -/* types SPMAT */ - -#define sp_mat SPMAT -#define sp_get_mat sp_get -#define sp_free_mat sp_free -#define sp_cp_mat sp_copy -#define sp_cp_mat2 sp_copy2 -#define sp_fout_mat sp_foutput -#define sp_fout_mat2 sp_foutput2 -#define sp_out_mat sp_output -#define sp_out_mat2 sp_output2 -#define sp_fin_mat sp_finput -#define sp_in_mat sp_input -#define sp_zero_mat sp_zero -#define sp_dump_mat sp_dump - - -/* type SPROW */ - -#define sp_row SPROW -#define sp_get_idx sprow_idx -#define row_xpd sprow_xpd -#define sp_get_row sprow_get -#define row_set_val sprow_set_val -#define fout_row sprow_foutput -#define _row_mltadd sprow_mltadd -#define sp_row_copy sprow_copy -#define sp_row_merge sprow_merge -#define sp_row_ip sprow_ip -#define sp_row_sqr sprow_sqr - - -/* type MAT */ - -#define get_mat m_get -#define freemat M_FREE -#define cp_mat m_copy -#define fout_mat m_foutput -#define out_mat m_output -#define fin_mat m_finput -#define in_mat m_input -#define zero_mat m_zero -#define id_mat m_ident -#define rand_mat m_rand -#define ones_mat m_ones -#define dump_mat m_dump - -/* type VEC */ - -#define get_vec v_get -#define freevec V_FREE -#define cp_vec v_copy -#define fout_vec v_foutput -#define out_vec v_output -#define fin_vec v_finput -#define in_vec v_input -#define zero_vec v_zero -#define rand_vec v_rand -#define ones_vec v_ones -#define dump_vec v_dump - - -/* type PERM */ - -#define get_perm px_get -#define freeperm PX_FREE -#define cp_perm px_copy -#define fout_perm px_foutput -#define out_perm px_output -#define fin_perm px_finput -#define in_perm px_input -#define id_perm px_ident -#define px_id px_ident -#define trans_px px_transp -#define sign_px px_sign -#define dump_perm px_dump - -#endif diff --git a/src/mesch/otherio.c b/src/mesch/otherio.c deleted file mode 100755 index 9cad60bda6..0000000000 --- a/src/mesch/otherio.c +++ /dev/null @@ -1,165 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. 
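/* A short, self-contained illustration of the unscaled norms computed by the
 * norm.c routines removed above (_v_norm1, _v_norm_inf, m_norm_frob); the
 * scaled variants, which divide each entry by a scale vector first, are left
 * out.  The sample vector and matrix values are arbitrary. */
#include <math.h>
#include <stdio.h>

#define DIM 3

int main(void)
{
    double x[DIM] = {1.0, -4.0, 2.0};
    double A[DIM][DIM] = {{1.0,  2.0, 0.0},
                          {0.0, -3.0, 1.0},
                          {2.0,  0.0, 1.0}};
    double norm1 = 0.0, norm_inf = 0.0, frob = 0.0;
    int i, j;

    for (i = 0; i < DIM; i++) {
        norm1 += fabs(x[i]);               /* ||x||_1   = sum_i |x_i|       */
        if (fabs(x[i]) > norm_inf)
            norm_inf = fabs(x[i]);         /* ||x||_inf = max_i |x_i|       */
    }
    for (i = 0; i < DIM; i++)
        for (j = 0; j < DIM; j++)
            frob += A[i][j] * A[i][j];     /* ||A||_F = sqrt(sum_ij a_ij^2) */

    printf("||x||_1 = %g  ||x||_inf = %g  ||A||_F = %g\n",
           norm1, norm_inf, sqrt(frob));
    return 0;
}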
-** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - File for doing assorted I/O operations not invlolving - MAT/VEC/PERM objects -*/ -static char rcsid[] = "otherio.c,v 1.1 1997/12/04 17:55:44 hines Exp"; - -#include -#include -#include "matrix.h" - - - -/* scratch area -- enough for a single line */ -static char scratch[MAXLINE+1]; - -/* default value for fy_or_n */ -static int y_n_dflt = TRUE; - -/* fy_or_n -- yes-or-no to question is string s - -- question written to stderr, input from fp - -- if fp is NOT a tty then return y_n_dflt */ -int fy_or_n(fp,s) -FILE *fp; -char *s; -{ - char *cp; - - if ( ! isatty(fileno(fp)) ) - return y_n_dflt; - - for ( ; ; ) - { - fprintf(stderr,"%s (y/n) ? ",s); - if ( fgets(scratch,MAXLINE,fp)==NULL ) - error(E_INPUT,"fy_or_n"); - cp = scratch; - while ( isspace(*cp) ) - cp++; - if ( *cp == 'y' || *cp == 'Y' ) - return TRUE; - if ( *cp == 'n' || *cp == 'N' ) - return FALSE; - fprintf(stderr,"Please reply with 'y' or 'Y' for yes "); - fprintf(stderr,"and 'n' or 'N' for no.\n"); - } -} - -/* yn_dflt -- sets the value of y_n_dflt to val */ -int yn_dflt(val) -int val; -{ return y_n_dflt = val; } - -/* fin_int -- return integer read from file/stream fp - -- prompt s on stderr if fp is a tty - -- check that x lies between low and high: re-prompt if - fp is a tty, error exit otherwise - -- ignore check if low > high */ -int fin_int(fp,s,low,high) -FILE *fp; -char *s; -int low, high; -{ - int retcode, x; - - if ( ! isatty(fileno(fp)) ) - { - skipjunk(fp); - if ( (retcode=fscanf(fp,"%d",&x)) == EOF ) - error(E_INPUT,"fin_int"); - if ( retcode <= 0 ) - error(E_FORMAT,"fin_int"); - if ( low <= high && ( x < low || x > high ) ) - error(E_BOUNDS,"fin_int"); - return x; - } - - for ( ; ; ) - { - fprintf(stderr,"%s: ",s); - if ( fgets(scratch,MAXLINE,stdin)==NULL ) - error(E_INPUT,"fin_int"); - retcode = sscanf(scratch,"%d",&x); - if ( ( retcode==1 && low > high ) || - ( x >= low && x <= high ) ) - return x; - fprintf(stderr,"Please type an integer in range [%d,%d].\n", - low,high); - } -} - - -/* fin_double -- return double read from file/stream fp - -- prompt s on stderr if fp is a tty - -- check that x lies between low and high: re-prompt if - fp is a tty, error exit otherwise - -- ignore check if low > high */ -double fin_double(fp,s,low,high) -FILE *fp; -char *s; -double low, high; -{ - Real retcode, x; - - if ( ! 
isatty(fileno(fp)) ) - { - skipjunk(fp); -#if REAL == DOUBLE - if ( (retcode=fscanf(fp,"%lf",&x)) == EOF ) -#elif REAL == FLOAT - if ( (retcode=fscanf(fp,"%f",&x)) == EOF ) -#endif - error(E_INPUT,"fin_double"); - if ( retcode <= 0 ) - error(E_FORMAT,"fin_double"); - if ( low <= high && ( x < low || x > high ) ) - error(E_BOUNDS,"fin_double"); - return (double)x; - } - - for ( ; ; ) - { - fprintf(stderr,"%s: ",s); - if ( fgets(scratch,MAXLINE,stdin)==NULL ) - error(E_INPUT,"fin_double"); -#if REAL == DOUBLE - retcode = sscanf(scratch,"%lf",&x); -#elif REAL == FLOAT - retcode = sscanf(scratch,"%f",&x); -#endif - if ( ( retcode==1 && low > high ) || - ( x >= low && x <= high ) ) - return (double)x; - fprintf(stderr,"Please type an double in range [%g,%g].\n", - low,high); - } -} - - diff --git a/src/mesch/pxop.c b/src/mesch/pxop.c deleted file mode 100755 index 909e10a826..0000000000 --- a/src/mesch/pxop.c +++ /dev/null @@ -1,358 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* pxop.c 1.5 12/03/87 */ - - -#include -#include "matrix.h" - -static char rcsid[] = "pxop.c,v 1.1 1997/12/04 17:55:44 hines Exp"; - -/********************************************************************** -Note: A permutation is often interpreted as a matrix - (i.e. a permutation matrix). 
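/* A cut-down sketch of the interactive branch of fin_int() from the otherio.c
 * file removed above: prompt on stderr, read a line, and re-prompt until an
 * integer inside [low, high] is supplied.  The non-tty path, the
 * ignore-the-check-when-low>high rule and the library's error() handling are
 * omitted; the function name and prompt text are illustrative only. */
#include <stdio.h>

static int read_int_in_range(const char *prompt, int low, int high)
{
    char line[128];
    int x;

    for (;;) {
        fprintf(stderr, "%s [%d,%d]: ", prompt, low, high);
        if (fgets(line, sizeof line, stdin) == NULL)
            return low;                    /* EOF: fall back to the lower bound */
        if (sscanf(line, "%d", &x) == 1 && x >= low && x <= high)
            return x;
        fprintf(stderr, "Please type an integer in range [%d,%d].\n",
                low, high);
    }
}

int main(void)
{
    int n = read_int_in_range("matrix dimension", 1, 100);
    printf("dimension = %d\n", n);
    return 0;
}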
- A permutation px represents a permutation matrix P where - P[i][j] == 1 if and only if px->pe[i] == j -**********************************************************************/ - - -/* px_inv -- invert permutation -- in situ - -- taken from ACM Collected Algorithms #250 */ -PERM *px_inv(px,out) -PERM *px, *out; -{ - int i, j, k, n, *p; - - out = px_copy(px, out); - n = out->size; - p = (int *)(out->pe); - for ( n--; n>=0; n-- ) - { - i = p[n]; - if ( i < 0 ) p[n] = -1 - i; - else if ( i != n ) - { - k = n; - while (TRUE) - { - if ( i < 0 || i >= out->size ) - error(E_BOUNDS,"px_inv"); - j = p[i]; p[i] = -1 - k; - if ( j == n ) - { p[n] = i; break; } - k = i; i = j; - } - } - } - return out; -} - -/* px_mlt -- permutation multiplication (composition) */ -PERM *px_mlt(px1,px2,out) -PERM *px1,*px2,*out; -{ - u_int i,size; - - if ( px1==(PERM *)NULL || px2==(PERM *)NULL ) - error(E_NULL,"px_mlt"); - if ( px1->size != px2->size ) - error(E_SIZES,"px_mlt"); - if ( px1 == out || px2 == out ) - error(E_INSITU,"px_mlt"); - if ( out==(PERM *)NULL || out->size < px1->size ) - out = px_resize(out,px1->size); - - size = px1->size; - for ( i=0; ipe[i] >= size ) - error(E_BOUNDS,"px_mlt"); - else - out->pe[i] = px1->pe[px2->pe[i]]; - - return out; -} - -/* px_vec -- permute vector */ -VEC *px_vec(px,vector,out) -PERM *px; -VEC *vector,*out; -{ - u_int old_i, i, size, start; - Real tmp; - - if ( px==(PERM *)NULL || vector==(VEC *)NULL ) - error(E_NULL,"px_vec"); - if ( px->size > vector->dim ) - error(E_SIZES,"px_vec"); - if ( out==(VEC *)NULL || out->dim < vector->dim ) - out = v_resize(out,vector->dim); - - size = px->size; - if ( size == 0 ) - return v_copy(vector,out); - if ( out != vector ) - { - for ( i=0; ipe[i] >= size ) - error(E_BOUNDS,"px_vec"); - else - out->ve[i] = vector->ve[px->pe[i]]; - } - else - { /* in situ algorithm */ - start = 0; - while ( start < size ) - { - old_i = start; - i = px->pe[old_i]; - if ( i >= size ) - { - start++; - continue; - } - tmp = vector->ve[start]; - while ( TRUE ) - { - vector->ve[old_i] = vector->ve[i]; - px->pe[old_i] = i+size; - old_i = i; - i = px->pe[old_i]; - if ( i >= size ) - break; - if ( i == start ) - { - vector->ve[old_i] = tmp; - px->pe[old_i] = i+size; - break; - } - } - start++; - } - - for ( i = 0; i < size; i++ ) - if ( px->pe[i] < size ) - error(E_BOUNDS,"px_vec"); - else - px->pe[i] = px->pe[i]-size; - } - - return out; -} - -/* pxinv_vec -- apply the inverse of px to x, returning the result in out */ -VEC *pxinv_vec(px,x,out) -PERM *px; -VEC *x, *out; -{ - u_int i, size; - - if ( ! px || ! x ) - error(E_NULL,"pxinv_vec"); - if ( px->size > x->dim ) - error(E_SIZES,"pxinv_vec"); - /* if ( x == out ) - error(E_INSITU,"pxinv_vec"); */ - if ( ! 
out || out->dim < x->dim ) - out = v_resize(out,x->dim); - - size = px->size; - if ( size == 0 ) - return v_copy(x,out); - if ( out != x ) - { - for ( i=0; ipe[i] >= size ) - error(E_BOUNDS,"pxinv_vec"); - else - out->ve[px->pe[i]] = x->ve[i]; - } - else - { /* in situ algorithm --- cheat's way out */ - px_inv(px,px); - px_vec(px,x,out); - px_inv(px,px); - } - - return out; -} - - - -/* px_transp -- transpose elements of permutation - -- Really multiplying a permutation by a transposition */ -PERM *px_transp(px,i1,i2) -PERM *px; /* permutation to transpose */ -u_int i1,i2; /* elements to transpose */ -{ - u_int temp; - - if ( px==(PERM *)NULL ) - error(E_NULL,"px_transp"); - - if ( i1 < px->size && i2 < px->size ) - { - temp = px->pe[i1]; - px->pe[i1] = px->pe[i2]; - px->pe[i2] = temp; - } - - return px; -} - -/* myqsort -- a cheap implementation of Quicksort on integers - -- returns number of swaps */ -static int myqsort(a,num) -int *a, num; -{ - int i, j, tmp, v; - int numswaps; - - numswaps = 0; - if ( num <= 1 ) - return 0; - - i = 0; j = num; v = a[0]; - for ( ; ; ) - { - while ( a[++i] < v ) - ; - while ( a[--j] > v ) - ; - if ( i >= j ) break; - - tmp = a[i]; - a[i] = a[j]; - a[j] = tmp; - numswaps++; - } - - tmp = a[0]; - a[0] = a[j]; - a[j] = tmp; - if ( j != 0 ) - numswaps++; - - numswaps += myqsort(&a[0],j); - numswaps += myqsort(&a[j+1],num-(j+1)); - - return numswaps; -} - - -/* px_sign -- compute the ``sign'' of a permutation = +/-1 where - px is the product of an even/odd # transpositions */ -int px_sign(px) -PERM *px; -{ - int numtransp; - PERM *px2; - - if ( px==(PERM *)NULL ) - error(E_NULL,"px_sign"); - px2 = px_copy(px,PNULL); - numtransp = myqsort((int*)px2->pe,px2->size); - px_free(px2); - - return ( numtransp % 2 ) ? -1 : 1; -} - - -/* px_cols -- permute columns of matrix A; out = A.px' - -- May NOT be in situ */ -MAT *px_cols(px,A,out) -PERM *px; -MAT *A, *out; -{ - int i, j, m, n, px_j; - Real **A_me, **out_me; -#ifdef ANSI_C - MAT *m_get(int, int); -#else - extern MAT *m_get(); -#endif - - if ( ! A || ! px ) - error(E_NULL,"px_cols"); - if ( px->size != A->n ) - error(E_SIZES,"px_cols"); - if ( A == out ) - error(E_INSITU,"px_cols"); - m = A->m; n = A->n; - if ( ! out || out->m != m || out->n != n ) - out = m_get(m,n); - A_me = A->me; out_me = out->me; - - for ( j = 0; j < n; j++ ) - { - px_j = px->pe[j]; - if ( px_j >= n ) - error(E_BOUNDS,"px_cols"); - for ( i = 0; i < m; i++ ) - out_me[i][px_j] = A_me[i][j]; - } - - return out; -} - -/* px_rows -- permute columns of matrix A; out = px.A - -- May NOT be in situ */ -MAT *px_rows(px,A,out) -PERM *px; -MAT *A, *out; -{ - int i, j, m, n, px_i; - Real **A_me, **out_me; -#ifdef ANSI_C - MAT *m_get(int, int); -#else - extern MAT *m_get(); -#endif - - if ( ! A || ! px ) - error(E_NULL,"px_rows"); - if ( px->size != A->m ) - error(E_SIZES,"px_rows"); - if ( A == out ) - error(E_INSITU,"px_rows"); - m = A->m; n = A->n; - if ( ! out || out->m != m || out->n != n ) - out = m_get(m,n); - A_me = A->me; out_me = out->me; - - for ( i = 0; i < m; i++ ) - { - px_i = px->pe[i]; - if ( px_i >= m ) - error(E_BOUNDS,"px_rows"); - for ( j = 0; j < n; j++ ) - out_me[i][j] = A_me[px_i][j]; - } - - return out; -} - diff --git a/src/mesch/qrfactor.c b/src/mesch/qrfactor.c deleted file mode 100755 index c6fd4f55bc..0000000000 --- a/src/mesch/qrfactor.c +++ /dev/null @@ -1,516 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. 
Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This file contains the routines needed to perform QR factorisation - of matrices, as well as Householder transformations. - The internal "factored form" of a matrix A is not quite standard. - The diagonal of A is replaced by the diagonal of R -- not by the 1st non-zero - entries of the Householder vectors. The 1st non-zero entries are held in - the diag parameter of QRfactor(). The reason for this non-standard - representation is that it enables direct use of the Usolve() function - rather than requiring that a seperate function be written just for this case. - See, e.g., QRsolve() below for more details. - -*/ - - -static char rcsid[] = "qrfactor.c,v 1.1 1997/12/04 17:55:45 hines Exp"; - -#include -#include "matrix2.h" -#include - - - - - -#define sign(x) ((x) > 0.0 ? 1 : ((x) < 0.0 ? -1 : 0 )) - -extern VEC *Usolve(); /* See matrix2.h */ - -/* Note: The usual representation of a Householder transformation is taken - to be: - P = I - beta.u.uT - where beta = 2/(uT.u) and u is called the Householder vector - */ - -/* QRfactor -- forms the QR factorisation of A -- factorisation stored in - compact form as described above ( not quite standard format ) */ -/* MAT *QRfactor(A,diag,beta) */ -MAT *QRfactor(A,diag) -MAT *A; -VEC *diag /* ,*beta */; -{ - u_int k,limit; - Real beta; - static VEC *tmp1=VNULL; - - if ( ! A || ! diag ) - error(E_NULL,"QRfactor"); - limit = min(A->m,A->n); - if ( diag->dim < limit ) - error(E_SIZES,"QRfactor"); - - tmp1 = v_resize(tmp1,A->m); - MEM_STAT_REG(tmp1,TYPE_VEC); - - for ( k=0; kve[k],tmp1,&A->me[k][k]); */ - hhvec(tmp1,k,&beta,tmp1,&A->me[k][k]); - diag->ve[k] = tmp1->ve[k]; - - /* apply H/holder vector to remaining columns */ - /* hhtrcols(A,k,k+1,tmp1,beta->ve[k]); */ - hhtrcols(A,k,k+1,tmp1,beta); - } - - return (A); -} - -/* QRCPfactor -- forms the QR factorisation of A with column pivoting - -- factorisation stored in compact form as described above - ( not quite standard format ) */ -/* MAT *QRCPfactor(A,diag,beta,px) */ -MAT *QRCPfactor(A,diag,px) -MAT *A; -VEC *diag /* , *beta */; -PERM *px; -{ - u_int i, i_max, j, k, limit; - static VEC *gamma=VNULL, *tmp1=VNULL, *tmp2=VNULL; - Real beta, maxgamma, sum, tmp; - - if ( ! A || ! diag || ! 
px ) - error(E_NULL,"QRCPfactor"); - limit = min(A->m,A->n); - if ( diag->dim < limit || px->size != A->n ) - error(E_SIZES,"QRCPfactor"); - - tmp1 = v_resize(tmp1,A->m); - tmp2 = v_resize(tmp2,A->m); - gamma = v_resize(gamma,A->n); - MEM_STAT_REG(tmp1,TYPE_VEC); - MEM_STAT_REG(tmp2,TYPE_VEC); - MEM_STAT_REG(gamma,TYPE_VEC); - - /* initialise gamma and px */ - for ( j=0; jn; j++ ) - { - px->pe[j] = j; - sum = 0.0; - for ( i=0; im; i++ ) - sum += square(A->me[i][j]); - gamma->ve[j] = sum; - } - - for ( k=0; kve[k]; - for ( i=k+1; in; i++ ) - /* Loop invariant:maxgamma=gamma[i_max] - >=gamma[l];l=k,...,i-1 */ - if ( gamma->ve[i] > maxgamma ) - { maxgamma = gamma->ve[i]; i_max = i; } - - /* swap columns if necessary */ - if ( i_max != k ) - { - /* swap gamma values */ - tmp = gamma->ve[k]; - gamma->ve[k] = gamma->ve[i_max]; - gamma->ve[i_max] = tmp; - - /* update column permutation */ - px_transp(px,k,i_max); - - /* swap columns of A */ - for ( i=0; im; i++ ) - { - tmp = A->me[i][k]; - A->me[i][k] = A->me[i][i_max]; - A->me[i][i_max] = tmp; - } - } - - /* get H/holder vector for the k-th column */ - get_col(A,k,tmp1); - /* hhvec(tmp1,k,&beta->ve[k],tmp1,&A->me[k][k]); */ - hhvec(tmp1,k,&beta,tmp1,&A->me[k][k]); - diag->ve[k] = tmp1->ve[k]; - - /* apply H/holder vector to remaining columns */ - /* hhtrcols(A,k,k+1,tmp1,beta->ve[k]); */ - hhtrcols(A,k,k+1,tmp1,beta); - - /* update gamma values */ - for ( j=k+1; jn; j++ ) - gamma->ve[j] -= square(A->me[k][j]); - } - - return (A); -} - -/* Qsolve -- solves Qx = b, Q is an orthogonal matrix stored in compact - form a la QRfactor() -- may be in-situ */ -/* VEC *_Qsolve(QR,diag,beta,b,x,tmp) */ -VEC *_Qsolve(QR,diag,b,x,tmp) -MAT *QR; -VEC *diag /* ,*beta */ , *b, *x, *tmp; -{ - u_int dynamic; - int k, limit; - Real beta, r_ii, tmp_val; - - limit = min(QR->m,QR->n); - dynamic = FALSE; - if ( ! QR || ! diag || ! b ) - error(E_NULL,"_Qsolve"); - if ( diag->dim < limit || b->dim != QR->m ) - error(E_SIZES,"_Qsolve"); - x = v_resize(x,QR->m); - if ( tmp == VNULL ) - dynamic = TRUE; - tmp = v_resize(tmp,QR->m); - - /* apply H/holder transforms in normal order */ - x = v_copy(b,x); - for ( k = 0 ; k < limit ; k++ ) - { - get_col(QR,k,tmp); - r_ii = fabs(tmp->ve[k]); - tmp->ve[k] = diag->ve[k]; - tmp_val = (r_ii*fabs(diag->ve[k])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - /* hhtrvec(tmp,beta->ve[k],k,x,x); */ - hhtrvec(tmp,beta,k,x,x); - } - - if ( dynamic ) - V_FREE(tmp); - - return (x); -} - -/* makeQ -- constructs orthogonal matrix from Householder vectors stored in - compact QR form */ -/* MAT *makeQ(QR,diag,beta,Qout) */ -MAT *makeQ(QR,diag,Qout) -MAT *QR,*Qout; -VEC *diag /* , *beta */; -{ - static VEC *tmp1=VNULL,*tmp2=VNULL; - u_int i, limit; - Real beta, r_ii, tmp_val; - int j; - - limit = min(QR->m,QR->n); - if ( ! QR || ! 
diag ) - error(E_NULL,"makeQ"); - if ( diag->dim < limit ) - error(E_SIZES,"makeQ"); - if ( Qout==(MAT *)NULL || Qout->m < QR->m || Qout->n < QR->m ) - Qout = m_get(QR->m,QR->m); - - tmp1 = v_resize(tmp1,QR->m); /* contains basis vec & columns of Q */ - tmp2 = v_resize(tmp2,QR->m); /* contains H/holder vectors */ - MEM_STAT_REG(tmp1,TYPE_VEC); - MEM_STAT_REG(tmp2,TYPE_VEC); - - for ( i=0; im ; i++ ) - { /* get i-th column of Q */ - /* set up tmp1 as i-th basis vector */ - for ( j=0; jm ; j++ ) - tmp1->ve[j] = 0.0; - tmp1->ve[i] = 1.0; - - /* apply H/h transforms in reverse order */ - for ( j=limit-1; j>=0; j-- ) - { - get_col(QR,j,tmp2); - r_ii = fabs(tmp2->ve[j]); - tmp2->ve[j] = diag->ve[j]; - tmp_val = (r_ii*fabs(diag->ve[j])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - /* hhtrvec(tmp2,beta->ve[j],j,tmp1,tmp1); */ - hhtrvec(tmp2,beta,j,tmp1,tmp1); - } - - /* insert into Q */ - set_col(Qout,i,tmp1); - } - - return (Qout); -} - -/* makeR -- constructs upper triangular matrix from QR (compact form) - -- may be in-situ (all it does is zero the lower 1/2) */ -MAT *makeR(QR,Rout) -MAT *QR,*Rout; -{ - u_int i,j; - - if ( QR==(MAT *)NULL ) - error(E_NULL,"makeR"); - Rout = m_copy(QR,Rout); - - for ( i=1; im; i++ ) - for ( j=0; jn && jme[i][j] = 0.0; - - return (Rout); -} - -/* QRsolve -- solves the system Q.R.x=b where Q & R are stored in compact form - -- returns x, which is created if necessary */ -/* VEC *QRsolve(QR,diag,beta,b,x) */ -VEC *QRsolve(QR,diag,b,x) -MAT *QR; -VEC *diag /* , *beta */ , *b, *x; -{ - int limit; - static VEC *tmp = VNULL; - - if ( ! QR || ! diag || ! b ) - error(E_NULL,"QRsolve"); - limit = min(QR->m,QR->n); - if ( diag->dim < limit || b->dim != QR->m ) - error(E_SIZES,"QRsolve"); - tmp = v_resize(tmp,limit); - MEM_STAT_REG(tmp,TYPE_VEC); - - x = v_resize(x,QR->n); - _Qsolve(QR,diag,b,x,tmp); - x = Usolve(QR,x,x,0.0); - v_resize(x,QR->n); - - return x; -} - -/* QRCPsolve -- solves A.x = b where A is factored by QRCPfactor() - -- assumes that A is in the compact factored form */ -/* VEC *QRCPsolve(QR,diag,beta,pivot,b,x) */ -VEC *QRCPsolve(QR,diag,pivot,b,x) -MAT *QR; -VEC *diag /* , *beta */; -PERM *pivot; -VEC *b, *x; -{ - static VEC *tmp=VNULL; - - if ( ! QR || ! diag || ! pivot || ! 
b ) - error(E_NULL,"QRCPsolve"); - if ( (QR->m > diag->dim &&QR->n > diag->dim) || QR->n != pivot->size ) - error(E_SIZES,"QRCPsolve"); - - tmp = QRsolve(QR,diag /* , beta */ ,b,tmp); - MEM_STAT_REG(tmp,TYPE_VEC); - x = pxinv_vec(pivot,tmp,x); - - return x; -} - -/* Umlt -- compute out = upper_triang(U).x - -- may be in situ */ -static VEC *Umlt(U,x,out) -MAT *U; -VEC *x, *out; -{ - int i, limit; - - if ( U == MNULL || x == VNULL ) - error(E_NULL,"Umlt"); - limit = min(U->m,U->n); - if ( limit != x->dim ) - error(E_SIZES,"Umlt"); - if ( out == VNULL || out->dim < limit ) - out = v_resize(out,limit); - - for ( i = 0; i < limit; i++ ) - out->ve[i] = __ip__(&(x->ve[i]),&(U->me[i][i]),limit - i); - return out; -} - -/* UTmlt -- returns out = upper_triang(U)^T.x */ -static VEC *UTmlt(U,x,out) -MAT *U; -VEC *x, *out; -{ - Real sum; - int i, j, limit; - - if ( U == MNULL || x == VNULL ) - error(E_NULL,"UTmlt"); - limit = min(U->m,U->n); - if ( out == VNULL || out->dim < limit ) - out = v_resize(out,limit); - - for ( i = limit-1; i >= 0; i-- ) - { - sum = 0.0; - for ( j = 0; j <= i; j++ ) - sum += U->me[j][i]*x->ve[j]; - out->ve[i] = sum; - } - return out; -} - -/* QRTsolve -- solve A^T.sc = c where the QR factors of A are stored in - compact form - -- returns sc - -- original due to Mike Osborne modified Wed 09th Dec 1992 */ -VEC *QRTsolve(A,diag,c,sc) -MAT *A; -VEC *diag, *c, *sc; -{ - int i, j, k, n, p; - Real beta, r_ii, s, tmp_val; - - if ( ! A || ! diag || ! c ) - error(E_NULL,"QRTsolve"); - if ( diag->dim < min(A->m,A->n) ) - error(E_SIZES,"QRTsolve"); - sc = v_resize(sc,A->m); - n = sc->dim; - p = c->dim; - if ( n == p ) - k = p-2; - else - k = p-1; - v_zero(sc); - sc->ve[0] = c->ve[0]/A->me[0][0]; - if ( n == 1) - return sc; - if ( p > 1) - { - for ( i = 1; i < p; i++ ) - { - s = 0.0; - for ( j = 0; j < i; j++ ) - s += A->me[j][i]*sc->ve[j]; - if ( A->me[i][i] == 0.0 ) - error(E_SING,"QRTsolve"); - sc->ve[i]=(c->ve[i]-s)/A->me[i][i]; - } - } - for (i = k; i >= 0; i--) - { - s = diag->ve[i]*sc->ve[i]; - for ( j = i+1; j < n; j++ ) - s += A->me[j][i]*sc->ve[j]; - r_ii = fabs(A->me[i][i]); - tmp_val = (r_ii*fabs(diag->ve[i])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - tmp_val = beta*s; - sc->ve[i] -= tmp_val*diag->ve[i]; - for ( j = i+1; j < n; j++ ) - sc->ve[j] -= tmp_val*A->me[j][i]; - } - - return sc; -} - -/* QRcondest -- returns an estimate of the 2-norm condition number of the - matrix factorised by QRfactor() or QRCPfactor() - -- note that as Q does not affect the 2-norm condition number, - it is not necessary to pass the diag, beta (or pivot) vectors - -- generates a lower bound on the true condition number - -- if the matrix is exactly singular, HUGE is returned - -- note that QRcondest() is likely to be more reliable for - matrices factored using QRCPfactor() */ -double QRcondest(QR) -MAT *QR; -{ - static VEC *y=VNULL; - Real norm1, norm2, sum, tmp1, tmp2; - int i, j, limit; - - if ( QR == MNULL ) - error(E_NULL,"QRcondest"); - - limit = min(QR->m,QR->n); - for ( i = 0; i < limit; i++ ) - if ( QR->me[i][i] == 0.0 ) - return HUGE; - - y = v_resize(y,limit); - MEM_STAT_REG(y,TYPE_VEC); - /* use the trick for getting a unit vector y with ||R.y||_inf small - from the LU condition estimator */ - for ( i = 0; i < limit; i++ ) - { - sum = 0.0; - for ( j = 0; j < i; j++ ) - sum -= QR->me[j][i]*y->ve[j]; - sum -= (sum < 0.0) ? 
1.0 : -1.0; - y->ve[i] = sum / QR->me[i][i]; - } - UTmlt(QR,y,y); - - /* now apply inverse power method to R^T.R */ - for ( i = 0; i < 3; i++ ) - { - tmp1 = v_norm2(y); - sv_mlt(1/tmp1,y,y); - UTsolve(QR,y,y,0.0); - tmp2 = v_norm2(y); - sv_mlt(1/v_norm2(y),y,y); - Usolve(QR,y,y,0.0); - } - /* now compute approximation for ||R^{-1}||_2 */ - norm1 = sqrt(tmp1)*sqrt(tmp2); - - /* now use complementary approach to compute approximation to ||R||_2 */ - for ( i = limit-1; i >= 0; i-- ) - { - sum = 0.0; - for ( j = i+1; j < limit; j++ ) - sum += QR->me[i][j]*y->ve[j]; - y->ve[i] = (sum >= 0.0) ? 1.0 : -1.0; - y->ve[i] = (QR->me[i][i] >= 0.0) ? y->ve[i] : - y->ve[i]; - } - - /* now apply power method to R^T.R */ - for ( i = 0; i < 3; i++ ) - { - tmp1 = v_norm2(y); - sv_mlt(1/tmp1,y,y); - Umlt(QR,y,y); - tmp2 = v_norm2(y); - sv_mlt(1/tmp2,y,y); - UTmlt(QR,y,y); - } - norm2 = sqrt(tmp1)*sqrt(tmp2); - - /* printf("QRcondest: norm1 = %g, norm2 = %g\n",norm1,norm2); */ - - return norm1*norm2; -} diff --git a/src/mesch/schur.c b/src/mesch/schur.c deleted file mode 100755 index 84f432334d..0000000000 --- a/src/mesch/schur.c +++ /dev/null @@ -1,669 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
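/* A minimal, self-contained sketch of the Householder transformation that
 * QRfactor() above applies column by column: P = I - beta*u*u^T with
 * beta = 2/(u^T u), u chosen so that P*x is zero below its first entry, as
 * described in the note at the top of the deleted qrfactor.c.  Plain C arrays
 * replace the Meschach VEC type; the vector length and sample data are
 * illustrative only. */
#include <math.h>
#include <stdio.h>

#define DIM 4

int main(void)
{
    double x[DIM] = {3.0, 1.0, -2.0, 0.5};
    double u[DIM], px[DIM];
    double norm2 = 0.0, utu = 0.0, dot = 0.0, alpha, beta;
    int i;

    for (i = 0; i < DIM; i++)
        norm2 += x[i] * x[i];

    /* alpha = -sign(x[0]) * ||x||_2 avoids cancellation when forming u[0] */
    alpha = (x[0] >= 0.0) ? -sqrt(norm2) : sqrt(norm2);

    for (i = 0; i < DIM; i++)               /* u = x - alpha*e1 */
        u[i] = x[i];
    u[0] -= alpha;

    for (i = 0; i < DIM; i++)
        utu += u[i] * u[i];
    beta = 2.0 / utu;                       /* beta = 2/(u^T u) */

    for (i = 0; i < DIM; i++)
        dot += u[i] * x[i];
    for (i = 0; i < DIM; i++)               /* P*x = x - beta*(u^T x)*u */
        px[i] = x[i] - beta * dot * u[i];

    /* px is now (alpha, 0, 0, 0) up to rounding */
    for (i = 0; i < DIM; i++)
        printf("P*x[%d] = % .6f\n", i, px[i]);
    return 0;
}
/* As its header comment explains, QRfactor() keeps one such u per column in
 * the strict lower triangle of A, with each u's first nonzero entry held in
 * the separate diag vector, so that the R part can be fed straight to
 * Usolve(). */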
-** -***************************************************************************/ - - -/* - File containing routines for computing the Schur decomposition - of a real non-symmetric matrix - See also: hessen.c -*/ - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -static char rcsid[] = "schur.c,v 1.1 1997/12/04 17:55:46 hines Exp"; - - - -#ifndef ANSI_C -static void hhldr3(x,y,z,nu1,beta,newval) -double x, y, z; -Real *nu1, *beta, *newval; -#else -static void hhldr3(double x, double y, double z, - Real *nu1, Real *beta, Real *newval) -#endif -{ - Real alpha; - - if ( x >= 0.0 ) - alpha = sqrt(x*x+y*y+z*z); - else - alpha = -sqrt(x*x+y*y+z*z); - *nu1 = x + alpha; - *beta = 1.0/(alpha*(*nu1)); - *newval = alpha; -} - -#ifndef ANSI_C -static void hhldr3cols(A,k,j0,beta,nu1,nu2,nu3) -MAT *A; -int k, j0; -double beta, nu1, nu2, nu3; -#else -static void hhldr3cols(MAT *A, int k, int j0, double beta, - double nu1, double nu2, double nu3) -#endif -{ - Real **A_me, ip, prod; - int j, n; - - if ( k < 0 || k+3 > A->m || j0 < 0 ) - error(E_BOUNDS,"hhldr3cols"); - A_me = A->me; n = A->n; - - /* printf("hhldr3cols:(l.%d) j0 = %d, k = %d, A at 0x%lx, m = %d, n = %d\n", - __LINE__, j0, k, (long)A, A->m, A->n); */ - /* printf("hhldr3cols: A (dumped) =\n"); m_dump(stdout,A); */ - - for ( j = j0; j < n; j++ ) - { - /***** - ip = nu1*A_me[k][j] + nu2*A_me[k+1][j] + nu3*A_me[k+2][j]; - prod = ip*beta; - A_me[k][j] -= prod*nu1; - A_me[k+1][j] -= prod*nu2; - A_me[k+2][j] -= prod*nu3; - *****/ - /* printf("hhldr3cols: j = %d\n", j); */ - - ip = nu1*m_entry(A,k,j)+nu2*m_entry(A,k+1,j)+nu3*m_entry(A,k+2,j); - prod = ip*beta; - /***** - m_set_val(A,k ,j,m_entry(A,k ,j) - prod*nu1); - m_set_val(A,k+1,j,m_entry(A,k+1,j) - prod*nu2); - m_set_val(A,k+2,j,m_entry(A,k+2,j) - prod*nu3); - *****/ - m_add_val(A,k ,j,-prod*nu1); - m_add_val(A,k+1,j,-prod*nu2); - m_add_val(A,k+2,j,-prod*nu3); - - } - /* printf("hhldr3cols:(l.%d) j0 = %d, k = %d, m = %d, n = %d\n", - __LINE__, j0, k, A->m, A->n); */ - /* putc('\n',stdout); */ -} - -#ifndef ANSI_C -static void hhldr3rows(A,k,i0,beta,nu1,nu2,nu3) -MAT *A; -int k, i0; -double beta, nu1, nu2, nu3; -#else -static void hhldr3rows(MAT *A, int k, int i0, double beta, - double nu1, double nu2, double nu3) -#endif -{ - Real **A_me, ip, prod; - int i, m; - - /* printf("hhldr3rows:(l.%d) A at 0x%lx\n", __LINE__, (long)A); */ - /* printf("hhldr3rows: k = %d\n", k); */ - if ( k < 0 || k+3 > A->n ) - error(E_BOUNDS,"hhldr3rows"); - A_me = A->me; m = A->m; - i0 = min(i0,m-1); - - for ( i = 0; i <= i0; i++ ) - { - /**** - ip = nu1*A_me[i][k] + nu2*A_me[i][k+1] + nu3*A_me[i][k+2]; - prod = ip*beta; - A_me[i][k] -= prod*nu1; - A_me[i][k+1] -= prod*nu2; - A_me[i][k+2] -= prod*nu3; - ****/ - - ip = nu1*m_entry(A,i,k)+nu2*m_entry(A,i,k+1)+nu3*m_entry(A,i,k+2); - prod = ip*beta; - m_add_val(A,i,k , - prod*nu1); - m_add_val(A,i,k+1, - prod*nu2); - m_add_val(A,i,k+2, - prod*nu3); - - } -} - -/* schur -- computes the Schur decomposition of the matrix A in situ - -- optionally, gives Q matrix such that Q^T.A.Q is upper triangular - -- returns upper triangular Schur matrix */ -MAT *schur(A,Q) -MAT *A, *Q; -{ - int i, j, iter, k, k_min, k_max, k_tmp, n, split; - Real beta2, c, discrim, dummy, nu1, s, t, tmp, x, y, z; - Real **A_me; - Real sqrt_macheps; - static VEC *diag=VNULL, *beta=VNULL; - - if ( ! 
A ) - error(E_NULL,"schur"); - if ( A->m != A->n || ( Q && Q->m != Q->n ) ) - error(E_SQUARE,"schur"); - if ( Q != MNULL && Q->m != A->m ) - error(E_SIZES,"schur"); - n = A->n; - diag = v_resize(diag,A->n); - beta = v_resize(beta,A->n); - MEM_STAT_REG(diag,TYPE_VEC); - MEM_STAT_REG(beta,TYPE_VEC); - /* compute Hessenberg form */ - Hfactor(A,diag,beta); - - /* save Q if necessary */ - if ( Q ) - Q = makeHQ(A,diag,beta,Q); - makeH(A,A); - - sqrt_macheps = sqrt(MACHEPS); - - k_min = 0; A_me = A->me; - - while ( k_min < n ) - { - Real a00, a01, a10, a11; - double scale, t, numer, denom; - - /* find k_max to suit: - submatrix k_min..k_max should be irreducible */ - k_max = n-1; - for ( k = k_min; k < k_max; k++ ) - /* if ( A_me[k+1][k] == 0.0 ) */ - if ( m_entry(A,k+1,k) == 0.0 ) - { k_max = k; break; } - - if ( k_max <= k_min ) - { - k_min = k_max + 1; - continue; /* outer loop */ - } - - /* check to see if we have a 2 x 2 block - with complex eigenvalues */ - if ( k_max == k_min + 1 ) - { - /* tmp = A_me[k_min][k_min] - A_me[k_max][k_max]; */ - a00 = m_entry(A,k_min,k_min); - a01 = m_entry(A,k_min,k_max); - a10 = m_entry(A,k_max,k_min); - a11 = m_entry(A,k_max,k_max); - tmp = a00 - a11; - /* discrim = tmp*tmp + - 4*A_me[k_min][k_max]*A_me[k_max][k_min]; */ - discrim = tmp*tmp + - 4*a01*a10; - if ( discrim < 0.0 ) - { /* yes -- e-vals are complex - -- put 2 x 2 block in form [a b; c a]; - then eigenvalues have real part a & imag part sqrt(|bc|) */ - numer = - tmp; - denom = ( a01+a10 >= 0.0 ) ? - (a01+a10) + sqrt((a01+a10)*(a01+a10)+tmp*tmp) : - (a01+a10) - sqrt((a01+a10)*(a01+a10)+tmp*tmp); - if ( denom != 0.0 ) - { /* t = s/c = numer/denom */ - t = numer/denom; - scale = c = 1.0/sqrt(1+t*t); - s = c*t; - } - else - { - c = 1.0; - s = 0.0; - } - rot_cols(A,k_min,k_max,c,s,A); - rot_rows(A,k_min,k_max,c,s,A); - if ( Q != MNULL ) - rot_cols(Q,k_min,k_max,c,s,Q); - k_min = k_max + 1; - continue; - } - else /* discrim >= 0; i.e. block has two real eigenvalues */ - { /* no -- e-vals are not complex; - split 2 x 2 block and continue */ - /* s/c = numer/denom */ - numer = ( tmp >= 0.0 ) ? - - tmp - sqrt(discrim) : - tmp + sqrt(discrim); - denom = 2*a01; - if ( fabs(numer) < fabs(denom) ) - { /* t = s/c = numer/denom */ - t = numer/denom; - scale = c = 1.0/sqrt(1+t*t); - s = c*t; - } - else if ( numer != 0.0 ) - { /* t = c/s = denom/numer */ - t = denom/numer; - scale = 1.0/sqrt(1+t*t); - c = fabs(t)*scale; - s = ( t >= 0.0 ) ? scale : -scale; - } - else /* numer == denom == 0 */ - { - c = 0.0; - s = 1.0; - } - rot_cols(A,k_min,k_max,c,s,A); - rot_rows(A,k_min,k_max,c,s,A); - /* A->me[k_max][k_min] = 0.0; */ - if ( Q != MNULL ) - rot_cols(Q,k_min,k_max,c,s,Q); - k_min = k_max + 1; /* go to next block */ - continue; - } - } - - /* now have r x r block with r >= 2: - apply Francis QR step until block splits */ - split = FALSE; iter = 0; - while ( ! 
split ) - { - iter++; - - /* set up Wilkinson/Francis complex shift */ - k_tmp = k_max - 1; - - a00 = m_entry(A,k_tmp,k_tmp); - a01 = m_entry(A,k_tmp,k_max); - a10 = m_entry(A,k_max,k_tmp); - a11 = m_entry(A,k_max,k_max); - - /* treat degenerate cases differently - -- if there are still no splits after five iterations - and the bottom 2 x 2 looks degenerate, force it to - split */ - if ( iter >= 5 && - fabs(a00-a11) < sqrt_macheps*(fabs(a00)+fabs(a11)) && - (fabs(a01) < sqrt_macheps*(fabs(a00)+fabs(a11)) || - fabs(a10) < sqrt_macheps*(fabs(a00)+fabs(a11))) ) - { - if ( fabs(a01) < sqrt_macheps*(fabs(a00)+fabs(a11)) ) - m_set_val(A,k_tmp,k_max,0.0); - if ( fabs(a10) < sqrt_macheps*(fabs(a00)+fabs(a11)) ) - { - m_set_val(A,k_max,k_tmp,0.0); - split = TRUE; - continue; - } - } - - s = a00 + a11; - t = a00*a11 - a01*a10; - - /* break loop if a 2 x 2 complex block */ - if ( k_max == k_min + 1 && s*s < 4.0*t ) - { - split = TRUE; - continue; - } - - /* perturb shift if convergence is slow */ - if ( (iter % 10) == 0 ) - { s += iter*0.02; t += iter*0.02; - } - - /* set up Householder transformations */ - k_tmp = k_min + 1; - /******************** - x = A_me[k_min][k_min]*A_me[k_min][k_min] + - A_me[k_min][k_tmp]*A_me[k_tmp][k_min] - - s*A_me[k_min][k_min] + t; - y = A_me[k_tmp][k_min]* - (A_me[k_min][k_min]+A_me[k_tmp][k_tmp]-s); - if ( k_min + 2 <= k_max ) - z = A_me[k_tmp][k_min]*A_me[k_min+2][k_tmp]; - else - z = 0.0; - ********************/ - - a00 = m_entry(A,k_min,k_min); - a01 = m_entry(A,k_min,k_tmp); - a10 = m_entry(A,k_tmp,k_min); - a11 = m_entry(A,k_tmp,k_tmp); - - /******************** - a00 = A->me[k_min][k_min]; - a01 = A->me[k_min][k_tmp]; - a10 = A->me[k_tmp][k_min]; - a11 = A->me[k_tmp][k_tmp]; - ********************/ - x = a00*a00 + a01*a10 - s*a00 + t; - y = a10*(a00+a11-s); - if ( k_min + 2 <= k_max ) - z = a10* /* m_entry(A,k_min+2,k_tmp) */ A->me[k_min+2][k_tmp]; - else - z = 0.0; - - for ( k = k_min; k <= k_max-1; k++ ) - { - if ( k < k_max - 1 ) - { - hhldr3(x,y,z,&nu1,&beta2,&dummy); - tracecatch(hhldr3cols(A,k,max(k-1,0), beta2,nu1,y,z),"schur"); - tracecatch(hhldr3rows(A,k,min(n-1,k+3),beta2,nu1,y,z),"schur"); - if ( Q != MNULL ) - hhldr3rows(Q,k,n-1,beta2,nu1,y,z); - } - else - { - givens(x,y,&c,&s); - rot_cols(A,k,k+1,c,s,A); - rot_rows(A,k,k+1,c,s,A); - if ( Q ) - rot_cols(Q,k,k+1,c,s,Q); - } - /* if ( k >= 2 ) - m_set_val(A,k,k-2,0.0); */ - /* x = A_me[k+1][k]; */ - x = m_entry(A,k+1,k); - if ( k <= k_max - 2 ) - /* y = A_me[k+2][k];*/ - y = m_entry(A,k+2,k); - else - y = 0.0; - if ( k <= k_max - 3 ) - /* z = A_me[k+3][k]; */ - z = m_entry(A,k+3,k); - else - z = 0.0; - } - /* if ( k_min > 0 ) - m_set_val(A,k_min,k_min-1,0.0); - if ( k_max < n - 1 ) - m_set_val(A,k_max+1,k_max,0.0); */ - for ( k = k_min; k <= k_max-2; k++ ) - { - /* zero appropriate sub-diagonals */ - m_set_val(A,k+2,k,0.0); - if ( k < k_max-2 ) - m_set_val(A,k+3,k,0.0); - } - - /* test to see if matrix should split */ - for ( k = k_min; k < k_max; k++ ) - if ( fabs(A_me[k+1][k]) < MACHEPS* - (fabs(A_me[k][k])+fabs(A_me[k+1][k+1])) ) - { A_me[k+1][k] = 0.0; split = TRUE; } - } - } - - /* polish up A by zeroing strictly lower triangular elements - and small sub-diagonal elements */ - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < i-1; j++ ) - A_me[i][j] = 0.0; - for ( i = 0; i < A->m - 1; i++ ) - if ( fabs(A_me[i+1][i]) < MACHEPS* - (fabs(A_me[i][i])+fabs(A_me[i+1][i+1])) ) - A_me[i+1][i] = 0.0; - - return A; -} - -/* schur_vals -- compute real & imaginary parts of eigenvalues - -- assumes T contains 
a block upper triangular matrix - as produced by schur() - -- real parts stored in real_pt, imaginary parts in imag_pt */ -void schur_evals(T,real_pt,imag_pt) -MAT *T; -VEC *real_pt, *imag_pt; -{ - int i, n; - Real discrim, **T_me; - Real diff, sum, tmp; - - if ( ! T || ! real_pt || ! imag_pt ) - error(E_NULL,"schur_evals"); - if ( T->m != T->n ) - error(E_SQUARE,"schur_evals"); - n = T->n; T_me = T->me; - real_pt = v_resize(real_pt,(u_int)n); - imag_pt = v_resize(imag_pt,(u_int)n); - - i = 0; - while ( i < n ) - { - if ( i < n-1 && T_me[i+1][i] != 0.0 ) - { /* should be a complex eigenvalue */ - sum = 0.5*(T_me[i][i]+T_me[i+1][i+1]); - diff = 0.5*(T_me[i][i]-T_me[i+1][i+1]); - discrim = diff*diff + T_me[i][i+1]*T_me[i+1][i]; - if ( discrim < 0.0 ) - { /* yes -- complex e-vals */ - real_pt->ve[i] = real_pt->ve[i+1] = sum; - imag_pt->ve[i] = sqrt(-discrim); - imag_pt->ve[i+1] = - imag_pt->ve[i]; - } - else - { /* no -- actually both real */ - tmp = sqrt(discrim); - real_pt->ve[i] = sum + tmp; - real_pt->ve[i+1] = sum - tmp; - imag_pt->ve[i] = imag_pt->ve[i+1] = 0.0; - } - i += 2; - } - else - { /* real eigenvalue */ - real_pt->ve[i] = T_me[i][i]; - imag_pt->ve[i] = 0.0; - i++; - } - } -} - -/* schur_vecs -- returns eigenvectors computed from the real Schur - decomposition of a matrix - -- T is the block upper triangular Schur matrix - -- Q is the orthognal matrix where A = Q.T.Q^T - -- if Q is null, the eigenvectors of T are returned - -- X_re is the real part of the matrix of eigenvectors, - and X_im is the imaginary part of the matrix. - -- X_re is returned */ -MAT *schur_vecs(T,Q,X_re,X_im) -MAT *T, *Q, *X_re, *X_im; -{ - int i, j, limit; - Real t11_re, t11_im, t12, t21, t22_re, t22_im; - Real l_re, l_im, det_re, det_im, invdet_re, invdet_im, - val1_re, val1_im, val2_re, val2_im, - tmp_val1_re, tmp_val1_im, tmp_val2_re, tmp_val2_im, **T_me; - Real sum, diff, discrim, magdet, norm, scale; - static VEC *tmp1_re=VNULL, *tmp1_im=VNULL, - *tmp2_re=VNULL, *tmp2_im=VNULL; - - if ( ! T || ! X_re ) - error(E_NULL,"schur_vecs"); - if ( T->m != T->n || X_re->m != X_re->n || - ( Q != MNULL && Q->m != Q->n ) || - ( X_im != MNULL && X_im->m != X_im->n ) ) - error(E_SQUARE,"schur_vecs"); - if ( T->m != X_re->m || - ( Q != MNULL && T->m != Q->m ) || - ( X_im != MNULL && T->m != X_im->m ) ) - error(E_SIZES,"schur_vecs"); - - tmp1_re = v_resize(tmp1_re,T->m); - tmp1_im = v_resize(tmp1_im,T->m); - tmp2_re = v_resize(tmp2_re,T->m); - tmp2_im = v_resize(tmp2_im,T->m); - MEM_STAT_REG(tmp1_re,TYPE_VEC); - MEM_STAT_REG(tmp1_im,TYPE_VEC); - MEM_STAT_REG(tmp2_re,TYPE_VEC); - MEM_STAT_REG(tmp2_im,TYPE_VEC); - - T_me = T->me; - i = 0; - while ( i < T->m ) - { - if ( i+1 < T->m && T->me[i+1][i] != 0.0 ) - { /* complex eigenvalue */ - sum = 0.5*(T_me[i][i]+T_me[i+1][i+1]); - diff = 0.5*(T_me[i][i]-T_me[i+1][i+1]); - discrim = diff*diff + T_me[i][i+1]*T_me[i+1][i]; - l_re = l_im = 0.0; - if ( discrim < 0.0 ) - { /* yes -- complex e-vals */ - l_re = sum; - l_im = sqrt(-discrim); - } - else /* not correct Real Schur form */ - error(E_RANGE,"schur_vecs"); - } - else - { - l_re = T_me[i][i]; - l_im = 0.0; - } - - v_zero(tmp1_im); - v_rand(tmp1_re); - sv_mlt(MACHEPS,tmp1_re,tmp1_re); - - /* solve (T-l.I)x = tmp1 */ - limit = ( l_im != 0.0 ) ? 
i+1 : i; - /* printf("limit = %d\n",limit); */ - for ( j = limit+1; j < T->m; j++ ) - tmp1_re->ve[j] = 0.0; - j = limit; - while ( j >= 0 ) - { - if ( j > 0 && T->me[j][j-1] != 0.0 ) - { /* 2 x 2 diagonal block */ - /* printf("checkpoint A\n"); */ - val1_re = tmp1_re->ve[j-1] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j-1][j+1]),limit-j); - /* printf("checkpoint B\n"); */ - val1_im = tmp1_im->ve[j-1] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j-1][j+1]),limit-j); - /* printf("checkpoint C\n"); */ - val2_re = tmp1_re->ve[j] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint D\n"); */ - val2_im = tmp1_im->ve[j] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint E\n"); */ - - t11_re = T_me[j-1][j-1] - l_re; - t11_im = - l_im; - t22_re = T_me[j][j] - l_re; - t22_im = - l_im; - t12 = T_me[j-1][j]; - t21 = T_me[j][j-1]; - - scale = fabs(T_me[j-1][j-1]) + fabs(T_me[j][j]) + - fabs(t12) + fabs(t21) + fabs(l_re) + fabs(l_im); - - det_re = t11_re*t22_re - t11_im*t22_im - t12*t21; - det_im = t11_re*t22_im + t11_im*t22_re; - magdet = det_re*det_re+det_im*det_im; - if ( sqrt(magdet) < MACHEPS*scale ) - { - det_re = MACHEPS*scale; - magdet = det_re*det_re+det_im*det_im; - } - invdet_re = det_re/magdet; - invdet_im = - det_im/magdet; - tmp_val1_re = t22_re*val1_re-t22_im*val1_im-t12*val2_re; - tmp_val1_im = t22_im*val1_re+t22_re*val1_im-t12*val2_im; - tmp_val2_re = t11_re*val2_re-t11_im*val2_im-t21*val1_re; - tmp_val2_im = t11_im*val2_re+t11_re*val2_im-t21*val1_im; - tmp1_re->ve[j-1] = invdet_re*tmp_val1_re - - invdet_im*tmp_val1_im; - tmp1_im->ve[j-1] = invdet_im*tmp_val1_re + - invdet_re*tmp_val1_im; - tmp1_re->ve[j] = invdet_re*tmp_val2_re - - invdet_im*tmp_val2_im; - tmp1_im->ve[j] = invdet_im*tmp_val2_re + - invdet_re*tmp_val2_im; - j -= 2; - } - else - { - t11_re = T_me[j][j] - l_re; - t11_im = - l_im; - magdet = t11_re*t11_re + t11_im*t11_im; - scale = fabs(T_me[j][j]) + fabs(l_re); - if ( sqrt(magdet) < MACHEPS*scale ) - { - t11_re = MACHEPS*scale; - magdet = t11_re*t11_re + t11_im*t11_im; - } - invdet_re = t11_re/magdet; - invdet_im = - t11_im/magdet; - /* printf("checkpoint F\n"); */ - val1_re = tmp1_re->ve[j] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint G\n"); */ - val1_im = tmp1_im->ve[j] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint H\n"); */ - tmp1_re->ve[j] = invdet_re*val1_re - invdet_im*val1_im; - tmp1_im->ve[j] = invdet_im*val1_re + invdet_re*val1_im; - j -= 1; - } - } - - norm = v_norm_inf(tmp1_re) + v_norm_inf(tmp1_im); - sv_mlt(1/norm,tmp1_re,tmp1_re); - if ( l_im != 0.0 ) - sv_mlt(1/norm,tmp1_im,tmp1_im); - mv_mlt(Q,tmp1_re,tmp2_re); - if ( l_im != 0.0 ) - mv_mlt(Q,tmp1_im,tmp2_im); - if ( l_im != 0.0 ) - norm = sqrt(in_prod(tmp2_re,tmp2_re)+in_prod(tmp2_im,tmp2_im)); - else - norm = v_norm2(tmp2_re); - sv_mlt(1/norm,tmp2_re,tmp2_re); - if ( l_im != 0.0 ) - sv_mlt(1/norm,tmp2_im,tmp2_im); - - if ( l_im != 0.0 ) - { - if ( ! 
X_im ) - error(E_NULL,"schur_vecs"); - set_col(X_re,i,tmp2_re); - set_col(X_im,i,tmp2_im); - sv_mlt(-1.0,tmp2_im,tmp2_im); - set_col(X_re,i+1,tmp2_re); - set_col(X_im,i+1,tmp2_im); - i += 2; - } - else - { - set_col(X_re,i,tmp2_re); - if ( X_im != MNULL ) - set_col(X_im,i,tmp1_im); /* zero vector */ - i += 1; - } - } - - return X_re; -} - diff --git a/src/mesch/solve.c b/src/mesch/solve.c deleted file mode 100755 index b31af8f6d3..0000000000 --- a/src/mesch/solve.c +++ /dev/null @@ -1,289 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. -*/ - -/* solve.c 1.2 11/25/87 */ -static char rcsid[] = "solve.c,v 1.1 1997/12/04 17:55:47 hines Exp"; - -#include -#include "matrix2.h" -#include - - - - - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* Usolve -- back substitution with optional over-riding diagonal - -- can be in-situ but doesn't need to be */ -VEC *Usolve(matrix,b,out,diag) -MAT *matrix; -VEC *b, *out; -double diag; -{ - u_int dim /* , j */; - int i, i_lim; - Real **mat_ent, *mat_row, *b_ent, *out_ent, *out_col, sum, tiny; - - if ( matrix==(MAT *)NULL || b==(VEC *)NULL ) - error(E_NULL,"Usolve"); - dim = min(matrix->m,matrix->n); - if ( b->dim < dim ) - error(E_SIZES,"Usolve"); - if ( out==(VEC *)NULL || out->dim < dim ) - out = v_resize(out,matrix->n); - mat_ent = matrix->me; b_ent = b->ve; out_ent = out->ve; - - tiny = 10.0/HUGE_VAL; - - for ( i=dim-1; i>=0; i-- ) - if ( b_ent[i] != 0.0 ) - break; - else - out_ent[i] = 0.0; - i_lim = i; - - for ( ; i>=0; i-- ) - { - sum = b_ent[i]; - mat_row = &(mat_ent[i][i+1]); - out_col = &(out_ent[i+1]); - sum -= __ip__(mat_row,out_col,i_lim-i); - /****************************************************** - for ( j=i+1; j<=i_lim; j++ ) - sum -= mat_ent[i][j]*out_ent[j]; - sum -= (*mat_row++)*(*out_col++); - ******************************************************/ - if ( diag==0.0 ) - { - if ( fabs(mat_ent[i][i]) <= tiny*fabs(sum) ) - error(E_SING,"Usolve"); - else - out_ent[i] = sum/mat_ent[i][i]; - } - else - out_ent[i] = sum/diag; - } - - return (out); -} - -/* Lsolve -- forward elimination with (optional) default diagonal value */ -VEC *Lsolve(matrix,b,out,diag) -MAT *matrix; -VEC *b,*out; -double diag; -{ - u_int dim, i, i_lim /* , j */; - Real **mat_ent, *mat_row, *b_ent, *out_ent, *out_col, sum, tiny; - - if ( matrix==(MAT *)NULL || b==(VEC *)NULL ) - 
error(E_NULL,"Lsolve"); - dim = min(matrix->m,matrix->n); - if ( b->dim < dim ) - error(E_SIZES,"Lsolve"); - if ( out==(VEC *)NULL || out->dim < dim ) - out = v_resize(out,matrix->n); - mat_ent = matrix->me; b_ent = b->ve; out_ent = out->ve; - - for ( i=0; im,U->n); - if ( b->dim < dim ) - error(E_SIZES,"UTsolve"); - out = v_resize(out,U->n); - U_me = U->me; b_ve = b->ve; out_ve = out->ve; - - tiny = 10.0/HUGE_VAL; - - for ( i=0; idim); - MEM_COPY(&(b_ve[i_lim]),&(out_ve[i_lim]),(dim-i_lim)*sizeof(Real)); - } - - if ( diag == 0.0 ) - { - for ( ; im,A->n); - if ( b->dim < dim ) - error(E_SIZES,"Dsolve"); - x = v_resize(x,A->n); - - tiny = 10.0/HUGE_VAL; - - dim = b->dim; - for ( i=0; ime[i][i]) <= tiny*fabs(b->ve[i]) ) - error(E_SING,"Dsolve"); - else - x->ve[i] = b->ve[i]/A->me[i][i]; - - return (x); -} - -/* LTsolve -- back substitution with optional over-riding diagonal - using the LOWER triangular part of matrix - -- can be in-situ but doesn't need to be */ -VEC *LTsolve(L,b,out,diag) -MAT *L; -VEC *b, *out; -double diag; -{ - u_int dim; - int i, i_lim; - Real **L_me, *b_ve, *out_ve, tmp, invdiag, tiny; - - if ( ! L || ! b ) - error(E_NULL,"LTsolve"); - dim = min(L->m,L->n); - if ( b->dim < dim ) - error(E_SIZES,"LTsolve"); - out = v_resize(out,L->n); - L_me = L->me; b_ve = b->ve; out_ve = out->ve; - - tiny = 10.0/HUGE_VAL; - - for ( i=dim-1; i>=0; i-- ) - if ( b_ve[i] != 0.0 ) - break; - i_lim = i; - - if ( b != out ) - { - __zero__(out_ve,out->dim); - MEM_COPY(b_ve,out_ve,(i_lim+1)*sizeof(Real)); - } - - if ( diag == 0.0 ) - { - for ( ; i>=0; i-- ) - { - tmp = L_me[i][i]; - if ( fabs(tmp) <= tiny*fabs(out_ve[i]) ) - error(E_SING,"LTsolve"); - out_ve[i] /= tmp; - __mltadd__(out_ve,L_me[i],-out_ve[i],i); - } - } - else - { - invdiag = 1.0/diag; - for ( ; i>=0; i-- ) - { - out_ve[i] *= invdiag; - __mltadd__(out_ve,L_me[i],-out_ve[i],i); - } - } - - return (out); -} diff --git a/src/mesch/sparse.c b/src/mesch/sparse.c deleted file mode 100755 index 7e6ccef19e..0000000000 --- a/src/mesch/sparse.c +++ /dev/null @@ -1,1035 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - -/* - Sparse matrix package - See also: sparse.h, matrix.h - */ - -#include -#include -#include -#include "sparse.h" - - -static char rcsid[] = "sparse.c,v 1.1 1997/12/04 17:55:48 hines Exp"; - -#define MINROWLEN 10 - - - -/* sp_get_val -- returns the (i,j) entry of the sparse matrix A */ -double sp_get_val(A,i,j) -SPMAT *A; -int i, j; -{ - SPROW *r; - int idx; - - if ( A == SMNULL ) - error(E_NULL,"sp_get_val"); - if ( i < 0 || i >= A->m || j < 0 || j >= A->n ) - error(E_SIZES,"sp_get_val"); - - r = A->row+i; - idx = sprow_idx(r,j); - if ( idx < 0 ) - return 0.0; - /* else */ - return r->elt[idx].val; -} - -/* sp_set_val -- sets the (i,j) entry of the sparse matrix A */ -double sp_set_val(A,i,j,val) -SPMAT *A; -int i, j; -double val; -{ - SPROW *r; - int idx, idx2, new_len; - - if ( A == SMNULL ) - error(E_NULL,"sp_set_val"); - if ( i < 0 || i >= A->m || j < 0 || j >= A->n ) - error(E_SIZES,"sp_set_val"); - - r = A->row+i; - idx = sprow_idx(r,j); - /* printf("sp_set_val: idx = %d\n",idx); */ - if ( idx >= 0 ) - { r->elt[idx].val = val; return val; } - /* else */ if ( idx < -1 ) - { - /* Note: this destroys the column & diag access paths */ - A->flag_col = A->flag_diag = FALSE; - /* shift & insert new value */ - idx = -(idx+2); /* this is the intended insertion index */ - if ( r->len >= r->maxlen ) - { - r->len = r->maxlen; - new_len = max(2*r->maxlen+1,5); - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->row[i].maxlen*sizeof(row_elt), - new_len*sizeof(row_elt)); - } - - r->elt = RENEW(r->elt,new_len,row_elt); - if ( ! r->elt ) /* can't allocate */ - error(E_MEM,"sp_set_val"); - r->maxlen = 2*r->maxlen+1; - } - for ( idx2 = r->len-1; idx2 >= idx; idx2-- ) - MEM_COPY((char *)(&(r->elt[idx2])), - (char *)(&(r->elt[idx2+1])),sizeof(row_elt)); - /************************************************************ - if ( idx < r->len ) - MEM_COPY((char *)(&(r->elt[idx])),(char *)(&(r->elt[idx+1])), - (r->len-idx)*sizeof(row_elt)); - ************************************************************/ - r->len++; - r->elt[idx].col = j; - return r->elt[idx].val = val; - } - /* else -- idx == -1, error in index/matrix! */ - return 0.0; -} - -/* sp_mv_mlt -- sparse matrix/dense vector multiply - -- result is in out, which is returned unless out==NULL on entry - -- if out==NULL on entry then the result vector is created */ -VEC *sp_mv_mlt(A,x,out) -SPMAT *A; -VEC *x, *out; -{ - int i, j_idx, m, n, max_idx; - Real sum, *x_ve; - SPROW *r; - row_elt *elts; - - if ( ! A || ! x ) - error(E_NULL,"sp_mv_mlt"); - if ( x->dim != A->n ) - error(E_SIZES,"sp_mv_mlt"); - if ( ! out || out->dim < A->m ) - out = v_resize(out,A->m); - if ( out == x ) - error(E_INSITU,"sp_mv_mlt"); - m = A->m; n = A->n; - x_ve = x->ve; - - for ( i = 0; i < m; i++ ) - { - sum = 0.0; - r = &(A->row[i]); - max_idx = r->len; - elts = r->elt; - for ( j_idx = 0; j_idx < max_idx; j_idx++, elts++ ) - sum += elts->val*x_ve[elts->col]; - out->ve[i] = sum; - } - return out; -} - -/* sp_vm_mlt -- sparse matrix/dense vector multiply from left - -- result is in out, which is returned unless out==NULL on entry - -- if out==NULL on entry then result vector is created & returned */ -VEC *sp_vm_mlt(A,x,out) -SPMAT *A; -VEC *x, *out; -{ - int i, j_idx, m, n, max_idx; - Real tmp, *x_ve, *out_ve; - SPROW *r; - row_elt *elts; - - if ( ! A || ! x ) - error(E_NULL,"sp_vm_mlt"); - if ( x->dim != A->m ) - error(E_SIZES,"sp_vm_mlt"); - if ( ! 
out || out->dim < A->n ) - out = v_resize(out,A->n); - if ( out == x ) - error(E_INSITU,"sp_vm_mlt"); - - m = A->m; n = A->n; - v_zero(out); - x_ve = x->ve; out_ve = out->ve; - - for ( i = 0; i < m; i++ ) - { - r = A->row+i; - max_idx = r->len; - elts = r->elt; - tmp = x_ve[i]; - for ( j_idx = 0; j_idx < max_idx; j_idx++, elts++ ) - out_ve[elts->col] += elts->val*tmp; - } - - return out; -} - - -/* sp_get -- get sparse matrix - -- len is number of elements available for each row without - allocating further memory */ -SPMAT *sp_get(m,n,maxlen) -int m, n, maxlen; -{ - SPMAT *A; - SPROW *rows; - int i; - - if ( m < 0 || n < 0 ) - error(E_NEG,"sp_get"); - - maxlen = max(maxlen,1); - - A = NEW(SPMAT); - if ( ! A ) /* can't allocate */ - error(E_MEM,"sp_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,sizeof(SPMAT)); - mem_numvar(TYPE_SPMAT,1); - } - /* fprintf(stderr,"Have SPMAT structure\n"); */ - - A->row = rows = NEW_A(m,SPROW); - if ( ! A->row ) /* can't allocate */ - error(E_MEM,"sp_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,m*sizeof(SPROW)); - } - /* fprintf(stderr,"Have row structure array\n"); */ - - A->start_row = NEW_A(n,int); - A->start_idx = NEW_A(n,int); - if ( ! A->start_row || ! A->start_idx ) /* can't allocate */ - error(E_MEM,"sp_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,2*n*sizeof(int)); - } - for ( i = 0; i < n; i++ ) - A->start_row[i] = A->start_idx[i] = -1; - /* fprintf(stderr,"Have start_row array\n"); */ - - A->m = A->max_m = m; - A->n = A->max_n = n; - - for ( i = 0; i < m; i++, rows++ ) - { - rows->elt = NEW_A(maxlen,row_elt); - if ( ! rows->elt ) - error(E_MEM,"sp_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,maxlen*sizeof(row_elt)); - } - /* fprintf(stderr,"Have row %d element array\n",i); */ - rows->len = 0; - rows->maxlen = maxlen; - rows->diag = -1; - } - - return A; -} - - -/* sp_free -- frees up the memory for a sparse matrix */ -int sp_free(A) -SPMAT *A; -{ - SPROW *r; - int i; - - if ( ! A ) - return -1; - if ( A->start_row != (int *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->max_n*sizeof(int),0); - } - free((char *)(A->start_row)); - } - if ( A->start_idx != (int *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->max_n*sizeof(int),0); - } - - free((char *)(A->start_idx)); - } - if ( ! A->row ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,sizeof(SPMAT),0); - mem_numvar(TYPE_SPMAT,-1); - } - - free((char *)A); - return 0; - } - for ( i = 0; i < A->m; i++ ) - { - r = &(A->row[i]); - if ( r->elt != (row_elt *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->row[i].maxlen*sizeof(row_elt),0); - } - free((char *)(r->elt)); - } - } - - if (mem_info_is_on()) { - if (A->row) - mem_bytes(TYPE_SPMAT,A->max_m*sizeof(SPROW),0); - mem_bytes(TYPE_SPMAT,sizeof(SPMAT),0); - mem_numvar(TYPE_SPMAT,-1); - } - - free((char *)(A->row)); - free((char *)A); - - return 0; -} - - -/* sp_copy -- constructs a copy of a given matrix - -- note that the max_len fields (etc) are no larger in the copy - than necessary - -- result is returned */ -SPMAT *sp_copy(A) -SPMAT *A; -{ - SPMAT *out; - SPROW *row1, *row2; - int i; - - if ( A == SMNULL ) - error(E_NULL,"sp_copy"); - if ( ! (out=NEW(SPMAT)) ) - error(E_MEM,"sp_copy"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,sizeof(SPMAT)); - mem_numvar(TYPE_SPMAT,1); - } - out->m = out->max_m = A->m; out->n = out->max_n = A->n; - - /* set up rows */ - if ( ! 
(out->row=NEW_A(A->m,SPROW)) ) - error(E_MEM,"sp_copy"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,A->m*sizeof(SPROW)); - } - for ( i = 0; i < A->m; i++ ) - { - row1 = &(A->row[i]); - row2 = &(out->row[i]); - if ( ! (row2->elt=NEW_A(max(row1->len,3),row_elt)) ) - error(E_MEM,"sp_copy"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,max(row1->len,3)*sizeof(row_elt)); - } - row2->len = row1->len; - row2->maxlen = max(row1->len,3); - row2->diag = row1->diag; - MEM_COPY((char *)(row1->elt),(char *)(row2->elt), - row1->len*sizeof(row_elt)); - } - - /* set up start arrays -- for column access */ - if ( ! (out->start_idx=NEW_A(A->n,int)) || - ! (out->start_row=NEW_A(A->n,int)) ) - error(E_MEM,"sp_copy"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,2*A->n*sizeof(int)); - } - MEM_COPY((char *)(A->start_idx),(char *)(out->start_idx), - A->n*sizeof(int)); - MEM_COPY((char *)(A->start_row),(char *)(out->start_row), - A->n*sizeof(int)); - - return out; -} - -/* sp_col_access -- set column access path; i.e. nxt_row, nxt_idx fields - -- returns A */ -SPMAT *sp_col_access(A) -SPMAT *A; -{ - int i, j, j_idx, len, m, n; - SPROW *row; - row_elt *r_elt; - int *start_row, *start_idx; - - if ( A == SMNULL ) - error(E_NULL,"sp_col_access"); - - m = A->m; n = A->n; - - /* initialise start_row and start_idx */ - start_row = A->start_row; start_idx = A->start_idx; - for ( j = 0; j < n; j++ ) - { *start_row++ = -1; *start_idx++ = -1; } - - start_row = A->start_row; start_idx = A->start_idx; - - /* now work UP the rows, setting nxt_row, nxt_idx fields */ - for ( i = m-1; i >= 0; i-- ) - { - row = &(A->row[i]); - r_elt = row->elt; - len = row->len; - for ( j_idx = 0; j_idx < len; j_idx++, r_elt++ ) - { - j = r_elt->col; - r_elt->nxt_row = start_row[j]; - r_elt->nxt_idx = start_idx[j]; - start_row[j] = i; - start_idx[j] = j_idx; - } - } - - A->flag_col = TRUE; - return A; -} - -/* sp_diag_access -- set diagonal access path(s) */ -SPMAT *sp_diag_access(A) -SPMAT *A; -{ - int i, m; - SPROW *row; - - if ( A == SMNULL ) - error(E_NULL,"sp_diag_access"); - - m = A->m; - - row = A->row; - for ( i = 0; i < m; i++, row++ ) - row->diag = sprow_idx(row,i); - - A->flag_diag = TRUE; - - return A; -} - -/* sp_m2dense -- convert a sparse matrix to a dense one */ -MAT *sp_m2dense(A,out) -SPMAT *A; -MAT *out; -{ - int i, j_idx; - SPROW *row; - row_elt *elt; - - if ( ! A ) - error(E_NULL,"sp_m2dense"); - if ( ! out || out->m < A->m || out->n < A->n ) - out = m_get(A->m,A->n); - - m_zero(out); - for ( i = 0; i < A->m; i++ ) - { - row = &(A->row[i]); - elt = row->elt; - for ( j_idx = 0; j_idx < row->len; j_idx++, elt++ ) - out->me[i][elt->col] = elt->val; - } - - return out; -} - - -/* C = A+B, can be in situ */ -SPMAT *sp_add(A,B,C) -SPMAT *A, *B, *C; -{ - int i, in_situ; - SPROW *rc; - static SPROW *tmp; - - if ( ! A || ! B ) - error(E_NULL,"sp_add"); - if ( A->m != B->m || A->n != B->n ) - error(E_SIZES,"sp_add"); - if (C == A || C == B) - in_situ = TRUE; - else in_situ = FALSE; - - if ( ! 
C ) - C = sp_get(A->m,A->n,5); - else { - if ( C->m != A->m || C->n != A->n ) - error(E_SIZES,"sp_add"); - if (!in_situ) sp_zero(C); - } - - if (tmp == (SPROW *)NULL && in_situ) { - tmp = sprow_get(MINROWLEN); - MEM_STAT_REG(tmp,TYPE_SPROW); - } - - if (in_situ) - for (i=0; i < A->m; i++) { - rc = &(C->row[i]); - sprow_add(&(A->row[i]),&(B->row[i]),0,tmp,TYPE_SPROW); - sprow_resize(rc,tmp->len,TYPE_SPMAT); - MEM_COPY(tmp->elt,rc->elt,tmp->len*sizeof(row_elt)); - rc->len = tmp->len; - } - else - for (i=0; i < A->m; i++) { - sprow_add(&(A->row[i]),&(B->row[i]),0,&(C->row[i]),TYPE_SPMAT); - } - - C->flag_col = C->flag_diag = FALSE; - - return C; -} - -/* C = A-B, cannot be in situ */ -SPMAT *sp_sub(A,B,C) -SPMAT *A, *B, *C; -{ - int i, in_situ; - SPROW *rc; - static SPROW *tmp; - - if ( ! A || ! B ) - error(E_NULL,"sp_sub"); - if ( A->m != B->m || A->n != B->n ) - error(E_SIZES,"sp_sub"); - if (C == A || C == B) - in_situ = TRUE; - else in_situ = FALSE; - - if ( ! C ) - C = sp_get(A->m,A->n,5); - else { - if ( C->m != A->m || C->n != A->n ) - error(E_SIZES,"sp_sub"); - if (!in_situ) sp_zero(C); - } - - if (tmp == (SPROW *)NULL && in_situ) { - tmp = sprow_get(MINROWLEN); - MEM_STAT_REG(tmp,TYPE_SPROW); - } - - if (in_situ) - for (i=0; i < A->m; i++) { - rc = &(C->row[i]); - sprow_sub(&(A->row[i]),&(B->row[i]),0,tmp,TYPE_SPROW); - sprow_resize(rc,tmp->len,TYPE_SPMAT); - MEM_COPY(tmp->elt,rc->elt,tmp->len*sizeof(row_elt)); - rc->len = tmp->len; - } - else - for (i=0; i < A->m; i++) { - sprow_sub(&(A->row[i]),&(B->row[i]),0,&(C->row[i]),TYPE_SPMAT); - } - - C->flag_col = C->flag_diag = FALSE; - - return C; -} - -/* C = A+alpha*B, cannot be in situ */ -SPMAT *sp_mltadd(A,B,alpha,C) -SPMAT *A, *B, *C; -double alpha; -{ - int i, in_situ; - SPROW *rc; - static SPROW *tmp; - - if ( ! A || ! B ) - error(E_NULL,"sp_mltadd"); - if ( A->m != B->m || A->n != B->n ) - error(E_SIZES,"sp_mltadd"); - if (C == A || C == B) - in_situ = TRUE; - else in_situ = FALSE; - - if ( ! C ) - C = sp_get(A->m,A->n,5); - else { - if ( C->m != A->m || C->n != A->n ) - error(E_SIZES,"sp_mltadd"); - if (!in_situ) sp_zero(C); - } - - if (tmp == (SPROW *)NULL && in_situ) { - tmp = sprow_get(MINROWLEN); - MEM_STAT_REG(tmp,TYPE_SPROW); - } - - if (in_situ) - for (i=0; i < A->m; i++) { - rc = &(C->row[i]); - sprow_mltadd(&(A->row[i]),&(B->row[i]),alpha,0,tmp,TYPE_SPROW); - sprow_resize(rc,tmp->len,TYPE_SPMAT); - MEM_COPY(tmp->elt,rc->elt,tmp->len*sizeof(row_elt)); - rc->len = tmp->len; - } - else - for (i=0; i < A->m; i++) { - sprow_mltadd(&(A->row[i]),&(B->row[i]),alpha,0, - &(C->row[i]),TYPE_SPMAT); - } - - C->flag_col = C->flag_diag = FALSE; - - return C; -} - - - -/* B = alpha*A, can be in situ */ -SPMAT *sp_smlt(A,alpha,B) -SPMAT *A, *B; -double alpha; -{ - int i; - - if ( ! A ) - error(E_NULL,"sp_smlt"); - if ( ! B ) - B = sp_get(A->m,A->n,5); - else - if ( A->m != B->m || A->n != B->n ) - error(E_SIZES,"sp_smlt"); - - for (i=0; i < A->m; i++) { - sprow_smlt(&(A->row[i]),alpha,0,&(B->row[i]),TYPE_SPMAT); - } - return B; -} - - - -/* sp_zero -- zero all the (represented) elements of a sparse matrix */ -SPMAT *sp_zero(A) -SPMAT *A; -{ - int i, idx, len; - row_elt *elt; - - if ( ! 
A ) - error(E_NULL,"sp_zero"); - - for ( i = 0; i < A->m; i++ ) - { - elt = A->row[i].elt; - len = A->row[i].len; - for ( idx = 0; idx < len; idx++ ) - (*elt++).val = 0.0; - } - - return A; -} - -/* sp_copy2 -- copy sparse matrix (type 2) - -- keeps structure of the OUT matrix */ -SPMAT *sp_copy2(A,OUT) -SPMAT *A, *OUT; -{ - int i /* , idx, len1, len2 */; - SPROW *r1, *r2; - static SPROW *scratch = (SPROW *)NULL; - /* row_elt *e1, *e2; */ - - if ( ! A ) - error(E_NULL,"sp_copy2"); - if ( ! OUT ) - OUT = sp_get(A->m,A->n,10); - if ( ! scratch ) { - scratch = sprow_xpd(scratch,MINROWLEN,TYPE_SPROW); - MEM_STAT_REG(scratch,TYPE_SPROW); - } - - if ( OUT->m < A->m ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->max_m*sizeof(SPROW), - A->m*sizeof(SPROW)); - } - - OUT->row = RENEW(OUT->row,A->m,SPROW); - if ( ! OUT->row ) - error(E_MEM,"sp_copy2"); - - for ( i = OUT->m; i < A->m; i++ ) - { - OUT->row[i].elt = NEW_A(MINROWLEN,row_elt); - if ( ! OUT->row[i].elt ) - error(E_MEM,"sp_copy2"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,MINROWLEN*sizeof(row_elt)); - } - - OUT->row[i].maxlen = MINROWLEN; - OUT->row[i].len = 0; - } - OUT->m = A->m; - } - - OUT->flag_col = OUT->flag_diag = FALSE; - /* sp_zero(OUT); */ - - for ( i = 0; i < A->m; i++ ) - { - r1 = &(A->row[i]); r2 = &(OUT->row[i]); - sprow_copy(r1,r2,scratch,TYPE_SPROW); - if ( r2->maxlen < scratch->len ) - sprow_xpd(r2,scratch->len,TYPE_SPMAT); - MEM_COPY((char *)(scratch->elt),(char *)(r2->elt), - scratch->len*sizeof(row_elt)); - r2->len = scratch->len; - /******************************************************* - e1 = r1->elt; e2 = r2->elt; - len1 = r1->len; len2 = r2->len; - for ( idx = 0; idx < len2; idx++, e2++ ) - e2->val = 0.0; - for ( idx = 0; idx < len1; idx++, e1++ ) - sprow_set_val(r2,e1->col,e1->val); - *******************************************************/ - } - - sp_col_access(OUT); - return OUT; -} - -/* sp_resize -- resize a sparse matrix - -- don't destroying any contents if possible - -- returns resized matrix */ -SPMAT *sp_resize(A,m,n) -SPMAT *A; -int m, n; -{ - int i, len; - SPROW *r; - - if (m < 0 || n < 0) - error(E_NEG,"sp_resize"); - - if ( ! A ) - return sp_get(m,n,10); - - if (m == A->m && n == A->n) - return A; - - if ( m <= A->max_m ) - { - for ( i = A->m; i < m; i++ ) - A->row[i].len = 0; - A->m = m; - } - else - { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,A->max_m*sizeof(SPROW), - m*sizeof(SPROW)); - } - - A->row = RENEW(A->row,(unsigned)m,SPROW); - if ( ! A->row ) - error(E_MEM,"sp_resize"); - for ( i = A->m; i < m; i++ ) - { - if ( ! (A->row[i].elt = NEW_A(MINROWLEN,row_elt)) ) - error(E_MEM,"sp_resize"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT,0,MINROWLEN*sizeof(row_elt)); - } - A->row[i].len = 0; A->row[i].maxlen = MINROWLEN; - } - A->m = A->max_m = m; - } - - /* update number of rows */ - A->n = n; - - /* do we need to increase the size of start_idx[] and start_row[] ? */ - if ( n > A->max_n ) - { /* only have to update the start_idx & start_row arrays */ - if (mem_info_is_on()) - { - mem_bytes(TYPE_SPMAT,2*A->max_n*sizeof(int), - 2*n*sizeof(int)); - } - - A->start_row = RENEW(A->start_row,(unsigned)n,int); - A->start_idx = RENEW(A->start_idx,(unsigned)n,int); - if ( ! A->start_row || ! 
A->start_idx ) - error(E_MEM,"sp_resize"); - A->max_n = n; /* ...and update max_n */ - - return A; - } - - if ( n <= A->n ) - /* make sure that all rows are truncated just before column n */ - for ( i = 0; i < A->m; i++ ) - { - r = &(A->row[i]); - len = sprow_idx(r,n); - if ( len < 0 ) - len = -(len+2); - if ( len < 0 ) - error(E_MEM,"sp_resize"); - r->len = len; - } - - return A; -} - - -/* sp_compact -- removes zeros and near-zeros from a sparse matrix */ -SPMAT *sp_compact(A,tol) -SPMAT *A; -double tol; -{ - int i, idx1, idx2; - SPROW *r; - row_elt *elt1, *elt2; - - if ( ! A ) - error(E_NULL,"sp_compact"); - if ( tol < 0.0 ) - error(E_RANGE,"sp_compact"); - - A->flag_col = A->flag_diag = FALSE; - - for ( i = 0; i < A->m; i++ ) - { - r = &(A->row[i]); - elt1 = elt2 = r->elt; - idx1 = idx2 = 0; - while ( idx1 < r->len ) - { - /* printf("# sp_compact: idx1 = %d, idx2 = %d\n",idx1,idx2); */ - if ( fabs(elt1->val) <= tol ) - { idx1++; elt1++; continue; } - if ( elt1 != elt2 ) - MEM_COPY(elt1,elt2,sizeof(row_elt)); - idx1++; elt1++; - idx2++; elt2++; - } - r->len = idx2; - } - - return A; -} - -/* varying number of arguments */ - -#ifdef ANSI_C - -/* To allocate memory to many arguments. - The function should be called: - sp_get_vars(m,n,deg,&x,&y,&z,...,NULL); - where - int m,n,deg; - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - m x n is the dimension of matrices x,y,z,... - returned value is equal to the number of allocated variables -*/ - -int sp_get_vars(int m,int n,int deg,...) -{ - va_list ap; - int i=0; - SPMAT **par; - - va_start(ap, deg); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - *par = sp_get(m,n,deg); - i++; - } - - va_end(ap); - return i; -} - - -/* To resize memory for many arguments. - The function should be called: - sp_resize_vars(m,n,&x,&y,&z,...,NULL); - where - int m,n; - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - m X n is the resized dimension of matrices x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. -*/ - -int sp_resize_vars(int m,int n,...) -{ - va_list ap; - int i=0; - SPMAT **par; - - va_start(ap, n); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - *par = sp_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - -/* To deallocate memory for many arguments. - The function should be called: - sp_free_vars(&x,&y,&z,...,NULL); - where - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - There must be at least one not NULL argument. - returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. -*/ - -int sp_free_vars(SPMAT **va,...) -{ - va_list ap; - int i=1; - SPMAT **par; - - sp_free(*va); - *va = (SPMAT *) NULL; - va_start(ap, va); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - sp_free(*par); - *par = (SPMAT *)NULL; - i++; - } - - va_end(ap); - return i; -} - - -#elif VARARGS - -/* To allocate memory to many arguments. - The function should be called: - sp_get_vars(m,n,deg,&x,&y,&z,...,NULL); - where - int m,n,deg; - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - m x n is the dimension of matrices x,y,z,... 
- returned value is equal to the number of allocated variables -*/ - -int sp_get_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, m, n, deg; - SPMAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - deg = va_arg(ap,int); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - *par = sp_get(m,n,deg); - i++; - } - - va_end(ap); - return i; -} - - -/* To resize memory for many arguments. - The function should be called: - sp_resize_vars(m,n,&x,&y,&z,...,NULL); - where - int m,n; - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - m X n is the resized dimension of matrices x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. -*/ - -int sp_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, m, n; - SPMAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - *par = sp_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - - - -/* To deallocate memory for many arguments. - The function should be called: - sp_free_vars(&x,&y,&z,...,NULL); - where - SPMAT *x, *y, *z,...; - The last argument should be NULL ! - There must be at least one not NULL argument. - returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. -*/ - -int sp_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - SPMAT **par; - - va_start(ap); - while ((par = va_arg(ap,SPMAT **))) { /* NULL ends the list*/ - sp_free(*par); - *par = (SPMAT *)NULL; - i++; - } - - va_end(ap); - return i; -} - - - -#endif - diff --git a/src/mesch/sparse.h b/src/mesch/sparse.h deleted file mode 100755 index 27df587936..0000000000 --- a/src/mesch/sparse.h +++ /dev/null @@ -1,220 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Header for sparse matrix stuff. 
- Basic sparse routines to be held in sparse.c -*/ - -/* RCS id: sparse.h,v 1.1 1997/11/03 16:15:55 hines Exp */ - -#ifndef SPARSEH - -#define SPARSEH - - -#include "matrix.h" - - -/* basic sparse types */ - -typedef struct row_elt { - int col, nxt_row, nxt_idx; - Real val; - } row_elt; - -typedef struct SPROW { - int len, maxlen, diag; - row_elt *elt; /* elt[maxlen] */ - } SPROW; - -typedef struct SPMAT { - int m, n, max_m, max_n; - char flag_col, flag_diag; - SPROW *row; /* row[max_m] */ - int *start_row; /* start_row[max_n] */ - int *start_idx; /* start_idx[max_n] */ - } SPMAT; - -/* Note that the first allocated entry in column j is start_row[j]; - This starts the chain down the columns using the nxt_row and nxt_idx - fields of each entry in each row. */ - -/* hines: change pair to mesch_pair so no conflict with c++ standard */ -typedef struct mesch_pair { int pos; Real val; } mesch_pair; - -typedef struct SPVEC { - int dim, max_dim; - mesch_pair *elt; /* elt[max_dim] */ - } SPVEC; - -#define SMNULL ((SPMAT*)NULL) -#define SVNULL ((SPVEC*)NULL) - -/* Macro for speedup */ -#define sprow_idx2(r,c,hint) \ - ( ( (hint) >= 0 && (hint) < (r)->len && \ - (r)->elt[hint].col == (c)) ? (hint) : sprow_idx((r),(c)) ) - - - -/* memory functions */ - -#ifdef ANSI_C -int sp_get_vars(int m,int n,int deg,...); -int sp_resize_vars(int m,int n,...); -int sp_free_vars(SPMAT **,...); -#elif VARARGS -int sp_get_vars(); -int sp_resize_vars(); -int sp_free_vars(); - -#endif - -/* Sparse Matrix Operations and Utilities */ -#ifndef ANSI_C -extern SPMAT *sp_get(), *sp_copy(), *sp_copy2(), - *sp_zero(), *sp_resize(), *sp_compact(); -extern double sp_get_val(), sp_set_val(); -extern VEC *sp_mv_mlt(), *sp_vm_mlt(); -extern int sp_free(); - -/* Access path operations */ -extern SPMAT *sp_col_access(); -extern SPMAT *sp_diag_access(); -extern int chk_col_access(); - -/* Input/output operations */ -extern SPMAT *sp_finput(); -extern void sp_foutput(), sp_foutput2(); - -/* algebraic operations */ -extern SPMAT *sp_smlt(), *sp_add(), *sp_sub(), *sp_mltadd(); - - -/* sparse row operations */ -extern SPROW *sprow_get(), *sprow_xpd(), *sprow_merge(), *sprow_mltadd(), - *sprow_resize(), *sprow_copy(); -extern SPROW *sprow_add(), *sprow_sub(), *sprow_smlt(); -extern double sprow_set_val(); -extern void sprow_foutput(); -extern int sprow_idx(), sprow_free(); - -/* dump */ -extern void sp_dump(), sprow_dump(); -extern MAT *sp_m2dense(); - -#else -SPMAT *sp_get(int,int,int), *sp_copy(SPMAT *), - *sp_copy2(SPMAT *,SPMAT *), - *sp_zero(SPMAT *), *sp_resize(SPMAT *,int,int), - *sp_compact(SPMAT *,double); -double sp_get_val(SPMAT *,int,int), sp_set_val(SPMAT *,int,int,double); -VEC *sp_mv_mlt(SPMAT *,VEC *,VEC *), *sp_vm_mlt(SPMAT *,VEC *,VEC *); -int sp_free(SPMAT *); - -/* Access path operations */ -SPMAT *sp_col_access(SPMAT *); -SPMAT *sp_diag_access(SPMAT *); -int chk_col_access(SPMAT *); - -/* Input/output operations */ -SPMAT *sp_finput(FILE *); -void sp_foutput(FILE *,SPMAT *), sp_foutput2(FILE *,SPMAT *); - -/* algebraic operations */ -SPMAT *sp_smlt(SPMAT *A,double alpha,SPMAT *B), - *sp_add(SPMAT *A,SPMAT *B,SPMAT *C), - *sp_sub(SPMAT *A,SPMAT *B,SPMAT *C), - *sp_mltadd(SPMAT *A,SPMAT *B,double alpha,SPMAT *C); - -/* sparse row operations */ -SPROW *sprow_get(int), *sprow_xpd(SPROW *r,int n,int type), - *sprow_resize(SPROW *r,int n,int type), - *sprow_merge(SPROW *,SPROW *,SPROW *,int type), - *sprow_copy(SPROW *,SPROW *,SPROW *,int type), - *sprow_mltadd(SPROW *,SPROW *,double,int,SPROW *,int type); -SPROW 
*sprow_add(SPROW *r1,SPROW *r2, int j0,SPROW *r_out, int type), - *sprow_sub(SPROW *r1,SPROW *r2, int j0,SPROW *r_out, int type), - *sprow_smlt(SPROW *r1,double alpha, int j0,SPROW *r_out, int type); -double sprow_set_val(SPROW *,int,double); -int sprow_free(SPROW *); -int sprow_idx(SPROW *,int); -void sprow_foutput(FILE *,SPROW *); - -/* dump */ -void sp_dump(FILE *fp, SPMAT *A); -void sprow_dump(FILE *fp, SPROW *r); -MAT *sp_m2dense(SPMAT *A,MAT *out); - -#endif - -/* MACROS */ - -#define sp_input() sp_finput(stdin) -#define sp_output(A) sp_foutput(stdout,(A)) -#define sp_output2(A) sp_foutput2(stdout,(A)) -#define row_mltadd(r1,r2,alpha,out) sprow_mltadd(r1,r2,alpha,0,out) -#define out_row(r) sprow_foutput(stdout,(r)) - -#define SP_FREE(A) ( sp_free((A)), (A)=(SPMAT *)NULL) - -/* utility for index computations -- ensures index returned >= 0 */ -#define fixindex(idx) ((idx) == -1 ? (error(E_BOUNDS,"fixindex"),0) : \ - (idx) < 0 ? -((idx)+2) : (idx)) - - -/* NOT USED */ - -/* loop over the columns in a row */ -/* -#define loop_cols(r,e,code) \ - do { int _r_idx; row_elt *e; SPROW *_t_row; \ - _t_row = (r); e = &(_t_row->elt); \ - for ( _r_idx = 0; _r_idx < _t_row->len; _r_idx++, e++ ) \ - { code; } } while ( 0 ) -*/ -/* loop over the rows in a column */ -/* -#define loop_cols(A,col,e,code) \ - do { int _r_num, _r_idx, _c; SPROW *_r; row_elt *e; \ - if ( ! (A)->flag_col ) sp_col_access((A)); \ - col_num = (col); \ - if ( col_num < 0 || col_num >= A->n ) \ - error(E_BOUNDS,"loop_cols"); \ - _r_num = (A)->start_row[_c]; _r_idx = (A)->start_idx[_c]; \ - while ( _r_num >= 0 ) { \ - _r = &((A)->row[_r_num]); \ - _r_idx = sprow_idx2(_r,_c,_r_idx); \ - if ( _r_idx < 0 ) continue; \ - e = &(_r->elt[_r_idx]); code; \ - _r_num = e->nxt_row; _r_idx = e->nxt_idx; \ - } } while ( 0 ) - -*/ - -#endif - diff --git a/src/mesch/sparse2.h b/src/mesch/sparse2.h deleted file mode 100755 index 4fa2114703..0000000000 --- a/src/mesch/sparse2.h +++ /dev/null @@ -1,95 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
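
Not part of the patch: a short sketch of the basic sparse API declared above (sp_get, sp_set_val, sp_mv_mlt, SP_FREE), kept as documentation of how these removed routines were typically driven. It assumes the caller supplies a dense VEC *x of dimension n and that the standard Meschach VNULL macro is available.

/* Illustrative sketch only, not part of the removed sources. */
#include "sparse.h"

static VEC *tridiag_apply(int n, VEC *x)
{
    SPMAT *A = sp_get(n, n, 3);        /* n x n, room for ~3 entries per row */
    VEC   *y;
    int    i;

    for (i = 0; i < n; i++) {
        sp_set_val(A, i, i, 2.0);
        if (i + 1 < n) {
            sp_set_val(A, i, i + 1, -1.0);
            sp_set_val(A, i + 1, i, -1.0);
        }
    }
    y = sp_mv_mlt(A, x, VNULL);        /* y = A*x; result vector allocated here */
    SP_FREE(A);
    return y;
}
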
-** -***************************************************************************/ - - -/* Sparse matrix factorise/solve header */ -/* RCS id: sparse2.h,v 1.1 1997/11/03 16:15:56 hines Exp */ - - - -#ifndef SPARSE2H - -#define SPARSE2H - -#include "sparse.h" - - -#ifdef ANSI_C -SPMAT *spCHfactor(SPMAT *), *spICHfactor(SPMAT *), *spCHsymb(SPMAT *); -VEC *spCHsolve(SPMAT *,VEC *,VEC *); - -SPMAT *spLUfactor(SPMAT *,PERM *,double); -SPMAT *spILUfactor(SPMAT *,double); -VEC *spLUsolve(SPMAT *,PERM *,VEC *,VEC *), - *spLUTsolve(SPMAT *,PERM *,VEC *,VEC *); - -SPMAT *spBKPfactor(SPMAT *, PERM *, PERM *, double); -VEC *spBKPsolve(SPMAT *, PERM *, PERM *, VEC *, VEC *); - -VEC *pccg(VEC *(*A)(),void *A_par,VEC *(*M_inv)(),void *M_par,VEC *b, - double tol,VEC *x); -VEC *sp_pccg(SPMAT *,SPMAT *,VEC *,double,VEC *); -VEC *cgs(VEC *(*A)(),void *A_par,VEC *b,VEC *r0,double tol,VEC *x); -VEC *sp_cgs(SPMAT *,VEC *,VEC *,double,VEC *); -VEC *lsqr(VEC *(*A)(),VEC *(*AT)(),void *A_par,VEC *b,double tol,VEC *x); -VEC *sp_lsqr(SPMAT *,VEC *,double,VEC *); -int cg_set_maxiter(int); - -void lanczos(VEC *(*A)(),void *A_par,int m,VEC *x0,VEC *a,VEC *b, - Real *beta_m1,MAT *Q); -void sp_lanczos(SPMAT *,int,VEC *,VEC *,VEC *,Real *,MAT *); -VEC *lanczos2(VEC *(*A)(),void *A_par,int m,VEC *x0,VEC *evals, - VEC *err_est); -VEC *sp_lanczos2(SPMAT *,int,VEC *,VEC *,VEC *); -extern void scan_to(SPMAT *,IVEC *,IVEC *,IVEC *,int); -extern row_elt *chase_col(SPMAT *,int,int *,int *,int); -extern row_elt *chase_past(SPMAT *,int,int *,int *,int); -extern row_elt *bump_col(SPMAT *,int,int *,int *); - -#else -extern SPMAT *spCHfactor(), *spICHfactor(), *spCHsymb(); -extern VEC *spCHsolve(); - -extern SPMAT *spLUfactor(); -extern SPMAT *spILUfactor(); -extern VEC *spLUsolve(), *spLUTsolve(); - -extern SPMAT *spBKPfactor(); -extern VEC *spBKPsolve(); - -extern VEC *pccg(), *sp_pccg(), *cgs(), *sp_cgs(), *lsqr(), *sp_lsqr(); -extern int cg_set_maxiter(); - -void lanczos(), sp_lanczos(); -VEC *lanczos2(), *sp_lanczos2(); -extern void scan_to(); -extern row_elt *chase_col(); -extern row_elt *chase_past(); -extern row_elt *bump_col(); - -#endif - - -#endif diff --git a/src/mesch/sparseio.c b/src/mesch/sparseio.c deleted file mode 100755 index a3dcf63e1a..0000000000 --- a/src/mesch/sparseio.c +++ /dev/null @@ -1,318 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
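
Not part of the patch: how the removed spBKPfactor/spBKPsolve pair was typically called, following the prototypes in sparse2.h above. The factorisation is in-situ, so A is overwritten; pivot and blocks record the symmetric permutation and the 1x1/2x2 block structure. px_get, PX_FREE and VNULL are the usual Meschach helpers and are assumed to be available.

/* Illustrative sketch only, not part of the removed sources. */
#include "sparse2.h"

static VEC *bkp_solve(SPMAT *A, VEC *b)
{
    PERM *pivot  = px_get(A->m);
    PERM *blocks = px_get(A->m);
    VEC  *x;

    spBKPfactor(A, pivot, blocks, 0.1);           /* P'AP = MDM', tol in (0,1] */
    x = spBKPsolve(A, pivot, blocks, b, VNULL);   /* solve A x = b             */

    PX_FREE(pivot);
    PX_FREE(blocks);
    return x;
}
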
-** -***************************************************************************/ - - -/* - This file has the routines for sparse matrix input/output - It works in conjunction with sparse.c, sparse.h etc -*/ - -#include -#include "sparse.h" - -static char rcsid[] = "sparseio.c,v 1.1 1997/12/04 17:55:49 hines Exp"; - - - -/* local variables */ -static char line[MAXLINE]; - -/* sp_foutput -- output sparse matrix A to file/stream fp */ -void sp_foutput(fp,A) -FILE *fp; -SPMAT *A; -{ - int i, j_idx, m /* , n */; - SPROW *rows; - row_elt *elts; - - fprintf(fp,"SparseMatrix: "); - if ( A == SMNULL ) - { - fprintf(fp,"*** NULL ***\n"); - error(E_NULL,"sp_foutput"); return; - } - fprintf(fp,"%d by %d\n",A->m,A->n); - m = A->m; /* n = A->n; */ - if ( ! (rows=A->row) ) - { - fprintf(fp,"*** NULL rows ***\n"); - error(E_NULL,"sp_foutput"); return; - } - - for ( i = 0; i < m; i++ ) - { - fprintf(fp,"row %d: ",i); - if ( ! (elts=rows[i].elt) ) - { - fprintf(fp,"*** NULL element list ***\n"); - continue; - } - for ( j_idx = 0; j_idx < rows[i].len; j_idx++ ) - { - fprintf(fp,"%d:%-20.15g ",elts[j_idx].col, - elts[j_idx].val); - if ( j_idx % 3 == 2 && j_idx != rows[i].len-1 ) - fprintf(fp,"\n "); - } - fprintf(fp,"\n"); - } - fprintf(fp,"#\n"); /* to stop looking beyond for next entry */ -} - -/* sp_foutput2 -- print out sparse matrix **as a dense matrix** - -- see output format used in matrix.h etc */ -/****************************************************************** -void sp_foutput2(fp,A) -FILE *fp; -SPMAT *A; -{ - int cnt, i, j, j_idx; - SPROW *r; - row_elt *elt; - - if ( A == SMNULL ) - { - fprintf(fp,"Matrix: *** NULL ***\n"); - return; - } - fprintf(fp,"Matrix: %d by %d\n",A->m,A->n); - for ( i = 0; i < A->m; i++ ) - { - fprintf(fp,"row %d:",i); - r = &(A->row[i]); - elt = r->elt; - cnt = j = j_idx = 0; - while ( j_idx < r->len || j < A->n ) - { - if ( j_idx >= r->len ) - fprintf(fp,"%14.9g ",0.0); - else if ( j < elt[j_idx].col ) - fprintf(fp,"%14.9g ",0.0); - else - fprintf(fp,"%14.9g ",elt[j_idx++].val); - if ( cnt++ % 4 == 3 ) - fprintf(fp,"\n"); - j++; - } - fprintf(fp,"\n"); - } -} -******************************************************************/ - -/* sp_dump -- prints ALL relevant information about the sparse matrix A */ -void sp_dump(fp,A) -FILE *fp; -SPMAT *A; -{ - int i, j, j_idx; - SPROW *rows; - row_elt *elts; - - fprintf(fp,"SparseMatrix dump:\n"); - if ( ! A ) - { fprintf(fp,"*** NULL ***\n"); return; } - fprintf(fp,"Matrix at 0x%p\n",A); - fprintf(fp,"Dimensions: %d by %d\n",A->m,A->n); - fprintf(fp,"MaxDimensions: %d by %d\n",A->max_m,A->max_n); - fprintf(fp,"flag_col = %d, flag_diag = %d\n",A->flag_col,A->flag_diag); - fprintf(fp,"start_row @ 0x%p:\n",(A->start_row)); - for ( j = 0; j < A->n; j++ ) - { - fprintf(fp,"%d ",A->start_row[j]); - if ( j % 10 == 9 ) - fprintf(fp,"\n"); - } - fprintf(fp,"\n"); - fprintf(fp,"start_idx @ 0x%p:\n",(A->start_idx)); - for ( j = 0; j < A->n; j++ ) - { - fprintf(fp,"%d ",A->start_idx[j]); - if ( j % 10 == 9 ) - fprintf(fp,"\n"); - } - fprintf(fp,"\n"); - fprintf(fp,"Rows @ 0x%p:\n", (A->row)); - if ( ! A->row ) - { fprintf(fp,"*** NULL row ***\n"); return; } - rows = A->row; - for ( i = 0; i < A->m; i++ ) - { - fprintf(fp,"row %d: len = %d, maxlen = %d, diag idx = %d\n", - i,rows[i].len,rows[i].maxlen,rows[i].diag); - fprintf(fp,"element list @ 0x%p\n",(rows[i].elt)); - if ( ! 
rows[i].elt ) - { - fprintf(fp,"*** NULL element list ***\n"); - continue; - } - elts = rows[i].elt; - for ( j_idx = 0; j_idx < rows[i].len; j_idx++, elts++ ) - fprintf(fp,"Col: %d, Val: %g, nxt_row = %d, nxt_idx = %d\n", - elts->col,elts->val,elts->nxt_row,elts->nxt_idx); - fprintf(fp,"\n"); - } -} - -#define MAXSCRATCH 100 - -/* sp_finput -- input sparse matrix from stream/file fp - -- uses friendly input routine if fp is a tty - -- uses format identical to output format otherwise */ -SPMAT *sp_finput(fp) -FILE *fp; -{ - int i, len, ret_val; - int col, curr_col, m, n, tmp, tty; - Real val; - SPMAT *A; - SPROW *rows; - - row_elt scratch[MAXSCRATCH]; - /* cannot handle >= MAXSCRATCH elements in a row */ - - for ( i = 0; i < MAXSCRATCH; i++ ) - scratch[i].nxt_row = scratch[i].nxt_idx = -1; - - tty = isatty(fileno(fp)); - - if ( tty ) - { - fprintf(stderr,"SparseMatrix: "); - do { - fprintf(stderr,"input rows cols: "); - if ( ! fgets(line,MAXLINE,fp) ) - error(E_INPUT,"sp_finput"); - } while ( sscanf(line,"%u %u",&m,&n) != 2 ); - A = sp_get(m,n,5); - rows = A->row; - - for ( i = 0; i < m; i++ ) - { - fprintf(stderr,"Row %d:\n",i); - fprintf(stderr,"Enter or 'e' to end row\n"); - curr_col = -1; - for ( len = 0; len < MAXSCRATCH; len++ ) - { - do { - fprintf(stderr,"Entry %d: ",len); - if ( ! fgets(line,MAXLINE,fp) ) - error(E_INPUT,"sp_finput"); - if ( *line == 'e' || *line == 'E' ) - break; -#if REAL == DOUBLE - } while ( sscanf(line,"%u %lf",&col,&val) != 2 || -#elif REAL == FLOAT - } while ( sscanf(line,"%u %f",&col,&val) != 2 || -#endif - col >= n || col <= curr_col ); - - if ( *line == 'e' || *line == 'E' ) - break; - - scratch[len].col = col; - scratch[len].val = val; - curr_col = col; - } - - /* Note: len = # elements in row */ - if ( len > 5 ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPMAT, - A->row[i].maxlen*sizeof(row_elt), - len*sizeof(row_elt)); - } - - rows[i].elt = (row_elt *)realloc((char *)rows[i].elt, - len*sizeof(row_elt)); - rows[i].maxlen = len; - } - MEM_COPY(scratch,rows[i].elt,len*sizeof(row_elt)); - rows[i].len = len; - rows[i].diag = sprow_idx(&(rows[i]),i); - } - } - else /* not tty */ - { - ret_val = 0; - skipjunk(fp); - if (fscanf(fp,"SparseMatrix:") == EOF) { - error(E_INPUT, "sp_finput"); - } - skipjunk(fp); - if ( (ret_val=fscanf(fp,"%u by %u",&m,&n)) != 2 ) - error((ret_val == EOF) ? E_EOF : E_FORMAT,"sp_finput"); - A = sp_get(m,n,5); - - /* initialise start_row */ - for ( i = 0; i < A->n; i++ ) - A->start_row[i] = -1; - - rows = A->row; - for ( i = 0; i < m; i++ ) - { - /* printf("Reading row # %d\n",i); */ - rows[i].diag = -1; - skipjunk(fp); - if ( (ret_val=fscanf(fp,"row %d :",&tmp)) != 1 || - tmp != i ) - error((ret_val == EOF) ? 
E_EOF : E_FORMAT, - "sp_finput"); - curr_col = -1; - for ( len = 0; len < MAXSCRATCH; len++ ) - { -#if REAL == DOUBLE - if ( (ret_val=fscanf(fp,"%u : %lf",&col,&val)) != 2 ) -#elif REAL == FLOAT - if ( (ret_val=fscanf(fp,"%u : %f",&col,&val)) != 2 ) -#endif - break; - if ( col <= curr_col || col >= n ) - error(E_FORMAT,"sp_finput"); - scratch[len].col = col; - scratch[len].val = val; - } - if ( ret_val == EOF ) - error(E_EOF,"sp_finput"); - - if ( len > rows[i].maxlen ) - { - rows[i].elt = (row_elt *)realloc((char *)rows[i].elt, - len*sizeof(row_elt)); - rows[i].maxlen = len; - } - MEM_COPY(scratch,rows[i].elt,len*sizeof(row_elt)); - rows[i].len = len; - /* printf("Have read row # %d\n",i); */ - rows[i].diag = sprow_idx(&(rows[i]),i); - /* printf("Have set diag index for row # %d\n",i); */ - } - } - - return A; -} - diff --git a/src/mesch/spbkp.c b/src/mesch/spbkp.c deleted file mode 100755 index 0cd7618d46..0000000000 --- a/src/mesch/spbkp.c +++ /dev/null @@ -1,1385 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Sparse matrix Bunch--Kaufman--Parlett factorisation and solve - Radical revision started Thu 05th Nov 1992, 09:36:12 AM - to use Karen George's suggestion of leaving the the row elements unordered - Radical revision completed Mon 07th Dec 1992, 10:59:57 AM -*/ - -static char rcsid[] = "spbkp.c,v 1.1 1997/12/04 17:55:50 hines Exp"; - -#include -#include "sparse2.h" -#include - - -#ifdef MALLOCDECL -#include -#endif - -#define alpha 0.6403882032022076 /* = (1+sqrt(17))/8 */ - - -#define btos(x) ((x) ? "TRUE" : "FALSE") - -/* assume no use of sqr() uses side-effects */ -#define sqr(x) ((x)*(x)) - -/* unord_get_idx -- returns index (encoded if entry not allocated) - of the element of row r with column j - -- uses linear search */ -int unord_get_idx(r,j) -SPROW *r; -int j; -{ - int idx; - row_elt *e; - - if ( ! r || ! r->elt ) - error(E_NULL,"unord_get_idx"); - for ( idx = 0, e = r->elt; idx < r->len; idx++, e++ ) - if ( e->col == j ) - break; - if ( idx >= r->len ) - return -(r->len+2); - else - return idx; -} - -/* unord_get_val -- returns value of the (i,j) entry of A - -- same assumptions as unord_get_idx() */ -double unord_get_val(A,i,j) -SPMAT *A; -int i, j; -{ - SPROW *r; - int idx; - - if ( ! 
A ) - error(E_NULL,"unord_get_val"); - if ( i < 0 || i >= A->m || j < 0 || j >= A->n ) - error(E_BOUNDS,"unord_get_val"); - - r = &(A->row[i]); - idx = unord_get_idx(r,j); - if ( idx < 0 ) - return 0.0; - else - return r->elt[idx].val; -} - - -/* bkp_swap_elt -- swaps the (i,j) with the (k,l) entry of sparse matrix - -- either or both of the entries may be unallocated */ -static SPMAT *bkp_swap_elt(A,i1,j1,idx1,i2,j2,idx2) -SPMAT *A; -int i1, j1, idx1, i2, j2, idx2; -{ - int tmp_row, tmp_idx; - SPROW *r1, *r2; - row_elt *e1, *e2; - Real tmp; - - if ( ! A ) - error(E_NULL,"bkp_swap_elt"); - - if ( i1 < 0 || j1 < 0 || i2 < 0 || j2 < 0 || - i1 >= A->m || j1 >= A->n || i2 >= A->m || j2 >= A->n ) - { - error(E_BOUNDS,"bkp_swap_elt"); - } - - if ( i1 == i2 && j1 == j2 ) - return A; - if ( idx1 < 0 && idx2 < 0 ) /* neither allocated */ - return A; - - r1 = &(A->row[i1]); r2 = &(A->row[i2]); - /* if ( idx1 >= r1->len || idx2 >= r2->len ) - error(E_BOUNDS,"bkp_swap_elt"); */ - if ( idx1 < 0 ) /* assume not allocated */ - { - idx1 = r1->len; - if ( idx1 >= r1->maxlen ) - { tracecatch(sprow_xpd(r1,2*r1->maxlen+1,TYPE_SPMAT), - "bkp_swap_elt"); } - r1->len = idx1+1; - r1->elt[idx1].col = j1; - r1->elt[idx1].val = 0.0; - /* now patch up column access path */ - tmp_row = -1; tmp_idx = j1; - chase_col(A,j1,&tmp_row,&tmp_idx,i1-1); - - if ( tmp_row < 0 ) - { - r1->elt[idx1].nxt_row = A->start_row[j1]; - r1->elt[idx1].nxt_idx = A->start_idx[j1]; - A->start_row[j1] = i1; - A->start_idx[j1] = idx1; - } - else - { - row_elt *tmp_e; - - tmp_e = &(A->row[tmp_row].elt[tmp_idx]); - r1->elt[idx1].nxt_row = tmp_e->nxt_row; - r1->elt[idx1].nxt_idx = tmp_e->nxt_idx; - tmp_e->nxt_row = i1; - tmp_e->nxt_idx = idx1; - } - } - else if ( r1->elt[idx1].col != j1 ) - error(E_INTERN,"bkp_swap_elt"); - if ( idx2 < 0 ) - { - idx2 = r2->len; - if ( idx2 >= r2->maxlen ) - { tracecatch(sprow_xpd(r2,2*r2->maxlen+1,TYPE_SPMAT), - "bkp_swap_elt"); } - - r2->len = idx2+1; - r2->elt[idx2].col = j2; - r2->elt[idx2].val = 0.0; - /* now patch up column access path */ - tmp_row = -1; tmp_idx = j2; - chase_col(A,j2,&tmp_row,&tmp_idx,i2-1); - if ( tmp_row < 0 ) - { - r2->elt[idx2].nxt_row = A->start_row[j2]; - r2->elt[idx2].nxt_idx = A->start_idx[j2]; - A->start_row[j2] = i2; - A->start_idx[j2] = idx2; - } - else - { - row_elt *tmp_e; - - tmp_e = &(A->row[tmp_row].elt[tmp_idx]); - r2->elt[idx2].nxt_row = tmp_e->nxt_row; - r2->elt[idx2].nxt_idx = tmp_e->nxt_idx; - tmp_e->nxt_row = i2; - tmp_e->nxt_idx = idx2; - } - } - else if ( r2->elt[idx2].col != j2 ) - error(E_INTERN,"bkp_swap_elt"); - - e1 = &(r1->elt[idx1]); e2 = &(r2->elt[idx2]); - - tmp = e1->val; - e1->val = e2->val; - e2->val = tmp; - - return A; -} - -/* bkp_bump_col -- bumps row and idx to next entry in column j */ -row_elt *bkp_bump_col(A, j, row, idx) -SPMAT *A; -int j, *row, *idx; -{ - SPROW *r; - row_elt *e; - - if ( *row < 0 ) - { - *row = A->start_row[j]; - *idx = A->start_idx[j]; - } - else - { - r = &(A->row[*row]); - e = &(r->elt[*idx]); - if ( e->col != j ) - error(E_INTERN,"bkp_bump_col"); - *row = e->nxt_row; - *idx = e->nxt_idx; - } - if ( *row < 0 ) - return (row_elt *)NULL; - else - return &(A->row[*row].elt[*idx]); -} - -/* bkp_interchange -- swap rows/cols i and j (symmetric pivot) - -- uses just the upper triangular part */ -SPMAT *bkp_interchange(A, i1, i2) -SPMAT *A; -int i1, i2; -{ - int tmp_row, tmp_idx; - int row1, row2, idx1, idx2, tmp_row1, tmp_idx1, tmp_row2, tmp_idx2; - SPROW *r1, *r2; - row_elt *e1, *e2; - IVEC *done_list = IVNULL; - - if ( ! 
A ) - error(E_NULL,"bkp_interchange"); - if ( i1 < 0 || i1 >= A->n || i2 < 0 || i2 >= A->n ) - error(E_BOUNDS,"bkp_interchange"); - if ( A->m != A->n ) - error(E_SQUARE,"bkp_interchange"); - - if ( i1 == i2 ) - return A; - if ( i1 > i2 ) - { tmp_idx = i1; i1 = i2; i2 = tmp_idx; } - - done_list = iv_resize(done_list,A->n); - for ( tmp_idx = 0; tmp_idx < A->n; tmp_idx++ ) - done_list->ive[tmp_idx] = FALSE; - row1 = -1; idx1 = i1; - row2 = -1; idx2 = i2; - e1 = bkp_bump_col(A,i1,&row1,&idx1); - e2 = bkp_bump_col(A,i2,&row2,&idx2); - - while ( (row1 >= 0 && row1 < i1) || (row2 >= 0 && row2 < i1) ) - /* Note: "row2 < i1" not "row2 < i2" as we must stop before the - "knee bend" */ - { - if ( row1 >= 0 && row1 < i1 && ( row1 < row2 || row2 < 0 ) ) - { - tmp_row1 = row1; tmp_idx1 = idx1; - e1 = bkp_bump_col(A,i1,&tmp_row1,&tmp_idx1); - if ( ! done_list->ive[row1] ) - { - if ( row1 == row2 ) - bkp_swap_elt(A,row1,i1,idx1,row1,i2,idx2); - else - bkp_swap_elt(A,row1,i1,idx1,row1,i2,-1); - done_list->ive[row1] = TRUE; - } - row1 = tmp_row1; idx1 = tmp_idx1; - } - else if ( row2 >= 0 && row2 < i1 && ( row2 < row1 || row1 < 0 ) ) - { - tmp_row2 = row2; tmp_idx2 = idx2; - e2 = bkp_bump_col(A,i2,&tmp_row2,&tmp_idx2); - if ( ! done_list->ive[row2] ) - { - if ( row1 == row2 ) - bkp_swap_elt(A,row2,i1,idx1,row2,i2,idx2); - else - bkp_swap_elt(A,row2,i1,-1,row2,i2,idx2); - done_list->ive[row2] = TRUE; - } - row2 = tmp_row2; idx2 = tmp_idx2; - } - else if ( row1 == row2 ) - { - tmp_row1 = row1; tmp_idx1 = idx1; - e1 = bkp_bump_col(A,i1,&tmp_row1,&tmp_idx1); - tmp_row2 = row2; tmp_idx2 = idx2; - e2 = bkp_bump_col(A,i2,&tmp_row2,&tmp_idx2); - if ( ! done_list->ive[row1] ) - { - bkp_swap_elt(A,row1,i1,idx1,row2,i2,idx2); - done_list->ive[row1] = TRUE; - } - row1 = tmp_row1; idx1 = tmp_idx1; - row2 = tmp_row2; idx2 = tmp_idx2; - } - } - - /* ensure we are **past** the first knee */ - while ( row2 >= 0 && row2 <= i1 ) - e2 = bkp_bump_col(A,i2,&row2,&idx2); - - /* at/after 1st "knee bend" */ - r1 = &(A->row[i1]); - idx1 = 0; - e1 = &(r1->elt[idx1]); - while ( row2 >= 0 && row2 < i2 ) - { - /* used for update of e2 at end of loop */ - tmp_row = row2; tmp_idx = idx2; - if ( ! done_list->ive[row2] ) - { - r2 = &(A->row[row2]); - bkp_bump_col(A,i2,&tmp_row,&tmp_idx); - done_list->ive[row2] = TRUE; - tmp_idx1 = unord_get_idx(r1,row2); - tracecatch(bkp_swap_elt(A,row2,i2,idx2,i1,row2,tmp_idx1), - "bkp_interchange"); - } - - /* update e1 and e2 */ - row2 = tmp_row; idx2 = tmp_idx; - e2 = ( row2 >= 0 ) ? &(A->row[row2].elt[idx2]) : (row_elt *)NULL; - } - - idx1 = 0; - e1 = r1->elt; - while ( idx1 < r1->len ) - { - if ( e1->col >= i2 || e1->col <= i1 ) - { - idx1++; - e1++; - continue; - } - if ( ! done_list->ive[e1->col] ) - { - tmp_idx2 = unord_get_idx(&(A->row[e1->col]),i2); - tracecatch(bkp_swap_elt(A,i1,e1->col,idx1,e1->col,i2,tmp_idx2), - "bkp_interchange"); - done_list->ive[e1->col] = TRUE; - } - idx1++; - e1++; - } - - /* at/after 2nd "knee bend" */ - idx1 = 0; - e1 = &(r1->elt[idx1]); - r2 = &(A->row[i2]); - idx2 = 0; - e2 = &(r2->elt[idx2]); - while ( idx1 < r1->len ) - { - if ( e1->col <= i2 ) - { - idx1++; e1++; - continue; - } - if ( ! done_list->ive[e1->col] ) - { - tmp_idx2 = unord_get_idx(r2,e1->col); - tracecatch(bkp_swap_elt(A,i1,e1->col,idx1,i2,e1->col,tmp_idx2), - "bkp_interchange"); - done_list->ive[e1->col] = TRUE; - } - idx1++; e1++; - } - - idx2 = 0; e2 = r2->elt; - while ( idx2 < r2->len ) - { - if ( e2->col <= i2 ) - { - idx2++; e2++; - continue; - } - if ( ! 
done_list->ive[e2->col] ) - { - tmp_idx1 = unord_get_idx(r1,e2->col); - tracecatch(bkp_swap_elt(A,i2,e2->col,idx2,i1,e2->col,tmp_idx1), - "bkp_interchange"); - done_list->ive[e2->col] = TRUE; - } - idx2++; e2++; - } - - /* now interchange the digonal entries! */ - idx1 = unord_get_idx(&(A->row[i1]),i1); - idx2 = unord_get_idx(&(A->row[i2]),i2); - if ( idx1 >= 0 || idx2 >= 0 ) - { - tracecatch(bkp_swap_elt(A,i1,i1,idx1,i2,i2,idx2), - "bkp_interchange"); - } - - return A; -} - - -/* iv_min -- returns minimum of an integer vector - -- sets index to the position in iv if index != NULL */ -int iv_min(iv,index) -IVEC *iv; -int *index; -{ - int i, i_min, min_val, tmp; - - if ( ! iv ) - error(E_NULL,"iv_min"); - if ( iv->dim <= 0 ) - error(E_SIZES,"iv_min"); - i_min = 0; - min_val = iv->ive[0]; - for ( i = 1; i < iv->dim; i++ ) - { - tmp = iv->ive[i]; - if ( tmp < min_val ) - { - min_val = tmp; - i_min = i; - } - } - - if ( index != (int *)NULL ) - *index = i_min; - - return min_val; -} - -/* max_row_col -- returns max { |A[j][k]| : k >= i, k != j, k != l } given j - using symmetry and only the upper triangular part of A */ -static double max_row_col(A,i,j,l) -SPMAT *A; -int i, j, l; -{ - int row_num, idx; - SPROW *r; - row_elt *e; - Real max_val, tmp; - - if ( ! A ) - error(E_NULL,"max_row_col"); - if ( i < 0 || i > A->n || j < 0 || j >= A->n ) - error(E_BOUNDS,"max_row_col"); - - max_val = 0.0; - - idx = unord_get_idx(&(A->row[i]),j); - if ( idx < 0 ) - { - row_num = -1; idx = j; - e = chase_past(A,j,&row_num,&idx,i); - } - else - { - row_num = i; - e = &(A->row[i].elt[idx]); - } - while ( row_num >= 0 && row_num < j ) - { - if ( row_num != l ) - { - tmp = fabs(e->val); - if ( tmp > max_val ) - max_val = tmp; - } - e = bump_col(A,j,&row_num,&idx); - } - r = &(A->row[j]); - for ( idx = 0, e = r->elt; idx < r->len; idx++, e++ ) - { - if ( e->col > j && e->col != l ) - { - tmp = fabs(e->val); - if ( tmp > max_val ) - max_val = tmp; - } - } - - return max_val; -} - -/* nonzeros -- counts non-zeros in A */ -static int nonzeros(A) -SPMAT *A; -{ - int cnt, i; - - if ( ! A ) - return 0; - cnt = 0; - for ( i = 0; i < A->m; i++ ) - cnt += A->row[i].len; - - return cnt; -} - -/* chk_col_access -- for spBKPfactor() - -- checks that column access path is OK */ -int chk_col_access(A) -SPMAT *A; -{ - int cnt_nz, j, row, idx; - SPROW *r; - row_elt *e; - - if ( ! 
A ) - error(E_NULL,"chk_col_access"); - - /* count nonzeros as we go down columns */ - cnt_nz = 0; - for ( j = 0; j < A->n; j++ ) - { - row = A->start_row[j]; - idx = A->start_idx[j]; - while ( row >= 0 ) - { - if ( row >= A->m || idx < 0 ) - return FALSE; - r = &(A->row[row]); - if ( idx >= r->len ) - return FALSE; - e = &(r->elt[idx]); - if ( e->nxt_row >= 0 && e->nxt_row <= row ) - return FALSE; - row = e->nxt_row; - idx = e->nxt_idx; - cnt_nz++; - } - } - - if ( cnt_nz != nonzeros(A) ) - return FALSE; - else - return TRUE; -} - -/* col_cmp -- compare two columns -- for sorting rows using qsort() */ -static int col_cmp(e1,e2) -row_elt *e1, *e2; -{ - return e1->col - e2->col; -} - -/* spBKPfactor -- sparse Bunch-Kaufman-Parlett factorisation of A in-situ - -- A is factored into the form P'AP = MDM' where - P is a permutation matrix, M lower triangular and D is block - diagonal with blocks of size 1 or 2 - -- P is stored in pivot; blocks[i]==i iff D[i][i] is a block */ -SPMAT *spBKPfactor(A,pivot,blocks,tol) -SPMAT *A; -PERM *pivot, *blocks; -double tol; -{ - int i, j, k, l, n, onebyone=0, r; - int idx, idx1, idx_piv; - int row_num; - int best_deg=0, best_j, best_l, best_cost, mark_cost, deg, deg_j, - deg_l, ignore_deg; - int list_idx, list_idx2, old_list_idx; - SPROW *row, *r_piv, *r1_piv; - row_elt *e, *e1; - Real aii, aip1, aip1i; - Real det, max_j, max_l, s, t; - static IVEC *scan_row = IVNULL, *scan_idx = IVNULL, *col_list = IVNULL, - *tmp_iv = IVNULL; - static IVEC *deg_list = IVNULL; - static IVEC *orig_idx = IVNULL, *orig1_idx = IVNULL; - static PERM *order = PNULL; - - if ( ! A || ! pivot || ! blocks ) - error(E_NULL,"spBKPfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"spBKPfactor"); - if ( A->m != pivot->size || pivot->size != blocks->size ) - error(E_SIZES,"spBKPfactor"); - if ( tol <= 0.0 || tol > 1.0 ) - error(E_RANGE,"spBKPfactor"); - - n = A->n; - - px_ident(pivot); px_ident(blocks); - sp_col_access(A); sp_diag_access(A); - ignore_deg = FALSE; - - deg_list = iv_resize(deg_list,n); - order = px_resize(order,n); - MEM_STAT_REG(deg_list,TYPE_IVEC); - MEM_STAT_REG(order,TYPE_PERM); - - scan_row = iv_resize(scan_row,5); - scan_idx = iv_resize(scan_idx,5); - col_list = iv_resize(col_list,5); - orig_idx = iv_resize(orig_idx,5); - orig_idx = iv_resize(orig1_idx,5); - orig_idx = iv_resize(tmp_iv,5); - MEM_STAT_REG(scan_row,TYPE_IVEC); - MEM_STAT_REG(scan_idx,TYPE_IVEC); - MEM_STAT_REG(col_list,TYPE_IVEC); - MEM_STAT_REG(orig_idx,TYPE_IVEC); - MEM_STAT_REG(orig1_idx,TYPE_IVEC); - MEM_STAT_REG(tmp_iv,TYPE_IVEC); - - for ( i = 0; i < n-1; i = onebyone ? i+1 : i+2 ) - { - /* now we want to use a Markowitz-style selection rule for - determining which rows to swap and whether to use - 1x1 or 2x2 pivoting */ - - /* get list of degrees of nodes */ - deg_list = iv_resize(deg_list,n-i); - if ( ! ignore_deg ) - for ( j = i; j < n; j++ ) - deg_list->ive[j-i] = 0; - else - { - for ( j = i; j < n; j++ ) - deg_list->ive[j-i] = 1; - if ( i < n ) - deg_list->ive[0] = 0; - } - order = px_resize(order,n-i); - px_ident(order); - - if ( ! 
ignore_deg ) - { - for ( j = i; j < n; j++ ) - { - /* idx = sprow_idx(&(A->row[j]),j+1); */ - /* idx = fixindex(idx); */ - idx = 0; - row = &(A->row[j]); - e = &(row->elt[idx]); - /* deg_list->ive[j-i] += row->len - idx; */ - for ( ; idx < row->len; idx++, e++ ) - if ( e->col >= i ) - deg_list->ive[e->col - i]++; - } - /* now deg_list[k] == degree of node k+i */ - - /* now sort them into increasing order */ - iv_sort(deg_list,order); - /* now deg_list[idx] == degree of node i+order[idx] */ - } - - /* now we can chase through the nodes in order of increasing - degree, picking out the ones that satisfy our stability - criterion */ - list_idx = 0; r = -1; - best_j = best_l = -1; - for ( deg = 0; deg <= n; deg++ ) - { - Real ajj, all, ajl; - - if ( list_idx >= deg_list->dim ) - break; /* That's all folks! */ - old_list_idx = list_idx; - while ( list_idx < deg_list->dim && - deg_list->ive[list_idx] <= deg ) - { - j = i+order->pe[list_idx]; - if ( j < i ) - continue; - /* can we use row/col j for a 1 x 1 pivot? */ - /* find max_j = max_{k>=i} {|A[k][j]|,|A[j][k]|} */ - ajj = fabs(unord_get_val(A,j,j)); - if ( ajj == 0.0 ) - { - list_idx++; - continue; /* can't use this for 1 x 1 pivot */ - } - - max_j = max_row_col(A,i,j,-1); - if ( ajj >= tol/* *alpha */ *max_j ) - { - onebyone = TRUE; - best_j = j; - best_deg = deg_list->ive[list_idx]; - break; - } - list_idx++; - } - if ( best_j >= 0 ) - break; - best_cost = 2*n; /* > any possible Markowitz cost (bound) */ - best_j = best_l = -1; - list_idx = old_list_idx; - while ( list_idx < deg_list->dim && - deg_list->ive[list_idx] <= deg ) - { - j = i+order->pe[list_idx]; - ajj = fabs(unord_get_val(A,j,j)); - for ( list_idx2 = 0; list_idx2 < list_idx; list_idx2++ ) - { - deg_j = deg; - deg_l = deg_list->ive[list_idx2]; - l = i+order->pe[list_idx2]; - if ( l < i ) - continue; - /* try using rows/cols (j,l) for a 2 x 2 pivot block */ - all = fabs(unord_get_val(A,l,l)); - ajl = ( j > l ) ? fabs(unord_get_val(A,l,j)) : - fabs(unord_get_val(A,j,l)); - det = fabs(ajj*all - ajl*ajl); - if ( det == 0.0 ) - continue; - max_j = max_row_col(A,i,j,l); - max_l = max_row_col(A,i,l,j); - if ( tol*(all*max_j+ajl*max_l) < det && - tol*(ajl*max_j+ajj*max_l) < det ) - { - /* acceptably stable 2 x 2 pivot */ - /* this is actually an overestimate of the - Markowitz cost for choosing (j,l) */ - mark_cost = (ajj == 0.0) ? - ((all == 0.0) ? deg_j+deg_l : deg_j+2*deg_l) : - ((all == 0.0) ? 2*deg_j+deg_l : - 2*(deg_j+deg_l)); - if ( mark_cost < best_cost ) - { - onebyone = FALSE; - best_cost = mark_cost; - best_j = j; - best_l = l; - best_deg = deg_j; - } - } - } - list_idx++; - } - if ( best_j >= 0 ) - break; - } - - if ( best_deg > (int)floor(0.8*(n-i)) ) - ignore_deg = TRUE; - - /* now do actual interchanges */ - if ( best_j >= 0 && onebyone ) - { - bkp_interchange(A,i,best_j); - px_transp(pivot,i,best_j); - } - else if ( best_j >= 0 && best_l >= 0 && ! onebyone ) - { - if ( best_j == i || best_j == i+1 ) - { - if ( best_l == i || best_l == i+1 ) - { - /* no pivoting, but must update blocks permutation */ - px_transp(blocks,i,i+1); - goto dopivot; - } - bkp_interchange(A,(best_j == i) ? i+1 : i,best_l); - px_transp(pivot,(best_j == i) ? i+1 : i,best_l); - } - else if ( best_l == i || best_l == i+1 ) - { - bkp_interchange(A,(best_l == i) ? i+1 : i,best_j); - px_transp(pivot,(best_l == i) ? 
i+1 : i,best_j); - } - else /* best_j & best_l outside i, i+1 */ - { - if ( i != best_j ) - { - bkp_interchange(A,i,best_j); - px_transp(pivot,i,best_j); - } - if ( i+1 != best_l ) - { - bkp_interchange(A,i+1,best_l); - px_transp(pivot,i+1,best_l); - } - } - } - else /* can't pivot &/or nothing to pivot */ - continue; - - /* update blocks permutation */ - if ( ! onebyone ) - px_transp(blocks,i,i+1); - - dopivot: - if ( onebyone ) - { - int idx_j, idx_k, s_idx, s_idx2; - row_elt *e_ij, *e_ik; - - r_piv = &(A->row[i]); - idx_piv = unord_get_idx(r_piv,i); - /* if idx_piv < 0 then aii == 0 and no pivoting can be done; - -- this means that we should continue to the next iteration */ - if ( idx_piv < 0 ) - continue; - aii = r_piv->elt[idx_piv].val; - if ( aii == 0.0 ) - continue; - - /* for ( j = i+1; j < n; j++ ) { ... pivot step ... } */ - /* initialise scan_... etc for the 1 x 1 pivot */ - scan_row = iv_resize(scan_row,r_piv->len); - scan_idx = iv_resize(scan_idx,r_piv->len); - col_list = iv_resize(col_list,r_piv->len); - orig_idx = iv_resize(orig_idx,r_piv->len); - row_num = i; s_idx = idx = 0; - e = &(r_piv->elt[idx]); - for ( idx = 0; idx < r_piv->len; idx++, e++ ) - { - if ( e->col < i ) - continue; - scan_row->ive[s_idx] = i; - scan_idx->ive[s_idx] = idx; - orig_idx->ive[s_idx] = idx; - col_list->ive[s_idx] = e->col; - s_idx++; - } - scan_row = iv_resize(scan_row,s_idx); - scan_idx = iv_resize(scan_idx,s_idx); - col_list = iv_resize(col_list,s_idx); - orig_idx = iv_resize(orig_idx,s_idx); - - order = px_resize(order,scan_row->dim); - px_ident(order); - iv_sort(col_list,order); - - tmp_iv = iv_resize(tmp_iv,scan_row->dim); - for ( idx = 0; idx < order->size; idx++ ) - tmp_iv->ive[idx] = scan_idx->ive[order->pe[idx]]; - iv_copy(tmp_iv,scan_idx); - for ( idx = 0; idx < order->size; idx++ ) - tmp_iv->ive[idx] = scan_row->ive[order->pe[idx]]; - iv_copy(tmp_iv,scan_row); - for ( idx = 0; idx < scan_row->dim; idx++ ) - tmp_iv->ive[idx] = orig_idx->ive[order->pe[idx]]; - iv_copy(tmp_iv,orig_idx); - - /* now do actual pivot */ - /* for ( j = i+1; j < n-1; j++ ) .... */ - - for ( s_idx = 0; s_idx < scan_row->dim; s_idx++ ) - { - idx_j = orig_idx->ive[s_idx]; - if ( idx_j < 0 ) - error(E_INTERN,"spBKPfactor"); - e_ij = &(r_piv->elt[idx_j]); - j = e_ij->col; - if ( j < i+1 ) - continue; - scan_to(A,scan_row,scan_idx,col_list,j); - - /* compute multiplier */ - t = e_ij->val / aii; - - /* for ( k = j; k < n; k++ ) { .... update A[j][k] .... 
} */ - /* this is the row in which pivoting is done */ - row = &(A->row[j]); - for ( s_idx2 = s_idx; s_idx2 < scan_row->dim; s_idx2++ ) - { - idx_k = orig_idx->ive[s_idx2]; - e_ik = &(r_piv->elt[idx_k]); - k = e_ik->col; - /* k >= j since col_list has been sorted */ - - if ( scan_row->ive[s_idx2] == j ) - { /* no fill-in -- can be done directly */ - idx = scan_idx->ive[s_idx2]; - /* idx = sprow_idx2(row,k,idx); */ - row->elt[idx].val -= t*e_ik->val; - } - else - { /* fill-in -- insert entry & patch column */ - int old_row, old_idx; - row_elt *old_e, *new_e; - - old_row = scan_row->ive[s_idx2]; - old_idx = scan_idx->ive[s_idx2]; - /* old_idx = sprow_idx2(&(A->row[old_row]),k,old_idx); */ - - if ( old_idx < 0 ) - error(E_INTERN,"spBKPfactor"); - /* idx = sprow_idx(row,k); */ - /* idx = fixindex(idx); */ - idx = row->len; - - /* sprow_set_val(row,k,-t*e_ik->val); */ - if ( row->len >= row->maxlen ) - { tracecatch(sprow_xpd(row,2*row->maxlen+1,TYPE_SPMAT), - "spBKPfactor"); } - - row->len = idx+1; - - new_e = &(row->elt[idx]); - new_e->val = -t*e_ik->val; - new_e->col = k; - - old_e = &(A->row[old_row].elt[old_idx]); - new_e->nxt_row = old_e->nxt_row; - new_e->nxt_idx = old_e->nxt_idx; - old_e->nxt_row = j; - old_e->nxt_idx = idx; - } - } - e_ij->val = t; - } - } - else /* onebyone == FALSE */ - { /* do 2 x 2 pivot */ - int idx_k, idx1_k, s_idx, s_idx2; - int old_col; - row_elt *e_tmp; - - r_piv = &(A->row[i]); - idx_piv = unord_get_idx(r_piv,i); - aii = aip1i = 0.0; - e_tmp = r_piv->elt; - for ( idx_piv = 0; idx_piv < r_piv->len; idx_piv++, e_tmp++ ) - if ( e_tmp->col == i ) - aii = e_tmp->val; - else if ( e_tmp->col == i+1 ) - aip1i = e_tmp->val; - - r1_piv = &(A->row[i+1]); - e_tmp = r1_piv->elt; - aip1 = unord_get_val(A,i+1,i+1); - det = aii*aip1 - aip1i*aip1i; /* Must have det < 0 */ - if ( aii == 0.0 && aip1i == 0.0 ) - { - /* error(E_RANGE,"spBKPfactor"); */ - onebyone = TRUE; - continue; /* cannot pivot */ - } - - if ( det == 0.0 ) - { - if ( aii != 0.0 ) - error(E_RANGE,"spBKPfactor"); - onebyone = TRUE; - continue; /* cannot pivot */ - } - aip1i = aip1i/det; - aii = aii/det; - aip1 = aip1/det; - - /* initialise scan_... 
etc for the 2 x 2 pivot */ - s_idx = r_piv->len + r1_piv->len; - scan_row = iv_resize(scan_row,s_idx); - scan_idx = iv_resize(scan_idx,s_idx); - col_list = iv_resize(col_list,s_idx); - orig_idx = iv_resize(orig_idx,s_idx); - orig1_idx = iv_resize(orig1_idx,s_idx); - - e = r_piv->elt; - for ( idx = 0; idx < r_piv->len; idx++, e++ ) - { - scan_row->ive[idx] = i; - scan_idx->ive[idx] = idx; - col_list->ive[idx] = e->col; - orig_idx->ive[idx] = idx; - orig1_idx->ive[idx] = -1; - } - e = r_piv->elt; - e1 = r1_piv->elt; - for ( idx = 0; idx < r1_piv->len; idx++, e1++ ) - { - scan_row->ive[idx+r_piv->len] = i+1; - scan_idx->ive[idx+r_piv->len] = idx; - col_list->ive[idx+r_piv->len] = e1->col; - orig_idx->ive[idx+r_piv->len] = -1; - orig1_idx->ive[idx+r_piv->len] = idx; - } - - e1 = r1_piv->elt; - order = px_resize(order,scan_row->dim); - px_ident(order); - iv_sort(col_list,order); - tmp_iv = iv_resize(tmp_iv,scan_row->dim); - for ( idx = 0; idx < order->size; idx++ ) - tmp_iv->ive[idx] = scan_idx->ive[order->pe[idx]]; - iv_copy(tmp_iv,scan_idx); - for ( idx = 0; idx < order->size; idx++ ) - tmp_iv->ive[idx] = scan_row->ive[order->pe[idx]]; - iv_copy(tmp_iv,scan_row); - for ( idx = 0; idx < scan_row->dim; idx++ ) - tmp_iv->ive[idx] = orig_idx->ive[order->pe[idx]]; - iv_copy(tmp_iv,orig_idx); - for ( idx = 0; idx < scan_row->dim; idx++ ) - tmp_iv->ive[idx] = orig1_idx->ive[order->pe[idx]]; - iv_copy(tmp_iv,orig1_idx); - - s_idx = 0; - old_col = -1; - for ( idx = 0; idx < scan_row->dim; idx++ ) - { - if ( col_list->ive[idx] == old_col ) - { - if ( scan_row->ive[idx] == i ) - { - scan_row->ive[s_idx-1] = scan_row->ive[idx]; - scan_idx->ive[s_idx-1] = scan_idx->ive[idx]; - col_list->ive[s_idx-1] = col_list->ive[idx]; - orig_idx->ive[s_idx-1] = orig_idx->ive[idx]; - orig1_idx->ive[s_idx-1] = orig1_idx->ive[idx-1]; - } - else if ( idx > 0 ) - { - scan_row->ive[s_idx-1] = scan_row->ive[idx-1]; - scan_idx->ive[s_idx-1] = scan_idx->ive[idx-1]; - col_list->ive[s_idx-1] = col_list->ive[idx-1]; - orig_idx->ive[s_idx-1] = orig_idx->ive[idx-1]; - orig1_idx->ive[s_idx-1] = orig1_idx->ive[idx]; - } - } - else - { - scan_row->ive[s_idx] = scan_row->ive[idx]; - scan_idx->ive[s_idx] = scan_idx->ive[idx]; - col_list->ive[s_idx] = col_list->ive[idx]; - orig_idx->ive[s_idx] = orig_idx->ive[idx]; - orig1_idx->ive[s_idx] = orig1_idx->ive[idx]; - s_idx++; - } - old_col = col_list->ive[idx]; - } - scan_row = iv_resize(scan_row,s_idx); - scan_idx = iv_resize(scan_idx,s_idx); - col_list = iv_resize(col_list,s_idx); - orig_idx = iv_resize(orig_idx,s_idx); - orig1_idx = iv_resize(orig1_idx,s_idx); - - /* for ( j = i+2; j < n; j++ ) { .... row operation .... } */ - for ( s_idx = 0; s_idx < scan_row->dim; s_idx++ ) - { - int idx_piv, idx1_piv; - Real aip1j, aij, aik, aip1k; - row_elt *e_ik, *e_ip1k; - - j = col_list->ive[s_idx]; - if ( j < i+2 ) - continue; - tracecatch(scan_to(A,scan_row,scan_idx,col_list,j), - "spBKPfactor"); - - idx_piv = orig_idx->ive[s_idx]; - aij = ( idx_piv < 0 ) ? 0.0 : r_piv->elt[idx_piv].val; - /* aij = ( s_idx < r_piv->len ) ? r_piv->elt[s_idx].val : - 0.0; */ - /* aij = sp_get_val(A,i,j); */ - idx1_piv = orig1_idx->ive[s_idx]; - aip1j = ( idx1_piv < 0 ) ? 0.0 : r1_piv->elt[idx1_piv].val; - /* aip1j = ( s_idx < r_piv->len ) ? 0.0 : - r1_piv->elt[s_idx-r_piv->len].val; */ - /* aip1j = sp_get_val(A,i+1,j); */ - s = - aip1i*aip1j + aip1*aij; - t = - aip1i*aij + aii*aip1j; - - /* for ( k = j; k < n; k++ ) { .... update entry .... 
} */ - row = &(A->row[j]); - /* set idx_k and idx1_k indices */ - s_idx2 = s_idx; - k = col_list->ive[s_idx2]; - idx_k = orig_idx->ive[s_idx2]; - idx1_k = orig1_idx->ive[s_idx2]; - - while ( s_idx2 < scan_row->dim ) - { - k = col_list->ive[s_idx2]; - idx_k = orig_idx->ive[s_idx2]; - idx1_k = orig1_idx->ive[s_idx2]; - e_ik = ( idx_k < 0 ) ? (row_elt *)NULL : - &(r_piv->elt[idx_k]); - e_ip1k = ( idx1_k < 0 ) ? (row_elt *)NULL : - &(r1_piv->elt[idx1_k]); - aik = ( idx_k >= 0 ) ? e_ik->val : 0.0; - aip1k = ( idx1_k >= 0 ) ? e_ip1k->val : 0.0; - if ( scan_row->ive[s_idx2] == j ) - { /* no fill-in */ - row = &(A->row[j]); - /* idx = sprow_idx(row,k); */ - idx = scan_idx->ive[s_idx2]; - if ( idx < 0 ) - error(E_INTERN,"spBKPfactor"); - row->elt[idx].val -= s*aik + t*aip1k; - } - else - { /* fill-in -- insert entry & patch column */ - Real tmp; - int old_row, old_idx; - row_elt *old_e, *new_e; - - tmp = - s*aik - t*aip1k; - if ( tmp != 0.0 ) - { - row = &(A->row[j]); - old_row = scan_row->ive[s_idx2]; - old_idx = scan_idx->ive[s_idx2]; - - idx = row->len; - if ( row->len >= row->maxlen ) - { tracecatch(sprow_xpd(row,2*row->maxlen+1, - TYPE_SPMAT), - "spBKPfactor"); } - - row->len = idx + 1; - /* idx = sprow_idx(row,k); */ - new_e = &(row->elt[idx]); - new_e->val = tmp; - new_e->col = k; - - if ( old_row < 0 ) - error(E_INTERN,"spBKPfactor"); - /* old_idx = sprow_idx2(&(A->row[old_row]), - k,old_idx); */ - old_e = &(A->row[old_row].elt[old_idx]); - new_e->nxt_row = old_e->nxt_row; - new_e->nxt_idx = old_e->nxt_idx; - old_e->nxt_row = j; - old_e->nxt_idx = idx; - } - } - - /* update idx_k, idx1_k, s_idx2 etc */ - s_idx2++; - } - - /* store multipliers -- may involve fill-in (!) */ - /* idx = sprow_idx(r_piv,j); */ - idx = orig_idx->ive[s_idx]; - if ( idx >= 0 ) - { - r_piv->elt[idx].val = s; - } - else if ( s != 0.0 ) - { - int old_row, old_idx; - row_elt *new_e, *old_e; - - old_row = -1; old_idx = j; - - if ( i > 0 ) - { - tracecatch(chase_col(A,j,&old_row,&old_idx,i-1), - "spBKPfactor"); - } - /* sprow_set_val(r_piv,j,s); */ - idx = r_piv->len; - if ( r_piv->len >= r_piv->maxlen ) - { tracecatch(sprow_xpd(r_piv,2*r_piv->maxlen+1, - TYPE_SPMAT), - "spBKPfactor"); } - - r_piv->len = idx + 1; - /* idx = sprow_idx(r_piv,j); */ - /* if ( idx < 0 ) - error(E_INTERN,"spBKPfactor"); */ - new_e = &(r_piv->elt[idx]); - new_e->val = s; - new_e->col = j; - if ( old_row < 0 ) - { - new_e->nxt_row = A->start_row[j]; - new_e->nxt_idx = A->start_idx[j]; - A->start_row[j] = i; - A->start_idx[j] = idx; - } - else - { - /* old_idx = sprow_idx2(&(A->row[old_row]),j,old_idx);*/ - if ( old_idx < 0 ) - error(E_INTERN,"spBKPfactor"); - old_e = &(A->row[old_row].elt[old_idx]); - new_e->nxt_row = old_e->nxt_row; - new_e->nxt_idx = old_e->nxt_idx; - old_e->nxt_row = i; - old_e->nxt_idx = idx; - } - } - /* idx1 = sprow_idx(r1_piv,j); */ - idx1 = orig1_idx->ive[s_idx]; - if ( idx1 >= 0 ) - { - r1_piv->elt[idx1].val = t; - } - else if ( t != 0.0 ) - { - int old_row, old_idx; - row_elt *new_e, *old_e; - - old_row = -1; old_idx = j; - tracecatch(chase_col(A,j,&old_row,&old_idx,i), - "spBKPfactor"); - /* sprow_set_val(r1_piv,j,t); */ - idx1 = r1_piv->len; - if ( r1_piv->len >= r1_piv->maxlen ) - { tracecatch(sprow_xpd(r1_piv,2*r1_piv->maxlen+1, - TYPE_SPMAT), - "spBKPfactor"); } - - r1_piv->len = idx1 + 1; - /* idx1 = sprow_idx(r1_piv,j); */ - /* if ( idx < 0 ) - error(E_INTERN,"spBKPfactor"); */ - new_e = &(r1_piv->elt[idx1]); - new_e->val = t; - new_e->col = j; - if ( idx1 < 0 ) - error(E_INTERN,"spBKPfactor"); - new_e = 
&(r1_piv->elt[idx1]); - if ( old_row < 0 ) - { - new_e->nxt_row = A->start_row[j]; - new_e->nxt_idx = A->start_idx[j]; - A->start_row[j] = i+1; - A->start_idx[j] = idx1; - } - else - { - old_idx = sprow_idx2(&(A->row[old_row]),j,old_idx); - if ( old_idx < 0 ) - error(E_INTERN,"spBKPfactor"); - old_e = &(A->row[old_row].elt[old_idx]); - new_e->nxt_row = old_e->nxt_row; - new_e->nxt_idx = old_e->nxt_idx; - old_e->nxt_row = i+1; - old_e->nxt_idx = idx1; - } - } - } - } - } - - /* now sort the rows arrays */ - for ( i = 0; i < A->m; i++ ) - qsort(A->row[i].elt,A->row[i].len,sizeof(row_elt),(int(*)())col_cmp); - A->flag_col = A->flag_diag = FALSE; - - return A; -} - -/* spBKPsolve -- solves A.x = b where A has been factored a la BKPfactor() - -- returns x, which is created if NULL */ -VEC *spBKPsolve(A,pivot,block,b,x) -SPMAT *A; -PERM *pivot, *block; -VEC *b, *x; -{ - static VEC *tmp=VNULL; /* dummy storage needed */ - int i /* , j */, n, onebyone; - int row_num, idx; - Real a11, a12, a22, b1, b2, det, sum, *tmp_ve, tmp_diag; - SPROW *r; - row_elt *e; - - if ( ! A || ! pivot || ! block || ! b ) - error(E_NULL,"spBKPsolve"); - if ( A->m != A->n ) - error(E_SQUARE,"spBKPsolve"); - n = A->n; - if ( b->dim != n || pivot->size != n || block->size != n ) - error(E_SIZES,"spBKPsolve"); - x = v_resize(x,n); - tmp = v_resize(tmp,n); - MEM_STAT_REG(tmp,TYPE_VEC); - - tmp_ve = tmp->ve; - - if ( ! A->flag_col ) - sp_col_access(A); - - px_vec(pivot,b,tmp); - /* printf("# BKPsolve: effect of pivot: tmp =\n"); v_output(tmp); */ - - /* solve for lower triangular part */ - for ( i = 0; i < n; i++ ) - { - sum = tmp_ve[i]; - if ( block->pe[i] < i ) - { - /* for ( j = 0; j < i-1; j++ ) - sum -= A_me[j][i]*tmp_ve[j]; */ - row_num = -1; idx = i; - e = bump_col(A,i,&row_num,&idx); - while ( row_num >= 0 && row_num < i-1 ) - { - sum -= e->val*tmp_ve[row_num]; - e = bump_col(A,i,&row_num,&idx); - } - } - else - { - /* for ( j = 0; j < i; j++ ) - sum -= A_me[j][i]*tmp_ve[j]; */ - row_num = -1; idx = i; - e = bump_col(A,i,&row_num,&idx); - while ( row_num >= 0 && row_num < i ) - { - sum -= e->val*tmp_ve[row_num]; - e = bump_col(A,i,&row_num,&idx); - } - } - tmp_ve[i] = sum; - } - - /* printf("# BKPsolve: solving L part: tmp =\n"); v_output(tmp); */ - /* solve for diagonal part */ - for ( i = 0; i < n; i = onebyone ? 
i+1 : i+2 ) - { - onebyone = ( block->pe[i] == i ); - if ( onebyone ) - { - /* tmp_ve[i] /= A_me[i][i]; */ - tmp_diag = sp_get_val(A,i,i); - if ( tmp_diag == 0.0 ) - error(E_SING,"spBKPsolve"); - tmp_ve[i] /= tmp_diag; - } - else - { - a11 = sp_get_val(A,i,i); - a22 = sp_get_val(A,i+1,i+1); - a12 = sp_get_val(A,i,i+1); - b1 = tmp_ve[i]; - b2 = tmp_ve[i+1]; - det = a11*a22-a12*a12; /* < 0 : see BKPfactor() */ - if ( det == 0.0 ) - error(E_SING,"BKPsolve"); - det = 1/det; - tmp_ve[i] = det*(a22*b1-a12*b2); - tmp_ve[i+1] = det*(a11*b2-a12*b1); - } - } - - /* printf("# BKPsolve: solving D part: tmp =\n"); v_output(tmp); */ - /* solve for transpose of lower triangular part */ - for ( i = n-2; i >= 0; i-- ) - { - sum = tmp_ve[i]; - if ( block->pe[i] > i ) - { - /* onebyone is false */ - /* for ( j = i+2; j < n; j++ ) - sum -= A_me[i][j]*tmp_ve[j]; */ - if ( i+2 >= n ) - continue; - r = &(A->row[i]); - idx = sprow_idx(r,i+2); - idx = fixindex(idx); - e = &(r->elt[idx]); - for ( ; idx < r->len; idx++, e++ ) - sum -= e->val*tmp_ve[e->col]; - } - else /* onebyone */ - { - /* for ( j = i+1; j < n; j++ ) - sum -= A_me[i][j]*tmp_ve[j]; */ - r = &(A->row[i]); - idx = sprow_idx(r,i+1); - idx = fixindex(idx); - e = &(r->elt[idx]); - for ( ; idx < r->len; idx++, e++ ) - sum -= e->val*tmp_ve[e->col]; - } - tmp_ve[i] = sum; - } - - /* printf("# BKPsolve: solving L^T part: tmp =\n");v_output(tmp); */ - /* and do final permutation */ - x = pxinv_vec(pivot,tmp,x); - - return x; -} - - - diff --git a/src/mesch/spchfctr.c b/src/mesch/spchfctr.c deleted file mode 100755 index a731140eb2..0000000000 --- a/src/mesch/spchfctr.c +++ /dev/null @@ -1,628 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* - Sparse Cholesky factorisation code - To be used with sparse.h, sparse.c etc - -*/ - -static char rcsid[] = "spchfctr.c,v 1.1 1997/12/04 17:55:51 hines Exp"; - -#include -#include "sparse2.h" -#include - - -#ifndef MALLOCDECL -#ifndef ANSI_C -extern char *calloc(), *realloc(); -#endif -#endif - - - -/* sprow_ip -- finds the (partial) inner product of a pair of sparse rows - -- uses a "merging" approach & assumes column ordered rows - -- row indices for inner product are all < lim */ -double sprow_ip(row1, row2, lim) -SPROW *row1, *row2; -int lim; -{ - int idx1, idx2, len1, len2, tmp; - int sprow_idx(); - register row_elt *elts1, *elts2; - register Real sum; - - elts1 = row1->elt; elts2 = row2->elt; - len1 = row1->len; len2 = row2->len; - - sum = 0.0; - - if ( len1 <= 0 || len2 <= 0 ) - return 0.0; - if ( elts1->col >= lim || elts2->col >= lim ) - return 0.0; - - /* use sprow_idx() to speed up inner product where one row is - much longer than the other */ - idx1 = idx2 = 0; - if ( len1 > 2*len2 ) - { - idx1 = sprow_idx(row1,elts2->col); - idx1 = (idx1 < 0) ? -(idx1+2) : idx1; - if ( idx1 < 0 ) - error(E_UNKNOWN,"sprow_ip"); - len1 -= idx1; - } - else if ( len2 > 2*len1 ) - { - idx2 = sprow_idx(row2,elts1->col); - idx2 = (idx2 < 0) ? -(idx2+2) : idx2; - if ( idx2 < 0 ) - error(E_UNKNOWN,"sprow_ip"); - len2 -= idx2; - } - if ( len1 <= 0 || len2 <= 0 ) - return 0.0; - - elts1 = &(elts1[idx1]); elts2 = &(elts2[idx2]); - - - for ( ; ; ) /* forever do... */ - { - if ( (tmp=elts1->col-elts2->col) < 0 ) - { - len1--; elts1++; - if ( ! len1 || elts1->col >= lim ) - break; - } - else if ( tmp > 0 ) - { - len2--; elts2++; - if ( ! len2 || elts2->col >= lim ) - break; - } - else - { - sum += elts1->val * elts2->val; - len1--; elts1++; - len2--; elts2++; - if ( ! len1 || ! len2 || - elts1->col >= lim || elts2->col >= lim ) - break; - } - } - - return sum; -} - -/* sprow_sqr -- returns same as sprow_ip(row, row, lim) */ -double sprow_sqr(row, lim) -SPROW *row; -int lim; -{ - register row_elt *elts; - int idx, len; - register Real sum, tmp; - - sum = 0.0; - elts = row->elt; len = row->len; - for ( idx = 0; idx < len; idx++, elts++ ) - { - if ( elts->col >= lim ) - break; - tmp = elts->val; - sum += tmp*tmp; - } - - return sum; -} - -static int *scan_row = (int *)NULL, *scan_idx = (int *)NULL, - *col_list = (int *)NULL; -static int scan_len = 0; - -/* set_scan -- expand scan_row and scan_idx arrays - -- return new length */ -int set_scan(new_len) -int new_len; -{ - if ( new_len <= scan_len ) - return scan_len; - if ( new_len <= scan_len+5 ) - new_len += 5; - - if ( ! scan_row || ! scan_idx || ! col_list ) - { - scan_row = (int *)calloc(new_len,sizeof(int)); - scan_idx = (int *)calloc(new_len,sizeof(int)); - col_list = (int *)calloc(new_len,sizeof(int)); - } - else - { - scan_row = (int *)realloc((char *)scan_row,new_len*sizeof(int)); - scan_idx = (int *)realloc((char *)scan_idx,new_len*sizeof(int)); - col_list = (int *)realloc((char *)col_list,new_len*sizeof(int)); - } - - if ( ! scan_row || ! scan_idx || ! col_list ) - error(E_MEM,"set_scan"); - return new_len; -} - -/* spCHfactor -- sparse Cholesky factorisation - -- only the lower triangular part of A (incl. 
diagonal) is used */ -SPMAT *spCHfactor(A) -SPMAT *A; -{ - register int i; - int idx, k, m, minim, n, num_scan, diag_idx, tmp1; - Real pivot, tmp2; - SPROW *r_piv, *r_op; - row_elt *elt_piv, *elt_op, *old_elt; - - if ( A == SMNULL ) - error(E_NULL,"spCHfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"spCHfactor"); - - /* set up access paths if not already done so */ - sp_col_access(A); - sp_diag_access(A); - - /* printf("spCHfactor() -- checkpoint 1\n"); */ - m = A->m; n = A->n; - for ( k = 0; k < m; k++ ) - { - r_piv = &(A->row[k]); - if ( r_piv->len > scan_len ) - set_scan(r_piv->len); - elt_piv = r_piv->elt; - diag_idx = sprow_idx2(r_piv,k,r_piv->diag); - if ( diag_idx < 0 ) - error(E_POSDEF,"spCHfactor"); - old_elt = &(elt_piv[diag_idx]); - for ( i = 0; i < r_piv->len; i++ ) - { - if ( elt_piv[i].col > k ) - break; - col_list[i] = elt_piv[i].col; - scan_row[i] = elt_piv[i].nxt_row; - scan_idx[i] = elt_piv[i].nxt_idx; - } - /* printf("spCHfactor() -- checkpoint 2\n"); */ - num_scan = i; /* number of actual entries in scan_row etc. */ - /* printf("num_scan = %d\n",num_scan); */ - - /* set diagonal entry of Cholesky factor */ - tmp2 = elt_piv[diag_idx].val - sprow_sqr(r_piv,k); - if ( tmp2 <= 0.0 ) - error(E_POSDEF,"spCHfactor"); - elt_piv[diag_idx].val = pivot = sqrt(tmp2); - - /* now set the k-th column of the Cholesky factors */ - /* printf("k = %d\n",k); */ - for ( ; ; ) /* forever do... */ - { - /* printf("spCHfactor() -- checkpoint 3\n"); */ - /* find next row where something (non-trivial) happens - i.e. find min(scan_row) */ - /* printf("scan_row: "); */ - minim = n; - for ( i = 0; i < num_scan; i++ ) - { - tmp1 = scan_row[i]; - /* printf("%d ",tmp1); */ - minim = ( tmp1 >= 0 && tmp1 < minim ) ? tmp1 : minim; - } - /* printf("minim = %d\n",minim); */ - /* printf("col_list: "); */ -/********************************************************************** - for ( i = 0; i < num_scan; i++ ) - printf("%d ",col_list[i]); - printf("\n"); -**********************************************************************/ - - if ( minim >= n ) - break; /* nothing more to do for this column */ - r_op = &(A->row[minim]); - elt_op = r_op->elt; - - /* set next entry in column k of Cholesky factors */ - idx = sprow_idx2(r_op,k,scan_idx[num_scan-1]); - if ( idx < 0 ) - { /* fill-in */ - sp_set_val(A,minim,k, - -sprow_ip(r_piv,r_op,k)/pivot); - /* in case a realloc() has occurred... 
*/ - elt_op = r_op->elt; - /* now set up column access path again */ - idx = sprow_idx2(r_op,k,-(idx+2)); - tmp1 = old_elt->nxt_row; - old_elt->nxt_row = minim; - r_op->elt[idx].nxt_row = tmp1; - tmp1 = old_elt->nxt_idx; - old_elt->nxt_idx = idx; - r_op->elt[idx].nxt_idx = tmp1; - } - else - elt_op[idx].val = (elt_op[idx].val - - sprow_ip(r_piv,r_op,k))/pivot; - - /* printf("spCHfactor() -- checkpoint 4\n"); */ - - /* remember current element in column k for column chain */ - idx = sprow_idx2(r_op,k,idx); - old_elt = &(r_op->elt[idx]); - - /* update scan_row */ - /* printf("spCHfactor() -- checkpoint 5\n"); */ - /* printf("minim = %d\n",minim); */ - for ( i = 0; i < num_scan; i++ ) - { - if ( scan_row[i] != minim ) - continue; - idx = sprow_idx2(r_op,col_list[i],scan_idx[i]); - if ( idx < 0 ) - { scan_row[i] = -1; continue; } - scan_row[i] = elt_op[idx].nxt_row; - scan_idx[i] = elt_op[idx].nxt_idx; - /* printf("scan_row[%d] = %d\n",i,scan_row[i]); */ - /* printf("scan_idx[%d] = %d\n",i,scan_idx[i]); */ - } - - } - /* printf("spCHfactor() -- checkpoint 6\n"); */ - /* sp_dump(stdout,A); */ - /* printf("\n\n\n"); */ - } - - return A; -} - -/* spCHsolve -- solve L.L^T.out=b where L is a sparse matrix, - -- out, b dense vectors - -- returns out; operation may be in-situ */ -VEC *spCHsolve(L,b,out) -SPMAT *L; -VEC *b, *out; -{ - int i, j_idx, n, scan_idx, scan_row; - SPROW *row; - row_elt *elt; - Real diag_val, sum, *out_ve; - - if ( L == SMNULL || b == VNULL ) - error(E_NULL,"spCHsolve"); - if ( L->m != L->n ) - error(E_SQUARE,"spCHsolve"); - if ( b->dim != L->m ) - error(E_SIZES,"spCHsolve"); - - if ( ! L->flag_col ) - sp_col_access(L); - if ( ! L->flag_diag ) - sp_diag_access(L); - - out = v_copy(b,out); - out_ve = out->ve; - - /* forward substitution: solve L.x=b for x */ - n = L->n; - for ( i = 0; i < n; i++ ) - { - sum = out_ve[i]; - row = &(L->row[i]); - elt = row->elt; - for ( j_idx = 0; j_idx < row->len; j_idx++, elt++ ) - { - if ( elt->col >= i ) - break; - sum -= elt->val*out_ve[elt->col]; - } - if ( row->diag >= 0 ) - out_ve[i] = sum/(row->elt[row->diag].val); - else - error(E_SING,"spCHsolve"); - } - - /* backward substitution: solve L^T.out = x for out */ - for ( i = n-1; i >= 0; i-- ) - { - sum = out_ve[i]; - row = &(L->row[i]); - /* Note that row->diag >= 0 by above loop */ - elt = &(row->elt[row->diag]); - diag_val = elt->val; - - /* scan down column */ - scan_idx = elt->nxt_idx; - scan_row = elt->nxt_row; - while ( scan_row >= 0 /* && scan_idx >= 0 */ ) - { - row = &(L->row[scan_row]); - elt = &(row->elt[scan_idx]); - sum -= elt->val*out_ve[scan_row]; - scan_idx = elt->nxt_idx; - scan_row = elt->nxt_row; - } - out_ve[i] = sum/diag_val; - } - - return out; -} - -/* spICHfactor -- sparse Incomplete Cholesky factorisation - -- does a Cholesky factorisation assuming NO FILL-IN - -- as for spCHfactor(), only the lower triangular part of A is used */ -SPMAT *spICHfactor(A) -SPMAT *A; -{ - int k, m, n, nxt_row, nxt_idx, diag_idx; - Real pivot, tmp2; - SPROW *r_piv, *r_op; - row_elt *elt_piv, *elt_op; - - if ( A == SMNULL ) - error(E_NULL,"spICHfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"spICHfactor"); - - /* set up access paths if not already done so */ - if ( ! A->flag_col ) - sp_col_access(A); - if ( ! 
A->flag_diag ) - sp_diag_access(A); - - m = A->m; n = A->n; - for ( k = 0; k < m; k++ ) - { - r_piv = &(A->row[k]); - - diag_idx = r_piv->diag; - if ( diag_idx < 0 ) - error(E_POSDEF,"spICHfactor"); - - elt_piv = r_piv->elt; - - /* set diagonal entry of Cholesky factor */ - tmp2 = elt_piv[diag_idx].val - sprow_sqr(r_piv,k); - if ( tmp2 <= 0.0 ) - error(E_POSDEF,"spICHfactor"); - elt_piv[diag_idx].val = pivot = sqrt(tmp2); - - /* find next row where something (non-trivial) happens */ - nxt_row = elt_piv[diag_idx].nxt_row; - nxt_idx = elt_piv[diag_idx].nxt_idx; - - /* now set the k-th column of the Cholesky factors */ - while ( nxt_row >= 0 && nxt_idx >= 0 ) - { - /* nxt_row and nxt_idx give next next row (& index) - of the entry to be modified */ - r_op = &(A->row[nxt_row]); - elt_op = r_op->elt; - elt_op[nxt_idx].val = (elt_op[nxt_idx].val - - sprow_ip(r_piv,r_op,k))/pivot; - - nxt_row = elt_op[nxt_idx].nxt_row; - nxt_idx = elt_op[nxt_idx].nxt_idx; - } - } - - return A; -} - - -/* spCHsymb -- symbolic sparse Cholesky factorisation - -- does NOT do any floating point arithmetic; just sets up the structure - -- only the lower triangular part of A (incl. diagonal) is used */ -SPMAT *spCHsymb(A) -SPMAT *A; -{ - register int i; - int idx, k, m, minim, n, num_scan, diag_idx, tmp1; - SPROW *r_piv, *r_op; - row_elt *elt_piv, *elt_op, *old_elt; - - if ( A == SMNULL ) - error(E_NULL,"spCHsymb"); - if ( A->m != A->n ) - error(E_SQUARE,"spCHsymb"); - - /* set up access paths if not already done so */ - if ( ! A->flag_col ) - sp_col_access(A); - if ( ! A->flag_diag ) - sp_diag_access(A); - - /* printf("spCHsymb() -- checkpoint 1\n"); */ - m = A->m; n = A->n; - for ( k = 0; k < m; k++ ) - { - r_piv = &(A->row[k]); - if ( r_piv->len > scan_len ) - set_scan(r_piv->len); - elt_piv = r_piv->elt; - diag_idx = sprow_idx2(r_piv,k,r_piv->diag); - if ( diag_idx < 0 ) - error(E_POSDEF,"spCHsymb"); - old_elt = &(elt_piv[diag_idx]); - for ( i = 0; i < r_piv->len; i++ ) - { - if ( elt_piv[i].col > k ) - break; - col_list[i] = elt_piv[i].col; - scan_row[i] = elt_piv[i].nxt_row; - scan_idx[i] = elt_piv[i].nxt_idx; - } - /* printf("spCHsymb() -- checkpoint 2\n"); */ - num_scan = i; /* number of actual entries in scan_row etc. */ - /* printf("num_scan = %d\n",num_scan); */ - - /* now set the k-th column of the Cholesky factors */ - /* printf("k = %d\n",k); */ - for ( ; ; ) /* forever do... */ - { - /* printf("spCHsymb() -- checkpoint 3\n"); */ - /* find next row where something (non-trivial) happens - i.e. find min(scan_row) */ - minim = n; - for ( i = 0; i < num_scan; i++ ) - { - tmp1 = scan_row[i]; - /* printf("%d ",tmp1); */ - minim = ( tmp1 >= 0 && tmp1 < minim ) ? tmp1 : minim; - } - - if ( minim >= n ) - break; /* nothing more to do for this column */ - r_op = &(A->row[minim]); - elt_op = r_op->elt; - - /* set next entry in column k of Cholesky factors */ - idx = sprow_idx2(r_op,k,scan_idx[num_scan-1]); - if ( idx < 0 ) - { /* fill-in */ - sp_set_val(A,minim,k,0.0); - /* in case a realloc() has occurred... 
*/ - elt_op = r_op->elt; - /* now set up column access path again */ - idx = sprow_idx2(r_op,k,-(idx+2)); - tmp1 = old_elt->nxt_row; - old_elt->nxt_row = minim; - r_op->elt[idx].nxt_row = tmp1; - tmp1 = old_elt->nxt_idx; - old_elt->nxt_idx = idx; - r_op->elt[idx].nxt_idx = tmp1; - } - - /* printf("spCHsymb() -- checkpoint 4\n"); */ - - /* remember current element in column k for column chain */ - idx = sprow_idx2(r_op,k,idx); - old_elt = &(r_op->elt[idx]); - - /* update scan_row */ - /* printf("spCHsymb() -- checkpoint 5\n"); */ - /* printf("minim = %d\n",minim); */ - for ( i = 0; i < num_scan; i++ ) - { - if ( scan_row[i] != minim ) - continue; - idx = sprow_idx2(r_op,col_list[i],scan_idx[i]); - if ( idx < 0 ) - { scan_row[i] = -1; continue; } - scan_row[i] = elt_op[idx].nxt_row; - scan_idx[i] = elt_op[idx].nxt_idx; - /* printf("scan_row[%d] = %d\n",i,scan_row[i]); */ - /* printf("scan_idx[%d] = %d\n",i,scan_idx[i]); */ - } - - } - /* printf("spCHsymb() -- checkpoint 6\n"); */ - } - - return A; -} - -/* comp_AAT -- compute A.A^T where A is a given sparse matrix */ -SPMAT *comp_AAT(A) -SPMAT *A; -{ - SPMAT *AAT; - SPROW *r, *r2; - row_elt *elts, *elts2; - int i, idx, idx2, j, m, minim, n, num_scan, tmp1; - Real ip; - - if ( ! A ) - error(E_NULL,"comp_AAT"); - m = A->m; n = A->n; - - /* set up column access paths */ - if ( ! A->flag_col ) - sp_col_access(A); - - AAT = sp_get(m,m,10); - - for ( i = 0; i < m; i++ ) - { - /* initialisation */ - r = &(A->row[i]); - elts = r->elt; - - /* set up scan lists for this row */ - if ( r->len > scan_len ) - set_scan(r->len); - for ( j = 0; j < r->len; j++ ) - { - col_list[j] = elts[j].col; - scan_row[j] = elts[j].nxt_row; - scan_idx[j] = elts[j].nxt_idx; - } - num_scan = r->len; - - /* scan down the rows for next non-zero not - associated with a diagonal entry */ - for ( ; ; ) - { - minim = m; - for ( idx = 0; idx < num_scan; idx++ ) - { - tmp1 = scan_row[idx]; - minim = ( tmp1 >= 0 && tmp1 < minim ) ? tmp1 : minim; - } - if ( minim >= m ) - break; - r2 = &(A->row[minim]); - if ( minim > i ) - { - ip = sprow_ip(r,r2,n); - sp_set_val(AAT,minim,i,ip); - sp_set_val(AAT,i,minim,ip); - } - /* update scan entries */ - elts2 = r2->elt; - for ( idx = 0; idx < num_scan; idx++ ) - { - if ( scan_row[idx] != minim || scan_idx[idx] < 0 ) - continue; - idx2 = scan_idx[idx]; - scan_row[idx] = elts2[idx2].nxt_row; - scan_idx[idx] = elts2[idx2].nxt_idx; - } - } - - /* set the diagonal entry */ - sp_set_val(AAT,i,i,sprow_sqr(r,n)); - } - - return AAT; -} - diff --git a/src/mesch/splufctr.c b/src/mesch/splufctr.c deleted file mode 100755 index 826f9a8f95..0000000000 --- a/src/mesch/splufctr.c +++ /dev/null @@ -1,411 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Stewart & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. 
No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Sparse LU factorisation - See also: sparse.[ch] etc for details about sparse matrices -*/ - -#include -#include "sparse2.h" -#include - - - -/* Macro for speedup */ -/* #define sprow_idx2(r,c,hint) \ - ( ( (hint) >= 0 && (r)->elt[hint].col == (c)) ? hint : sprow_idx((r),(c)) ) */ - - -/* spLUfactor -- sparse LU factorisation with pivoting - -- uses partial pivoting and Markowitz criterion - |a[p][k]| >= alpha * max_i |a[i][k]| - -- creates fill-in as needed - -- in situ factorisation */ -SPMAT *spLUfactor(A,px,alpha) -SPMAT *A; -PERM *px; -double alpha; -{ - int i, best_i, k, idx, len, best_len, m, n; - SPROW *r, *r_piv, tmp_row; - static SPROW *merge = (SPROW *)NULL; - Real max_val, tmp; - static VEC *col_vals=VNULL; - - if ( ! A || ! px ) - error(E_NULL,"spLUfctr"); - if ( alpha <= 0.0 || alpha > 1.0 ) - error(E_RANGE,"alpha in spLUfctr"); - if ( px->size <= A->m ) - px = px_resize(px,A->m); - px_ident(px); - col_vals = v_resize(col_vals,A->m); - MEM_STAT_REG(col_vals,TYPE_VEC); - - m = A->m; n = A->n; - if ( ! A->flag_col ) - sp_col_access(A); - if ( ! A->flag_diag ) - sp_diag_access(A); - A->flag_col = A->flag_diag = FALSE; - if ( ! merge ) { - merge = sprow_get(20); - MEM_STAT_REG(merge,TYPE_SPROW); - } - - for ( k = 0; k < n; k++ ) - { - /* find pivot row/element for partial pivoting */ - - /* get first row with a non-zero entry in the k-th column */ - max_val = 0.0; - for ( i = k; i < m; i++ ) - { - r = &(A->row[i]); - idx = sprow_idx(r,k); - if ( idx < 0 ) - tmp = 0.0; - else - tmp = r->elt[idx].val; - if ( fabs(tmp) > max_val ) - max_val = fabs(tmp); - col_vals->ve[i] = tmp; - } - - if ( max_val == 0.0 ) - continue; - - best_len = n+1; /* only if no possibilities */ - best_i = -1; - for ( i = k; i < m; i++ ) - { - tmp = fabs(col_vals->ve[i]); - if ( tmp == 0.0 ) - continue; - if ( tmp >= alpha*max_val ) - { - r = &(A->row[i]); - idx = sprow_idx(r,k); - len = (r->len) - idx; - if ( len < best_len ) - { - best_len = len; - best_i = i; - } - } - } - - /* swap row #best_i with row #k */ - MEM_COPY(&(A->row[best_i]),&tmp_row,sizeof(SPROW)); - MEM_COPY(&(A->row[k]),&(A->row[best_i]),sizeof(SPROW)); - MEM_COPY(&tmp_row,&(A->row[k]),sizeof(SPROW)); - /* swap col_vals entries */ - tmp = col_vals->ve[best_i]; - col_vals->ve[best_i] = col_vals->ve[k]; - col_vals->ve[k] = tmp; - px_transp(px,k,best_i); - - r_piv = &(A->row[k]); - for ( i = k+1; i < n; i++ ) - { - /* compute and set multiplier */ - tmp = col_vals->ve[i]/col_vals->ve[k]; - if ( tmp != 0.0 ) - sp_set_val(A,i,k,tmp); - else - continue; - - /* perform row operations */ - merge->len = 0; - r = &(A->row[i]); - sprow_mltadd(r,r_piv,-tmp,k+1,merge,TYPE_SPROW); - idx = sprow_idx(r,k+1); - if ( idx < 0 ) - idx = -(idx+2); - /* see if r needs expanding */ - if ( r->maxlen < idx + merge->len ) - sprow_xpd(r,idx+merge->len,TYPE_SPMAT); - r->len = idx+merge->len; - MEM_COPY((char *)(merge->elt),(char *)&(r->elt[idx]), - merge->len*sizeof(row_elt)); - } - } - - return A; -} - -/* spLUsolve -- solve A.x = b using factored matrix A from spLUfactor() - -- returns x - -- may not be in-situ */ -VEC *spLUsolve(A,pivot,b,x) -SPMAT *A; -PERM *pivot; -VEC *b, *x; -{ - int i, idx, len, lim; - Real sum, *x_ve; - SPROW *r; - 
row_elt *elt; - - if ( ! A || ! b ) - error(E_NULL,"spLUsolve"); - if ( (pivot != PNULL && A->m != pivot->size) || A->m != b->dim ) - error(E_SIZES,"spLUsolve"); - if ( ! x || x->dim != A->n ) - x = v_resize(x,A->n); - - if ( pivot != PNULL ) - x = px_vec(pivot,b,x); - else - x = v_copy(b,x); - - x_ve = x->ve; - lim = min(A->m,A->n); - for ( i = 0; i < lim; i++ ) - { - sum = x_ve[i]; - r = &(A->row[i]); - len = r->len; - elt = r->elt; - for ( idx = 0; idx < len && elt->col < i; idx++, elt++ ) - sum -= elt->val*x_ve[elt->col]; - x_ve[i] = sum; - } - - for ( i = lim-1; i >= 0; i-- ) - { - sum = x_ve[i]; - r = &(A->row[i]); - len = r->len; - elt = &(r->elt[len-1]); - for ( idx = len-1; idx >= 0 && elt->col > i; idx--, elt-- ) - sum -= elt->val*x_ve[elt->col]; - if ( idx < 0 || elt->col != i || elt->val == 0.0 ) - error(E_SING,"spLUsolve"); - x_ve[i] = sum/elt->val; - } - - return x; -} - -/* spLUTsolve -- solve A.x = b using factored matrix A from spLUfactor() - -- returns x - -- may not be in-situ */ -VEC *spLUTsolve(A,pivot,b,x) -SPMAT *A; -PERM *pivot; -VEC *b, *x; -{ - int i, idx, lim, rownum; - Real sum, *tmp_ve; - /* SPROW *r; */ - row_elt *elt; - static VEC *tmp=VNULL; - - if ( ! A || ! b ) - error(E_NULL,"spLUTsolve"); - if ( (pivot != PNULL && A->m != pivot->size) || A->m != b->dim ) - error(E_SIZES,"spLUTsolve"); - tmp = v_copy(b,tmp); - MEM_STAT_REG(tmp,TYPE_VEC); - - if ( ! A->flag_col ) - sp_col_access(A); - if ( ! A->flag_diag ) - sp_diag_access(A); - - lim = min(A->m,A->n); - tmp_ve = tmp->ve; - /* solve U^T.tmp = b */ - for ( i = 0; i < lim; i++ ) - { - sum = tmp_ve[i]; - rownum = A->start_row[i]; - idx = A->start_idx[i]; - if ( rownum < 0 || idx < 0 ) - error(E_SING,"spLUTsolve"); - while ( rownum < i && rownum >= 0 && idx >= 0 ) - { - elt = &(A->row[rownum].elt[idx]); - sum -= elt->val*tmp_ve[rownum]; - rownum = elt->nxt_row; - idx = elt->nxt_idx; - } - if ( rownum != i ) - error(E_SING,"spLUTsolve"); - elt = &(A->row[rownum].elt[idx]); - if ( elt->val == 0.0 ) - error(E_SING,"spLUTsolve"); - tmp_ve[i] = sum/elt->val; - } - - /* now solve L^T.tmp = (old) tmp */ - for ( i = lim-1; i >= 0; i-- ) - { - sum = tmp_ve[i]; - rownum = i; - idx = A->row[rownum].diag; - if ( idx < 0 ) - error(E_NULL,"spLUTsolve"); - elt = &(A->row[rownum].elt[idx]); - rownum = elt->nxt_row; - idx = elt->nxt_idx; - while ( rownum < lim && rownum >= 0 && idx >= 0 ) - { - elt = &(A->row[rownum].elt[idx]); - sum -= elt->val*tmp_ve[rownum]; - rownum = elt->nxt_row; - idx = elt->nxt_idx; - } - tmp_ve[i] = sum; - } - - if ( pivot != PNULL ) - x = pxinv_vec(pivot,tmp,x); - else - x = v_copy(tmp,x); - - return x; -} - -/* spILUfactor -- sparse modified incomplete LU factorisation with - no pivoting - -- all pivot entries are ensured to be >= alpha in magnitude - -- setting alpha = 0 gives incomplete LU factorisation - -- no fill-in is generated - -- in situ factorisation */ -SPMAT *spILUfactor(A,alpha) -SPMAT *A; -double alpha; -{ - int i, k, idx, idx_piv, m, n, old_idx, old_idx_piv; - SPROW *r, *r_piv; - Real piv_val, tmp; - - /* printf("spILUfactor: entered\n"); */ - if ( ! 
A ) - error(E_NULL,"spILUfactor"); - if ( alpha < 0.0 ) - error(E_RANGE,"[alpha] in spILUfactor"); - - m = A->m; n = A->n; - sp_diag_access(A); - sp_col_access(A); - - for ( k = 0; k < n; k++ ) - { - /* printf("spILUfactor(l.%d): checkpoint A: k = %d\n",__LINE__,k); */ - /* printf("spILUfactor(l.%d): A =\n", __LINE__); */ - /* sp_output(A); */ - r_piv = &(A->row[k]); - idx_piv = r_piv->diag; - if ( idx_piv < 0 ) - { - sprow_set_val(r_piv,k,alpha); - idx_piv = sprow_idx(r_piv,k); - } - /* printf("spILUfactor: checkpoint B\n"); */ - if ( idx_piv < 0 ) - error(E_BOUNDS,"spILUfactor"); - old_idx_piv = idx_piv; - piv_val = r_piv->elt[idx_piv].val; - /* printf("spILUfactor: checkpoint C\n"); */ - if ( fabs(piv_val) < alpha ) - piv_val = ( piv_val < 0.0 ) ? -alpha : alpha; - if ( piv_val == 0.0 ) /* alpha == 0.0 too! */ - error(E_SING,"spILUfactor"); - - /* go to next row with a non-zero in this column */ - i = r_piv->elt[idx_piv].nxt_row; - old_idx = idx = r_piv->elt[idx_piv].nxt_idx; - while ( i >= k ) - { - /* printf("spILUfactor: checkpoint D: i = %d\n",i); */ - /* perform row operations */ - r = &(A->row[i]); - /* idx = sprow_idx(r,k); */ - /* printf("spLUfactor(l.%d) i = %d, idx = %d\n", - __LINE__, i, idx); */ - if ( idx < 0 ) - { - idx = r->elt[old_idx].nxt_idx; - i = r->elt[old_idx].nxt_row; - continue; - } - /* printf("spILUfactor: checkpoint E\n"); */ - /* compute and set multiplier */ - r->elt[idx].val = tmp = r->elt[idx].val/piv_val; - /* printf("spILUfactor: piv_val = %g, multiplier = %g\n", - piv_val, tmp); */ - /* printf("spLUfactor(l.%d) multiplier = %g\n", __LINE__, tmp); */ - if ( tmp == 0.0 ) - { - idx = r->elt[old_idx].nxt_idx; - i = r->elt[old_idx].nxt_row; - continue; - } - /* idx = sprow_idx(r,k+1); */ - /* if ( idx < 0 ) - idx = -(idx+2); */ - idx_piv++; idx++; /* now look beyond the multiplier entry */ - /* printf("spILUfactor: checkpoint F: idx = %d, idx_piv = %d\n", - idx, idx_piv); */ - while ( idx_piv < r_piv->len && idx < r->len ) - { - /* printf("spILUfactor: checkpoint G: idx = %d, idx_piv = %d\n", - idx, idx_piv); */ - if ( r_piv->elt[idx_piv].col < r->elt[idx].col ) - idx_piv++; - else if ( r_piv->elt[idx_piv].col > r->elt[idx].col ) - idx++; - else /* column numbers match */ - { - /* printf("spILUfactor(l.%d) subtract %g times the ", - __LINE__, tmp); */ - /* printf("(%d,%d) entry to the (%d,%d) entry\n", - k, r_piv->elt[idx_piv].col, - i, r->elt[idx].col); */ - r->elt[idx].val -= tmp*r_piv->elt[idx_piv].val; - idx++; idx_piv++; - } - } - - /* bump to next row with a non-zero in column k */ - /* printf("spILUfactor(l.%d) column = %d, row[%d] =\n", - __LINE__, r->elt[old_idx].col, i); */ - /* sprow_foutput(stdout,r); */ - i = r->elt[old_idx].nxt_row; - old_idx = idx = r->elt[old_idx].nxt_idx; - /* printf("spILUfactor(l.%d) i = %d, idx = %d\n", __LINE__, i, idx); */ - /* and restore idx_piv to index of pivot entry */ - idx_piv = old_idx_piv; - } - } - /* printf("spILUfactor: exiting\n"); */ - return A; -} diff --git a/src/mesch/sprow.c b/src/mesch/sprow.c deleted file mode 100755 index 5fbebb9f52..0000000000 --- a/src/mesch/sprow.c +++ /dev/null @@ -1,715 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. 
-** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - Sparse rows package - See also: sparse.h, matrix.h - */ - -#include -#include -#include -#include "sparse.h" - - -static char rcsid[] = "sprow.c,v 1.1 1997/12/04 17:55:53 hines Exp"; - -#define MINROWLEN 10 - - -/* sprow_dump - prints relevant information about the sparse row r */ - -void sprow_dump(fp,r) -FILE *fp; -SPROW *r; -{ - int j_idx; - row_elt *elts; - - fprintf(fp,"SparseRow dump:\n"); - if ( ! r ) - { fprintf(fp,"*** NULL row ***\n"); return; } - - fprintf(fp,"row: len = %d, maxlen = %d, diag idx = %d\n", - r->len,r->maxlen,r->diag); - fprintf(fp,"element list @ 0x%p\n",(r->elt)); - if ( ! r->elt ) - { - fprintf(fp,"*** NULL element list ***\n"); - return; - } - elts = r->elt; - for ( j_idx = 0; j_idx < r->len; j_idx++, elts++ ) - fprintf(fp,"Col: %d, Val: %g, nxt_row = %d, nxt_idx = %d\n", - elts->col,elts->val,elts->nxt_row,elts->nxt_idx); - fprintf(fp,"\n"); -} - - -/* sprow_idx -- get index into row for a given column in a given row - -- return -1 on error - -- return -(idx+2) where idx is index to insertion point */ -int sprow_idx(r,col) -SPROW *r; -int col; -{ - register int lo, hi, mid; - int tmp; - register row_elt *r_elt; - - /******************************************* - if ( r == (SPROW *)NULL ) - return -1; - if ( col < 0 ) - return -1; - *******************************************/ - - r_elt = r->elt; - if ( r->len <= 0 ) - return -2; - - /* try the hint */ - /* if ( hint >= 0 && hint < r->len && r_elt[hint].col == col ) - return hint; */ - - /* otherwise use binary search... */ - /* code from K&R Ch. 6, p. 125 */ - lo = 0; hi = r->len - 1; mid = lo; - while ( lo <= hi ) - { - mid = (hi + lo)/2; - if ( (tmp=r_elt[mid].col-col) > 0 ) - hi = mid-1; - else if ( tmp < 0 ) - lo = mid+1; - else /* tmp == 0 */ - return mid; - } - tmp = r_elt[mid].col - col; - - if ( tmp > 0 ) - return -(mid+2); /* insert at mid */ - else /* tmp < 0 */ - return -(mid+3); /* insert at mid+1 */ -} - - -/* sprow_get -- gets, initialises and returns a SPROW structure - -- max. length is maxlen */ -SPROW *sprow_get(maxlen) -int maxlen; -{ - SPROW *r; - - if ( maxlen < 0 ) - error(E_NEG,"sprow_get"); - - r = NEW(SPROW); - if ( ! r ) - error(E_MEM,"sprow_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPROW,0,sizeof(SPROW)); - mem_numvar(TYPE_SPROW,1); - } - r->elt = NEW_A(maxlen,row_elt); - if ( ! 
r->elt ) - error(E_MEM,"sprow_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_SPROW,0,maxlen*sizeof(row_elt)); - } - r->len = 0; - r->maxlen = maxlen; - r->diag = -1; - - return r; -} - - -/* sprow_xpd -- expand row by means of realloc() - -- type must be TYPE_SPMAT if r is a row of a SPMAT structure, - otherwise it must be TYPE_SPROW - -- returns r */ -SPROW *sprow_xpd(r,n,type) -SPROW *r; -int n,type; -{ - int newlen; - - if ( ! r ) { - r = NEW(SPROW); - if (! r ) - error(E_MEM,"sprow_xpd"); - else if ( mem_info_is_on()) { - if (type != TYPE_SPMAT && type != TYPE_SPROW) - warning(WARN_WRONG_TYPE,"sprow_xpd"); - mem_bytes(type,0,sizeof(SPROW)); - if (type == TYPE_SPROW) - mem_numvar(type,1); - } - } - - if ( ! r->elt ) - { - r->elt = NEW_A((unsigned)n,row_elt); - if ( ! r->elt ) - error(E_MEM,"sprow_xpd"); - else if (mem_info_is_on()) { - mem_bytes(type,0,n*sizeof(row_elt)); - } - r->len = 0; - r->maxlen = n; - return r; - } - if ( n <= r->len ) - newlen = max(2*r->len + 1,MINROWLEN); - else - newlen = n; - if ( newlen <= r->maxlen ) - { - MEM_ZERO((char *)(&(r->elt[r->len])), - (newlen-r->len)*sizeof(row_elt)); - r->len = newlen; - } - else - { - if (mem_info_is_on()) { - mem_bytes(type,r->maxlen*sizeof(row_elt), - newlen*sizeof(row_elt)); - } - r->elt = RENEW(r->elt,newlen,row_elt); - if ( ! r->elt ) - error(E_MEM,"sprow_xpd"); - r->maxlen = newlen; - r->len = newlen; - } - - return r; -} - -/* sprow_resize -- resize a SPROW variable by means of realloc() - -- n is a new size - -- returns r */ -SPROW *sprow_resize(r,n,type) -SPROW *r; -int n,type; -{ - if (n < 0) - error(E_NEG,"sprow_resize"); - - if ( ! r ) - return sprow_get(n); - - if (n == r->len) - return r; - - if ( ! r->elt ) - { - r->elt = NEW_A((unsigned)n,row_elt); - if ( ! r->elt ) - error(E_MEM,"sprow_resize"); - else if (mem_info_is_on()) { - mem_bytes(type,0,n*sizeof(row_elt)); - } - r->maxlen = r->len = n; - return r; - } - - if ( n <= r->maxlen ) - r->len = n; - else - { - if (mem_info_is_on()) { - mem_bytes(type,r->maxlen*sizeof(row_elt), - n*sizeof(row_elt)); - } - r->elt = RENEW(r->elt,n,row_elt); - if ( ! r->elt ) - error(E_MEM,"sprow_resize"); - r->maxlen = r->len = n; - } - - return r; -} - - -/* release a row of a matrix */ -int sprow_free(r) -SPROW *r; -{ - if ( ! r ) - return -1; - - if (mem_info_is_on()) { - mem_bytes(TYPE_SPROW,sizeof(SPROW),0); - mem_numvar(TYPE_SPROW,-1); - } - - if ( r->elt ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_SPROW,r->maxlen*sizeof(row_elt),0); - } - free((char *)r->elt); - } - free((char *)r); - return 0; -} - - -/* sprow_merge -- merges r1 and r2 into r_out - -- cannot be done in-situ - -- type must be SPMAT or SPROW depending on - whether r_out is a row of a SPMAT structure - or a SPROW variable - -- returns r_out */ -SPROW *sprow_merge(r1,r2,r_out,type) -SPROW *r1, *r2, *r_out; -int type; -{ - int idx1, idx2, idx_out, len1, len2, len_out; - row_elt *elt1, *elt2, *elt_out; - - if ( ! r1 || ! r2 ) - error(E_NULL,"sprow_merge"); - if ( ! 
r_out ) - r_out = sprow_get(MINROWLEN); - if ( r1 == r_out || r2 == r_out ) - error(E_INSITU,"sprow_merge"); - - /* Initialise */ - len1 = r1->len; len2 = r2->len; len_out = r_out->maxlen; - idx1 = idx2 = idx_out = 0; - elt1 = r1->elt; elt2 = r2->elt; elt_out = r_out->elt; - - while ( idx1 < len1 || idx2 < len2 ) - { - if ( idx_out >= len_out ) - { /* r_out is too small */ - r_out->len = idx_out; - r_out = sprow_xpd(r_out,0,type); - len_out = r_out->len; - elt_out = &(r_out->elt[idx_out]); - } - if ( idx2 >= len2 || (idx1 < len1 && elt1->col <= elt2->col) ) - { - elt_out->col = elt1->col; - elt_out->val = elt1->val; - if ( elt1->col == elt2->col && idx2 < len2 ) - { elt2++; idx2++; } - elt1++; idx1++; - } - else - { - elt_out->col = elt2->col; - elt_out->val = elt2->val; - elt2++; idx2++; - } - elt_out++; idx_out++; - } - r_out->len = idx_out; - - return r_out; -} - -/* sprow_copy -- copies r1 and r2 into r_out - -- cannot be done in-situ - -- type must be SPMAT or SPROW depending on - whether r_out is a row of a SPMAT structure - or a SPROW variable - -- returns r_out */ -SPROW *sprow_copy(r1,r2,r_out,type) -SPROW *r1, *r2, *r_out; -int type; -{ - int idx1, idx2, idx_out, len1, len2, len_out; - row_elt *elt1, *elt2, *elt_out; - - if ( ! r1 || ! r2 ) - error(E_NULL,"sprow_copy"); - if ( ! r_out ) - r_out = sprow_get(MINROWLEN); - if ( r1 == r_out || r2 == r_out ) - error(E_INSITU,"sprow_copy"); - - /* Initialise */ - len1 = r1->len; len2 = r2->len; len_out = r_out->maxlen; - idx1 = idx2 = idx_out = 0; - elt1 = r1->elt; elt2 = r2->elt; elt_out = r_out->elt; - - while ( idx1 < len1 || idx2 < len2 ) - { - while ( idx_out >= len_out ) - { /* r_out is too small */ - r_out->len = idx_out; - r_out = sprow_xpd(r_out,0,type); - len_out = r_out->maxlen; - elt_out = &(r_out->elt[idx_out]); - } - if ( idx2 >= len2 || (idx1 < len1 && elt1->col <= elt2->col) ) - { - elt_out->col = elt1->col; - elt_out->val = elt1->val; - if ( elt1->col == elt2->col && idx2 < len2 ) - { elt2++; idx2++; } - elt1++; idx1++; - } - else - { - elt_out->col = elt2->col; - elt_out->val = 0.0; - elt2++; idx2++; - } - elt_out++; idx_out++; - } - r_out->len = idx_out; - - return r_out; -} - -/* sprow_mltadd -- sets r_out <- r1 + alpha.r2 - -- cannot be in situ - -- only for columns j0, j0+1, ... - -- type must be SPMAT or SPROW depending on - whether r_out is a row of a SPMAT structure - or a SPROW variable - -- returns r_out */ -SPROW *sprow_mltadd(r1,r2,alpha,j0,r_out,type) -SPROW *r1, *r2, *r_out; -double alpha; -int j0, type; -{ - int idx1, idx2, idx_out, len1, len2, len_out; - row_elt *elt1, *elt2, *elt_out; - - if ( ! r1 || ! r2 ) - error(E_NULL,"sprow_mltadd"); - if ( r1 == r_out || r2 == r_out ) - error(E_INSITU,"sprow_mltadd"); - if ( j0 < 0 ) - error(E_BOUNDS,"sprow_mltadd"); - if ( ! r_out ) - r_out = sprow_get(MINROWLEN); - - /* Initialise */ - len1 = r1->len; len2 = r2->len; len_out = r_out->maxlen; - /* idx1 = idx2 = idx_out = 0; */ - idx1 = sprow_idx(r1,j0); - idx2 = sprow_idx(r2,j0); - idx_out = sprow_idx(r_out,j0); - idx1 = (idx1 < 0) ? -(idx1+2) : idx1; - idx2 = (idx2 < 0) ? -(idx2+2) : idx2; - idx_out = (idx_out < 0) ? 
-(idx_out+2) : idx_out; - elt1 = &(r1->elt[idx1]); - elt2 = &(r2->elt[idx2]); - elt_out = &(r_out->elt[idx_out]); - - while ( idx1 < len1 || idx2 < len2 ) - { - if ( idx_out >= len_out ) - { /* r_out is too small */ - r_out->len = idx_out; - r_out = sprow_xpd(r_out,0,type); - len_out = r_out->maxlen; - elt_out = &(r_out->elt[idx_out]); - } - if ( idx2 >= len2 || (idx1 < len1 && elt1->col <= elt2->col) ) - { - elt_out->col = elt1->col; - elt_out->val = elt1->val; - if ( idx2 < len2 && elt1->col == elt2->col ) - { - elt_out->val += alpha*elt2->val; - elt2++; idx2++; - } - elt1++; idx1++; - } - else - { - elt_out->col = elt2->col; - elt_out->val = alpha*elt2->val; - elt2++; idx2++; - } - elt_out++; idx_out++; - } - r_out->len = idx_out; - - return r_out; -} - -/* sprow_add -- sets r_out <- r1 + r2 - -- cannot be in situ - -- only for columns j0, j0+1, ... - -- type must be SPMAT or SPROW depending on - whether r_out is a row of a SPMAT structure - or a SPROW variable - -- returns r_out */ -SPROW *sprow_add(r1,r2,j0,r_out,type) -SPROW *r1, *r2, *r_out; -int j0, type; -{ - int idx1, idx2, idx_out, len1, len2, len_out; - row_elt *elt1, *elt2, *elt_out; - - if ( ! r1 || ! r2 ) - error(E_NULL,"sprow_add"); - if ( r1 == r_out || r2 == r_out ) - error(E_INSITU,"sprow_add"); - if ( j0 < 0 ) - error(E_BOUNDS,"sprow_add"); - if ( ! r_out ) - r_out = sprow_get(MINROWLEN); - - /* Initialise */ - len1 = r1->len; len2 = r2->len; len_out = r_out->maxlen; - /* idx1 = idx2 = idx_out = 0; */ - idx1 = sprow_idx(r1,j0); - idx2 = sprow_idx(r2,j0); - idx_out = sprow_idx(r_out,j0); - idx1 = (idx1 < 0) ? -(idx1+2) : idx1; - idx2 = (idx2 < 0) ? -(idx2+2) : idx2; - idx_out = (idx_out < 0) ? -(idx_out+2) : idx_out; - elt1 = &(r1->elt[idx1]); - elt2 = &(r2->elt[idx2]); - elt_out = &(r_out->elt[idx_out]); - - while ( idx1 < len1 || idx2 < len2 ) - { - if ( idx_out >= len_out ) - { /* r_out is too small */ - r_out->len = idx_out; - r_out = sprow_xpd(r_out,0,type); - len_out = r_out->maxlen; - elt_out = &(r_out->elt[idx_out]); - } - if ( idx2 >= len2 || (idx1 < len1 && elt1->col <= elt2->col) ) - { - elt_out->col = elt1->col; - elt_out->val = elt1->val; - if ( idx2 < len2 && elt1->col == elt2->col ) - { - elt_out->val += elt2->val; - elt2++; idx2++; - } - elt1++; idx1++; - } - else - { - elt_out->col = elt2->col; - elt_out->val = elt2->val; - elt2++; idx2++; - } - elt_out++; idx_out++; - } - r_out->len = idx_out; - - return r_out; -} - -/* sprow_sub -- sets r_out <- r1 - r2 - -- cannot be in situ - -- only for columns j0, j0+1, ... - -- type must be SPMAT or SPROW depending on - whether r_out is a row of a SPMAT structure - or a SPROW variable - -- returns r_out */ -SPROW *sprow_sub(r1,r2,j0,r_out,type) -SPROW *r1, *r2, *r_out; -int j0, type; -{ - int idx1, idx2, idx_out, len1, len2, len_out; - row_elt *elt1, *elt2, *elt_out; - - if ( ! r1 || ! r2 ) - error(E_NULL,"sprow_sub"); - if ( r1 == r_out || r2 == r_out ) - error(E_INSITU,"sprow_sub"); - if ( j0 < 0 ) - error(E_BOUNDS,"sprow_sub"); - if ( ! r_out ) - r_out = sprow_get(MINROWLEN); - - /* Initialise */ - len1 = r1->len; len2 = r2->len; len_out = r_out->maxlen; - /* idx1 = idx2 = idx_out = 0; */ - idx1 = sprow_idx(r1,j0); - idx2 = sprow_idx(r2,j0); - idx_out = sprow_idx(r_out,j0); - idx1 = (idx1 < 0) ? -(idx1+2) : idx1; - idx2 = (idx2 < 0) ? -(idx2+2) : idx2; - idx_out = (idx_out < 0) ? 
-(idx_out+2) : idx_out; - elt1 = &(r1->elt[idx1]); - elt2 = &(r2->elt[idx2]); - elt_out = &(r_out->elt[idx_out]); - - while ( idx1 < len1 || idx2 < len2 ) - { - if ( idx_out >= len_out ) - { /* r_out is too small */ - r_out->len = idx_out; - r_out = sprow_xpd(r_out,0,type); - len_out = r_out->maxlen; - elt_out = &(r_out->elt[idx_out]); - } - if ( idx2 >= len2 || (idx1 < len1 && elt1->col <= elt2->col) ) - { - elt_out->col = elt1->col; - elt_out->val = elt1->val; - if ( idx2 < len2 && elt1->col == elt2->col ) - { - elt_out->val -= elt2->val; - elt2++; idx2++; - } - elt1++; idx1++; - } - else - { - elt_out->col = elt2->col; - elt_out->val = -elt2->val; - elt2++; idx2++; - } - elt_out++; idx_out++; - } - r_out->len = idx_out; - - return r_out; -} - - -/* sprow_smlt -- sets r_out <- alpha*r1 - -- can be in situ - -- only for columns j0, j0+1, ... - -- returns r_out */ -SPROW *sprow_smlt(r1,alpha,j0,r_out,type) -SPROW *r1, *r_out; -double alpha; -int j0, type; -{ - int idx1, idx_out, len1; - row_elt *elt1, *elt_out; - - if ( ! r1 ) - error(E_NULL,"sprow_smlt"); - if ( j0 < 0 ) - error(E_BOUNDS,"sprow_smlt"); - if ( ! r_out ) - r_out = sprow_get(MINROWLEN); - - /* Initialise */ - len1 = r1->len; - idx1 = sprow_idx(r1,j0); - idx_out = sprow_idx(r_out,j0); - idx1 = (idx1 < 0) ? -(idx1+2) : idx1; - idx_out = (idx_out < 0) ? -(idx_out+2) : idx_out; - elt1 = &(r1->elt[idx1]); - - r_out = sprow_resize(r_out,idx_out+len1-idx1,type); - elt_out = &(r_out->elt[idx_out]); - - for ( ; idx1 < len1; elt1++,elt_out++,idx1++,idx_out++ ) - { - elt_out->col = elt1->col; - elt_out->val = alpha*elt1->val; - } - - r_out->len = idx_out; - - return r_out; -} - - -/* sprow_foutput -- print a representation of r on stream fp */ -void sprow_foutput(fp,r) -FILE *fp; -SPROW *r; -{ - int i, len; - row_elt *e; - - if ( ! r ) - { - fprintf(fp,"SparseRow: **** NULL ****\n"); - return; - } - len = r->len; - fprintf(fp,"SparseRow: length: %d\n",len); - for ( i = 0, e = r->elt; i < len; i++, e++ ) - fprintf(fp,"Column %d: %g, next row: %d, next index %d\n", - e->col, e->val, e->nxt_row, e->nxt_idx); -} - - -/* sprow_set_val -- sets the j-th column entry of the sparse row r - -- Note: destroys the usual column & row access paths */ -double sprow_set_val(r,j,val) -SPROW *r; -int j; -double val; -{ - int idx, idx2, new_len; - - if ( ! r ) - error(E_NULL,"sprow_set_val"); - - idx = sprow_idx(r,j); - if ( idx >= 0 ) - { r->elt[idx].val = val; return val; } - /* else */ if ( idx < -1 ) - { - /* shift & insert new value */ - idx = -(idx+2); /* this is the intended insertion index */ - if ( r->len >= r->maxlen ) - { - r->len = r->maxlen; - new_len = max(2*r->maxlen+1,5); - if (mem_info_is_on()) { - mem_bytes(TYPE_SPROW,r->maxlen*sizeof(row_elt), - new_len*sizeof(row_elt)); - } - - r->elt = RENEW(r->elt,new_len,row_elt); - if ( ! r->elt ) /* can't allocate */ - error(E_MEM,"sprow_set_val"); - r->maxlen = 2*r->maxlen+1; - } - for ( idx2 = r->len-1; idx2 >= idx; idx2-- ) - MEM_COPY((char *)(&(r->elt[idx2])), - (char *)(&(r->elt[idx2+1])),sizeof(row_elt)); - /************************************************************ - if ( idx < r->len ) - MEM_COPY((char *)(&(r->elt[idx])),(char *)(&(r->elt[idx+1])), - (r->len-idx)*sizeof(row_elt)); - ************************************************************/ - r->len++; - r->elt[idx].col = j; - r->elt[idx].nxt_row = -1; - r->elt[idx].nxt_idx = -1; - return r->elt[idx].val = val; - } - /* else -- idx == -1, error in index/matrix! 
*/ - return 0.0; -} - - diff --git a/src/mesch/spswap.c b/src/mesch/spswap.c deleted file mode 100755 index ecf6025580..0000000000 --- a/src/mesch/spswap.c +++ /dev/null @@ -1,303 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Sparse matrix swap and permutation routines - Modified Mon 09th Nov 1992, 08:50:54 PM - to use Karen George's suggestion to use unordered rows -*/ - -static char rcsid[] = "spswap.c,v 1.1 1997/12/04 17:55:54 hines Exp"; - -#include <stdio.h> -#include "sparse2.h" -#include <math.h> - - -#define btos(x) ((x) ? "TRUE" : "FALSE") - -/* scan_to -- updates scan (int) vectors to point to the last row in each - column with row # <= max_row, if any */ -void scan_to(A, scan_row, scan_idx, col_list, max_row) -SPMAT *A; -IVEC *scan_row, *scan_idx, *col_list; -int max_row; -{ - int col, idx, j_idx, row_num; - SPROW *r; - row_elt *e; - - if ( ! A || ! scan_row || ! scan_idx || ! col_list ) - error(E_NULL,"scan_to"); - if ( scan_row->dim != scan_idx->dim || scan_idx->dim != col_list->dim ) - error(E_SIZES,"scan_to"); - - if ( max_row < 0 ) - return; - - if ( !
A->flag_col ) - sp_col_access(A); - - for ( j_idx = 0; j_idx < scan_row->dim; j_idx++ ) - { - row_num = scan_row->ive[j_idx]; - idx = scan_idx->ive[j_idx]; - col = col_list->ive[j_idx]; - - if ( col < 0 || col >= A->n ) - error(E_BOUNDS,"scan_to"); - if ( row_num < 0 ) - { - idx = col; - continue; - } - r = &(A->row[row_num]); - if ( idx < 0 ) - error(E_INTERN,"scan_to"); - e = &(r->elt[idx]); - if ( e->col != col ) - error(E_INTERN,"scan_to"); - if ( idx < 0 ) - { - printf("scan_to: row_num = %d, idx = %d, col = %d\n", - row_num, idx, col); - error(E_INTERN,"scan_to"); - } - /* if ( e->nxt_row <= max_row ) - chase_col(A, col, &row_num, &idx, max_row); */ - while ( e->nxt_row >= 0 && e->nxt_row <= max_row ) - { - row_num = e->nxt_row; - idx = e->nxt_idx; - e = &(A->row[row_num].elt[idx]); - } - - /* printf("scan_to: computed j_idx = %d, row_num = %d, idx = %d\n", - j_idx, row_num, idx); */ - scan_row->ive[j_idx] = row_num; - scan_idx->ive[j_idx] = idx; - } -} - -/* patch_col -- patches column access paths for fill-in */ -void patch_col(A, col, old_row, old_idx, row_num, idx) -SPMAT *A; -int col, old_row, old_idx, row_num, idx; -{ - SPROW *r; - row_elt *e; - - if ( old_row >= 0 ) - { - r = &(A->row[old_row]); - old_idx = sprow_idx2(r,col,old_idx); - e = &(r->elt[old_idx]); - e->nxt_row = row_num; - e->nxt_idx = idx; - } - else - { - A->start_row[col] = row_num; - A->start_idx[col] = idx; - } -} - -/* chase_col -- chases column access path in column col, starting with - row_num and idx, to find last row # in this column <= max_row - -- row_num is returned; idx is also set by this routine - -- assumes that the column access paths (possibly without the - nxt_idx fields) are set up */ -row_elt *chase_col(A, col, row_num, idx, max_row) -SPMAT *A; -int col, *row_num, *idx, max_row; -{ - int old_idx, old_row, tmp_idx, tmp_row; - SPROW *r; - row_elt *e=0; - - if ( col < 0 || col >= A->n ) - error(E_BOUNDS,"chase_col"); - tmp_row = *row_num; - if ( tmp_row < 0 ) - { - if ( A->start_row[col] > max_row ) - { - tmp_row = -1; - tmp_idx = col; - return (row_elt *)NULL; - } - else - { - tmp_row = A->start_row[col]; - tmp_idx = A->start_idx[col]; - } - } - else - tmp_idx = *idx; - - old_row = tmp_row; - old_idx = tmp_idx; - while ( tmp_row >= 0 && tmp_row < max_row ) - { - r = &(A->row[tmp_row]); - /* tmp_idx = sprow_idx2(r,col,tmp_idx); */ - if ( tmp_idx < 0 || tmp_idx >= r->len || - r->elt[tmp_idx].col != col ) - { -#ifdef DEBUG - printf("chase_col:error: col = %d, row # = %d, idx = %d\n", - col, tmp_row, tmp_idx); - printf("chase_col:error: old_row = %d, old_idx = %d\n", - old_row, old_idx); - printf("chase_col:error: A =\n"); - sp_dump(stdout,A); -#endif - error(E_INTERN,"chase_col"); - } - e = &(r->elt[tmp_idx]); - old_row = tmp_row; - old_idx = tmp_idx; - tmp_row = e->nxt_row; - tmp_idx = e->nxt_idx; - } - if ( old_row > max_row ) - { - old_row = -1; - old_idx = col; - e = (row_elt *)NULL; - } - else if ( tmp_row <= max_row && tmp_row >= 0 ) - { - old_row = tmp_row; - old_idx = tmp_idx; - } - - *row_num = old_row; - if ( old_row >= 0 ) - *idx = old_idx; - else - *idx = col; - - return e; -} - -/* chase_past -- as for chase_col except that we want the first - row whose row # >= min_row; -1 indicates no such row */ -row_elt *chase_past(A, col, row_num, idx, min_row) -SPMAT *A; -int col, *row_num, *idx, min_row; -{ - SPROW *r; - row_elt *e; - int tmp_idx, tmp_row; - - tmp_row = *row_num; - tmp_idx = *idx; - chase_col(A,col,&tmp_row,&tmp_idx,min_row); - if ( tmp_row < 0 ) /* use A->start_row[..] etc. 
*/ - { - if ( A->start_row[col] < 0 ) - tmp_row = -1; - else - { - tmp_row = A->start_row[col]; - tmp_idx = A->start_idx[col]; - } - } - else if ( tmp_row < min_row ) - { - r = &(A->row[tmp_row]); - if ( tmp_idx < 0 || tmp_idx >= r->len || - r->elt[tmp_idx].col != col ) - error(E_INTERN,"chase_past"); - tmp_row = r->elt[tmp_idx].nxt_row; - tmp_idx = r->elt[tmp_idx].nxt_idx; - } - - *row_num = tmp_row; - *idx = tmp_idx; - if ( tmp_row < 0 ) - e = (row_elt *)NULL; - else - { - if ( tmp_idx < 0 || tmp_idx >= A->row[tmp_row].len || - A->row[tmp_row].elt[tmp_idx].col != col ) - error(E_INTERN,"bump_col"); - e = &(A->row[tmp_row].elt[tmp_idx]); - } - - return e; -} - -/* bump_col -- move along to next nonzero entry in column col after row_num - -- update row_num and idx */ -row_elt *bump_col(A, col, row_num, idx) -SPMAT *A; -int col, *row_num, *idx; -{ - SPROW *r; - row_elt *e; - int tmp_row, tmp_idx; - - tmp_row = *row_num; - tmp_idx = *idx; - /* printf("bump_col: col = %d, row# = %d, idx = %d\n", - col, *row_num, *idx); */ - if ( tmp_row < 0 ) - { - tmp_row = A->start_row[col]; - tmp_idx = A->start_idx[col]; - } - else - { - r = &(A->row[tmp_row]); - if ( tmp_idx < 0 || tmp_idx >= r->len || - r->elt[tmp_idx].col != col ) - error(E_INTERN,"bump_col"); - e = &(r->elt[tmp_idx]); - tmp_row = e->nxt_row; - tmp_idx = e->nxt_idx; - } - if ( tmp_row < 0 ) - { - e = (row_elt *)NULL; - tmp_idx = col; - } - else - { - if ( tmp_idx < 0 || tmp_idx >= A->row[tmp_row].len || - A->row[tmp_row].elt[tmp_idx].col != col ) - error(E_INTERN,"bump_col"); - e = &(A->row[tmp_row].elt[tmp_idx]); - } - *row_num = tmp_row; - *idx = tmp_idx; - - return e; -} - - diff --git a/src/mesch/submat.c b/src/mesch/submat.c deleted file mode 100755 index 9e75e87882..0000000000 --- a/src/mesch/submat.c +++ /dev/null @@ -1,179 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* 1.2 submat.c 11/25/87 */ - -#include <stdio.h> -#include "matrix.h" - -static char rcsid[] = "submat.c,v 1.1 1997/12/04 17:55:55 hines Exp"; - - -/* get_col -- gets a specified column of a matrix and retruns it as a vector */ -VEC *get_col(mat,col,vec) -u_int col; -MAT *mat; -VEC *vec; -{ - u_int i; - - if ( mat==(MAT *)NULL ) - error(E_NULL,"get_col"); - if ( col >= mat->n ) - error(E_RANGE,"get_col"); - if ( vec==(VEC *)NULL || vec->dim<mat->m ) - vec = v_resize(vec,mat->m); - - for ( i=0; i<mat->m; i++ ) - vec->ve[i] = mat->me[i][col]; - - return (vec); -} - -/* get_row -- gets a specified row of a matrix and retruns it as a vector */ -VEC *get_row(mat,row,vec) -u_int row; -MAT *mat; -VEC *vec; -{ - u_int i; - - if ( mat==(MAT *)NULL ) - error(E_NULL,"get_row"); - if ( row >= mat->m ) - error(E_RANGE,"get_row"); - if ( vec==(VEC *)NULL || vec->dim<mat->n ) - vec = v_resize(vec,mat->n); - - for ( i=0; i<mat->n; i++ ) - vec->ve[i] = mat->me[row][i]; - - return (vec); -} - -/* _set_col -- sets column of matrix to values given in vec (in situ) */ -MAT *_set_col(mat,col,vec,i0) -MAT *mat; -VEC *vec; -u_int col,i0; -{ - u_int i,lim; - - if ( mat==(MAT *)NULL || vec==(VEC *)NULL ) - error(E_NULL,"_set_col"); - if ( col >= mat->n ) - error(E_RANGE,"_set_col"); - lim = min(mat->m,vec->dim); - for ( i=i0; i<lim; i++ ) - mat->me[i][col] = vec->ve[i]; - - return (mat); -} - -/* _set_row -- sets row of matrix to values given in vec (in situ) */ -MAT *_set_row(mat,row,vec,j0) -MAT *mat; -VEC *vec; -u_int row,j0; -{ - u_int j,lim; - - if ( mat==(MAT *)NULL || vec==(VEC *)NULL ) - error(E_NULL,"_set_row"); - if ( row >= mat->m ) - error(E_RANGE,"_set_row"); - lim = min(mat->n,vec->dim); - for ( j=j0; j<lim; j++ ) - mat->me[row][j] = vec->ve[j]; - - return (mat); -} - -/* sub_mat -- returns sub-matrix of old which is formed by the rectangle - from (row1,col1) to (row2,col2) - -- Note: storage is shared so that altering the "new" - matrix will alter the "old" matrix */ -MAT *sub_mat(old,row1,col1,row2,col2,new) -MAT *old,*new; -u_int row1,col1,row2,col2; -{ - u_int i; - - if ( old==(MAT *)NULL ) - error(E_NULL,"sub_mat"); - if ( row1 > row2 || col1 > col2 || row2 >= old->m || col2 >= old->n ) - error(E_RANGE,"sub_mat"); - if ( new==(MAT *)NULL || new->m < row2-row1+1 ) - { - new = NEW(MAT); - new->me = NEW_A(row2-row1+1,Real *); - if ( new==(MAT *)NULL || new->me==(Real **)NULL ) - error(E_MEM,"sub_mat"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_MAT,0,sizeof(MAT)+ - (row2-row1+1)*sizeof(Real *)); - } - - } - new->m = row2-row1+1; - - new->n = col2-col1+1; - - new->base = (Real *)NULL; - - for ( i=0; i < new->m; i++ ) - new->me[i] = (old->me[i+row1]) + col1; - - return (new); -} - - -/* sub_vec -- returns sub-vector which is formed by the elements i1 to i2 - -- as for sub_mat, storage is shared */ -VEC *sub_vec(old,i1,i2,new) -VEC *old, *new; -int i1, i2; -{ - if ( old == (VEC *)NULL ) - error(E_NULL,"sub_vec"); - if ( i1 > i2 || old->dim < i2 ) - error(E_RANGE,"sub_vec"); - - if ( new == (VEC *)NULL ) - new = NEW(VEC); - if ( new == (VEC *)NULL ) - error(E_MEM,"sub_vec"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_VEC,0,sizeof(VEC)); - } - - - new->dim = i2 - i1 + 1; - new->ve = &(old->ve[i1]); - - return new; -} diff --git a/src/mesch/svd.c b/src/mesch/svd.c deleted file mode 100755 index 2c7687476c..0000000000 --- a/src/mesch/svd.c +++ /dev/null @@ -1,404 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -**
-** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - File containing routines for computing the SVD of matrices -*/ - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -static char rcsid[] = "svd.c,v 1.1 1997/12/04 17:55:56 hines Exp"; - - - -#define sgn(x) ((x) >= 0 ? 1 : -1) -#define MAX_STACK 100 - -/* fixsvd -- fix minor details about SVD - -- make singular values non-negative - -- sort singular values in decreasing order - -- variables as for bisvd() - -- no argument checking */ -static void fixsvd(d,U,V) -VEC *d; -MAT *U, *V; -{ - int i, j, k, l, r, stack[MAX_STACK], sp; - Real tmp, v; - - /* make singular values non-negative */ - for ( i = 0; i < d->dim; i++ ) - if ( d->ve[i] < 0.0 ) - { - d->ve[i] = - d->ve[i]; - if ( U != MNULL ) - for ( j = 0; j < U->m; j++ ) - U->me[i][j] = - U->me[i][j]; - } - - /* sort singular values */ - /* nonrecursive implementation of quicksort due to R.Sedgewick, - "Algorithms in C", p. 122 (1990) */ - sp = -1; - l = 0; r = d->dim - 1; - for ( ; ; ) - { - while ( r > l ) - { - /* i = partition(d->ve,l,r) */ - v = d->ve[r]; - - i = l - 1; j = r; - for ( ; ; ) - { /* inequalities are "backwards" for **decreasing** order */ - while ( d->ve[++i] > v ) - ; - while ( i < j && d->ve[--j] < v ) - ; - if ( i >= j ) - break; - /* swap entries in d->ve */ - tmp = d->ve[i]; d->ve[i] = d->ve[j]; d->ve[j] = tmp; - /* swap rows of U & V as well */ - if ( U != MNULL ) - for ( k = 0; k < U->n; k++ ) - { - tmp = U->me[i][k]; - U->me[i][k] = U->me[j][k]; - U->me[j][k] = tmp; - } - if ( V != MNULL ) - for ( k = 0; k < V->n; k++ ) - { - tmp = V->me[i][k]; - V->me[i][k] = V->me[j][k]; - V->me[j][k] = tmp; - } - } - tmp = d->ve[i]; d->ve[i] = d->ve[r]; d->ve[r] = tmp; - if ( U != MNULL ) - for ( k = 0; k < U->n; k++ ) - { - tmp = U->me[i][k]; - U->me[i][k] = U->me[r][k]; - U->me[r][k] = tmp; - } - if ( V != MNULL ) - for ( k = 0; k < V->n; k++ ) - { - tmp = V->me[i][k]; - V->me[i][k] = V->me[r][k]; - V->me[r][k] = tmp; - } - /* end i = partition(...) 
*/ - if ( i - l > r - i ) - { stack[++sp] = l; stack[++sp] = i-1; l = i+1; } - else - { stack[++sp] = i+1; stack[++sp] = r; r = i-1; } - } - if ( sp < 0 ) - break; - r = stack[sp--]; l = stack[sp--]; - } -} - - -/* bisvd -- svd of a bidiagonal m x n matrix represented by d (diagonal) and - f (super-diagonals) - -- returns with d set to the singular values, f zeroed - -- if U, V non-NULL, the orthogonal operations are accumulated - in U, V; if U, V == I on entry, then SVD == U^T.A.V - where A is initial matrix - -- returns d on exit */ -VEC *bisvd(d,f,U,V) -VEC *d, *f; -MAT *U, *V; -{ - int i, j, n; - int i_min, i_max, split; - Real c, s, shift, size, z; - Real d_tmp, diff, t11, t12, t22, *d_ve, *f_ve; - - if ( ! d || ! f ) - error(E_NULL,"bisvd"); - if ( d->dim != f->dim + 1 ) - error(E_SIZES,"bisvd"); - n = d->dim; - if ( ( U && U->n < n ) || ( V && V->m < n ) ) - error(E_SIZES,"bisvd"); - if ( ( U && U->m != U->n ) || ( V && V->m != V->n ) ) - error(E_SQUARE,"bisvd"); - - - if ( n == 1 ) - return d; - d_ve = d->ve; f_ve = f->ve; - - size = v_norm_inf(d) + v_norm_inf(f); - - i_min = 0; - while ( i_min < n ) /* outer while loop */ - { - /* find i_max to suit; - submatrix i_min..i_max should be irreducible */ - i_max = n - 1; - for ( i = i_min; i < n - 1; i++ ) - if ( d_ve[i] == 0.0 || f_ve[i] == 0.0 ) - { i_max = i; - if ( f_ve[i] != 0.0 ) - { - /* have to ``chase'' f[i] element out of matrix */ - z = f_ve[i]; f_ve[i] = 0.0; - for ( j = i; j < n-1 && z != 0.0; j++ ) - { - givens(d_ve[j+1],z, &c, &s); - s = -s; - d_ve[j+1] = c*d_ve[j+1] - s*z; - if ( j+1 < n-1 ) - { - z = s*f_ve[j+1]; - f_ve[j+1] = c*f_ve[j+1]; - } - if ( U ) - rot_rows(U,i,j+1,c,s,U); - } - } - break; - } - if ( i_max <= i_min ) - { - i_min = i_max + 1; - continue; - } - /* printf("bisvd: i_min = %d, i_max = %d\n",i_min,i_max); */ - - split = FALSE; - while ( ! split ) - { - /* compute shift */ - t11 = d_ve[i_max-1]*d_ve[i_max-1] + - (i_max > i_min+1 ? 
f_ve[i_max-2]*f_ve[i_max-2] : 0.0); - t12 = d_ve[i_max-1]*f_ve[i_max-1]; - t22 = d_ve[i_max]*d_ve[i_max] + f_ve[i_max-1]*f_ve[i_max-1]; - /* use e-val of [[t11,t12],[t12,t22]] matrix - closest to t22 */ - diff = (t11-t22)/2; - shift = t22 - t12*t12/(diff + - sgn(diff)*sqrt(diff*diff+t12*t12)); - - /* initial Givens' rotation */ - givens(d_ve[i_min]*d_ve[i_min]-shift, - d_ve[i_min]*f_ve[i_min], &c, &s); - - /* do initial Givens' rotations */ - d_tmp = c*d_ve[i_min] + s*f_ve[i_min]; - f_ve[i_min] = c*f_ve[i_min] - s*d_ve[i_min]; - d_ve[i_min] = d_tmp; - z = s*d_ve[i_min+1]; - d_ve[i_min+1] = c*d_ve[i_min+1]; - if ( V ) - rot_rows(V,i_min,i_min+1,c,s,V); - /* 2nd Givens' rotation */ - givens(d_ve[i_min],z, &c, &s); - d_ve[i_min] = c*d_ve[i_min] + s*z; - d_tmp = c*d_ve[i_min+1] - s*f_ve[i_min]; - f_ve[i_min] = s*d_ve[i_min+1] + c*f_ve[i_min]; - d_ve[i_min+1] = d_tmp; - if ( i_min+1 < i_max ) - { - z = s*f_ve[i_min+1]; - f_ve[i_min+1] = c*f_ve[i_min+1]; - } - if ( U ) - rot_rows(U,i_min,i_min+1,c,s,U); - - for ( i = i_min+1; i < i_max; i++ ) - { - /* get Givens' rotation for zeroing z */ - givens(f_ve[i-1],z, &c, &s); - f_ve[i-1] = c*f_ve[i-1] + s*z; - d_tmp = c*d_ve[i] + s*f_ve[i]; - f_ve[i] = c*f_ve[i] - s*d_ve[i]; - d_ve[i] = d_tmp; - z = s*d_ve[i+1]; - d_ve[i+1] = c*d_ve[i+1]; - if ( V ) - rot_rows(V,i,i+1,c,s,V); - /* get 2nd Givens' rotation */ - givens(d_ve[i],z, &c, &s); - d_ve[i] = c*d_ve[i] + s*z; - d_tmp = c*d_ve[i+1] - s*f_ve[i]; - f_ve[i] = c*f_ve[i] + s*d_ve[i+1]; - d_ve[i+1] = d_tmp; - if ( i+1 < i_max ) - { - z = s*f_ve[i+1]; - f_ve[i+1] = c*f_ve[i+1]; - } - if ( U ) - rot_rows(U,i,i+1,c,s,U); - } - /* should matrix be split? */ - for ( i = i_min; i < i_max; i++ ) - if ( fabs(f_ve[i]) < - MACHEPS*(fabs(d_ve[i])+fabs(d_ve[i+1])) ) - { - split = TRUE; - f_ve[i] = 0.0; - } - else if ( fabs(d_ve[i]) < MACHEPS*size ) - { - split = TRUE; - d_ve[i] = 0.0; - } - /* printf("bisvd: d =\n"); v_output(d); */ - /* printf("bisvd: f = \n"); v_output(f); */ - } - } - fixsvd(d,U,V); - - return d; -} - -/* bifactor -- perform preliminary factorisation for bisvd - -- updates U and/or V, which ever is not NULL */ -MAT *bifactor(A,U,V) -MAT *A, *U, *V; -{ - int k; - static VEC *tmp1=VNULL, *tmp2=VNULL; - Real beta; - - if ( ! 
A ) - error(E_NULL,"bifactor"); - if ( ( U && ( U->m != U->n ) ) || ( V && ( V->m != V->n ) ) ) - error(E_SQUARE,"bifactor"); - if ( ( U && U->m != A->m ) || ( V && V->m != A->n ) ) - error(E_SIZES,"bifactor"); - tmp1 = v_resize(tmp1,A->m); - tmp2 = v_resize(tmp2,A->n); - MEM_STAT_REG(tmp1,TYPE_VEC); - MEM_STAT_REG(tmp2,TYPE_VEC); - - if ( A->m >= A->n ) - for ( k = 0; k < A->n; k++ ) - { - get_col(A,k,tmp1); - hhvec(tmp1,k,&beta,tmp1,&(A->me[k][k])); - hhtrcols(A,k,k+1,tmp1,beta); - if ( U ) - hhtrcols(U,k,0,tmp1,beta); - if ( k+1 >= A->n ) - continue; - get_row(A,k,tmp2); - hhvec(tmp2,k+1,&beta,tmp2,&(A->me[k][k+1])); - hhtrrows(A,k+1,k+1,tmp2,beta); - if ( V ) - hhtrcols(V,k+1,0,tmp2,beta); - } - else - for ( k = 0; k < A->m; k++ ) - { - get_row(A,k,tmp2); - hhvec(tmp2,k,&beta,tmp2,&(A->me[k][k])); - hhtrrows(A,k+1,k,tmp2,beta); - if ( V ) - hhtrcols(V,k,0,tmp2,beta); - if ( k+1 >= A->m ) - continue; - get_col(A,k,tmp1); - hhvec(tmp1,k+1,&beta,tmp1,&(A->me[k+1][k])); - hhtrcols(A,k+1,k+1,tmp1,beta); - if ( U ) - hhtrcols(U,k+1,0,tmp1,beta); - } - - return A; -} - -/* svd -- returns vector of singular values in d - -- also updates U and/or V, if one or the other is non-NULL - -- destroys A */ -VEC *svd(A,U,V,d) -MAT *A, *U, *V; -VEC *d; -{ - static VEC *f=VNULL; - int i, limit; - MAT *A_tmp; - - if ( ! A ) - error(E_NULL,"svd"); - if ( ( U && ( U->m != U->n ) ) || ( V && ( V->m != V->n ) ) ) - error(E_SQUARE,"svd"); - if ( ( U && U->m != A->m ) || ( V && V->m != A->n ) ) - error(E_SIZES,"svd"); - - A_tmp = m_copy(A,MNULL); - if ( U != MNULL ) - m_ident(U); - if ( V != MNULL ) - m_ident(V); - limit = min(A_tmp->m,A_tmp->n); - d = v_resize(d,limit); - if (f == VNULL && limit == 1) { /* some calloc's do not allow 0 size */ - f = v_resize(f, limit); - } - f = v_resize(f,limit-1); - MEM_STAT_REG(f,TYPE_VEC); - - bifactor(A_tmp,U,V); - if ( A_tmp->m >= A_tmp->n ) - for ( i = 0; i < limit; i++ ) - { - d->ve[i] = A_tmp->me[i][i]; - if ( i+1 < limit ) - f->ve[i] = A_tmp->me[i][i+1]; - } - else - for ( i = 0; i < limit; i++ ) - { - d->ve[i] = A_tmp->me[i][i]; - if ( i+1 < limit ) - f->ve[i] = A_tmp->me[i+1][i]; - } - - - if ( A_tmp->m >= A_tmp->n ) - bisvd(d,f,U,V); - else - bisvd(d,f,V,U); - - M_FREE(A_tmp); - - return d; -} - diff --git a/src/mesch/symmeig.c b/src/mesch/symmeig.c deleted file mode 100755 index c75a3ea443..0000000000 --- a/src/mesch/symmeig.c +++ /dev/null @@ -1,213 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* - File containing routines for symmetric eigenvalue problems -*/ - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - -static char rcsid[] = "symmeig.c,v 1.1 1997/12/04 17:55:57 hines Exp"; - - - -#define SQRT2 1.4142135623730949 -#define sgn(x) ( (x) >= 0 ? 1 : -1 ) - -/* trieig -- finds eigenvalues of symmetric tridiagonal matrices - -- matrix represented by a pair of vectors a (diag entries) - and b (sub- & super-diag entries) - -- eigenvalues in a on return */ -VEC *trieig(a,b,Q) -VEC *a, *b; -MAT *Q; -{ - int i, i_min, i_max, n, split; - Real *a_ve, *b_ve; - Real b_sqr, bk, ak1, bk1, ak2, bk2, z; - Real c, c2, cs, s, s2, d, mu; - - if ( ! a || ! b ) - error(E_NULL,"trieig"); - if ( a->dim != b->dim + 1 || ( Q && Q->m != a->dim ) ) - error(E_SIZES,"trieig"); - if ( Q && Q->m != Q->n ) - error(E_SQUARE,"trieig"); - - n = a->dim; - a_ve = a->ve; b_ve = b->ve; - - i_min = 0; - while ( i_min < n ) /* outer while loop */ - { - /* find i_max to suit; - submatrix i_min..i_max should be irreducible */ - i_max = n-1; - for ( i = i_min; i < n-1; i++ ) - if ( b_ve[i] == 0.0 ) - { i_max = i; break; } - if ( i_max <= i_min ) - { - /* printf("# i_min = %d, i_max = %d\n",i_min,i_max); */ - i_min = i_max + 1; - continue; /* outer while loop */ - } - - /* printf("# i_min = %d, i_max = %d\n",i_min,i_max); */ - - /* repeatedly perform QR method until matrix splits */ - split = FALSE; - while ( ! split ) /* inner while loop */ - { - - /* find Wilkinson shift */ - d = (a_ve[i_max-1] - a_ve[i_max])/2; - b_sqr = b_ve[i_max-1]*b_ve[i_max-1]; - mu = a_ve[i_max] - b_sqr/(d + sgn(d)*sqrt(d*d+b_sqr)); - /* printf("# Wilkinson shift = %g\n",mu); */ - - /* initial Givens' rotation */ - givens(a_ve[i_min]-mu,b_ve[i_min],&c,&s); - s = -s; - /* printf("# c = %g, s = %g\n",c,s); */ - if ( fabs(c) < SQRT2 ) - { c2 = c*c; s2 = 1-c2; } - else - { s2 = s*s; c2 = 1-s2; } - cs = c*s; - ak1 = c2*a_ve[i_min]+s2*a_ve[i_min+1]-2*cs*b_ve[i_min]; - bk1 = cs*(a_ve[i_min]-a_ve[i_min+1]) + - (c2-s2)*b_ve[i_min]; - ak2 = s2*a_ve[i_min]+c2*a_ve[i_min+1]+2*cs*b_ve[i_min]; - bk2 = ( i_min < i_max-1 ) ? c*b_ve[i_min+1] : 0.0; - z = ( i_min < i_max-1 ) ? -s*b_ve[i_min+1] : 0.0; - a_ve[i_min] = ak1; - a_ve[i_min+1] = ak2; - b_ve[i_min] = bk1; - if ( i_min < i_max-1 ) - b_ve[i_min+1] = bk2; - if ( Q ) - rot_cols(Q,i_min,i_min+1,c,-s,Q); - /* printf("# z = %g\n",z); */ - /* printf("# a [temp1] =\n"); v_output(a); */ - /* printf("# b [temp1] =\n"); v_output(b); */ - - for ( i = i_min+1; i < i_max; i++ ) - { - /* get Givens' rotation for sub-block -- k == i-1 */ - givens(b_ve[i-1],z,&c,&s); - s = -s; - /* printf("# c = %g, s = %g\n",c,s); */ - - /* perform Givens' rotation on sub-block */ - if ( fabs(c) < SQRT2 ) - { c2 = c*c; s2 = 1-c2; } - else - { s2 = s*s; c2 = 1-s2; } - cs = c*s; - bk = c*b_ve[i-1] - s*z; - ak1 = c2*a_ve[i]+s2*a_ve[i+1]-2*cs*b_ve[i]; - bk1 = cs*(a_ve[i]-a_ve[i+1]) + - (c2-s2)*b_ve[i]; - ak2 = s2*a_ve[i]+c2*a_ve[i+1]+2*cs*b_ve[i]; - bk2 = ( i+1 < i_max ) ? c*b_ve[i+1] : 0.0; - z = ( i+1 < i_max ) ? 
-s*b_ve[i+1] : 0.0; - a_ve[i] = ak1; a_ve[i+1] = ak2; - b_ve[i] = bk1; - if ( i < i_max-1 ) - b_ve[i+1] = bk2; - if ( i > i_min ) - b_ve[i-1] = bk; - if ( Q ) - rot_cols(Q,i,i+1,c,-s,Q); - /* printf("# a [temp2] =\n"); v_output(a); */ - /* printf("# b [temp2] =\n"); v_output(b); */ - } - - /* test to see if matrix should be split */ - for ( i = i_min; i < i_max; i++ ) - if ( fabs(b_ve[i]) < MACHEPS* - (fabs(a_ve[i])+fabs(a_ve[i+1])) ) - { b_ve[i] = 0.0; split = TRUE; } - - /* printf("# a =\n"); v_output(a); */ - /* printf("# b =\n"); v_output(b); */ - } - } - - return a; -} - -/* symmeig -- computes eigenvalues of a dense symmetric matrix - -- A **must** be symmetric on entry - -- eigenvalues stored in out - -- Q contains orthogonal matrix of eigenvectors - -- returns vector of eigenvalues */ -VEC *symmeig(A,Q,out) -MAT *A, *Q; -VEC *out; -{ - int i; - static MAT *tmp = MNULL; - static VEC *b = VNULL, *diag = VNULL, *beta = VNULL; - - if ( ! A ) - error(E_NULL,"symmeig"); - if ( A->m != A->n ) - error(E_SQUARE,"symmeig"); - if ( ! out || out->dim != A->m ) - out = v_resize(out,A->m); - - tmp = m_resize(tmp,A->m,A->n); - tmp = m_copy(A,tmp); - b = v_resize(b,A->m - 1); - diag = v_resize(diag,(u_int)A->m); - beta = v_resize(beta,(u_int)A->m); - MEM_STAT_REG(tmp,TYPE_MAT); - MEM_STAT_REG(b,TYPE_VEC); - MEM_STAT_REG(diag,TYPE_VEC); - MEM_STAT_REG(beta,TYPE_VEC); - - Hfactor(tmp,diag,beta); - if ( Q ) - makeHQ(tmp,diag,beta,Q); - - for ( i = 0; i < A->m - 1; i++ ) - { - out->ve[i] = tmp->me[i][i]; - b->ve[i] = tmp->me[i][i+1]; - } - out->ve[i] = tmp->me[i][i]; - trieig(out,b,Q); - - return out; -} - diff --git a/src/mesch/update.c b/src/mesch/update.c deleted file mode 100755 index bd125bc6a6..0000000000 --- a/src/mesch/update.c +++ /dev/null @@ -1,132 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. 
-*/ - -/* update.c 1.3 11/25/87 */ -static char rcsid[] = "update.c,v 1.1 1997/12/04 17:56:01 hines Exp"; - -#include -#include "matrix.h" -#include "matrix2.h" -#include - - - - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* LDLupdate -- updates a CHolesky factorisation, replacing LDL' by - MD~M' = LDL' + alpha.w.w' Note: w is overwritten - Ref: Gill et al Math Comp 28, p516 Algorithm C1 */ -MAT *LDLupdate(CHmat,w,alpha) -MAT *CHmat; -VEC *w; -double alpha; -{ - u_int i,j; - Real diag,new_diag,beta,p; - - if ( CHmat==(MAT *)NULL || w==(VEC *)NULL ) - error(E_NULL,"LDLupdate"); - if ( CHmat->m != CHmat->n || w->dim != CHmat->m ) - error(E_SIZES,"LDLupdate"); - - for ( j=0; j < w->dim; j++ ) - { - p = w->ve[j]; - diag = CHmat->me[j][j]; - new_diag = CHmat->me[j][j] = diag + alpha*p*p; - if ( new_diag <= 0.0 ) - error(E_POSDEF,"LDLupdate"); - beta = p*alpha/new_diag; - alpha *= diag/new_diag; - - for ( i=j+1; i < w->dim; i++ ) - { - w->ve[i] -= p*CHmat->me[i][j]; - CHmat->me[i][j] += beta*w->ve[i]; - CHmat->me[j][i] = CHmat->me[i][j]; - } - } - - return (CHmat); -} - - -/* QRupdate -- updates QR factorisation in expanded form (seperate matrices) - Finds Q+, R+ s.t. Q+.R+ = Q.(R+u.v') and Q+ orthogonal, R+ upper triang - Ref: Golub & van Loan Matrix Computations pp437-443 - -- does not update Q if it is NULL */ -MAT *QRupdate(Q,R,u,v) -MAT *Q,*R; -VEC *u,*v; -{ - int i,j,k; - Real c,s,temp; - - if ( ! R || ! u || ! v ) - error(E_NULL,"QRupdate"); - if ( ( Q && ( Q->m != Q->n || R->m != Q->n ) ) || - u->dim != R->m || v->dim != R->n ) - error(E_SIZES,"QRupdate"); - - /* find largest k s.t. u[k] != 0 */ - for ( k=R->m-1; k>=0; k-- ) - if ( u->ve[k] != 0.0 ) - break; - - /* transform R+u.v' to Hessenberg form */ - for ( i=k-1; i>=0; i-- ) - { - /* get Givens rotation */ - givens(u->ve[i],u->ve[i+1],&c,&s); - rot_rows(R,i,i+1,c,s,R); - if ( Q ) - rot_cols(Q,i,i+1,c,s,Q); - rot_vec(u,i,i+1,c,s,u); - } - - /* add into R */ - temp = u->ve[0]; - for ( j=0; jn; j++ ) - R->me[0][j] += temp*v->ve[j]; - - /* transform Hessenberg to upper triangular */ - for ( i=0; ime[i][i],R->me[i+1][i],&c,&s); - rot_rows(R,i,i+1,c,s,R); - if ( Q ) - rot_cols(Q,i,i+1,c,s,Q); - } - - return R; -} - diff --git a/src/mesch/vecop.c b/src/mesch/vecop.c deleted file mode 100755 index 9be822542b..0000000000 --- a/src/mesch/vecop.c +++ /dev/null @@ -1,606 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* vecop.c 1.3 8/18/87 */ - -#include -#include "matrix.h" - -static char rcsid[] = "vecop.c,v 1.1 1997/12/04 17:56:02 hines Exp"; - - -/* _in_prod -- inner product of two vectors from i0 downwards */ -double _in_prod(a,b,i0) -VEC *a,*b; -u_int i0; -{ - u_int limit; - /* Real *a_v, *b_v; */ - /* register Real sum; */ - - if ( a==(VEC *)NULL || b==(VEC *)NULL ) - error(E_NULL,"_in_prod"); - limit = min(a->dim,b->dim); - if ( i0 > limit ) - error(E_BOUNDS,"_in_prod"); - - return __ip__(&(a->ve[i0]),&(b->ve[i0]),(int)(limit-i0)); - /***************************************** - a_v = &(a->ve[i0]); b_v = &(b->ve[i0]); - for ( i=i0; idim != vector->dim ) - out = v_resize(out,vector->dim); - if ( scalar == 0.0 ) - return v_zero(out); - if ( scalar == 1.0 ) - return v_copy(vector,out); - - __smlt__(vector->ve,(double)scalar,out->ve,(int)(vector->dim)); - /************************************************** - dim = vector->dim; - out_ve = out->ve; vec_ve = vector->ve; - for ( i=0; ive[i] = scalar*vector->ve[i]; - (*out_ve++) = scalar*(*vec_ve++); - **************************************************/ - return (out); -} - -/* v_add -- vector addition -- may be in-situ */ -VEC *v_add(vec1,vec2,out) -VEC *vec1,*vec2,*out; -{ - u_int dim; - /* Real *out_ve, *vec1_ve, *vec2_ve; */ - - if ( vec1==(VEC *)NULL || vec2==(VEC *)NULL ) - error(E_NULL,"v_add"); - if ( vec1->dim != vec2->dim ) - error(E_SIZES,"v_add"); - if ( out==(VEC *)NULL || out->dim != vec1->dim ) - out = v_resize(out,vec1->dim); - dim = vec1->dim; - __add__(vec1->ve,vec2->ve,out->ve,(int)dim); - /************************************************************ - out_ve = out->ve; vec1_ve = vec1->ve; vec2_ve = vec2->ve; - for ( i=0; ive[i] = vec1->ve[i]+vec2->ve[i]; - (*out_ve++) = (*vec1_ve++) + (*vec2_ve++); - ************************************************************/ - - return (out); -} - -/* v_mltadd -- scalar/vector multiplication and addition - -- out = v1 + scale.v2 */ -VEC *v_mltadd(v1,v2,scale,out) -VEC *v1,*v2,*out; -double scale; -{ - /* register u_int dim, i; */ - /* Real *out_ve, *v1_ve, *v2_ve; */ - - if ( v1==(VEC *)NULL || v2==(VEC *)NULL ) - error(E_NULL,"v_mltadd"); - if ( v1->dim != v2->dim ) - error(E_SIZES,"v_mltadd"); - if ( scale == 0.0 ) - return v_copy(v1,out); - if ( scale == 1.0 ) - return v_add(v1,v2,out); - - if ( v2 != out ) - { - tracecatch(out = v_copy(v1,out),"v_mltadd"); - - /* dim = v1->dim; */ - __mltadd__(out->ve,v2->ve,scale,(int)(v1->dim)); - } - else - { - tracecatch(out = sv_mlt(scale,v2,out),"v_mltadd"); - out = v_add(v1,out,out); - } - /************************************************************ - out_ve = out->ve; v1_ve = v1->ve; v2_ve = v2->ve; - for ( i=0; i < dim ; i++ ) - out->ve[i] = v1->ve[i] + scale*v2->ve[i]; - (*out_ve++) = (*v1_ve++) + scale*(*v2_ve++); - ************************************************************/ - - return (out); -} - -/* v_sub -- vector subtraction -- may be in-situ */ -VEC *v_sub(vec1,vec2,out) -VEC *vec1,*vec2,*out; -{ - /* u_int i, dim; */ - /* Real *out_ve, *vec1_ve, *vec2_ve; */ - - if ( vec1==(VEC *)NULL || vec2==(VEC *)NULL ) - error(E_NULL,"v_sub"); - if ( vec1->dim != vec2->dim ) - error(E_SIZES,"v_sub"); - if ( out==(VEC *)NULL || out->dim != vec1->dim ) - out = v_resize(out,vec1->dim); - - __sub__(vec1->ve,vec2->ve,out->ve,(int)(vec1->dim)); - /************************************************************ - dim = vec1->dim; - out_ve = out->ve; vec1_ve = vec1->ve; vec2_ve 
= vec2->ve; - for ( i=0; ive[i] = vec1->ve[i]-vec2->ve[i]; - (*out_ve++) = (*vec1_ve++) - (*vec2_ve++); - ************************************************************/ - - return (out); -} - -/* v_map -- maps function f over components of x: out[i] = f(x[i]) - -- _v_map sets out[i] = f(params,x[i]) */ -VEC *v_map(f,x,out) -#ifdef PROTOTYPES_IN_STRUCT -double (*f)(double); -#else -double (*f)(); -#endif -VEC *x, *out; -{ - Real *x_ve, *out_ve; - int i, dim; - - if ( ! x || ! f ) - error(E_NULL,"v_map"); - if ( ! out || out->dim != x->dim ) - out = v_resize(out,x->dim); - - dim = x->dim; x_ve = x->ve; out_ve = out->ve; - for ( i = 0; i < dim; i++ ) - *out_ve++ = (*f)(*x_ve++); - - return out; -} - -VEC *_v_map(f,params,x,out) -#ifdef PROTOTYPES_IN_STRUCT -double (*f)(void *,double); -#else -double (*f)(); -#endif -VEC *x, *out; -void *params; -{ - Real *x_ve, *out_ve; - int i, dim; - - if ( ! x || ! f ) - error(E_NULL,"_v_map"); - if ( ! out || out->dim != x->dim ) - out = v_resize(out,x->dim); - - dim = x->dim; x_ve = x->ve; out_ve = out->ve; - for ( i = 0; i < dim; i++ ) - *out_ve++ = (*f)(params,*x_ve++); - - return out; -} - -/* v_lincomb -- returns sum_i a[i].v[i], a[i] real, v[i] vectors */ -VEC *v_lincomb(n,v,a,out) -int n; /* number of a's and v's */ -Real a[]; -VEC *v[], *out; -{ - int i; - - if ( ! a || ! v ) - error(E_NULL,"v_lincomb"); - if ( n <= 0 ) - return VNULL; - - for ( i = 1; i < n; i++ ) - if ( out == v[i] ) - error(E_INSITU,"v_lincomb"); - - out = sv_mlt(a[0],v[0],out); - for ( i = 1; i < n; i++ ) - { - if ( ! v[i] ) - error(E_NULL,"v_lincomb"); - if ( v[i]->dim != out->dim ) - error(E_SIZES,"v_lincomb"); - out = v_mltadd(out,v[i],a[i],out); - } - - return out; -} - - - -#ifdef ANSI_C - -/* v_linlist -- linear combinations taken from a list of arguments; - calling: - v_linlist(out,v1,a1,v2,a2,...,vn,an,NULL); - where vi are vectors (VEC *) and ai are numbers (double) -*/ -VEC *v_linlist(VEC *out,VEC *v1,double a1,...) -{ - va_list ap; - VEC *par; - double a_par; - - if ( ! v1 ) - return VNULL; - - va_start(ap, a1); - out = sv_mlt(a1,v1,out); - - while ((par = va_arg(ap,VEC *))) { /* NULL ends the list*/ - a_par = va_arg(ap,double); - if (a_par == 0.0) continue; - if ( out == par ) - error(E_INSITU,"v_linlist"); - if ( out->dim != par->dim ) - error(E_SIZES,"v_linlist"); - - if (a_par == 1.0) - out = v_add(out,par,out); - else if (a_par == -1.0) - out = v_sub(out,par,out); - else - out = v_mltadd(out,par,a_par,out); - } - - va_end(ap); - return out; -} - -#elif VARARGS - - -/* v_linlist -- linear combinations taken from a list of arguments; - calling: - v_linlist(out,v1,a1,v2,a2,...,vn,an,NULL); - where vi are vectors (VEC *) and ai are numbers (double) -*/ -VEC *v_linlist(va_alist) va_dcl -{ - va_list ap; - VEC *par, *out; - double a_par; - - va_start(ap); - out = va_arg(ap,VEC *); - par = va_arg(ap,VEC *); - if ( ! 
par ) { - va_end(ap); - return VNULL; - } - - a_par = va_arg(ap,double); - out = sv_mlt(a_par,par,out); - - while ((par = va_arg(ap,VEC *))) { /* NULL ends the list*/ - a_par = va_arg(ap,double); - if (a_par == 0.0) continue; - if ( out == par ) - error(E_INSITU,"v_linlist"); - if ( out->dim != par->dim ) - error(E_SIZES,"v_linlist"); - - if (a_par == 1.0) - out = v_add(out,par,out); - else if (a_par == -1.0) - out = v_sub(out,par,out); - else - out = v_mltadd(out,par,a_par,out); - } - - va_end(ap); - return out; -} - -#endif - - - - - -/* v_star -- computes componentwise (Hadamard) product of x1 and x2 - -- result out is returned */ -VEC *v_star(x1, x2, out) -VEC *x1, *x2, *out; -{ - int i; - - if ( ! x1 || ! x2 ) - error(E_NULL,"v_star"); - if ( x1->dim != x2->dim ) - error(E_SIZES,"v_star"); - out = v_resize(out,x1->dim); - - for ( i = 0; i < x1->dim; i++ ) - out->ve[i] = x1->ve[i] * x2->ve[i]; - - return out; -} - -/* v_slash -- computes componentwise ratio of x2 and x1 - -- out[i] = x2[i] / x1[i] - -- if x1[i] == 0 for some i, then raise E_SING error - -- result out is returned */ -VEC *v_slash(x1, x2, out) -VEC *x1, *x2, *out; -{ - int i; - Real tmp; - - if ( ! x1 || ! x2 ) - error(E_NULL,"v_slash"); - if ( x1->dim != x2->dim ) - error(E_SIZES,"v_slash"); - out = v_resize(out,x1->dim); - - for ( i = 0; i < x1->dim; i++ ) - { - tmp = x1->ve[i]; - if ( tmp == 0.0 ) - error(E_SING,"v_slash"); - out->ve[i] = x2->ve[i] / tmp; - } - - return out; -} - -/* v_min -- computes minimum component of x, which is returned - -- also sets min_idx to the index of this minimum */ -double v_min(x, min_idx) -VEC *x; -int *min_idx; -{ - int i, i_min; - Real min_val, tmp; - - if ( ! x ) - error(E_NULL,"v_min"); - if ( x->dim <= 0 ) - error(E_SIZES,"v_min"); - i_min = 0; - min_val = x->ve[0]; - for ( i = 1; i < x->dim; i++ ) - { - tmp = x->ve[i]; - if ( tmp < min_val ) - { - min_val = tmp; - i_min = i; - } - } - - if ( min_idx != NULL ) - *min_idx = i_min; - return min_val; -} - -/* v_max -- computes maximum component of x, which is returned - -- also sets max_idx to the index of this maximum */ -double v_max(x, max_idx) -VEC *x; -int *max_idx; -{ - int i, i_max; - Real max_val, tmp; - - if ( ! x ) - error(E_NULL,"v_max"); - if ( x->dim <= 0 ) - error(E_SIZES,"v_max"); - i_max = 0; - max_val = x->ve[0]; - for ( i = 1; i < x->dim; i++ ) - { - tmp = x->ve[i]; - if ( tmp > max_val ) - { - max_val = tmp; - i_max = i; - } - } - - if ( max_idx != NULL ) - *max_idx = i_max; - return max_val; -} - -#define MAX_STACK 60 - - -/* v_sort -- sorts vector x, and generates permutation that gives the order - of the components; x = [1.3, 3.7, 0.5] -> [0.5, 1.3, 3.7] and - the permutation is order = [2, 0, 1]. - -- if order is NULL on entry then it is ignored - -- the sorted vector x is returned */ -VEC *v_sort(x, order) -VEC *x; -PERM *order; -{ - Real *x_ve, tmp, v; - /* int *order_pe; */ - int dim, i, j, l, r, tmp_i; - int stack[MAX_STACK], sp; - - if ( ! x ) - error(E_NULL,"v_sort"); - if ( order != PNULL && order->size != x->dim ) - order = px_resize(order, x->dim); - - x_ve = x->ve; - dim = x->dim; - if ( order != PNULL ) - px_ident(order); - - if ( dim <= 1 ) - return x; - - /* using quicksort algorithm in Sedgewick, - "Algorithms in C", Ch. 9, pp. 
118--122 (1990) */ - sp = 0; - l = 0; r = dim-1; v = x_ve[0]; - for ( ; ; ) - { - while ( r > l ) - { - /* "i = partition(x_ve,l,r);" */ - v = x_ve[r]; - i = l-1; - j = r; - for ( ; ; ) - { - while ( x_ve[++i] < v ) - ; - while ( x_ve[--j] > v ) - ; - if ( i >= j ) break; - - tmp = x_ve[i]; - x_ve[i] = x_ve[j]; - x_ve[j] = tmp; - if ( order != PNULL ) - { - tmp_i = order->pe[i]; - order->pe[i] = order->pe[j]; - order->pe[j] = tmp_i; - } - } - tmp = x_ve[i]; - x_ve[i] = x_ve[r]; - x_ve[r] = tmp; - if ( order != PNULL ) - { - tmp_i = order->pe[i]; - order->pe[i] = order->pe[r]; - order->pe[r] = tmp_i; - } - - if ( i-l > r-i ) - { stack[sp++] = l; stack[sp++] = i-1; l = i+1; } - else - { stack[sp++] = i+1; stack[sp++] = r; r = i-1; } - } - - /* recursion elimination */ - if ( sp == 0 ) - break; - r = stack[--sp]; - l = stack[--sp]; - } - - return x; -} - -/* v_sum -- returns sum of entries of a vector */ -double v_sum(x) -VEC *x; -{ - int i; - Real sum; - - if ( ! x ) - error(E_NULL,"v_sum"); - - sum = 0.0; - for ( i = 0; i < x->dim; i++ ) - sum += x->ve[i]; - - return sum; -} - -/* v_conv -- computes convolution product of two vectors */ -VEC *v_conv(x1, x2, out) -VEC *x1, *x2, *out; -{ - int i; - - if ( ! x1 || ! x2 ) - error(E_NULL,"v_conv"); - if ( x1 == out || x2 == out ) - error(E_INSITU,"v_conv"); - if ( x1->dim == 0 || x2->dim == 0 ) - return out = v_resize(out,0); - - out = v_resize(out,x1->dim + x2->dim - 1); - v_zero(out); - for ( i = 0; i < x1->dim; i++ ) - __mltadd__(&(out->ve[i]),x2->ve,x1->ve[i],x2->dim); - - return out; -} - -/* v_pconv -- computes a periodic convolution product - -- the period is the dimension of x2 */ -VEC *v_pconv(x1, x2, out) -VEC *x1, *x2, *out; -{ - int i; - - if ( ! x1 || ! x2 ) - error(E_NULL,"v_pconv"); - if ( x1 == out || x2 == out ) - error(E_INSITU,"v_pconv"); - out = v_resize(out,x2->dim); - if ( x2->dim == 0 ) - return out; - - v_zero(out); - for ( i = 0; i < x1->dim; i++ ) - { - __mltadd__(&(out->ve[i]),x2->ve,x1->ve[i],x2->dim - i); - if ( i > 0 ) - __mltadd__(out->ve,&(x2->ve[x2->dim - i]),x1->ve[i],i); - } - - return out; -} diff --git a/src/mesch/version.c b/src/mesch/version.c deleted file mode 100755 index c8007d1325..0000000000 --- a/src/mesch/version.c +++ /dev/null @@ -1,75 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
-** -***************************************************************************/ - - -/* Version routine */ -/* This routine must be modified whenever modifications are made to - Meschach by persons other than the original authors - (David E. Stewart & Zbigniew Leyk); - when new releases of Meschach are made the - version number will also be updated -*/ - -#include <stdio.h> - -void m_version() -{ - static char rcsid[] = "version.c,v 1.1 1997/12/04 17:56:03 hines Exp"; - - printf("Meshach matrix library version 1.2b\n"); - printf("RCS id: %s\n",rcsid); - printf("Changes since 1.2a:\n"); - printf("\t Fixed bug in schur() for 2x2 blocks with real e-vals\n"); - printf("\t Fixed bug in schur() reading beyond end of array\n"); - printf("\t Fixed some installation bugs\n"); - printf("\t Fixed bugs & improved efficiency in spILUfactor()\n"); - printf("\t px_inv() doesn't crash inverting non-permutations\n"); - /**** List of modifications ****/ - /* Example below is for illustration only */ - /* printf("Modified by %s, routine(s) %s, file %s on date %s\n", - "Joe Bloggs", - "m_version", - "version.c", - "Fri Apr 5 16:00:38 EST 1994"); */ - /* printf("Purpose: %s\n", - "To update the version number"); */ -} - -/* version.c,v - * Revision 1.1 1997/12/04 17:56:03 hines - * meschach .c files missing from distribution - * - * Revision 1.9 1994/03/24 00:04:05 des - * Added notes on changes to spILUfactor() and px_inv(). - * - * Revision 1.8 1994/02/21 04:32:25 des - * Set version to 1.2b with bug fixes in schur() and installation. - * - * Revision 1.7 1994/01/13 05:43:57 des - * Version 1.2 update - * - - * */ diff --git a/src/mesch/zcopy.c b/src/mesch/zcopy.c deleted file mode 100755 index f91537137a..0000000000 --- a/src/mesch/zcopy.c +++ /dev/null @@ -1,193 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge.
-** -***************************************************************************/ - - -static char rcsid[] = "zcopy.c,v 1.1 1997/12/04 17:56:04 hines Exp"; -#include -#include "zmatrix.h" - - - -/* _zm_copy -- copies matrix into new area */ -ZMAT *_zm_copy(in,out,i0,j0) -ZMAT *in,*out; -u_int i0,j0; -{ - u_int i /* ,j */; - - if ( in==ZMNULL ) - error(E_NULL,"_zm_copy"); - if ( in==out ) - return (out); - if ( out==ZMNULL || out->m < in->m || out->n < in->n ) - out = zm_resize(out,in->m,in->n); - - for ( i=i0; i < in->m; i++ ) - MEM_COPY(&(in->me[i][j0]),&(out->me[i][j0]), - (in->n - j0)*sizeof(complex)); - /* for ( j=j0; j < in->n; j++ ) - out->me[i][j] = in->me[i][j]; */ - - return (out); -} - -/* _zv_copy -- copies vector into new area */ -ZVEC *_zv_copy(in,out,i0) -ZVEC *in,*out; -u_int i0; -{ - /* u_int i,j; */ - - if ( in==ZVNULL ) - error(E_NULL,"_zv_copy"); - if ( in==out ) - return (out); - if ( out==ZVNULL || out->dim < in->dim ) - out = zv_resize(out,in->dim); - - MEM_COPY(&(in->ve[i0]),&(out->ve[i0]),(in->dim - i0)*sizeof(complex)); - /* for ( i=i0; i < in->dim; i++ ) - out->ve[i] = in->ve[i]; */ - - return (out); -} - - -/* - The z._move() routines are for moving blocks of memory around - within Meschach data structures and for re-arranging matrices, - vectors etc. -*/ - -/* zm_move -- copies selected pieces of a matrix - -- moves the m0 x n0 submatrix with top-left cor-ordinates (i0,j0) - to the corresponding submatrix of out with top-left co-ordinates - (i1,j1) - -- out is resized (& created) if necessary */ -ZMAT *zm_move(in,i0,j0,m0,n0,out,i1,j1) -ZMAT *in, *out; -int i0, j0, m0, n0, i1, j1; -{ - int i; - - if ( ! in ) - error(E_NULL,"zm_move"); - if ( i0 < 0 || j0 < 0 || i1 < 0 || j1 < 0 || m0 < 0 || n0 < 0 || - i0+m0 > in->m || j0+n0 > in->n ) - error(E_BOUNDS,"zm_move"); - - if ( ! out ) - out = zm_resize(out,i1+m0,j1+n0); - else if ( i1+m0 > out->m || j1+n0 > out->n ) - out = zm_resize(out,max(out->m,i1+m0),max(out->n,j1+n0)); - - for ( i = 0; i < m0; i++ ) - MEM_COPY(&(in->me[i0+i][j0]),&(out->me[i1+i][j1]), - n0*sizeof(complex)); - - return out; -} - -/* zv_move -- copies selected pieces of a vector - -- moves the length dim0 subvector with initial index i0 - to the corresponding subvector of out with initial index i1 - -- out is resized if necessary */ -ZVEC *zv_move(in,i0,dim0,out,i1) -ZVEC *in, *out; -int i0, dim0, i1; -{ - if ( ! in ) - error(E_NULL,"zv_move"); - if ( i0 < 0 || dim0 < 0 || i1 < 0 || - i0+dim0 > in->dim ) - error(E_BOUNDS,"zv_move"); - - if ( (! out) || i1+dim0 > out->dim ) - out = zv_resize(out,i1+dim0); - - MEM_COPY(&(in->ve[i0]),&(out->ve[i1]),dim0*sizeof(complex)); - - return out; -} - - -/* zmv_move -- copies selected piece of matrix to a vector - -- moves the m0 x n0 submatrix with top-left co-ordinate (i0,j0) to - the subvector with initial index i1 (and length m0*n0) - -- rows are copied contiguously - -- out is resized if necessary */ -ZVEC *zmv_move(in,i0,j0,m0,n0,out,i1) -ZMAT *in; -ZVEC *out; -int i0, j0, m0, n0, i1; -{ - int dim1, i; - - if ( ! in ) - error(E_NULL,"zmv_move"); - if ( i0 < 0 || j0 < 0 || m0 < 0 || n0 < 0 || i1 < 0 || - i0+m0 > in->m || j0+n0 > in->n ) - error(E_BOUNDS,"zmv_move"); - - dim1 = m0*n0; - if ( (! 
out) || i1+dim1 > out->dim ) - out = zv_resize(out,i1+dim1); - - for ( i = 0; i < m0; i++ ) - MEM_COPY(&(in->me[i0+i][j0]),&(out->ve[i1+i*n0]),n0*sizeof(complex)); - - return out; -} - -/* zvm_move -- copies selected piece of vector to a matrix - -- moves the subvector with initial index i0 and length m1*n1 to - the m1 x n1 submatrix with top-left co-ordinate (i1,j1) - -- copying is done by rows - -- out is resized if necessary */ -ZMAT *zvm_move(in,i0,out,i1,j1,m1,n1) -ZVEC *in; -ZMAT *out; -int i0, i1, j1, m1, n1; -{ - int dim0, i; - - if ( ! in ) - error(E_NULL,"zvm_move"); - if ( i0 < 0 || i1 < 0 || j1 < 0 || m1 < 0 || n1 < 0 || - i0+m1*n1 > in->dim ) - error(E_BOUNDS,"zvm_move"); - - if ( ! out ) - out = zm_resize(out,i1+m1,j1+n1); - else - out = zm_resize(out,max(i1+m1,out->m),max(j1+n1,out->n)); - - dim0 = m1*n1; - for ( i = 0; i < m1; i++ ) - MEM_COPY(&(in->ve[i0+i*n1]),&(out->me[i1+i][j1]),n1*sizeof(complex)); - - return out; -} diff --git a/src/mesch/zfunc.c b/src/mesch/zfunc.c deleted file mode 100755 index 3921ae8e88..0000000000 --- a/src/mesch/zfunc.c +++ /dev/null @@ -1,245 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - Elementary functions for complex numbers - -- if not already defined -*/ - -#include "zmatrix.h" -#include - -static char rcsid[] = "zfunc.c,v 1.1 1997/12/04 17:56:05 hines Exp"; - -#ifndef COMPLEX_H - -#ifndef zmake -/* zmake -- create complex number real + i*imag */ -complex zmake(real,imag) -double real, imag; -{ - complex w; /* == real + i*imag */ - - w.re = real; w.im = imag; - return w; -} -#endif - -#ifndef zneg -/* zneg -- returns negative of z */ -complex zneg(z) -complex z; -{ - z.re = - z.re; - z.im = - z.im; - - return z; -} -#endif - -#ifndef zabs -/* zabs -- returns |z| */ -double zabs(z) -complex z; -{ - Real x, y, tmp; - int x_expt, y_expt; - - /* Note: we must ensure that overflow does not occur! */ - x = ( z.re >= 0.0 ) ? z.re : -z.re; - y = ( z.im >= 0.0 ) ? 
z.im : -z.im; - if ( x < y ) - { - tmp = x; - x = y; - y = tmp; - } - if ( x == 0.0 ) /* then y == 0.0 as well */ - return 0.0; - x = frexp(x,&x_expt); - y = frexp(y,&y_expt); - y = ldexp(y,y_expt-x_expt); - tmp = sqrt(x*x+y*y); - - return ldexp(tmp,x_expt); -} -#endif - -#ifndef zadd -/* zadd -- returns z1+z2 */ -complex zadd(z1,z2) -complex z1, z2; -{ - complex z; - - z.re = z1.re + z2.re; - z.im = z1.im + z2.im; - - return z; -} -#endif - -#ifndef zsub -/* zsub -- returns z1-z2 */ -complex zsub(z1,z2) -complex z1, z2; -{ - complex z; - - z.re = z1.re - z2.re; - z.im = z1.im - z2.im; - - return z; -} -#endif - -#ifndef zmlt -/* zmlt -- returns z1*z2 */ -complex zmlt(z1,z2) -complex z1, z2; -{ - complex z; - - z.re = z1.re * z2.re - z1.im * z2.im; - z.im = z1.re * z2.im + z1.im * z2.re; - - return z; -} -#endif - -#ifndef zinv -/* zmlt -- returns 1/z */ -complex zinv(z) -complex z; -{ - Real x, y, tmp; - int x_expt, y_expt; - - if ( z.re == 0.0 && z.im == 0.0 ) - error(E_SING,"zinv"); - /* Note: we must ensure that overflow does not occur! */ - x = ( z.re >= 0.0 ) ? z.re : -z.re; - y = ( z.im >= 0.0 ) ? z.im : -z.im; - if ( x < y ) - { - tmp = x; - x = y; - y = tmp; - } - x = frexp(x,&x_expt); - y = frexp(y,&y_expt); - y = ldexp(y,y_expt-x_expt); - - tmp = 1.0/(x*x + y*y); - z.re = z.re*tmp*ldexp(1.0,-2*x_expt); - z.im = -z.im*tmp*ldexp(1.0,-2*x_expt); - - return z; -} -#endif - -#ifndef zdiv -/* zdiv -- returns z1/z2 */ -complex zdiv(z1,z2) -complex z1, z2; -{ - return zmlt(z1,zinv(z2)); -} -#endif - -#ifndef zsqrt -/* zsqrt -- returns sqrt(z); uses branch with Re sqrt(z) >= 0 */ -complex zsqrt(z) -complex z; -{ - complex w; /* == sqrt(z) at end */ - Real alpha; - - alpha = sqrt(0.5*(fabs(z.re) + zabs(z))); - if (alpha!=0) - { - if (z.re>=0.0) - { - w.re = alpha; - w.im = z.im / (2.0*alpha); - } - else - { - w.re = fabs(z.im)/(2.0*alpha); - w.im = ( z.im >= 0 ) ? alpha : - alpha; - } - } - else - w.re = w.im = 0.0; - - return w; -} -#endif - -#ifndef zexp -/* zexp -- returns exp(z) */ -complex zexp(z) -complex z; -{ - complex w; /* == exp(z) at end */ - Real r; - - r = exp(z.re); - w.re = r*cos(z.im); - w.im = r*sin(z.im); - - return w; -} -#endif - -#ifndef zlog -/* zlog -- returns log(z); uses principal branch with -pi <= Im log(z) <= pi */ -complex zlog(z) -complex z; -{ - complex w; /* == log(z) at end */ - - w.re = log(zabs(z)); - w.im = atan2(z.im,z.re); - - return w; -} -#endif - -#ifndef zconj -complex zconj(z) -complex z; -{ - complex w; /* == conj(z) */ - - w.re = z.re; - w.im = - z.im; - return w; -} -#endif - -#endif - diff --git a/src/mesch/zgivens.c b/src/mesch/zgivens.c deleted file mode 100755 index af38671ac2..0000000000 --- a/src/mesch/zgivens.c +++ /dev/null @@ -1,185 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. 
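/*
 * Illustrative aside, not part of the deleted Meschach sources: the zabs()
 * routine above computes |z| = sqrt(re^2 + im^2) without overflowing when a
 * component is very large, by ordering |re| >= |im| and rescaling with
 * frexp()/ldexp() before squaring.  A minimal standalone sketch of the same
 * idea; the names `cpx` and `cpx_abs` are invented for this example only.
 */
#include <math.h>

struct cpx { double re, im; };

static double cpx_abs(struct cpx z)
{
    double x = fabs(z.re), y = fabs(z.im), t;
    int xe, ye;

    if (x < y) { t = x; x = y; y = t; }      /* ensure x >= y */
    if (x == 0.0)                            /* then y == 0.0 as well */
        return 0.0;
    x = frexp(x, &xe);                       /* mantissa of x; true value is x*2^xe */
    y = frexp(y, &ye);
    y = ldexp(y, ye - xe);                   /* put y on the same exponent as x */
    return ldexp(sqrt(x*x + y*y), xe);       /* scale the result back up */
}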
-** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Givens operations file. Contains routines for calculating and - applying givens rotations for/to vectors and also to matrices by - row and by column. - - Complex version. -*/ - -static char rcsid[] = "$Id: "; - -#include -#include "zmatrix.h" -#include "zmatrix2.h" -#include - -/* - (Complex) Givens rotation matrix: - [ c -s ] - [ s* c ] - Note that c is real and s is complex -*/ - -/* zgivens -- returns c,s parameters for Givens rotation to - eliminate y in the **column** vector [ x y ] */ -void zgivens(x,y,c,s) -complex x,y,*s; -Real *c; -{ - Real inv_norm, norm; - complex tmp; - - /* this is a safe way of computing sqrt(|x|^2+|y|^2) */ - tmp.re = zabs(x); tmp.im = zabs(y); - norm = zabs(tmp); - - if ( norm == 0.0 ) - { *c = 1.0; s->re = s->im = 0.0; } /* identity */ - else - { - inv_norm = 1.0 / tmp.re; /* inv_norm = 1/|x| */ - x.re *= inv_norm; - x.im *= inv_norm; /* normalise x */ - inv_norm = 1.0/norm; /* inv_norm = 1/||[x,y]||2 */ - *c = tmp.re * inv_norm; - /* now compute - conj(normalised x).y/||[x,y]||2 */ - s->re = - inv_norm*(x.re*y.re + x.im*y.im); - s->im = inv_norm*(x.re*y.im - x.im*y.re); - } -} - -/* rot_zvec -- apply Givens rotation to x's i & k components */ -ZVEC *rot_zvec(x,i,k,c,s,out) -ZVEC *x,*out; -int i,k; -double c; -complex s; -{ - - complex temp1, temp2; - - if ( x==ZVNULL ) - error(E_NULL,"rot_zvec"); - if ( i < 0 || i >= x->dim || k < 0 || k >= x->dim ) - error(E_RANGE,"rot_zvec"); - if ( x != out ) - out = zv_copy(x,out); - - /* temp1 = c*out->ve[i] - s*out->ve[k]; */ - temp1.re = c*out->ve[i].re - - s.re*out->ve[k].re + s.im*out->ve[k].im; - temp1.im = c*out->ve[i].im - - s.re*out->ve[k].im - s.im*out->ve[k].re; - - /* temp2 = c*out->ve[k] + zconj(s)*out->ve[i]; */ - temp2.re = c*out->ve[k].re - + s.re*out->ve[i].re + s.im*out->ve[i].im; - temp2.im = c*out->ve[k].im - + s.re*out->ve[i].im - s.im*out->ve[i].re; - - out->ve[i] = temp1; - out->ve[k] = temp2; - - return (out); -} - -/* zrot_rows -- premultiply mat by givens rotation described by c,s */ -ZMAT *zrot_rows(mat,i,k,c,s,out) -ZMAT *mat,*out; -int i,k; -double c; -complex s; -{ - u_int j; - complex temp1, temp2; - - if ( mat==ZMNULL ) - error(E_NULL,"zrot_rows"); - if ( i < 0 || i >= mat->m || k < 0 || k >= mat->m ) - error(E_RANGE,"zrot_rows"); - - if ( mat != out ) - out = zm_copy(mat,zm_resize(out,mat->m,mat->n)); - - /* temp1 = c*out->me[i][j] - s*out->me[k][j]; */ - for ( j=0; jn; j++ ) - { - /* temp1 = c*out->me[i][j] - s*out->me[k][j]; */ - temp1.re = c*out->me[i][j].re - - s.re*out->me[k][j].re + s.im*out->me[k][j].im; - temp1.im = c*out->me[i][j].im - - s.re*out->me[k][j].im - s.im*out->me[k][j].re; - - /* temp2 = c*out->me[k][j] + conj(s)*out->me[i][j]; */ - temp2.re = c*out->me[k][j].re - + s.re*out->me[i][j].re + s.im*out->me[i][j].im; - temp2.im = c*out->me[k][j].im - + s.re*out->me[i][j].im - s.im*out->me[i][j].re; - - out->me[i][j] = temp1; - out->me[k][j] = temp2; - } - - return (out); -} - -/* zrot_cols -- postmultiply mat by adjoint Givens rotation described by c,s */ -ZMAT *zrot_cols(mat,i,k,c,s,out) -ZMAT *mat,*out; -int i,k; -double c; -complex s; -{ - u_int j; - complex x, y; - - if ( mat==ZMNULL ) - error(E_NULL,"zrot_cols"); - if ( i < 0 || i >= 
mat->n || k < 0 || k >= mat->n ) - error(E_RANGE,"zrot_cols"); - - if ( mat != out ) - out = zm_copy(mat,zm_resize(out,mat->m,mat->n)); - - for ( j=0; jm; j++ ) - { - x = out->me[j][i]; y = out->me[j][k]; - /* out->me[j][i] = c*x - conj(s)*y; */ - out->me[j][i].re = c*x.re - s.re*y.re - s.im*y.im; - out->me[j][i].im = c*x.im - s.re*y.im + s.im*y.re; - - /* out->me[j][k] = c*y + s*x; */ - out->me[j][k].re = c*y.re + s.re*x.re - s.im*x.im; - out->me[j][k].im = c*y.im + s.re*x.im + s.im*x.re; - } - - return (out); -} - diff --git a/src/mesch/zhessen.c b/src/mesch/zhessen.c deleted file mode 100755 index fc927dc957..0000000000 --- a/src/mesch/zhessen.c +++ /dev/null @@ -1,153 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - File containing routines for determining Hessenberg - factorisations. - - Complex version -*/ - -static char rcsid[] = "zhessen.c,v 1.1 1997/12/04 17:56:07 hines Exp"; - -#include -#include "zmatrix.h" -#include "zmatrix2.h" - - -/* zHfactor -- compute Hessenberg factorisation in compact form. - -- factorisation performed in situ - -- for details of the compact form see zQRfactor.c and zmatrix2.doc */ -ZMAT *zHfactor(A, diag) -ZMAT *A; -ZVEC *diag; -{ - static ZVEC *tmp1 = ZVNULL; - Real beta; - int k, limit; - - if ( ! A || ! 
diag ) - error(E_NULL,"zHfactor"); - if ( diag->dim < A->m - 1 ) - error(E_SIZES,"zHfactor"); - if ( A->m != A->n ) - error(E_SQUARE,"zHfactor"); - limit = A->m - 1; - - tmp1 = zv_resize(tmp1,A->m); - MEM_STAT_REG(tmp1,TYPE_ZVEC); - - for ( k = 0; k < limit; k++ ) - { - zget_col(A,k,tmp1); - zhhvec(tmp1,k+1,&beta,tmp1,&A->me[k+1][k]); - diag->ve[k] = tmp1->ve[k+1]; - /* printf("zHfactor: k = %d, beta = %g, tmp1 =\n",k,beta); - zv_output(tmp1); */ - - zhhtrcols(A,k+1,k+1,tmp1,beta); - zhhtrrows(A,0 ,k+1,tmp1,beta); - /* printf("# at stage k = %d, A =\n",k); zm_output(A); */ - } - - return (A); -} - -/* zHQunpack -- unpack the compact representation of H and Q of a - Hessenberg factorisation - -- if either H or Q is NULL, then it is not unpacked - -- it can be in situ with HQ == H - -- returns HQ -*/ -ZMAT *zHQunpack(HQ,diag,Q,H) -ZMAT *HQ, *Q, *H; -ZVEC *diag; -{ - int i, j, limit; - Real beta, r_ii, tmp_val; - static ZVEC *tmp1 = ZVNULL, *tmp2 = ZVNULL; - - if ( HQ==ZMNULL || diag==ZVNULL ) - error(E_NULL,"zHQunpack"); - if ( HQ == Q || H == Q ) - error(E_INSITU,"zHQunpack"); - limit = HQ->m - 1; - if ( diag->dim < limit ) - error(E_SIZES,"zHQunpack"); - if ( HQ->m != HQ->n ) - error(E_SQUARE,"zHQunpack"); - - - if ( Q != ZMNULL ) - { - Q = zm_resize(Q,HQ->m,HQ->m); - tmp1 = zv_resize(tmp1,H->m); - tmp2 = zv_resize(tmp2,H->m); - MEM_STAT_REG(tmp1,TYPE_ZVEC); - MEM_STAT_REG(tmp2,TYPE_ZVEC); - - for ( i = 0; i < H->m; i++ ) - { - /* tmp1 = i'th basis vector */ - for ( j = 0; j < H->m; j++ ) - tmp1->ve[j].re = tmp1->ve[j].im = 0.0; - tmp1->ve[i].re = 1.0; - - /* apply H/h transforms in reverse order */ - for ( j = limit-1; j >= 0; j-- ) - { - zget_col(HQ,j,tmp2); - r_ii = zabs(tmp2->ve[j+1]); - tmp2->ve[j+1] = diag->ve[j]; - tmp_val = (r_ii*zabs(diag->ve[j])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - /* printf("zHQunpack: j = %d, beta = %g, tmp2 =\n", - j,beta); - zv_output(tmp2); */ - zhhtrvec(tmp2,beta,j+1,tmp1,tmp1); - } - - /* insert into Q */ - zset_col(Q,i,tmp1); - } - } - - if ( H != ZMNULL ) - { - H = zm_copy(HQ,zm_resize(H,HQ->m,HQ->n)); - - limit = H->m; - for ( i = 1; i < limit; i++ ) - for ( j = 0; j < i-1; j++ ) - H->me[i][j].re = H->me[i][j].im = 0.0; - } - - return HQ; -} - - - diff --git a/src/mesch/zhsehldr.c b/src/mesch/zhsehldr.c deleted file mode 100755 index 01a23e5da0..0000000000 --- a/src/mesch/zhsehldr.c +++ /dev/null @@ -1,209 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
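/*
 * Illustrative aside, not part of the deleted Meschach sources: the Hessenberg
 * factorisation above (zHfactor/zHQunpack) and the zhhvec()/zhhtrvec() routines
 * that follow are built on Householder reflections P = I - beta*h*h^H, chosen
 * so that P x zeroes every entry of x past a given index.  A minimal
 * real-valued sketch of building and applying one reflection; `house_vector`,
 * `house_apply` and the sign convention are invented for this example only.
 */
#include <math.h>

/* build h and beta so that (I - beta*h*h^T) x = (alpha, 0, ..., 0)^T */
static double house_vector(const double *x, double *h, int n, double *beta)
{
    double norm = 0.0, alpha;
    int i;

    for (i = 0; i < n; i++) norm += x[i] * x[i];
    norm = sqrt(norm);
    alpha = (x[0] >= 0.0) ? -norm : norm;    /* sign chosen to avoid cancellation */
    for (i = 0; i < n; i++) h[i] = x[i];
    h[0] -= alpha;
    *beta = (norm == 0.0) ? 0.0 : 1.0 / (norm * (norm + fabs(x[0])));
    return alpha;                            /* value of x[0] after the reflection */
}

/* overwrite x with (I - beta*h*h^T) x */
static void house_apply(const double *h, double beta, double *x, int n)
{
    double dot = 0.0;
    int i;

    for (i = 0; i < n; i++) dot += h[i] * x[i];
    for (i = 0; i < n; i++) x[i] -= beta * dot * h[i];
}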
-** -***************************************************************************/ - - -/* - Files for matrix computations - - Householder transformation file. Contains routines for calculating - householder transformations, applying them to vectors and matrices - by both row & column. - - Complex version -*/ - -static char rcsid[] = "zhsehldr.c,v 1.1 1997/12/04 17:56:08 hines Exp"; - -#include -#include "zmatrix.h" -#include "zmatrix2.h" -#include - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0) - -/* zhhvec -- calulates Householder vector to eliminate all entries after the - i0 entry of the vector vec. It is returned as out. May be in-situ */ -ZVEC *zhhvec(vec,i0,beta,out,newval) -ZVEC *vec,*out; -int i0; -Real *beta; -complex *newval; -{ - complex tmp; - Real norm, abs_val; - - if ( i0 < 0 || i0 >= vec->dim ) - error(E_BOUNDS,"zhhvec"); - out = _zv_copy(vec,out,i0); - tmp = _zin_prod(out,out,i0,Z_CONJ); - if ( tmp.re <= 0.0 ) - { - *beta = 0.0; - *newval = out->ve[i0]; - return (out); - } - norm = sqrt(tmp.re); - abs_val = zabs(out->ve[i0]); - *beta = 1.0/(norm * (norm+abs_val)); - if ( abs_val == 0.0 ) - { - newval->re = norm; - newval->im = 0.0; - } - else - { - abs_val = -norm / abs_val; - newval->re = abs_val*out->ve[i0].re; - newval->im = abs_val*out->ve[i0].im; - } abs_val = -norm / abs_val; - out->ve[i0].re -= newval->re; - out->ve[i0].im -= newval->im; - - return (out); -} - -/* zhhtrvec -- apply Householder transformation to vector -- may be in-situ */ -ZVEC *zhhtrvec(hh,beta,i0,in,out) -ZVEC *hh,*in,*out; /* hh = Householder vector */ -int i0; -double beta; -{ - complex scale, tmp; - /* u_int i; */ - - if ( hh==ZVNULL || in==ZVNULL ) - error(E_NULL,"zhhtrvec"); - if ( in->dim != hh->dim ) - error(E_SIZES,"zhhtrvec"); - if ( i0 < 0 || i0 > in->dim ) - error(E_BOUNDS,"zhhvec"); - - tmp = _zin_prod(hh,in,i0,Z_CONJ); - scale.re = -beta*tmp.re; - scale.im = -beta*tmp.im; - out = zv_copy(in,out); - __zmltadd__(&(out->ve[i0]),&(hh->ve[i0]),scale, - (int)(in->dim-i0),Z_NOCONJ); - /************************************************************ - for ( i=i0; idim; i++ ) - out->ve[i] = in->ve[i] - scale*hh->ve[i]; - ************************************************************/ - - return (out); -} - -/* zhhtrrows -- transform a matrix by a Householder vector by rows - starting at row i0 from column j0 -- in-situ */ -ZMAT *zhhtrrows(M,i0,j0,hh,beta) -ZMAT *M; -int i0, j0; -ZVEC *hh; -double beta; -{ - complex ip, scale; - int i /*, j */; - - if ( M==ZMNULL || hh==ZVNULL ) - error(E_NULL,"zhhtrrows"); - if ( M->n != hh->dim ) - error(E_RANGE,"zhhtrrows"); - if ( i0 < 0 || i0 > M->m || j0 < 0 || j0 > M->n ) - error(E_BOUNDS,"zhhtrrows"); - - if ( beta == 0.0 ) return (M); - - /* for each row ... 
*/ - for ( i = i0; i < M->m; i++ ) - { /* compute inner product */ - ip = __zip__(&(M->me[i][j0]),&(hh->ve[j0]), - (int)(M->n-j0),Z_NOCONJ); - /************************************************** - ip = 0.0; - for ( j = j0; j < M->n; j++ ) - ip += M->me[i][j]*hh->ve[j]; - **************************************************/ - scale.re = -beta*ip.re; - scale.im = -beta*ip.im; - /* if ( scale == 0.0 ) */ - if ( is_zero(scale) ) - continue; - - /* do operation */ - __zmltadd__(&(M->me[i][j0]),&(hh->ve[j0]),scale, - (int)(M->n-j0),Z_CONJ); - /************************************************** - for ( j = j0; j < M->n; j++ ) - M->me[i][j] -= scale*hh->ve[j]; - **************************************************/ - } - - return (M); -} - - -/* zhhtrcols -- transform a matrix by a Householder vector by columns - starting at row i0 from column j0 -- in-situ */ -ZMAT *zhhtrcols(M,i0,j0,hh,beta) -ZMAT *M; -int i0, j0; -ZVEC *hh; -double beta; -{ - /* Real ip, scale; */ - complex scale; - int i /*, k */; - static ZVEC *w = ZVNULL; - - if ( M==ZMNULL || hh==ZVNULL ) - error(E_NULL,"zhhtrcols"); - if ( M->m != hh->dim ) - error(E_SIZES,"zhhtrcols"); - if ( i0 < 0 || i0 > M->m || j0 < 0 || j0 > M->n ) - error(E_BOUNDS,"zhhtrcols"); - - if ( beta == 0.0 ) return (M); - - w = zv_resize(w,M->n); - MEM_STAT_REG(w,TYPE_ZVEC); - zv_zero(w); - - for ( i = i0; i < M->m; i++ ) - /* if ( hh->ve[i] != 0.0 ) */ - if ( ! is_zero(hh->ve[i]) ) - __zmltadd__(&(w->ve[j0]),&(M->me[i][j0]),hh->ve[i], - (int)(M->n-j0),Z_CONJ); - for ( i = i0; i < M->m; i++ ) - /* if ( hh->ve[i] != 0.0 ) */ - if ( ! is_zero(hh->ve[i]) ) - { - scale.re = -beta*hh->ve[i].re; - scale.im = -beta*hh->ve[i].im; - __zmltadd__(&(M->me[i][j0]),&(w->ve[j0]),scale, - (int)(M->n-j0),Z_CONJ); - } - return (M); -} - diff --git a/src/mesch/zlufctr.c b/src/mesch/zlufctr.c deleted file mode 100755 index 9377825586..0000000000 --- a/src/mesch/zlufctr.c +++ /dev/null @@ -1,279 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - Matrix factorisation routines to work with the other matrix files. 
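/*
 * Illustrative aside, not part of the deleted Meschach sources: the
 * zLUfactor()/zLUsolve() routines that follow factor A in place into a unit
 * lower triangle L and an upper triangle U, then solve A x = b with one
 * forward and one backward substitution.  A minimal real-valued sketch of
 * that flow, without the scaled partial pivoting the library performs;
 * `lu_factor`, `lu_solve` and the flat row-major layout are invented here.
 */
static int lu_factor(double *a, int n)           /* a is n*n, row-major, in situ */
{
    int i, j, k;

    for (k = 0; k < n; k++) {
        if (a[k*n + k] == 0.0) return -1;        /* zero pivot: would need pivoting */
        for (i = k + 1; i < n; i++) {
            a[i*n + k] /= a[k*n + k];            /* multiplier, stored in the L part */
            for (j = k + 1; j < n; j++)
                a[i*n + j] -= a[i*n + k] * a[k*n + j];
        }
    }
    return 0;
}

static void lu_solve(const double *lu, const double *b, double *x, int n)
{
    int i, j;

    for (i = 0; i < n; i++) {                    /* L y = b, implicit unit diagonal */
        x[i] = b[i];
        for (j = 0; j < i; j++) x[i] -= lu[i*n + j] * x[j];
    }
    for (i = n - 1; i >= 0; i--) {               /* U x = y, explicit diagonal */
        for (j = i + 1; j < n; j++) x[i] -= lu[i*n + j] * x[j];
        x[i] /= lu[i*n + i];
    }
}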
- Complex version -*/ - -static char rcsid[] = "zlufctr.c,v 1.1 1997/12/04 17:56:09 hines Exp"; - -#include -#include "zmatrix.h" -#include "zmatrix2.h" -#include - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0) - - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* zLUfactor -- Gaussian elimination with scaled partial pivoting - -- Note: returns LU matrix which is A */ -ZMAT *zLUfactor(A,pivot) -ZMAT *A; -PERM *pivot; -{ - u_int i, j, k, k_max, m, n; - int i_max; - Real dtemp, max1; - complex **A_v, *A_piv, *A_row, temp; - static VEC *scale = VNULL; - - if ( A==ZMNULL || pivot==PNULL ) - error(E_NULL,"zLUfactor"); - if ( pivot->size != A->m ) - error(E_SIZES,"zLUfactor"); - m = A->m; n = A->n; - scale = v_resize(scale,A->m); - MEM_STAT_REG(scale,TYPE_VEC); - A_v = A->me; - - /* initialise pivot with identity permutation */ - for ( i=0; ipe[i] = i; - - /* set scale parameters */ - for ( i=0; ive[i] = max1; - } - - /* main loop */ - k_max = min(m,n)-1; - for ( k=0; kve[i] > 0.0 ) - { - dtemp = zabs(A_v[i][k])/scale->ve[i]; - if ( dtemp > max1 ) - { max1 = dtemp; i_max = i; } - } - - /* if no pivot then ignore column k... */ - if ( i_max == -1 ) - continue; - - /* do we pivot ? */ - if ( i_max != k ) /* yes we do... */ - { - px_transp(pivot,i_max,k); - for ( j=0; jm != A->n || A->n != b->dim ) - error(E_SIZES,"zLUsolve"); - - x = px_zvec(pivot,b,x); /* x := P.b */ - zLsolve(A,x,x,1.0); /* implicit diagonal = 1 */ - zUsolve(A,x,x,0.0); /* explicit diagonal */ - - return (x); -} - -/* zLUAsolve -- given an LU factorisation in A, solve A^*.x=b */ -ZVEC *zLUAsolve(LU,pivot,b,x) -ZMAT *LU; -PERM *pivot; -ZVEC *b,*x; -{ - if ( ! LU || ! b || ! pivot ) - error(E_NULL,"zLUAsolve"); - if ( LU->m != LU->n || LU->n != b->dim ) - error(E_SIZES,"zLUAsolve"); - - x = zv_copy(b,x); - zUAsolve(LU,x,x,0.0); /* explicit diagonal */ - zLAsolve(LU,x,x,1.0); /* implicit diagonal = 1 */ - pxinv_zvec(pivot,x,x); /* x := P^*.x */ - - return (x); -} - -/* zm_inverse -- returns inverse of A, provided A is not too rank deficient - -- uses LU factorisation */ -ZMAT *zm_inverse(A,out) -ZMAT *A, *out; -{ - int i; - ZVEC *tmp, *tmp2; - ZMAT *A_cp; - PERM *pivot; - - if ( ! A ) - error(E_NULL,"zm_inverse"); - if ( A->m != A->n ) - error(E_SQUARE,"zm_inverse"); - if ( ! out || out->m < A->m || out->n < A->n ) - out = zm_resize(out,A->m,A->n); - - A_cp = zm_copy(A,ZMNULL); - tmp = zv_get(A->m); - tmp2 = zv_get(A->m); - pivot = px_get(A->m); - tracecatch(zLUfactor(A_cp,pivot),"zm_inverse"); - for ( i = 0; i < A->n; i++ ) - { - zv_zero(tmp); - tmp->ve[i].re = 1.0; - tmp->ve[i].im = 0.0; - tracecatch(zLUsolve(A_cp,pivot,tmp,tmp2),"m_inverse"); - zset_col(out,i,tmp2); - } - - ZM_FREE(A_cp); - ZV_FREE(tmp); ZV_FREE(tmp2); - PX_FREE(pivot); - - return out; -} - -/* zLUcondest -- returns an estimate of the condition number of LU given the - LU factorisation in compact form */ -double zLUcondest(LU,pivot) -ZMAT *LU; -PERM *pivot; -{ - static ZVEC *y = ZVNULL, *z = ZVNULL; - Real cond_est, L_norm, U_norm, norm, sn_inv; - complex sum; - int i, j, n; - - if ( ! LU || ! 
pivot ) - error(E_NULL,"zLUcondest"); - if ( LU->m != LU->n ) - error(E_SQUARE,"zLUcondest"); - if ( LU->n != pivot->size ) - error(E_SIZES,"zLUcondest"); - - n = LU->n; - y = zv_resize(y,n); - z = zv_resize(z,n); - MEM_STAT_REG(y,TYPE_ZVEC); - MEM_STAT_REG(z,TYPE_ZVEC); - - cond_est = 0.0; /* should never be returned */ - - for ( i = 0; i < n; i++ ) - { - sum.re = 1.0; - sum.im = 0.0; - for ( j = 0; j < i; j++ ) - /* sum -= LU->me[j][i]*y->ve[j]; */ - sum = zsub(sum,zmlt(LU->me[j][i],y->ve[j])); - /* sum -= (sum < 0.0) ? 1.0 : -1.0; */ - sn_inv = 1.0 / zabs(sum); - sum.re += sum.re * sn_inv; - sum.im += sum.im * sn_inv; - if ( is_zero(LU->me[i][i]) ) - return HUGE; - /* y->ve[i] = sum / LU->me[i][i]; */ - y->ve[i] = zdiv(sum,LU->me[i][i]); - } - - zLAsolve(LU,y,y,1.0); - zLUsolve(LU,pivot,y,z); - - /* now estimate norm of A (even though it is not directly available) */ - /* actually computes ||L||_inf.||U||_inf */ - U_norm = 0.0; - for ( i = 0; i < n; i++ ) - { - norm = 0.0; - for ( j = i; j < n; j++ ) - norm += zabs(LU->me[i][j]); - if ( norm > U_norm ) - U_norm = norm; - } - L_norm = 0.0; - for ( i = 0; i < n; i++ ) - { - norm = 1.0; - for ( j = 0; j < i; j++ ) - norm += zabs(LU->me[i][j]); - if ( norm > L_norm ) - L_norm = norm; - } - - tracecatch(cond_est = U_norm*L_norm*zv_norm_inf(z)/zv_norm_inf(y), - "LUcondest"); - - return cond_est; -} diff --git a/src/mesch/zmachine.c b/src/mesch/zmachine.c deleted file mode 100755 index 28b2c8d5ad..0000000000 --- a/src/mesch/zmachine.c +++ /dev/null @@ -1,175 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This file contains basic routines which are used by the functions - involving complex vectors. - These are the routines that should be modified in order to take - full advantage of specialised architectures (pipelining, vector - processors etc). 
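/*
 * Illustrative aside, not part of the deleted Meschach sources: the __zip__()
 * and __zmltadd__() kernels that follow are the complex analogues of the BLAS
 * dot and axpy operations, with a flag selecting whether the first operand is
 * conjugated.  A minimal standalone sketch of the two dot-product variants;
 * the names `cpx` and `cdot` are invented for this example only.
 */
struct cpx { double re, im; };

/* sum_i a[i]*b[i] when conj == 0, sum_i conj(a[i])*b[i] otherwise */
static struct cpx cdot(const struct cpx *a, const struct cpx *b, int len, int conj)
{
    struct cpx s = { 0.0, 0.0 };
    int i;

    for (i = 0; i < len; i++) {
        double ai = conj ? -a[i].im : a[i].im;   /* conjugation flips the sign of im */
        s.re += a[i].re * b[i].re - ai * b[i].im;
        s.im += a[i].re * b[i].im + ai * b[i].re;
    }
    return s;
}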
- */ -static char *rcsid = "zmachine.c,v 1.1 1997/12/04 17:56:10 hines Exp"; - -#include "machine.h" -#include "zmatrix.h" -#include - - -/* __zconj__ -- complex conjugate */ -void __zconj__(zp,len) -complex *zp; -int len; -{ - int i; - - for ( i = 0; i < len; i++ ) - zp[i].im = - zp[i].im; -} - -/* __zip__ -- inner product - -- computes sum_i zp1[i].zp2[i] if flag == 0 - sum_i zp1[i]*.zp2[i] if flag != 0 */ -complex __zip__(zp1,zp2,len,flag) -complex *zp1, *zp2; -int flag, len; -{ - complex sum; - int i; - - sum.re = sum.im = 0.0; - if ( flag ) - { - for ( i = 0; i < len; i++ ) - { - sum.re += zp1[i].re*zp2[i].re + zp1[i].im*zp2[i].im; - sum.im += zp1[i].re*zp2[i].im - zp1[i].im*zp2[i].re; - } - } - else - { - for ( i = 0; i < len; i++ ) - { - sum.re += zp1[i].re*zp2[i].re - zp1[i].im*zp2[i].im; - sum.im += zp1[i].re*zp2[i].im + zp1[i].im*zp2[i].re; - } - } - - return sum; -} - -/* __zmltadd__ -- scalar multiply and add i.e. complex saxpy - -- computes zp1[i] += s.zp2[i] if flag == 0 - -- computes zp1[i] += s.zp2[i]* if flag != 0 */ -void __zmltadd__(zp1,zp2,s,len,flag) -complex *zp1, *zp2, s; -int flag, len; -{ - int i; - LongReal t_re, t_im; - - if ( ! flag ) - { - for ( i = 0; i < len; i++ ) - { - t_re = zp1[i].re + s.re*zp2[i].re - s.im*zp2[i].im; - t_im = zp1[i].im + s.re*zp2[i].im + s.im*zp2[i].re; - zp1[i].re = t_re; - zp1[i].im = t_im; - } - } - else - { - for ( i = 0; i < len; i++ ) - { - t_re = zp1[i].re + s.re*zp2[i].re + s.im*zp2[i].im; - t_im = zp1[i].im - s.re*zp2[i].im + s.im*zp2[i].re; - zp1[i].re = t_re; - zp1[i].im = t_im; - } - } -} - -/* __zmlt__ scalar complex multiply array c.f. sv_mlt() */ -void __zmlt__(zp,s,out,len) -complex *zp, s, *out; -register int len; -{ - int i; - LongReal t_re, t_im; - - for ( i = 0; i < len; i++ ) - { - t_re = s.re*zp[i].re - s.im*zp[i].im; - t_im = s.re*zp[i].im + s.im*zp[i].re; - out[i].re = t_re; - out[i].im = t_im; - } -} - -/* __zadd__ -- add complex arrays c.f. v_add() */ -void __zadd__(zp1,zp2,out,len) -complex *zp1, *zp2, *out; -int len; -{ - int i; - for ( i = 0; i < len; i++ ) - { - out[i].re = zp1[i].re + zp2[i].re; - out[i].im = zp1[i].im + zp2[i].im; - } -} - -/* __zsub__ -- subtract complex arrays c.f. v_sub() */ -void __zsub__(zp1,zp2,out,len) -complex *zp1, *zp2, *out; -int len; -{ - int i; - for ( i = 0; i < len; i++ ) - { - out[i].re = zp1[i].re - zp2[i].re; - out[i].im = zp1[i].im - zp2[i].im; - } -} - -/* __zzero__ -- zeros an array of complex numbers */ -void __zzero__(zp,len) -complex *zp; -int len; -{ - /* if a Real precision zero is equivalent to a string of nulls */ - MEM_ZERO((char *)zp,len*sizeof(complex)); - /* else, need to zero the array entry by entry */ - /****************************** - while ( len-- ) - { - zp->re = zp->im = 0.0; - zp++; - } - ******************************/ -} - diff --git a/src/mesch/zmatio.c b/src/mesch/zmatio.c deleted file mode 100755 index ff3c9ff483..0000000000 --- a/src/mesch/zmatio.c +++ /dev/null @@ -1,401 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. 
-** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - - -#include -#include -#include "zmatrix.h" - -static char rcsid[] = "zmatio.c,v 1.1 1997/12/04 17:56:11 hines Exp"; - - - -/* local variables */ -static char line[MAXLINE]; - -/************************************************************************** - Input routines - **************************************************************************/ - -complex z_finput(fp) -FILE *fp; -{ - int io_code; - complex z; - - skipjunk(fp); - if ( isatty(fileno(fp)) ) - { - do { - fprintf(stderr,"real and imag parts: "); - if ( fgets(line,MAXLINE,fp) == NULL ) - error(E_EOF,"z_finput"); -#if REAL == DOUBLE - io_code = sscanf(line,"%lf%lf",&z.re,&z.im); -#elif REAL == FLOAT - io_code = sscanf(line,"%f%f",&z.re,&z.im); -#endif - - } while ( io_code != 2 ); - } - else -#if REAL == DOUBLE - if ( (io_code=fscanf(fp," (%lf,%lf)",&z.re,&z.im)) < 2 ) -#elif REAL == FLOAT - if ( (io_code=fscanf(fp," (%f,%f)",&z.re,&z.im)) < 2 ) -#endif - error((io_code == EOF) ? E_EOF : E_FORMAT,"z_finput"); - - return z; -} - - -ZMAT *zm_finput(fp,a) -FILE *fp; -ZMAT *a; -{ - ZMAT *izm_finput(),*bzm_finput(); - - if ( isatty(fileno(fp)) ) - return izm_finput(fp,a); - else - return bzm_finput(fp,a); -} - -/* izm_finput -- interactive input of matrix */ -ZMAT *izm_finput(fp,mat) -FILE *fp; -ZMAT *mat; -{ - char c; - u_int i, j, m, n, dynamic; - /* dynamic set to TRUE if memory allocated here */ - - /* get matrix size */ - if ( mat != ZMNULL && mat->mnm; n = mat->n; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"ComplexMatrix: rows cols:"); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"izm_finput"); - } while ( sscanf(line,"%u%u",&m,&n)<2 || m>MAXDIM || n>MAXDIM ); - mat = zm_get(m,n); - } - - /* input elements */ - for ( i=0; ime[i][j].re,mat->me[i][j].im); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"izm_finput"); - if ( (*line == 'b' || *line == 'B') && j > 0 ) - { j--; dynamic = FALSE; goto redo2; } - if ( (*line == 'f' || *line == 'F') && j < n-1 ) - { j++; dynamic = FALSE; goto redo2; } - } while ( *line=='\0' || -#if REAL == DOUBLE - sscanf(line,"%lf%lf", -#elif REAL == FLOAT - sscanf(line,"%f%f", -#endif - &mat->me[i][j].re,&mat->me[i][j].im)<1 ); - fprintf(stderr,"Continue: "); - if (fscanf(fp,"%c",&c) != 1) { error(E_INPUT, "izm_finput"); } - if ( c == 'n' || c == 'N' ) - { dynamic = FALSE; goto redo; } - if ( (c == 'b' || c == 'B') /* && i > 0 */ ) - { if ( i > 0 ) - i--; - dynamic = FALSE; goto redo; - } - } - - return (mat); -} - -/* bzm_finput -- batch-file input of matrix */ -ZMAT *bzm_finput(fp,mat) -FILE *fp; -ZMAT *mat; -{ - u_int i,j,m,n,dummy; - int io_code; - - /* get dimension */ - skipjunk(fp); - if ((io_code=fscanf(fp," ComplexMatrix: %u by %u",&m,&n)) < 2 || - m>MAXDIM || n>MAXDIM ) - error(io_code==EOF ? 
E_EOF : E_FORMAT,"bzm_finput"); - - /* allocate memory if necessary */ - if ( mat==ZMNULL || mat->mnme[i][j].re,&mat->me[i][j].im)) < 2 ) - error(io_code==EOF ? E_EOF : E_FORMAT,"bzm_finput"); - } - } - - return (mat); -} - -ZVEC *zv_finput(fp,x) -FILE *fp; -ZVEC *x; -{ - ZVEC *izv_finput(),*bzv_finput(); - - if ( isatty(fileno(fp)) ) - return izv_finput(fp,x); - else - return bzv_finput(fp,x); -} - -/* izv_finput -- interactive input of vector */ -ZVEC *izv_finput(fp,vec) -FILE *fp; -ZVEC *vec; -{ - u_int i,dim,dynamic; /* dynamic set if memory allocated here */ - - /* get vector dimension */ - if ( vec != ZVNULL && vec->dimdim; dynamic = FALSE; } - else - { - dynamic = TRUE; - do - { - fprintf(stderr,"ComplexVector: dim: "); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"izv_finput"); - } while ( sscanf(line,"%u",&dim)<1 || dim>MAXDIM ); - vec = zv_get(dim); - } - - /* input elements */ - for ( i=0; ive[i].re,vec->ve[i].im); - if ( fgets(line,MAXLINE,fp)==NULL ) - error(E_INPUT,"izv_finput"); - if ( (*line == 'b' || *line == 'B') && i > 0 ) - { i--; dynamic = FALSE; goto redo; } - if ( (*line == 'f' || *line == 'F') && i < dim-1 ) - { i++; dynamic = FALSE; goto redo; } - } while ( *line=='\0' || -#if REAL == DOUBLE - sscanf(line,"%lf%lf", -#elif REAL == FLOAT - sscanf(line,"%f%f", -#endif - &vec->ve[i].re,&vec->ve[i].im) < 2 ); - - return (vec); -} - -/* bzv_finput -- batch-file input of vector */ -ZVEC *bzv_finput(fp,vec) -FILE *fp; -ZVEC *vec; -{ - u_int i,dim; - int io_code; - - /* get dimension */ - skipjunk(fp); - if ((io_code=fscanf(fp," ComplexVector: dim:%u",&dim)) < 1 || - dim>MAXDIM ) - error(io_code==EOF ? 7 : 6,"bzv_finput"); - - - /* allocate memory if necessary */ - if ( vec==ZVNULL || vec->dimve[i].re,&vec->ve[i].im)) < 2 ) - error(io_code==EOF ? 7 : 6,"bzv_finput"); - - return (vec); -} - -/************************************************************************** - Output routines - **************************************************************************/ -static char *zformat = " (%14.9g, %14.9g) "; - -char *setzformat(f_string) -char *f_string; -{ - char *old_f_string; - old_f_string = zformat; - if ( f_string != (char *)NULL && *f_string != '\0' ) - zformat = f_string; - - return old_f_string; -} - -void z_foutput(fp,z) -FILE *fp; -complex z; -{ - fprintf(fp,zformat,z.re,z.im); - putc('\n',fp); -} - -void zm_foutput(fp,a) -FILE *fp; -ZMAT *a; -{ - u_int i, j, tmp; - - if ( a == ZMNULL ) - { fprintf(fp,"ComplexMatrix: NULL\n"); return; } - fprintf(fp,"ComplexMatrix: %d by %d\n",a->m,a->n); - if ( a->me == (complex **)NULL ) - { fprintf(fp,"NULL\n"); return; } - for ( i=0; im; i++ ) /* for each row... */ - { - fprintf(fp,"row %u: ",i); - for ( j=0, tmp=1; jn; j++, tmp++ ) - { /* for each col in row... */ - fprintf(fp,zformat,a->me[i][j].re,a->me[i][j].im); - if ( ! 
(tmp % 2) ) putc('\n',fp); - } - if ( tmp % 2 != 1 ) putc('\n',fp); - } -} - -void zv_foutput(fp,x) -FILE *fp; -ZVEC *x; -{ - u_int i, tmp; - - if ( x == ZVNULL ) - { fprintf(fp,"ComplexVector: NULL\n"); return; } - fprintf(fp,"ComplexVector: dim: %d\n",x->dim); - if ( x->ve == (complex *)NULL ) - { fprintf(fp,"NULL\n"); return; } - for ( i=0, tmp=0; idim; i++, tmp++ ) - { - fprintf(fp,zformat,x->ve[i].re,x->ve[i].im); - if ( (tmp % 2) == 1 ) putc('\n',fp); - } - if ( (tmp % 2) != 0 ) putc('\n',fp); -} - - -void zm_dump(fp,a) -FILE *fp; -ZMAT *a; -{ - u_int i, j, tmp; - - if ( a == ZMNULL ) - { fprintf(fp,"ComplexMatrix: NULL\n"); return; } - fprintf(fp,"ComplexMatrix: %d by %d @ 0x%p\n",a->m,a->n,a); - fprintf(fp,"\tmax_m = %d, max_n = %d, max_size = %d\n", - a->max_m, a->max_n, a->max_size); - if ( a->me == (complex **)NULL ) - { fprintf(fp,"NULL\n"); return; } - fprintf(fp,"a->me @ 0x%p\n",(a->me)); - fprintf(fp,"a->base @ 0x%p\n",(a->base)); - for ( i=0; im; i++ ) /* for each row... */ - { - fprintf(fp,"row %u: @ 0x%p ",i,(a->me[i])); - for ( j=0, tmp=1; jn; j++, tmp++ ) - { /* for each col in row... */ - fprintf(fp,zformat,a->me[i][j].re,a->me[i][j].im); - if ( ! (tmp % 2) ) putc('\n',fp); - } - if ( tmp % 2 != 1 ) putc('\n',fp); - } -} - - - -void zv_dump(fp,x) -FILE *fp; -ZVEC *x; -{ - u_int i, tmp; - - if ( ! x ) - { fprintf(fp,"ComplexVector: NULL\n"); return; } - fprintf(fp,"ComplexVector: dim: %d @ 0x%p\n",x->dim,(x)); - if ( ! x->ve ) - { fprintf(fp,"NULL\n"); return; } - fprintf(fp,"x->ve @ 0x%p\n",(x->ve)); - for ( i=0, tmp=0; idim; i++, tmp++ ) - { - fprintf(fp,zformat,x->ve[i].re,x->ve[i].im); - if ( tmp % 2 == 1 ) putc('\n',fp); - } - if ( tmp % 2 != 0 ) putc('\n',fp); -} - diff --git a/src/mesch/zmatlab.c b/src/mesch/zmatlab.c deleted file mode 100755 index 9f0ccb356f..0000000000 --- a/src/mesch/zmatlab.c +++ /dev/null @@ -1,223 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - This file contains routines for import/exporting complex data - to/from MATLAB. 
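/*
 * Illustrative aside, not part of the deleted Meschach sources: the
 * zm_save()/zv_save() routines that follow write complex data for MATLAB as
 * two consecutive blocks, every real part first and then every imaginary
 * part, rather than interleaved (re,im) pairs, after emitting a small header
 * (omitted here).  A minimal sketch of that plane-by-plane write for a flat
 * row-major array; `write_planes` and `struct cpx` are invented for this
 * example only.
 */
#include <stdio.h>

struct cpx { double re, im; };

static int write_planes(FILE *fp, const struct cpx *a, int m, int n)
{
    int i, k = m * n;

    for (i = 0; i < k; i++)                      /* real plane */
        if (fwrite(&a[i].re, sizeof(double), 1, fp) != 1) return -1;
    for (i = 0; i < k; i++)                      /* imaginary plane */
        if (fwrite(&a[i].im, sizeof(double), 1, fp) != 1) return -1;
    return 0;
}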
The main routines are: - ZMAT *zm_save(FILE *fp,ZMAT *A,char *name) - ZVEC *zv_save(FILE *fp,ZVEC *x,char *name) - complex z_save(FILE *fp,complex z,char *name) - ZMAT *zm_load(FILE *fp,char **name) -*/ - -#include -#include "zmatrix.h" -#include "matlab.h" - -static char rcsid[] = "zmatlab.c,v 1.1 1997/12/04 17:56:11 hines Exp"; - -/* zm_save -- save matrix in ".mat" file for MATLAB - -- returns matrix to be saved */ -ZMAT *zm_save(fp,A,name) -FILE *fp; -ZMAT *A; -char *name; -{ - int i, j; - matlab mat; - - if ( ! A ) - error(E_NULL,"zm_save"); - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = A->m; - mat.n = A->n; - mat.imag = TRUE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ -#if ORDER == ROW_ORDER - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < A->n; j++ ) - fwrite(&(A->me[i][j].re),sizeof(Real),1,fp); - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < A->n; j++ ) - fwrite(&(A->me[i][j].im),sizeof(Real),1,fp); -#else /* column major order: ORDER == COL_ORDER */ - for ( j = 0; j < A->n; j++ ) - for ( i = 0; i < A->m; i++ ) - fwrite(&(A->me[i][j].re),sizeof(Real),1,fp); - for ( j = 0; j < A->n; j++ ) - for ( i = 0; i < A->m; i++ ) - fwrite(&(A->me[i][j].im),sizeof(Real),1,fp); -#endif - - return A; -} - - -/* zv_save -- save vector in ".mat" file for MATLAB - -- saves it as a row vector - -- returns vector to be saved */ -ZVEC *zv_save(fp,x,name) -FILE *fp; -ZVEC *x; -char *name; -{ - int i; - matlab mat; - - if ( ! x ) - error(E_NULL,"zv_save"); - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = x->dim; - mat.n = 1; - mat.imag = TRUE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ - for ( i = 0; i < x->dim; i++ ) - fwrite(&(x->ve[i].re),sizeof(Real),1,fp); - for ( i = 0; i < x->dim; i++ ) - fwrite(&(x->ve[i].im),sizeof(Real),1,fp); - - return x; -} - -/* z_save -- saves complex number in ".mat" file for MATLAB - -- returns complex number to be saved */ -complex z_save(fp,z,name) -FILE *fp; -complex z; -char *name; -{ - matlab mat; - - mat.type = 1000*MACH_ID + 100*ORDER + 10*PRECISION + 0; - mat.m = 1; - mat.n = 1; - mat.imag = TRUE; - mat.namlen = (name == (char *)NULL) ? 1 : strlen(name)+1; - - /* write header */ - fwrite(&mat,sizeof(matlab),1,fp); - /* write name */ - if ( name == (char *)NULL ) - fwrite("",sizeof(char),1,fp); - else - fwrite(name,sizeof(char),(int)(mat.namlen),fp); - /* write actual data */ - fwrite(&z,sizeof(complex),1,fp); - - return z; -} - - - -/* zm_load -- loads in a ".mat" file variable as produced by MATLAB - -- matrix returned; imaginary parts ignored */ -ZMAT *zm_load(fp,name) -FILE *fp; -char **name; -{ - ZMAT *A; - int i; - int m_flag, o_flag, p_flag, t_flag; - float f_temp; - double d_temp; - matlab mat; - - if ( fread(&mat,sizeof(matlab),1,fp) != 1 ) - error(E_FORMAT,"zm_load"); - if ( mat.type >= 10000 ) /* don't load a sparse matrix! 
*/ - error(E_FORMAT,"zm_load"); - m_flag = (mat.type/1000) % 10; - o_flag = (mat.type/100) % 10; - p_flag = (mat.type/10) % 10; - t_flag = (mat.type) % 10; - if ( m_flag != MACH_ID ) - error(E_FORMAT,"zm_load"); - if ( t_flag != 0 ) - error(E_FORMAT,"zm_load"); - if ( p_flag != DOUBLE_PREC && p_flag != SINGLE_PREC ) - error(E_FORMAT,"zm_load"); - *name = (char *)malloc((unsigned)(mat.namlen)+1); - if ( fread(*name,sizeof(char),(unsigned)(mat.namlen),fp) == 0 ) - error(E_FORMAT,"zm_load"); - A = zm_get((unsigned)(mat.m),(unsigned)(mat.n)); - for ( i = 0; i < A->m*A->n; i++ ) - { - if ( p_flag == DOUBLE_PREC ) { - if (fread(&d_temp,sizeof(double),1,fp) != 1) {error(E_INPUT, "zm_load");} - } else { - if (fread(&f_temp,sizeof(float),1,fp) != 1) {error(E_INPUT, "zm_load");} - d_temp = f_temp; - } - if ( o_flag == ROW_ORDER ) { - A->me[i / A->n][i % A->n].re = d_temp; - } else if ( o_flag == COL_ORDER ) { - A->me[i % A->m][i / A->m].re = d_temp; - } else { - error(E_FORMAT,"zm_load"); - } - } - - if ( mat.imag ) /* skip imaginary part */ - for ( i = 0; i < A->m*A->n; i++ ) - { - if ( p_flag == DOUBLE_PREC ) { - if (fread(&d_temp,sizeof(double),1,fp) != 1) {error(E_INPUT, "zm_load");} - } else { - if (fread(&f_temp,sizeof(float),1,fp) != 1) {error(E_INPUT, "zm_load");} - d_temp = f_temp; - } - if ( o_flag == ROW_ORDER ) - A->me[i / A->n][i % A->n].im = d_temp; - else if ( o_flag == COL_ORDER ) - A->me[i % A->m][i / A->m].im = d_temp; - else - error(E_FORMAT,"zm_load"); - } - - return A; -} - diff --git a/src/mesch/zmatop.c b/src/mesch/zmatop.c deleted file mode 100755 index a974aed2e3..0000000000 --- a/src/mesch/zmatop.c +++ /dev/null @@ -1,616 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. 
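/*
 * Illustrative aside, not part of the deleted Meschach sources: when zm_load()
 * above reads the k-th element of the flat data stream it maps it to entry
 * (k / n, k % n) for a row-major file and to (k % m, k / m) for a column-major
 * one.  A tiny standalone sketch of that index mapping; `flat_to_rc` is an
 * invented name.
 */
static void flat_to_rc(int k, int m, int n, int col_major, int *row, int *col)
{
    if (col_major) {
        *row = k % m;       /* rows vary fastest in a column-major stream */
        *col = k / m;
    } else {
        *row = k / n;       /* columns vary fastest in a row-major stream */
        *col = k % n;
    }
}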
-** -***************************************************************************/ - - - -#include -#include "zmatrix.h" - -static char rcsid[] = "zmatop.c,v 1.1 1997/12/04 17:56:12 hines Exp"; - - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0) - -/* zm_add -- matrix addition -- may be in-situ */ -ZMAT *zm_add(mat1,mat2,out) -ZMAT *mat1,*mat2,*out; -{ - u_int m,n,i; - - if ( mat1==ZMNULL || mat2==ZMNULL ) - error(E_NULL,"zm_add"); - if ( mat1->m != mat2->m || mat1->n != mat2->n ) - error(E_SIZES,"zm_add"); - if ( out==ZMNULL || out->m != mat1->m || out->n != mat1->n ) - out = zm_resize(out,mat1->m,mat1->n); - m = mat1->m; n = mat1->n; - for ( i=0; ime[i],mat2->me[i],out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = mat1->me[i][j]+mat2->me[i][j]; - **************************************************/ - } - - return (out); -} - -/* zm_sub -- matrix subtraction -- may be in-situ */ -ZMAT *zm_sub(mat1,mat2,out) -ZMAT *mat1,*mat2,*out; -{ - u_int m,n,i; - - if ( mat1==ZMNULL || mat2==ZMNULL ) - error(E_NULL,"zm_sub"); - if ( mat1->m != mat2->m || mat1->n != mat2->n ) - error(E_SIZES,"zm_sub"); - if ( out==ZMNULL || out->m != mat1->m || out->n != mat1->n ) - out = zm_resize(out,mat1->m,mat1->n); - m = mat1->m; n = mat1->n; - for ( i=0; ime[i],mat2->me[i],out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = mat1->me[i][j]-mat2->me[i][j]; - **************************************************/ - } - - return (out); -} - -/* - Note: In the following routines, "adjoint" means complex conjugate - transpose: - A* = conjugate(A^T) - */ - -/* zm_mlt -- matrix-matrix multiplication */ -ZMAT *zm_mlt(A,B,OUT) -ZMAT *A,*B,*OUT; -{ - u_int i, /* j, */ k, m, n, p; - complex **A_v, **B_v /*, *B_row, *OUT_row, sum, tmp */; - - if ( A==ZMNULL || B==ZMNULL ) - error(E_NULL,"zm_mlt"); - if ( A->n != B->m ) - error(E_SIZES,"zm_mlt"); - if ( A == OUT || B == OUT ) - error(E_INSITU,"zm_mlt"); - m = A->m; n = A->n; p = B->n; - A_v = A->me; B_v = B->me; - - if ( OUT==ZMNULL || OUT->m != A->m || OUT->n != B->n ) - OUT = zm_resize(OUT,A->m,B->n); - - /**************************************************************** - for ( i=0; ime[i][j] = sum; - } - ****************************************************************/ - zm_zero(OUT); - for ( i=0; ime[i],B_v[k],A_v[i][k],(int)p,Z_NOCONJ); - /************************************************** - B_row = B_v[k]; OUT_row = OUT->me[i]; - for ( j=0; jn != B->n ) - error(E_SIZES,"zmma_mlt"); - if ( ! OUT || OUT->m != A->m || OUT->n != B->m ) - OUT = zm_resize(OUT,A->m,B->m); - - limit = A->n; - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < B->m; j++ ) - { - OUT->me[i][j] = __zip__(B->me[j],A->me[i],(int)limit,Z_CONJ); - /************************************************** - sum = 0.0; - A_row = A->me[i]; - B_row = B->me[j]; - for ( k = 0; k < limit; k++ ) - sum += (*A_row++)*(*B_row++); - OUT->me[i][j] = sum; - **************************************************/ - } - - return OUT; -} - -/* zmam_mlt -- matrix adjoint-matrix multiplication - -- A*.B is returned, result stored in OUT */ -ZMAT *zmam_mlt(A,B,OUT) -ZMAT *A, *B, *OUT; -{ - int i, k, limit; - /* complex *B_row, *OUT_row, multiplier; */ - complex tmp; - - if ( ! A || ! B ) - error(E_NULL,"zmam_mlt"); - if ( A == OUT || B == OUT ) - error(E_INSITU,"zmam_mlt"); - if ( A->m != B->m ) - error(E_SIZES,"zmam_mlt"); - if ( ! 
OUT || OUT->m != A->n || OUT->n != B->n ) - OUT = zm_resize(OUT,A->n,B->n); - - limit = B->n; - zm_zero(OUT); - for ( k = 0; k < A->m; k++ ) - for ( i = 0; i < A->n; i++ ) - { - tmp.re = A->me[k][i].re; - tmp.im = - A->me[k][i].im; - if ( ! is_zero(tmp) ) - __zmltadd__(OUT->me[i],B->me[k],tmp,(int)limit,Z_NOCONJ); - } - - return OUT; -} - -/* zmv_mlt -- matrix-vector multiplication - -- Note: b is treated as a column vector */ -ZVEC *zmv_mlt(A,b,out) -ZMAT *A; -ZVEC *b,*out; -{ - u_int i, m, n; - complex **A_v, *b_v /*, *A_row */; - /* register complex sum; */ - - if ( A==ZMNULL || b==ZVNULL ) - error(E_NULL,"zmv_mlt"); - if ( A->n != b->dim ) - error(E_SIZES,"zmv_mlt"); - if ( b == out ) - error(E_INSITU,"zmv_mlt"); - if ( out == ZVNULL || out->dim != A->m ) - out = zv_resize(out,A->m); - - m = A->m; n = A->n; - A_v = A->me; b_v = b->ve; - for ( i=0; ive[i] = __zip__(A_v[i],b_v,(int)n,Z_NOCONJ); - /************************************************** - A_row = A_v[i]; b_v = b->ve; - for ( j=0; jve[i] = sum; - **************************************************/ - } - - return out; -} - -/* zsm_mlt -- scalar-matrix multiply -- may be in-situ */ -ZMAT *zsm_mlt(scalar,matrix,out) -complex scalar; -ZMAT *matrix,*out; -{ - u_int m,n,i; - - if ( matrix==ZMNULL ) - error(E_NULL,"zsm_mlt"); - if ( out==ZMNULL || out->m != matrix->m || out->n != matrix->n ) - out = zm_resize(out,matrix->m,matrix->n); - m = matrix->m; n = matrix->n; - for ( i=0; ime[i],scalar,out->me[i],(int)n); - /************************************************** - for ( j=0; jme[i][j] = scalar*matrix->me[i][j]; - **************************************************/ - return (out); -} - -/* zvm_mlt -- vector adjoint-matrix multiplication */ -ZVEC *zvm_mlt(A,b,out) -ZMAT *A; -ZVEC *b,*out; -{ - u_int j,m,n; - /* complex sum,**A_v,*b_v; */ - - if ( A==ZMNULL || b==ZVNULL ) - error(E_NULL,"zvm_mlt"); - if ( A->m != b->dim ) - error(E_SIZES,"zvm_mlt"); - if ( b == out ) - error(E_INSITU,"zvm_mlt"); - if ( out == ZVNULL || out->dim != A->n ) - out = zv_resize(out,A->n); - - m = A->m; n = A->n; - - zv_zero(out); - for ( j = 0; j < m; j++ ) - if ( b->ve[j].re != 0.0 || b->ve[j].im != 0.0 ) - __zmltadd__(out->ve,A->me[j],b->ve[j],(int)n,Z_CONJ); - /************************************************** - A_v = A->me; b_v = b->ve; - for ( j=0; jve[j] = sum; - } - **************************************************/ - - return out; -} - -/* zm_adjoint -- adjoint matrix */ -ZMAT *zm_adjoint(in,out) -ZMAT *in, *out; -{ - int i, j; - int in_situ; - complex tmp; - - if ( in == ZMNULL ) - error(E_NULL,"zm_adjoint"); - if ( in == out && in->n != in->m ) - error(E_INSITU2,"zm_adjoint"); - in_situ = ( in == out ); - if ( out == ZMNULL || out->m != in->n || out->n != in->m ) - out = zm_resize(out,in->n,in->m); - - if ( ! in_situ ) - { - for ( i = 0; i < in->m; i++ ) - for ( j = 0; j < in->n; j++ ) - { - out->me[j][i].re = in->me[i][j].re; - out->me[j][i].im = - in->me[i][j].im; - } - } - else - { - for ( i = 0 ; i < in->m; i++ ) - { - for ( j = 0; j < i; j++ ) - { - tmp.re = in->me[i][j].re; - tmp.im = in->me[i][j].im; - in->me[i][j].re = in->me[j][i].re; - in->me[i][j].im = - in->me[j][i].im; - in->me[j][i].re = tmp.re; - in->me[j][i].im = - tmp.im; - } - in->me[i][i].im = - in->me[i][i].im; - } - } - - return out; -} - -/* zswap_rows -- swaps rows i and j of matrix A upto column lim */ -ZMAT *zswap_rows(A,i,j,lo,hi) -ZMAT *A; -int i, j, lo, hi; -{ - int k; - complex **A_me, tmp; - - if ( ! 
A ) - error(E_NULL,"swap_rows"); - if ( i < 0 || j < 0 || i >= A->m || j >= A->m ) - error(E_SIZES,"swap_rows"); - lo = max(0,lo); - hi = min(hi,A->n-1); - A_me = A->me; - - for ( k = lo; k <= hi; k++ ) - { - tmp = A_me[k][i]; - A_me[k][i] = A_me[k][j]; - A_me[k][j] = tmp; - } - return A; -} - -/* zswap_cols -- swap columns i and j of matrix A upto row lim */ -ZMAT *zswap_cols(A,i,j,lo,hi) -ZMAT *A; -int i, j, lo, hi; -{ - int k; - complex **A_me, tmp; - - if ( ! A ) - error(E_NULL,"swap_cols"); - if ( i < 0 || j < 0 || i >= A->n || j >= A->n ) - error(E_SIZES,"swap_cols"); - lo = max(0,lo); - hi = min(hi,A->m-1); - A_me = A->me; - - for ( k = lo; k <= hi; k++ ) - { - tmp = A_me[i][k]; - A_me[i][k] = A_me[j][k]; - A_me[j][k] = tmp; - } - return A; -} - -/* mz_mltadd -- matrix-scalar multiply and add - -- may be in situ - -- returns out == A1 + s*A2 */ -ZMAT *mz_mltadd(A1,A2,s,out) -ZMAT *A1, *A2, *out; -complex s; -{ - /* register complex *A1_e, *A2_e, *out_e; */ - /* register int j; */ - int i, m, n; - - if ( ! A1 || ! A2 ) - error(E_NULL,"mz_mltadd"); - if ( A1->m != A2->m || A1->n != A2->n ) - error(E_SIZES,"mz_mltadd"); - - if ( out != A1 && out != A2 ) - out = zm_resize(out,A1->m,A1->n); - - if ( s.re == 0.0 && s.im == 0.0 ) - return zm_copy(A1,out); - if ( s.re == 1.0 && s.im == 0.0 ) - return zm_add(A1,A2,out); - - out = zm_copy(A1,out); - - m = A1->m; n = A1->n; - for ( i = 0; i < m; i++ ) - { - __zmltadd__(out->me[i],A2->me[i],s,(int)n,Z_NOCONJ); - /************************************************** - A1_e = A1->me[i]; - A2_e = A2->me[i]; - out_e = out->me[i]; - for ( j = 0; j < n; j++ ) - out_e[j] = A1_e[j] + s*A2_e[j]; - **************************************************/ - } - - return out; -} - -/* zmv_mltadd -- matrix-vector multiply and add - -- may not be in situ - -- returns out == v1 + alpha*A*v2 */ -ZVEC *zmv_mltadd(v1,v2,A,alpha,out) -ZVEC *v1, *v2, *out; -ZMAT *A; -complex alpha; -{ - /* register int j; */ - int i, m, n; - complex tmp, *v2_ve, *out_ve; - - if ( ! v1 || ! v2 || ! A ) - error(E_NULL,"zmv_mltadd"); - if ( out == v2 ) - error(E_INSITU,"zmv_mltadd"); - if ( v1->dim != A->m || v2->dim != A-> n ) - error(E_SIZES,"zmv_mltadd"); - - tracecatch(out = zv_copy(v1,out),"zmv_mltadd"); - - v2_ve = v2->ve; out_ve = out->ve; - m = A->m; n = A->n; - - if ( alpha.re == 0.0 && alpha.im == 0.0 ) - return out; - - for ( i = 0; i < m; i++ ) - { - tmp = __zip__(A->me[i],v2_ve,(int)n,Z_NOCONJ); - out_ve[i].re += alpha.re*tmp.re - alpha.im*tmp.im; - out_ve[i].im += alpha.re*tmp.im + alpha.im*tmp.re; - /************************************************** - A_e = A->me[i]; - sum = 0.0; - for ( j = 0; j < n; j++ ) - sum += A_e[j]*v2_ve[j]; - out_ve[i] = v1->ve[i] + alpha*sum; - **************************************************/ - } - - return out; -} - -/* zvm_mltadd -- vector-matrix multiply and add a la zvm_mlt() - -- may not be in situ - -- returns out == v1 + v2*.A */ -ZVEC *zvm_mltadd(v1,v2,A,alpha,out) -ZVEC *v1, *v2, *out; -ZMAT *A; -complex alpha; -{ - int /* i, */ j, m, n; - complex tmp, /* *A_e, */ *out_ve; - - if ( ! v1 || ! v2 || ! 
A ) - error(E_NULL,"zvm_mltadd"); - if ( v2 == out ) - error(E_INSITU,"zvm_mltadd"); - if ( v1->dim != A->n || A->m != v2->dim ) - error(E_SIZES,"zvm_mltadd"); - - tracecatch(out = zv_copy(v1,out),"zvm_mltadd"); - - out_ve = out->ve; m = A->m; n = A->n; - for ( j = 0; j < m; j++ ) - { - /* tmp = zmlt(v2->ve[j],alpha); */ - tmp.re = v2->ve[j].re*alpha.re - v2->ve[j].im*alpha.im; - tmp.im = v2->ve[j].re*alpha.im + v2->ve[j].im*alpha.re; - if ( tmp.re != 0.0 || tmp.im != 0.0 ) - __zmltadd__(out_ve,A->me[j],tmp,(int)n,Z_CONJ); - /************************************************** - A_e = A->me[j]; - for ( i = 0; i < n; i++ ) - out_ve[i] += A_e[i]*tmp; - **************************************************/ - } - - return out; -} - -/* zget_col -- gets a specified column of a matrix; returned as a vector */ -ZVEC *zget_col(mat,col,vec) -int col; -ZMAT *mat; -ZVEC *vec; -{ - u_int i; - - if ( mat==ZMNULL ) - error(E_NULL,"zget_col"); - if ( col < 0 || col >= mat->n ) - error(E_RANGE,"zget_col"); - if ( vec==ZVNULL || vec->dimm ) - vec = zv_resize(vec,mat->m); - - for ( i=0; im; i++ ) - vec->ve[i] = mat->me[i][col]; - - return (vec); -} - -/* zget_row -- gets a specified row of a matrix and retruns it as a vector */ -ZVEC *zget_row(mat,row,vec) -int row; -ZMAT *mat; -ZVEC *vec; -{ - int /* i, */ lim; - - if ( mat==ZMNULL ) - error(E_NULL,"zget_row"); - if ( row < 0 || row >= mat->m ) - error(E_RANGE,"zget_row"); - if ( vec==ZVNULL || vec->dimn ) - vec = zv_resize(vec,mat->n); - - lim = min(mat->n,vec->dim); - - /* for ( i=0; in; i++ ) */ - /* vec->ve[i] = mat->me[row][i]; */ - MEMCOPY(mat->me[row],vec->ve,lim,complex); - - return (vec); -} - -/* zset_col -- sets column of matrix to values given in vec (in situ) */ -ZMAT *zset_col(mat,col,vec) -ZMAT *mat; -ZVEC *vec; -int col; -{ - u_int i,lim; - - if ( mat==ZMNULL || vec==ZVNULL ) - error(E_NULL,"zset_col"); - if ( col < 0 || col >= mat->n ) - error(E_RANGE,"zset_col"); - lim = min(mat->m,vec->dim); - for ( i=0; ime[i][col] = vec->ve[i]; - - return (mat); -} - -/* zset_row -- sets row of matrix to values given in vec (in situ) */ -ZMAT *zset_row(mat,row,vec) -ZMAT *mat; -ZVEC *vec; -int row; -{ - u_int /* j, */ lim; - - if ( mat==ZMNULL || vec==ZVNULL ) - error(E_NULL,"zset_row"); - if ( row < 0 || row >= mat->m ) - error(E_RANGE,"zset_row"); - lim = min(mat->n,vec->dim); - /* for ( j=j0; jme[row][j] = vec->ve[j]; */ - MEMCOPY(vec->ve,mat->me[row],lim,complex); - - return (mat); -} - -/* zm_rand -- randomise a complex matrix; uniform in [0,1)+[0,1)*i */ -ZMAT *zm_rand(A) -ZMAT *A; -{ - int i; - - if ( ! A ) - error(E_NULL,"zm_rand"); - - for ( i = 0; i < A->m; i++ ) - mrandlist((Real *)(A->me[i]),2*A->n); - - return A; -} diff --git a/src/mesch/zmatrix.h b/src/mesch/zmatrix.h deleted file mode 100755 index 8eeef59c7d..0000000000 --- a/src/mesch/zmatrix.h +++ /dev/null @@ -1,283 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. 
All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* Main include file for zmeschach library -- complex vectors and matrices */ - -#ifndef ZMATRIXH -#define ZMATRIXH - -#include "matrix.h" - - - /* Type definitions for complex vectors and matrices */ - - -/* complex definition */ -typedef struct { - Real re,im; - } complex; - -/* complex vector definition */ -typedef struct { - u_int dim, max_dim; - complex *ve; - } ZVEC; - -/* complex matrix definition */ -typedef struct { - u_int m, n; - u_int max_m, max_n, max_size; - complex *base; /* base is base of alloc'd mem */ - complex **me; - } ZMAT; - -#define ZVNULL ((ZVEC *)NULL) -#define ZMNULL ((ZMAT *)NULL) - -#define Z_CONJ 1 -#define Z_NOCONJ 0 - - -/* memory functions */ - -#ifdef ANSI_C -int zv_get_vars(int dim,...); -int zm_get_vars(int m,int n,...); -int zv_resize_vars(int new_dim,...); -int zm_resize_vars(int m,int n,...); -int zv_free_vars(ZVEC **,...); -int zm_free_vars(ZMAT **,...); - -#elif VARARGS -int zv_get_vars(); -int zm_get_vars(); -int zv_resize_vars(); -int zm_resize_vars(); -int zv_free_vars(); -int zm_free_vars(); - -#endif - - - - -#ifdef ANSI_C -extern ZMAT *_zm_copy(ZMAT *in,ZMAT *out,u_int i0,u_int j0); -extern ZMAT * zm_move(ZMAT *, int, int, int, int, ZMAT *, int, int); -extern ZMAT *zvm_move(ZVEC *, int, ZMAT *, int, int, int, int); -extern ZVEC *_zv_copy(ZVEC *in,ZVEC *out,u_int i0); -extern ZVEC * zv_move(ZVEC *, int, int, ZVEC *, int); -extern ZVEC *zmv_move(ZMAT *, int, int, int, int, ZVEC *, int); -extern complex z_finput(FILE *fp); -extern ZMAT *zm_finput(FILE *fp,ZMAT *a); -extern ZVEC *zv_finput(FILE *fp,ZVEC *x); -extern ZMAT *zm_add(ZMAT *mat1,ZMAT *mat2,ZMAT *out); -extern ZMAT *zm_sub(ZMAT *mat1,ZMAT *mat2,ZMAT *out); -extern ZMAT *zm_mlt(ZMAT *A,ZMAT *B,ZMAT *OUT); -extern ZMAT *zmma_mlt(ZMAT *A,ZMAT *B,ZMAT *OUT); -extern ZMAT *zmam_mlt(ZMAT *A,ZMAT *B,ZMAT *OUT); -extern ZVEC *zmv_mlt(ZMAT *A,ZVEC *b,ZVEC *out); -extern ZMAT *zsm_mlt(complex scalar,ZMAT *matrix,ZMAT *out); -extern ZVEC *zvm_mlt(ZMAT *A,ZVEC *b,ZVEC *out); -extern ZMAT *zm_adjoint(ZMAT *in,ZMAT *out); -extern ZMAT *zswap_rows(ZMAT *A,int i,int j,int lo,int hi); -extern ZMAT *zswap_cols(ZMAT *A,int i,int j,int lo,int hi); -extern ZMAT *mz_mltadd(ZMAT *A1,ZMAT *A2,complex s,ZMAT *out); -extern ZVEC *zmv_mltadd(ZVEC *v1,ZVEC *v2,ZMAT *A,complex alpha,ZVEC *out); -extern ZVEC *zvm_mltadd(ZVEC *v1,ZVEC *v2,ZMAT *A,complex alpha,ZVEC *out); -extern ZVEC *zv_zero(ZVEC *x); -extern ZMAT *zm_zero(ZMAT *A); -extern ZMAT *zm_get(int m,int n); -extern ZVEC *zv_get(int dim); -extern ZMAT *zm_resize(ZMAT *A,int new_m,int new_n); -extern complex _zin_prod(ZVEC *x,ZVEC *y,u_int i0,u_int flag); -extern ZVEC *zv_resize(ZVEC *x,int new_dim); -extern ZVEC *zv_mlt(complex scalar,ZVEC *vector,ZVEC *out); -extern ZVEC *zv_add(ZVEC *vec1,ZVEC *vec2,ZVEC *out); -extern ZVEC *zv_mltadd(ZVEC *v1,ZVEC *v2,complex scale,ZVEC *out); -extern ZVEC *zv_sub(ZVEC *vec1,ZVEC *vec2,ZVEC *out); -#ifdef PROTOTYPES_IN_STRUCT -extern ZVEC *zv_map(complex (*f)(),ZVEC *x,ZVEC *out); -extern ZVEC *_zv_map(complex (*f)(),void *params,ZVEC *x,ZVEC *out); -#else -extern ZVEC 
*zv_map(complex (*f)(complex),ZVEC *x,ZVEC *out); -extern ZVEC *_zv_map(complex (*f)(void *,complex),void *params,ZVEC *x,ZVEC *out); -#endif -extern ZVEC *zv_lincomb(int n,ZVEC *v[],complex a[],ZVEC *out); -extern ZVEC *zv_linlist(ZVEC *out,ZVEC *v1,complex a1,...); -extern ZVEC *zv_star(ZVEC *x1, ZVEC *x2, ZVEC *out); -extern ZVEC *zv_slash(ZVEC *x1, ZVEC *x2, ZVEC *out); -extern int zm_free(ZMAT *mat); -extern int zv_free(ZVEC *vec); - -extern ZVEC *zv_rand(ZVEC *x); -extern ZMAT *zm_rand(ZMAT *A); - -extern ZVEC *zget_row(ZMAT *A, int i, ZVEC *out); -extern ZVEC *zget_col(ZMAT *A, int j, ZVEC *out); -extern ZMAT *zset_row(ZMAT *A, int i, ZVEC *in); -extern ZMAT *zset_col(ZMAT *A, int j, ZVEC *in); - -extern ZVEC *px_zvec(PERM *pi, ZVEC *in, ZVEC *out); -extern ZVEC *pxinv_zvec(PERM *pi, ZVEC *in, ZVEC *out); - -extern void __zconj__(complex zp[], int len); -extern complex __zip__(complex zp1[],complex zp2[],int len,int flag); -extern void __zmltadd__(complex zp1[],complex zp2[], - complex s,int len,int flag); -extern void __zmlt__(complex zp[],complex s,complex out[],int len); -extern void __zadd__(complex zp1[],complex zp2[],complex out[],int len); -extern void __zsub__(complex zp1[],complex zp2[],complex out[],int len); -extern void __zzero__(complex zp[],int len); -extern void z_foutput(FILE *fp,complex z); -extern void zm_foutput(FILE *fp,ZMAT *a); -extern void zv_foutput(FILE *fp,ZVEC *x); -extern void zm_dump(FILE *fp,ZMAT *a); -extern void zv_dump(FILE *fp,ZVEC *x); - -extern double _zv_norm1(ZVEC *x, VEC *scale); -extern double _zv_norm2(ZVEC *x, VEC *scale); -extern double _zv_norm_inf(ZVEC *x, VEC *scale); -extern double zm_norm1(ZMAT *A); -extern double zm_norm_inf(ZMAT *A); -extern double zm_norm_frob(ZMAT *A); - -complex zmake(double real, double imag); -double zabs(complex z); -complex zadd(complex z1,complex z2); -complex zsub(complex z1,complex z2); -complex zmlt(complex z1,complex z2); -complex zinv(complex z); -complex zdiv(complex z1,complex z2); -complex zsqrt(complex z); -complex zexp(complex z); -complex zlog(complex z); -complex zconj(complex z); -complex zneg(complex z); -#else -extern ZMAT *_zm_copy(); -extern ZVEC *_zv_copy(); -extern ZMAT *zm_finput(); -extern ZVEC *zv_finput(); -extern ZMAT *zm_add(); -extern ZMAT *zm_sub(); -extern ZMAT *zm_mlt(); -extern ZMAT *zmma_mlt(); -extern ZMAT *zmam_mlt(); -extern ZVEC *zmv_mlt(); -extern ZMAT *zsm_mlt(); -extern ZVEC *zvm_mlt(); -extern ZMAT *zm_adjoint(); -extern ZMAT *zswap_rows(); -extern ZMAT *zswap_cols(); -extern ZMAT *mz_mltadd(); -extern ZVEC *zmv_mltadd(); -extern ZVEC *zvm_mltadd(); -extern ZVEC *zv_zero(); -extern ZMAT *zm_zero(); -extern ZMAT *zm_get(); -extern ZVEC *zv_get(); -extern ZMAT *zm_resize(); -extern ZVEC *zv_resize(); -extern complex _zin_prod(); -extern ZVEC *zv_mlt(); -extern ZVEC *zv_add(); -extern ZVEC *zv_mltadd(); -extern ZVEC *zv_sub(); -extern ZVEC *zv_map(); -extern ZVEC *_zv_map(); -extern ZVEC *zv_lincomb(); -extern ZVEC *zv_linlist(); -extern ZVEC *zv_star(); -extern ZVEC *zv_slash(); - -extern ZVEC *px_zvec(); -extern ZVEC *pxinv_zvec(); - -extern ZVEC *zv_rand(); -extern ZMAT *zm_rand(); - -extern ZVEC *zget_row(); -extern ZVEC *zget_col(); -extern ZMAT *zset_row(); -extern ZMAT *zset_col(); - -extern int zm_free(); -extern int zv_free(); -extern void __zconj__(); -extern complex __zip__(); -extern void __zmltadd__(); -extern void __zmlt__(); -extern void __zadd__(); -extern void __zsub__(); -extern void __zzero__(); -extern void zm_foutput(); -extern void zv_foutput(); 
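/*
 * Editorial aside (not part of the deleted header): the prototypes above refer
 * to scalar complex helpers such as zmake(), zmlt() and zabs().  As a minimal,
 * self-contained sketch of what such helpers compute -- assuming the usual
 * complex-arithmetic definitions and that Meschach's Real is a typedef for
 * double, which is an assumption, not something this diff confirms -- they
 * behave roughly like the functions below.  The *_sketch names are purely
 * illustrative and do not exist in the library.
 */
#include <math.h>

typedef double Real;                        /* assumption: Real == double       */
typedef struct { Real re, im; } complex_t;  /* mirrors the header's element type */

static complex_t zmake_sketch(Real re, Real im)
{   complex_t z; z.re = re; z.im = im; return z; }

static complex_t zmlt_sketch(complex_t a, complex_t b)
{   /* (a.re + i*a.im)(b.re + i*b.im) */
    complex_t p;
    p.re = a.re*b.re - a.im*b.im;
    p.im = a.re*b.im + a.im*b.re;
    return p;
}

static Real zabs_sketch(complex_t z)
{   /* modulus; hypot() avoids overflow for large components.  The library's
       own zabs() may use a different scaling strategy. */
    return hypot(z.re, z.im);
}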
-extern void zm_dump(); -extern void zv_dump(); - -extern double _zv_norm1(); -extern double _zv_norm2(); -extern double _zv_norm_inf(); -extern double zm_norm1(); -extern double zm_norm_inf(); -extern double zm_norm_frob(); - -complex zmake(); -double zabs(); -complex zadd(); -complex zsub(); -complex zmlt(); -complex zinv(); -complex zdiv(); -complex zsqrt(); -complex zexp(); -complex zlog(); -complex zconj(); -complex zneg(); -#endif - -#define zv_copy(x,y) _zv_copy(x,y,0) -#define zm_copy(A,B) _zm_copy(A,B,0,0) - -#define z_input() z_finput(stdin) -#define zv_input(x) zv_finput(stdin,x) -#define zm_input(A) zm_finput(stdin,A) -#define z_output(z) z_foutput(stdout,z) -#define zv_output(x) zv_foutput(stdout,x) -#define zm_output(A) zm_foutput(stdout,A) - -#define ZV_FREE(x) ( zv_free(x), (x) = ZVNULL ) -#define ZM_FREE(A) ( zm_free(A), (A) = ZMNULL ) - -#define zin_prod(x,y) _zin_prod(x,y,0,Z_CONJ) - -#define zv_norm1(x) _zv_norm1(x,VNULL) -#define zv_norm2(x) _zv_norm2(x,VNULL) -#define zv_norm_inf(x) _zv_norm_inf(x,VNULL) - - -#endif diff --git a/src/mesch/zmatrix2.h b/src/mesch/zmatrix2.h deleted file mode 100755 index cfe7334466..0000000000 --- a/src/mesch/zmatrix2.h +++ /dev/null @@ -1,118 +0,0 @@ - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - 2nd header file for Meschach's complex routines. - This file contains declarations for complex factorisation/solve - routines. 
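   Editorial usage sketch (not part of the original header): the factor/solve
   pairs declared below are meant to be used together.  Assuming A is an
   n x n ZMAT holding the system matrix and b is a ZVEC of dimension n, a
   plausible LU-based solve of A.x = b -- inferred only from the prototypes
   below, not from verified library behaviour -- would look roughly like:

       PERM *pivot = px_get(A->n);        px_get()/PX_FREE() come from matrix.h
       ZVEC *x     = ZVNULL;
       zLUfactor(A, pivot);               A is overwritten by its LU factors
       x = zLUsolve(A, pivot, b, x);      x is allocated here because it is ZVNULL
       ...
       PX_FREE(pivot);
       ZV_FREE(x);

   The QR route is analogous: zQRfactor(A, diag) followed by
   zQRsolve(A, diag, b, x), with diag a ZVEC of dimension at least min(m,n).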
- -*/ - - -#ifndef ZMATRIX2H -#define ZMATRIX2H - -#include "zmatrix.h" - -#ifdef ANSI_C -extern ZVEC *zUsolve(ZMAT *matrix, ZVEC *b, ZVEC *out, double diag); -extern ZVEC *zLsolve(ZMAT *matrix, ZVEC *b, ZVEC *out, double diag); -extern ZVEC *zUAsolve(ZMAT *U, ZVEC *b, ZVEC *out, double diag); -extern ZVEC *zDsolve(ZMAT *A, ZVEC *b, ZVEC *x); -extern ZVEC *zLAsolve(ZMAT *L, ZVEC *b, ZVEC *out, double diag); - -extern ZVEC *zhhvec(ZVEC *,int,Real *,ZVEC *,complex *); -extern ZVEC *zhhtrvec(ZVEC *,double,int,ZVEC *,ZVEC *); -extern ZMAT *zhhtrrows(ZMAT *,int,int,ZVEC *,double); -extern ZMAT *zhhtrcols(ZMAT *,int,int,ZVEC *,double); -extern ZMAT *zHfactor(ZMAT *,ZVEC *); -extern ZMAT *zHQunpack(ZMAT *,ZVEC *,ZMAT *,ZMAT *); - -extern ZMAT *zQRfactor(ZMAT *A, ZVEC *diag); -extern ZMAT *zQRCPfactor(ZMAT *A, ZVEC *diag, PERM *px); -extern ZVEC *_zQsolve(ZMAT *QR, ZVEC *diag, ZVEC *b, ZVEC *x, ZVEC *tmp); -extern ZMAT *zmakeQ(ZMAT *QR, ZVEC *diag, ZMAT *Qout); -extern ZMAT *zmakeR(ZMAT *QR, ZMAT *Rout); -extern ZVEC *zQRsolve(ZMAT *QR, ZVEC *diag, ZVEC *b, ZVEC *x); -extern ZVEC *zQRAsolve(ZMAT *QR, ZVEC *diag, ZVEC *b, ZVEC *x); -extern ZVEC *zQRCPsolve(ZMAT *QR,ZVEC *diag,PERM *pivot,ZVEC *b,ZVEC *x); -extern ZVEC *zUmlt(ZMAT *U, ZVEC *x, ZVEC *out); -extern ZVEC *zUAmlt(ZMAT *U, ZVEC *x, ZVEC *out); -extern double zQRcondest(ZMAT *QR); - -extern ZVEC *zLsolve(ZMAT *, ZVEC *, ZVEC *, double); -extern ZMAT *zset_col(ZMAT *, int, ZVEC *); - -extern ZMAT *zLUfactor(ZMAT *A, PERM *pivot); -extern ZVEC *zLUsolve(ZMAT *A, PERM *pivot, ZVEC *b, ZVEC *x); -extern ZVEC *zLUAsolve(ZMAT *LU, PERM *pivot, ZVEC *b, ZVEC *x); -extern ZMAT *zm_inverse(ZMAT *A, ZMAT *out); -extern double zLUcondest(ZMAT *LU, PERM *pivot); - -extern void zgivens(complex, complex, Real *, complex *); -extern ZMAT *zrot_rows(ZMAT *A, int i, int k, double c, complex s, - ZMAT *out); -extern ZMAT *zrot_cols(ZMAT *A, int i, int k, double c, complex s, - ZMAT *out); -extern ZVEC *rot_zvec(ZVEC *x, int i, int k, double c, complex s, - ZVEC *out); -extern ZMAT *zschur(ZMAT *A,ZMAT *Q); -/* extern ZMAT *schur_vecs(ZMAT *T,ZMAT *Q,X_re,X_im) */ -#else -extern ZVEC *zUsolve(), *zLsolve(), *zUAsolve(), *zDsolve(), *zLAsolve(); - -extern ZVEC *zhhvec(); -extern ZVEC *zhhtrvec(); -extern ZMAT *zhhtrrows(); -extern ZMAT *zhhtrcols(); -extern ZMAT *zHfactor(); -extern ZMAT *zHQunpack(); - - -extern ZMAT *zQRfactor(), *zQRCPfactor(); -extern ZVEC *_zQsolve(); -extern ZMAT *zmakeQ(), *zmakeR(); -extern ZVEC *zQRsolve(), *zQRAsolve(), *zQRCPsolve(); -extern ZVEC *zUmlt(), *zUAmlt(); -extern double zQRcondest(); - -extern ZVEC *zLsolve(); -extern ZMAT *zset_col(); - -extern ZMAT *zLUfactor(); -extern ZVEC *zLUsolve(), *zLUAsolve(); -extern ZMAT *zm_inverse(); -extern double zLUcondest(); - -extern void zgivens(); -extern ZMAT *zrot_rows(), *zrot_cols(); -extern ZVEC *rot_zvec(); -extern ZMAT *zschur(); -/* extern ZMAT *schur_vecs(); */ -#endif - -#endif - diff --git a/src/mesch/zmemory.c b/src/mesch/zmemory.c deleted file mode 100755 index 3a59ea2a0a..0000000000 --- a/src/mesch/zmemory.c +++ /dev/null @@ -1,714 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. 
-** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* Memory allocation and de-allocation for complex matrices and vectors */ - -#include -#include "zmatrix.h" - -static char rcsid[] = "zmemory.c,v 1.1 1997/12/04 17:56:13 hines Exp"; - - - -/* zv_zero -- zeros all entries of a complex vector - -- uses __zzero__() */ -ZVEC *zv_zero(x) -ZVEC *x; -{ - if ( ! x ) - error(E_NULL,"zv_zero"); - __zzero__(x->ve,x->dim); - - return x; -} - -/* zm_zero -- zeros all entries of a complex matrix - -- uses __zzero__() */ -ZMAT *zm_zero(A) -ZMAT *A; -{ - int i; - - if ( ! A ) - error(E_NULL,"zm_zero"); - for ( i = 0; i < A->m; i++ ) - __zzero__(A->me[i],A->n); - - return A; -} - -/* zm_get -- gets an mxn complex matrix (in ZMAT form) */ -ZMAT *zm_get(m,n) -int m,n; -{ - ZMAT *matrix; - u_int i; - - if (m < 0 || n < 0) - error(E_NEG,"zm_get"); - - if ((matrix=NEW(ZMAT)) == (ZMAT *)NULL ) - error(E_MEM,"zm_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,sizeof(ZMAT)); - mem_numvar(TYPE_ZMAT,1); - } - - matrix->m = m; matrix->n = matrix->max_n = n; - matrix->max_m = m; matrix->max_size = m*n; -#ifndef SEGMENTED - if ((matrix->base = NEW_A(m*n,complex)) == (complex *)NULL ) - { - free(matrix); - error(E_MEM,"zm_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,m*n*sizeof(complex)); - } -#else - matrix->base = (complex *)NULL; -#endif - if ((matrix->me = (complex **)calloc(m,sizeof(complex *))) == - (complex **)NULL ) - { free(matrix->base); free(matrix); - error(E_MEM,"zm_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,m*sizeof(complex *)); - } -#ifndef SEGMENTED - /* set up pointers */ - for ( i=0; ime[i] = &(matrix->base[i*n]); -#else - for ( i = 0; i < m; i++ ) - if ( (matrix->me[i]=NEW_A(n,complex)) == (complex *)NULL ) - error(E_MEM,"zm_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,n*sizeof(complex)); - } -#endif - - return (matrix); -} - - -/* zv_get -- gets a ZVEC of dimension 'dim' - -- Note: initialized to zero */ -ZVEC *zv_get(size) -int size; -{ - ZVEC *vector; - - if (size < 0) - error(E_NEG,"zv_get"); - - if ((vector=NEW(ZVEC)) == (ZVEC *)NULL ) - error(E_MEM,"zv_get"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZVEC,0,sizeof(ZVEC)); - mem_numvar(TYPE_ZVEC,1); - } - vector->dim = vector->max_dim = size; - if ((vector->ve=NEW_A(size,complex)) == (complex *)NULL ) - { - free(vector); - error(E_MEM,"zv_get"); - } - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZVEC,0,size*sizeof(complex)); - } - return (vector); -} - -/* zm_free -- returns ZMAT & asoociated memory back to memory heap */ -int zm_free(mat) -ZMAT *mat; -{ -#ifdef SEGMENTED - int i; -#endif - - if ( mat==(ZMAT *)NULL || (int)(mat->m) < 0 || - (int)(mat->n) < 0 ) - /* don't trust it */ - return (-1); - -#ifndef SEGMENTED - if ( mat->base != 
(complex *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,mat->max_m*mat->max_n*sizeof(complex),0); - } - free((char *)(mat->base)); - } -#else - for ( i = 0; i < mat->max_m; i++ ) - if ( mat->me[i] != (complex *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,mat->max_n*sizeof(complex),0); - } - free((char *)(mat->me[i])); - } -#endif - if ( mat->me != (complex **)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,mat->max_m*sizeof(complex *),0); - } - free((char *)(mat->me)); - } - - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,sizeof(ZMAT),0); - mem_numvar(TYPE_ZMAT,-1); - } - free((char *)mat); - - return (0); -} - - -/* zv_free -- returns ZVEC & asoociated memory back to memory heap */ -int zv_free(vec) -ZVEC *vec; -{ - if ( vec==(ZVEC *)NULL || (int)(vec->dim) < 0 ) - /* don't trust it */ - return (-1); - - if ( vec->ve == (complex *)NULL ) { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZVEC,sizeof(ZVEC),0); - mem_numvar(TYPE_ZVEC,-1); - } - free((char *)vec); - } - else - { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZVEC,vec->max_dim*sizeof(complex)+ - sizeof(ZVEC),0); - mem_numvar(TYPE_ZVEC,-1); - } - - free((char *)vec->ve); - free((char *)vec); - } - - return (0); -} - - -/* zm_resize -- returns the matrix A of size new_m x new_n; A is zeroed - -- if A == NULL on entry then the effect is equivalent to m_get() */ -ZMAT *zm_resize(A,new_m,new_n) -ZMAT *A; -int new_m, new_n; -{ - u_int i, new_max_m, new_max_n, new_size, old_m, old_n; - - if (new_m < 0 || new_n < 0) - error(E_NEG,"zm_resize"); - - if ( ! A ) - return zm_get(new_m,new_n); - - if (new_m == A->m && new_n == A->n) - return A; - - old_m = A->m; old_n = A->n; - if ( new_m > A->max_m ) - { /* re-allocate A->me */ - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,A->max_m*sizeof(complex *), - new_m*sizeof(complex *)); - } - - A->me = RENEW(A->me,new_m,complex *); - if ( ! A->me ) - error(E_MEM,"zm_resize"); - } - new_max_m = max(new_m,A->max_m); - new_max_n = max(new_n,A->max_n); - -#ifndef SEGMENTED - new_size = new_max_m*new_max_n; - if ( new_size > A->max_size ) - { /* re-allocate A->base */ - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,A->max_m*A->max_n*sizeof(complex), - new_size*sizeof(complex)); - } - - A->base = RENEW(A->base,new_size,complex); - if ( ! A->base ) - error(E_MEM,"zm_resize"); - A->max_size = new_size; - } - - /* now set up A->me[i] */ - for ( i = 0; i < new_m; i++ ) - A->me[i] = &(A->base[i*new_n]); - - /* now shift data in matrix */ - if ( old_n > new_n ) - { - for ( i = 1; i < min(old_m,new_m); i++ ) - MEM_COPY((char *)&(A->base[i*old_n]), - (char *)&(A->base[i*new_n]), - sizeof(complex)*new_n); - } - else if ( old_n < new_n ) - { - for ( i = min(old_m,new_m)-1; i > 0; i-- ) - { /* copy & then zero extra space */ - MEM_COPY((char *)&(A->base[i*old_n]), - (char *)&(A->base[i*new_n]), - sizeof(complex)*old_n); - __zzero__(&(A->base[i*new_n+old_n]),(new_n-old_n)); - } - __zzero__(&(A->base[old_n]),(new_n-old_n)); - A->max_n = new_n; - } - /* zero out the new rows.. 
*/ - for ( i = old_m; i < new_m; i++ ) - __zzero__(&(A->base[i*new_n]),new_n); -#else - if ( A->max_n < new_n ) - { - complex *tmp; - - for ( i = 0; i < A->max_m; i++ ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,A->max_n*sizeof(complex), - new_max_n*sizeof(complex)); - } - - if ( (tmp = RENEW(A->me[i],new_max_n,complex)) == NULL ) - error(E_MEM,"zm_resize"); - else { - A->me[i] = tmp; - } - } - for ( i = A->max_m; i < new_max_m; i++ ) - { - if ( (tmp = NEW_A(new_max_n,complex)) == NULL ) - error(E_MEM,"zm_resize"); - else { - A->me[i] = tmp; - if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,new_max_n*sizeof(complex)); - } - } - } - } - else if ( A->max_m < new_m ) - { - for ( i = A->max_m; i < new_m; i++ ) - if ( (A->me[i] = NEW_A(new_max_n,complex)) == NULL ) - error(E_MEM,"zm_resize"); - else if (mem_info_is_on()) { - mem_bytes(TYPE_ZMAT,0,new_max_n*sizeof(complex)); - } - - } - - if ( old_n < new_n ) - { - for ( i = 0; i < old_m; i++ ) - __zzero__(&(A->me[i][old_n]),new_n-old_n); - } - - /* zero out the new rows.. */ - for ( i = old_m; i < new_m; i++ ) - __zzero__(A->me[i],new_n); -#endif - - A->max_m = new_max_m; - A->max_n = new_max_n; - A->max_size = A->max_m*A->max_n; - A->m = new_m; A->n = new_n; - - return A; -} - - -/* zv_resize -- returns the (complex) vector x with dim new_dim - -- x is set to the zero vector */ -ZVEC *zv_resize(x,new_dim) -ZVEC *x; -int new_dim; -{ - if (new_dim < 0) - error(E_NEG,"zv_resize"); - - if ( ! x ) - return zv_get(new_dim); - - if (new_dim == x->dim) - return x; - - if ( x->max_dim == 0 ) /* assume that it's from sub_zvec */ - return zv_get(new_dim); - - if ( new_dim > x->max_dim ) - { - if (mem_info_is_on()) { - mem_bytes(TYPE_ZVEC,x->max_dim*sizeof(complex), - new_dim*sizeof(complex)); - } - - x->ve = RENEW(x->ve,new_dim,complex); - if ( ! x->ve ) - error(E_MEM,"zv_resize"); - x->max_dim = new_dim; - } - - if ( new_dim > x->dim ) - __zzero__(&(x->ve[x->dim]),new_dim - x->dim); - x->dim = new_dim; - - return x; -} - - -/* varying arguments */ - -#ifdef ANSI_C - -#include - - -/* To allocate memory to many arguments. - The function should be called: - zv_get_vars(dim,&x,&y,&z,...,NULL); - where - int dim; - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - dim is the length of vectors x,y,z,... - returned value is equal to the number of allocated variables - Other gec_... functions are similar. -*/ - -int zv_get_vars(int dim,...) -{ - va_list ap; - int i=0; - ZVEC **par; - - va_start(ap, dim); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - *par = zv_get(dim); - i++; - } - - va_end(ap); - return i; -} - - - -int zm_get_vars(int m,int n,...) -{ - va_list ap; - int i=0; - ZMAT **par; - - va_start(ap, n); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - *par = zm_get(m,n); - i++; - } - - va_end(ap); - return i; -} - - - -/* To resize memory for many arguments. - The function should be called: - v_resize_vars(new_dim,&x,&y,&z,...,NULL); - where - int new_dim; - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - rdim is the resized length of vectors x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. - Other *_resize_list() functions are similar. -*/ - -int zv_resize_vars(int new_dim,...) 
-{ - va_list ap; - int i=0; - ZVEC **par; - - va_start(ap, new_dim); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - *par = zv_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - - - -int zm_resize_vars(int m,int n,...) -{ - va_list ap; - int i=0; - ZMAT **par; - - va_start(ap, n); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - *par = zm_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - - -/* To deallocate memory for many arguments. - The function should be called: - v_free_vars(&x,&y,&z,...,NULL); - where - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - There must be at least one not NULL argument. - returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. - Other *_free_list() functions are similar. -*/ - -int zv_free_vars(ZVEC **pv,...) -{ - va_list ap; - int i=1; - ZVEC **par; - - zv_free(*pv); - *pv = ZVNULL; - va_start(ap, pv); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - zv_free(*par); - *par = ZVNULL; - i++; - } - - va_end(ap); - return i; -} - - - -int zm_free_vars(ZMAT **va,...) -{ - va_list ap; - int i=1; - ZMAT **par; - - zm_free(*va); - *va = ZMNULL; - va_start(ap, va); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - zm_free(*par); - *par = ZMNULL; - i++; - } - - va_end(ap); - return i; -} - - - -#elif VARARGS - -#include - -/* To allocate memory to many arguments. - The function should be called: - v_get_vars(dim,&x,&y,&z,...,NULL); - where - int dim; - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - dim is the length of vectors x,y,z,... - returned value is equal to the number of allocated variables - Other gec_... functions are similar. -*/ - -int zv_get_vars(va_alist) va_dcl -{ - va_list ap; - int dim,i=0; - ZVEC **par; - - va_start(ap); - dim = va_arg(ap,int); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - *par = zv_get(dim); - i++; - } - - va_end(ap); - return i; -} - - - -int zm_get_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, n, m; - ZMAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - *par = zm_get(m,n); - i++; - } - - va_end(ap); - return i; -} - - - -/* To resize memory for many arguments. - The function should be called: - v_resize_vars(new_dim,&x,&y,&z,...,NULL); - where - int new_dim; - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - rdim is the resized length of vectors x,y,z,... - returned value is equal to the number of allocated variables. - If one of x,y,z,.. arguments is NULL then memory is allocated to this - argument. - Other *_resize_list() functions are similar. -*/ - -int zv_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, new_dim; - ZVEC **par; - - va_start(ap); - new_dim = va_arg(ap,int); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - *par = zv_resize(*par,new_dim); - i++; - } - - va_end(ap); - return i; -} - - -int zm_resize_vars(va_alist) va_dcl -{ - va_list ap; - int i=0, m, n; - ZMAT **par; - - va_start(ap); - m = va_arg(ap,int); - n = va_arg(ap,int); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - *par = zm_resize(*par,m,n); - i++; - } - - va_end(ap); - return i; -} - - - -/* To deallocate memory for many arguments. - The function should be called: - v_free_vars(&x,&y,&z,...,NULL); - where - ZVEC *x, *y, *z,...; - The last argument should be NULL ! - There must be at least one not NULL argument. 
- returned value is equal to the number of allocated variables. - Returned value of x,y,z,.. is VNULL. - Other *_free_list() functions are similar. -*/ - -int zv_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - ZVEC **par; - - va_start(ap); - while ((par = va_arg(ap,ZVEC **))) { /* NULL ends the list*/ - zv_free(*par); - *par = ZVNULL; - i++; - } - - va_end(ap); - return i; -} - - - -int zm_free_vars(va_alist) va_dcl -{ - va_list ap; - int i=0; - ZMAT **par; - - va_start(ap); - while ((par = va_arg(ap,ZMAT **))) { /* NULL ends the list*/ - zm_free(*par); - *par = ZMNULL; - i++; - } - - va_end(ap); - return i; -} - - -#endif - diff --git a/src/mesch/znorm.c b/src/mesch/znorm.c deleted file mode 100755 index 6f0fdaf078..0000000000 --- a/src/mesch/znorm.c +++ /dev/null @@ -1,209 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - A collection of functions for computing norms: scaled and unscaled - Complex version -*/ -static char rcsid[] = "znorm.c,v 1.1 1997/12/04 17:56:14 hines Exp"; - -#include -#include "zmatrix.h" -#include - - - -/* _zv_norm1 -- computes (scaled) 1-norms of vectors */ -double _zv_norm1(x,scale) -ZVEC *x; -VEC *scale; -{ - int i, dim; - Real s, sum; - - if ( x == ZVNULL ) - error(E_NULL,"_zv_norm1"); - dim = x->dim; - - sum = 0.0; - if ( scale == VNULL ) - for ( i = 0; i < dim; i++ ) - sum += zabs(x->ve[i]); - else if ( scale->dim < dim ) - error(E_SIZES,"_zv_norm1"); - else - for ( i = 0; i < dim; i++ ) - { - s = scale->ve[i]; - sum += ( s== 0.0 ) ? zabs(x->ve[i]) : zabs(x->ve[i])/fabs(s); - } - - return sum; -} - -/* square -- returns x^2 */ -/****************************** -double square(x) -double x; -{ return x*x; } -******************************/ - -#define square(x) ((x)*(x)) - -/* _zv_norm2 -- computes (scaled) 2-norm (Euclidean norm) of vectors */ -double _zv_norm2(x,scale) -ZVEC *x; -VEC *scale; -{ - int i, dim; - Real s, sum; - - if ( x == ZVNULL ) - error(E_NULL,"_zv_norm2"); - dim = x->dim; - - sum = 0.0; - if ( scale == VNULL ) - for ( i = 0; i < dim; i++ ) - sum += square(x->ve[i].re) + square(x->ve[i].im); - else if ( scale->dim < dim ) - error(E_SIZES,"_v_norm2"); - else - for ( i = 0; i < dim; i++ ) - { - s = scale->ve[i]; - sum += ( s== 0.0 ) ? square(x->ve[i].re) + square(x->ve[i].im) : - (square(x->ve[i].re) + square(x->ve[i].im))/square(s); - } - - return sqrt(sum); -} - -#define max(a,b) ((a) > (b) ? 
(a) : (b)) - -/* _zv_norm_inf -- computes (scaled) infinity-norm (supremum norm) of vectors */ -double _zv_norm_inf(x,scale) -ZVEC *x; -VEC *scale; -{ - int i, dim; - Real s, maxval, tmp; - - if ( x == ZVNULL ) - error(E_NULL,"_zv_norm_inf"); - dim = x->dim; - - maxval = 0.0; - if ( scale == VNULL ) - for ( i = 0; i < dim; i++ ) - { - tmp = zabs(x->ve[i]); - maxval = max(maxval,tmp); - } - else if ( scale->dim < dim ) - error(E_SIZES,"_zv_norm_inf"); - else - for ( i = 0; i < dim; i++ ) - { - s = scale->ve[i]; - tmp = ( s == 0.0 ) ? zabs(x->ve[i]) : zabs(x->ve[i])/fabs(s); - maxval = max(maxval,tmp); - } - - return maxval; -} - -/* zm_norm1 -- compute matrix 1-norm -- unscaled - -- complex version */ -double zm_norm1(A) -ZMAT *A; -{ - int i, j, m, n; - Real maxval, sum; - - if ( A == ZMNULL ) - error(E_NULL,"zm_norm1"); - - m = A->m; n = A->n; - maxval = 0.0; - - for ( j = 0; j < n; j++ ) - { - sum = 0.0; - for ( i = 0; i < m; i ++ ) - sum += zabs(A->me[i][j]); - maxval = max(maxval,sum); - } - - return maxval; -} - -/* zm_norm_inf -- compute matrix infinity-norm -- unscaled - -- complex version */ -double zm_norm_inf(A) -ZMAT *A; -{ - int i, j, m, n; - Real maxval, sum; - - if ( A == ZMNULL ) - error(E_NULL,"zm_norm_inf"); - - m = A->m; n = A->n; - maxval = 0.0; - - for ( i = 0; i < m; i++ ) - { - sum = 0.0; - for ( j = 0; j < n; j ++ ) - sum += zabs(A->me[i][j]); - maxval = max(maxval,sum); - } - - return maxval; -} - -/* zm_norm_frob -- compute matrix frobenius-norm -- unscaled */ -double zm_norm_frob(A) -ZMAT *A; -{ - int i, j, m, n; - Real sum; - - if ( A == ZMNULL ) - error(E_NULL,"zm_norm_frob"); - - m = A->m; n = A->n; - sum = 0.0; - - for ( i = 0; i < m; i++ ) - for ( j = 0; j < n; j ++ ) - sum += square(A->me[i][j].re) + square(A->me[i][j].im); - - return sqrt(sum); -} - diff --git a/src/mesch/zqrfctr.c b/src/mesch/zqrfctr.c deleted file mode 100755 index ad453c4be0..0000000000 --- a/src/mesch/zqrfctr.c +++ /dev/null @@ -1,526 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - This file contains the routines needed to perform QR factorisation - of matrices, as well as Householder transformations. - The internal "factored form" of a matrix A is not quite standard. - The diagonal of A is replaced by the diagonal of R -- not by the 1st non-zero - entries of the Householder vectors. The 1st non-zero entries are held in - the diag parameter of QRfactor(). 
The reason for this non-standard - representation is that it enables direct use of the Usolve() function - rather than requiring that a seperate function be written just for this case. - See, e.g., QRsolve() below for more details. - - Complex version - -*/ - -static char rcsid[] = "zqrfctr.c,v 1.1 1997/12/04 17:56:15 hines Exp"; - -#include -#include "zmatrix.h" -#include "zmatrix2.h" -#include - - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0) - - -#define sign(x) ((x) > 0.0 ? 1 : ((x) < 0.0 ? -1 : 0 )) - -/* Note: The usual representation of a Householder transformation is taken - to be: - P = I - beta.u.u* - where beta = 2/(u*.u) and u is called the Householder vector - (u* is the conjugate transposed vector of u -*/ - -/* zQRfactor -- forms the QR factorisation of A - -- factorisation stored in compact form as described above - (not quite standard format) */ -ZMAT *zQRfactor(A,diag) -ZMAT *A; -ZVEC *diag; -{ - u_int k,limit; - Real beta; - static ZVEC *tmp1=ZVNULL; - - if ( ! A || ! diag ) - error(E_NULL,"zQRfactor"); - limit = min(A->m,A->n); - if ( diag->dim < limit ) - error(E_SIZES,"zQRfactor"); - - tmp1 = zv_resize(tmp1,A->m); - MEM_STAT_REG(tmp1,TYPE_ZVEC); - - for ( k=0; kve[k],tmp1,&A->me[k][k]); */ - zhhvec(tmp1,k,&beta,tmp1,&A->me[k][k]); - diag->ve[k] = tmp1->ve[k]; - - /* apply H/holder vector to remaining columns */ - /* hhtrcols(A,k,k+1,tmp1,beta->ve[k]); */ - tracecatch(zhhtrcols(A,k,k+1,tmp1,beta),"zQRfactor"); - } - - return (A); -} - -/* zQRCPfactor -- forms the QR factorisation of A with column pivoting - -- factorisation stored in compact form as described above - ( not quite standard format ) */ -ZMAT *zQRCPfactor(A,diag,px) -ZMAT *A; -ZVEC *diag; -PERM *px; -{ - u_int i, i_max, j, k, limit; - static ZVEC *tmp1=ZVNULL, *tmp2=ZVNULL; - static VEC *gamma=VNULL; - Real beta; - Real maxgamma, sum, tmp; - complex ztmp; - - if ( ! A || ! diag || ! 
px ) - error(E_NULL,"QRCPfactor"); - limit = min(A->m,A->n); - if ( diag->dim < limit || px->size != A->n ) - error(E_SIZES,"QRCPfactor"); - - tmp1 = zv_resize(tmp1,A->m); - tmp2 = zv_resize(tmp2,A->m); - gamma = v_resize(gamma,A->n); - MEM_STAT_REG(tmp1,TYPE_ZVEC); - MEM_STAT_REG(tmp2,TYPE_ZVEC); - MEM_STAT_REG(gamma,TYPE_VEC); - - /* initialise gamma and px */ - for ( j=0; jn; j++ ) - { - px->pe[j] = j; - sum = 0.0; - for ( i=0; im; i++ ) - sum += square(A->me[i][j].re) + square(A->me[i][j].im); - gamma->ve[j] = sum; - } - - for ( k=0; kve[k]; - for ( i=k+1; in; i++ ) - /* Loop invariant:maxgamma=gamma[i_max] - >=gamma[l];l=k,...,i-1 */ - if ( gamma->ve[i] > maxgamma ) - { maxgamma = gamma->ve[i]; i_max = i; } - - /* swap columns if necessary */ - if ( i_max != k ) - { - /* swap gamma values */ - tmp = gamma->ve[k]; - gamma->ve[k] = gamma->ve[i_max]; - gamma->ve[i_max] = tmp; - - /* update column permutation */ - px_transp(px,k,i_max); - - /* swap columns of A */ - for ( i=0; im; i++ ) - { - ztmp = A->me[i][k]; - A->me[i][k] = A->me[i][i_max]; - A->me[i][i_max] = ztmp; - } - } - - /* get H/holder vector for the k-th column */ - zget_col(A,k,tmp1); - /* hhvec(tmp1,k,&beta->ve[k],tmp1,&A->me[k][k]); */ - zhhvec(tmp1,k,&beta,tmp1,&A->me[k][k]); - diag->ve[k] = tmp1->ve[k]; - - /* apply H/holder vector to remaining columns */ - /* hhtrcols(A,k,k+1,tmp1,beta->ve[k]); */ - zhhtrcols(A,k,k+1,tmp1,beta); - - /* update gamma values */ - for ( j=k+1; jn; j++ ) - gamma->ve[j] -= square(A->me[k][j].re)+square(A->me[k][j].im); - } - - return (A); -} - -/* zQsolve -- solves Qx = b, Q is an orthogonal matrix stored in compact - form a la QRfactor() - -- may be in-situ */ -ZVEC *_zQsolve(QR,diag,b,x,tmp) -ZMAT *QR; -ZVEC *diag, *b, *x, *tmp; -{ - u_int dynamic; - int k, limit; - Real beta, r_ii, tmp_val; - - limit = min(QR->m,QR->n); - dynamic = FALSE; - if ( ! QR || ! diag || ! b ) - error(E_NULL,"_zQsolve"); - if ( diag->dim < limit || b->dim != QR->m ) - error(E_SIZES,"_zQsolve"); - x = zv_resize(x,QR->m); - if ( tmp == ZVNULL ) - dynamic = TRUE; - tmp = zv_resize(tmp,QR->m); - - /* apply H/holder transforms in normal order */ - x = zv_copy(b,x); - for ( k = 0 ; k < limit ; k++ ) - { - zget_col(QR,k,tmp); - r_ii = zabs(tmp->ve[k]); - tmp->ve[k] = diag->ve[k]; - tmp_val = (r_ii*zabs(diag->ve[k])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - /* hhtrvec(tmp,beta->ve[k],k,x,x); */ - zhhtrvec(tmp,beta,k,x,x); - } - - if ( dynamic ) - ZV_FREE(tmp); - - return (x); -} - -/* zmakeQ -- constructs orthogonal matrix from Householder vectors stored in - compact QR form */ -ZMAT *zmakeQ(QR,diag,Qout) -ZMAT *QR,*Qout; -ZVEC *diag; -{ - static ZVEC *tmp1=ZVNULL,*tmp2=ZVNULL; - u_int i, limit; - Real beta, r_ii, tmp_val; - int j; - - limit = min(QR->m,QR->n); - if ( ! QR || ! diag ) - error(E_NULL,"zmakeQ"); - if ( diag->dim < limit ) - error(E_SIZES,"zmakeQ"); - Qout = zm_resize(Qout,QR->m,QR->m); - - tmp1 = zv_resize(tmp1,QR->m); /* contains basis vec & columns of Q */ - tmp2 = zv_resize(tmp2,QR->m); /* contains H/holder vectors */ - MEM_STAT_REG(tmp1,TYPE_ZVEC); - MEM_STAT_REG(tmp2,TYPE_ZVEC); - - for ( i=0; im ; i++ ) - { /* get i-th column of Q */ - /* set up tmp1 as i-th basis vector */ - for ( j=0; jm ; j++ ) - tmp1->ve[j].re = tmp1->ve[j].im = 0.0; - tmp1->ve[i].re = 1.0; - - /* apply H/h transforms in reverse order */ - for ( j=limit-1; j>=0; j-- ) - { - zget_col(QR,j,tmp2); - r_ii = zabs(tmp2->ve[j]); - tmp2->ve[j] = diag->ve[j]; - tmp_val = (r_ii*zabs(diag->ve[j])); - beta = ( tmp_val == 0.0 ) ? 
0.0 : 1.0/tmp_val; - /* hhtrvec(tmp2,beta->ve[j],j,tmp1,tmp1); */ - zhhtrvec(tmp2,beta,j,tmp1,tmp1); - } - - /* insert into Q */ - zset_col(Qout,i,tmp1); - } - - return (Qout); -} - -/* zmakeR -- constructs upper triangular matrix from QR (compact form) - -- may be in-situ (all it does is zero the lower 1/2) */ -ZMAT *zmakeR(QR,Rout) -ZMAT *QR,*Rout; -{ - u_int i,j; - - if ( QR==ZMNULL ) - error(E_NULL,"zmakeR"); - Rout = zm_copy(QR,Rout); - - for ( i=1; im; i++ ) - for ( j=0; jn && jme[i][j].re = Rout->me[i][j].im = 0.0; - - return (Rout); -} - -/* zQRsolve -- solves the system Q.R.x=b where Q & R are stored in compact form - -- returns x, which is created if necessary */ -ZVEC *zQRsolve(QR,diag,b,x) -ZMAT *QR; -ZVEC *diag, *b, *x; -{ - int limit; - static ZVEC *tmp = ZVNULL; - - if ( ! QR || ! diag || ! b ) - error(E_NULL,"zQRsolve"); - limit = min(QR->m,QR->n); - if ( diag->dim < limit || b->dim != QR->m ) - error(E_SIZES,"zQRsolve"); - tmp = zv_resize(tmp,limit); - MEM_STAT_REG(tmp,TYPE_ZVEC); - - x = zv_resize(x,QR->n); - _zQsolve(QR,diag,b,x,tmp); - x = zUsolve(QR,x,x,0.0); - x = zv_resize(x,QR->n); - - return x; -} - -/* zQRAsolve -- solves the system (Q.R)*.x = b - -- Q & R are stored in compact form - -- returns x, which is created if necessary */ -ZVEC *zQRAsolve(QR,diag,b,x) -ZMAT *QR; -ZVEC *diag, *b, *x; -{ - int j, limit; - Real beta, r_ii, tmp_val; - static ZVEC *tmp = ZVNULL; - - if ( ! QR || ! diag || ! b ) - error(E_NULL,"zQRAsolve"); - limit = min(QR->m,QR->n); - if ( diag->dim < limit || b->dim != QR->n ) - error(E_SIZES,"zQRAsolve"); - - x = zv_resize(x,QR->m); - x = zUAsolve(QR,b,x,0.0); - x = zv_resize(x,QR->m); - - tmp = zv_resize(tmp,x->dim); - MEM_STAT_REG(tmp,TYPE_ZVEC); - printf("zQRAsolve: tmp->dim = %d, x->dim = %d\n", tmp->dim, x->dim); - - /* apply H/h transforms in reverse order */ - for ( j=limit-1; j>=0; j-- ) - { - zget_col(QR,j,tmp); - tmp = zv_resize(tmp,QR->m); - r_ii = zabs(tmp->ve[j]); - tmp->ve[j] = diag->ve[j]; - tmp_val = (r_ii*zabs(diag->ve[j])); - beta = ( tmp_val == 0.0 ) ? 0.0 : 1.0/tmp_val; - zhhtrvec(tmp,beta,j,x,x); - } - - - return x; -} - -/* zQRCPsolve -- solves A.x = b where A is factored by QRCPfactor() - -- assumes that A is in the compact factored form */ -ZVEC *zQRCPsolve(QR,diag,pivot,b,x) -ZMAT *QR; -ZVEC *diag; -PERM *pivot; -ZVEC *b, *x; -{ - if ( ! QR || ! diag || ! pivot || ! 
b ) - error(E_NULL,"zQRCPsolve"); - if ( (QR->m > diag->dim && QR->n > diag->dim) || QR->n != pivot->size ) - error(E_SIZES,"zQRCPsolve"); - - x = zQRsolve(QR,diag,b,x); - x = pxinv_zvec(pivot,x,x); - - return x; -} - -/* zUmlt -- compute out = upper_triang(U).x - -- may be in situ */ -ZVEC *zUmlt(U,x,out) -ZMAT *U; -ZVEC *x, *out; -{ - int i, limit; - - if ( U == ZMNULL || x == ZVNULL ) - error(E_NULL,"zUmlt"); - limit = min(U->m,U->n); - if ( limit != x->dim ) - error(E_SIZES,"zUmlt"); - if ( out == ZVNULL || out->dim < limit ) - out = zv_resize(out,limit); - - for ( i = 0; i < limit; i++ ) - out->ve[i] = __zip__(&(x->ve[i]),&(U->me[i][i]),limit - i,Z_NOCONJ); - return out; -} - -/* zUAmlt -- returns out = upper_triang(U)^T.x */ -ZVEC *zUAmlt(U,x,out) -ZMAT *U; -ZVEC *x, *out; -{ - /* complex sum; */ - complex tmp; - int i, limit; - - if ( U == ZMNULL || x == ZVNULL ) - error(E_NULL,"zUAmlt"); - limit = min(U->m,U->n); - if ( out == ZVNULL || out->dim < limit ) - out = zv_resize(out,limit); - - for ( i = limit-1; i >= 0; i-- ) - { - tmp = x->ve[i]; - out->ve[i].re = out->ve[i].im = 0.0; - __zmltadd__(&(out->ve[i]),&(U->me[i][i]),tmp,limit-i-1,Z_CONJ); - } - - return out; -} - - -/* zQRcondest -- returns an estimate of the 2-norm condition number of the - matrix factorised by QRfactor() or QRCPfactor() - -- note that as Q does not affect the 2-norm condition number, - it is not necessary to pass the diag, beta (or pivot) vectors - -- generates a lower bound on the true condition number - -- if the matrix is exactly singular, HUGE is returned - -- note that QRcondest() is likely to be more reliable for - matrices factored using QRCPfactor() */ -double zQRcondest(QR) -ZMAT *QR; -{ - static ZVEC *y=ZVNULL; - Real norm, norm1, norm2, tmp1, tmp2; - complex sum, tmp; - int i, j, limit; - - if ( QR == ZMNULL ) - error(E_NULL,"zQRcondest"); - - limit = min(QR->m,QR->n); - for ( i = 0; i < limit; i++ ) - /* if ( QR->me[i][i] == 0.0 ) */ - if ( is_zero(QR->me[i][i]) ) - return HUGE; - - y = zv_resize(y,limit); - MEM_STAT_REG(y,TYPE_ZVEC); - /* use the trick for getting a unit vector y with ||R.y||_inf small - from the LU condition estimator */ - for ( i = 0; i < limit; i++ ) - { - sum.re = sum.im = 0.0; - for ( j = 0; j < i; j++ ) - /* sum -= QR->me[j][i]*y->ve[j]; */ - sum = zsub(sum,zmlt(QR->me[j][i],y->ve[j])); - /* sum -= (sum < 0.0) ? 1.0 : -1.0; */ - norm1 = zabs(sum); - if ( norm1 == 0.0 ) - sum.re = 1.0; - else - { - sum.re += sum.re / norm1; - sum.im += sum.im / norm1; - } - /* y->ve[i] = sum / QR->me[i][i]; */ - y->ve[i] = zdiv(sum,QR->me[i][i]); - } - zUAmlt(QR,y,y); - - /* now apply inverse power method to R*.R */ - for ( i = 0; i < 3; i++ ) - { - tmp1 = zv_norm2(y); - zv_mlt(zmake(1.0/tmp1,0.0),y,y); - zUAsolve(QR,y,y,0.0); - tmp2 = zv_norm2(y); - zv_mlt(zmake(1.0/tmp2,0.0),y,y); - zUsolve(QR,y,y,0.0); - } - /* now compute approximation for ||R^{-1}||_2 */ - norm1 = sqrt(tmp1)*sqrt(tmp2); - - /* now use complementary approach to compute approximation to ||R||_2 */ - for ( i = limit-1; i >= 0; i-- ) - { - sum.re = sum.im = 0.0; - for ( j = i+1; j < limit; j++ ) - sum = zadd(sum,zmlt(QR->me[i][j],y->ve[j])); - if ( is_zero(QR->me[i][i]) ) - return HUGE; - tmp = zdiv(sum,QR->me[i][i]); - if ( is_zero(tmp) ) - { - y->ve[i].re = 1.0; - y->ve[i].im = 0.0; - } - else - { - norm = zabs(tmp); - y->ve[i].re = sum.re / norm; - y->ve[i].im = sum.im / norm; - } - /* y->ve[i] = (sum >= 0.0) ? 1.0 : -1.0; */ - /* y->ve[i] = (QR->me[i][i] >= 0.0) ? 
y->ve[i] : - y->ve[i]; */ - } - - /* now apply power method to R*.R */ - for ( i = 0; i < 3; i++ ) - { - tmp1 = zv_norm2(y); - zv_mlt(zmake(1.0/tmp1,0.0),y,y); - zUmlt(QR,y,y); - tmp2 = zv_norm2(y); - zv_mlt(zmake(1.0/tmp2,0.0),y,y); - zUAmlt(QR,y,y); - } - norm2 = sqrt(tmp1)*sqrt(tmp2); - - /* printf("QRcondest: norm1 = %g, norm2 = %g\n",norm1,norm2); */ - - return norm1*norm2; -} - diff --git a/src/mesch/zschur.c b/src/mesch/zschur.c deleted file mode 100755 index 8e20c0d63d..0000000000 --- a/src/mesch/zschur.c +++ /dev/null @@ -1,383 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - -/* - File containing routines for computing the Schur decomposition - of a complex non-symmetric matrix - See also: hessen.c - Complex version -*/ - - -#include -#include "zmatrix.h" -#include "zmatrix2.h" -#include - -static char rcsid[] = "zschur.c,v 1.1 1997/12/04 17:56:16 hines Exp"; - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0) -#define b2s(t_or_f) ((t_or_f) ? "TRUE" : "FALSE") - - -/* zschur -- computes the Schur decomposition of the matrix A in situ - -- optionally, gives Q matrix such that Q^*.A.Q is upper triangular - -- returns upper triangular Schur matrix */ -ZMAT *zschur(A,Q) -ZMAT *A, *Q; -{ - int i, j, iter, k, k_min, k_max, k_tmp, n, split; - Real c; - complex det, discrim, lambda, lambda0, lambda1, s, sum, ztmp; - complex x, y; /* for chasing algorithm */ - complex **A_me; - static ZVEC *diag=ZVNULL; - - if ( ! A ) - error(E_NULL,"zschur"); - if ( A->m != A->n || ( Q && Q->m != Q->n ) ) - error(E_SQUARE,"zschur"); - if ( Q != ZMNULL && Q->m != A->m ) - error(E_SIZES,"zschur"); - n = A->n; - diag = zv_resize(diag,A->n); - MEM_STAT_REG(diag,TYPE_ZVEC); - /* compute Hessenberg form */ - zHfactor(A,diag); - - /* save Q if necessary, and make A explicitly Hessenberg */ - zHQunpack(A,diag,Q,A); - - k_min = 0; A_me = A->me; - - while ( k_min < n ) - { - /* find k_max to suit: - submatrix k_min..k_max should be irreducible */ - k_max = n-1; - for ( k = k_min; k < k_max; k++ ) - if ( is_zero(A_me[k+1][k]) ) - { k_max = k; break; } - - if ( k_max <= k_min ) - { - k_min = k_max + 1; - continue; /* outer loop */ - } - - /* now have r x r block with r >= 2: - apply Francis QR step until block splits */ - split = FALSE; iter = 0; - while ( ! 
split ) - { - complex a00, a01, a10, a11; - iter++; - - /* set up Wilkinson/Francis complex shift */ - /* use the smallest eigenvalue of the bottom 2 x 2 submatrix */ - k_tmp = k_max - 1; - - a00 = A_me[k_tmp][k_tmp]; - a01 = A_me[k_tmp][k_max]; - a10 = A_me[k_max][k_tmp]; - a11 = A_me[k_max][k_max]; - ztmp.re = 0.5*(a00.re - a11.re); - ztmp.im = 0.5*(a00.im - a11.im); - discrim = zsqrt(zadd(zmlt(ztmp,ztmp),zmlt(a01,a10))); - sum.re = 0.5*(a00.re + a11.re); - sum.im = 0.5*(a00.im + a11.im); - lambda0 = zadd(sum,discrim); - lambda1 = zsub(sum,discrim); - det = zsub(zmlt(a00,a11),zmlt(a01,a10)); - - if ( is_zero(lambda0) && is_zero(lambda1) ) - { - lambda.re = lambda.im = 0.0; - } - else if ( zabs(lambda0) > zabs(lambda1) ) - lambda = zdiv(det,lambda0); - else - lambda = zdiv(det,lambda1); - - /* perturb shift if convergence is slow */ - if ( (iter % 10) == 0 ) - { - lambda.re += iter*0.02; - lambda.im += iter*0.02; - } - - /* set up Householder transformations */ - k_tmp = k_min + 1; - - x = zsub(A->me[k_min][k_min],lambda); - y = A->me[k_min+1][k_min]; - - /* use Givens' rotations to "chase" off-Hessenberg entry */ - for ( k = k_min; k <= k_max-1; k++ ) - { - zgivens(x,y,&c,&s); - zrot_cols(A,k,k+1,c,s,A); - zrot_rows(A,k,k+1,c,s,A); - if ( Q != ZMNULL ) - zrot_cols(Q,k,k+1,c,s,Q); - - /* zero things that should be zero */ - if ( k > k_min ) - A->me[k+1][k-1].re = A->me[k+1][k-1].im = 0.0; - - /* get next entry to chase along sub-diagonal */ - x = A->me[k+1][k]; - if ( k <= k_max - 2 ) - y = A->me[k+2][k]; - else - y.re = y.im = 0.0; - } - - for ( k = k_min; k <= k_max-2; k++ ) - { - /* zero appropriate sub-diagonals */ - A->me[k+2][k].re = A->me[k+2][k].im = 0.0; - } - - /* test to see if matrix should split */ - for ( k = k_min; k < k_max; k++ ) - if ( zabs(A_me[k+1][k]) < MACHEPS* - (zabs(A_me[k][k])+zabs(A_me[k+1][k+1])) ) - { - A_me[k+1][k].re = A_me[k+1][k].im = 0.0; - split = TRUE; - } - - } - } - - /* polish up A by zeroing strictly lower triangular elements - and small sub-diagonal elements */ - for ( i = 0; i < A->m; i++ ) - for ( j = 0; j < i-1; j++ ) - A_me[i][j].re = A_me[i][j].im = 0.0; - for ( i = 0; i < A->m - 1; i++ ) - if ( zabs(A_me[i+1][i]) < MACHEPS* - (zabs(A_me[i][i])+zabs(A_me[i+1][i+1])) ) - A_me[i+1][i].re = A_me[i+1][i].im = 0.0; - - return A; -} - - -#if 0 -/* schur_vecs -- returns eigenvectors computed from the real Schur - decomposition of a matrix - -- T is the block upper triangular Schur matrix - -- Q is the orthognal matrix where A = Q.T.Q^T - -- if Q is null, the eigenvectors of T are returned - -- X_re is the real part of the matrix of eigenvectors, - and X_im is the imaginary part of the matrix. - -- X_re is returned */ -MAT *schur_vecs(T,Q,X_re,X_im) -MAT *T, *Q, *X_re, *X_im; -{ - int i, j, limit; - Real t11_re, t11_im, t12, t21, t22_re, t22_im; - Real l_re, l_im, det_re, det_im, invdet_re, invdet_im, - val1_re, val1_im, val2_re, val2_im, - tmp_val1_re, tmp_val1_im, tmp_val2_re, tmp_val2_im, **T_me; - Real sum, diff, discrim, magdet, norm, scale; - static VEC *tmp1_re=VNULL, *tmp1_im=VNULL, - *tmp2_re=VNULL, *tmp2_im=VNULL; - - if ( ! T || ! 
X_re ) - error(E_NULL,"schur_vecs"); - if ( T->m != T->n || X_re->m != X_re->n || - ( Q != MNULL && Q->m != Q->n ) || - ( X_im != MNULL && X_im->m != X_im->n ) ) - error(E_SQUARE,"schur_vecs"); - if ( T->m != X_re->m || - ( Q != MNULL && T->m != Q->m ) || - ( X_im != MNULL && T->m != X_im->m ) ) - error(E_SIZES,"schur_vecs"); - - tmp1_re = v_resize(tmp1_re,T->m); - tmp1_im = v_resize(tmp1_im,T->m); - tmp2_re = v_resize(tmp2_re,T->m); - tmp2_im = v_resize(tmp2_im,T->m); - MEM_STAT_REG(tmp1_re,TYPE_VEC); - MEM_STAT_REG(tmp1_im,TYPE_VEC); - MEM_STAT_REG(tmp2_re,TYPE_VEC); - MEM_STAT_REG(tmp2_im,TYPE_VEC); - - T_me = T->me; - i = 0; - while ( i < T->m ) - { - if ( i+1 < T->m && T->me[i+1][i] != 0.0 ) - { /* complex eigenvalue */ - sum = 0.5*(T_me[i][i]+T_me[i+1][i+1]); - diff = 0.5*(T_me[i][i]-T_me[i+1][i+1]); - discrim = diff*diff + T_me[i][i+1]*T_me[i+1][i]; - l_re = l_im = 0.0; - if ( discrim < 0.0 ) - { /* yes -- complex e-vals */ - l_re = sum; - l_im = sqrt(-discrim); - } - else /* not correct Real Schur form */ - error(E_RANGE,"schur_vecs"); - } - else - { - l_re = T_me[i][i]; - l_im = 0.0; - } - - v_zero(tmp1_im); - v_rand(tmp1_re); - sv_mlt(MACHEPS,tmp1_re,tmp1_re); - - /* solve (T-l.I)x = tmp1 */ - limit = ( l_im != 0.0 ) ? i+1 : i; - /* printf("limit = %d\n",limit); */ - for ( j = limit+1; j < T->m; j++ ) - tmp1_re->ve[j] = 0.0; - j = limit; - while ( j >= 0 ) - { - if ( j > 0 && T->me[j][j-1] != 0.0 ) - { /* 2 x 2 diagonal block */ - /* printf("checkpoint A\n"); */ - val1_re = tmp1_re->ve[j-1] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j-1][j+1]),limit-j); - /* printf("checkpoint B\n"); */ - val1_im = tmp1_im->ve[j-1] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j-1][j+1]),limit-j); - /* printf("checkpoint C\n"); */ - val2_re = tmp1_re->ve[j] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint D\n"); */ - val2_im = tmp1_im->ve[j] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint E\n"); */ - - t11_re = T_me[j-1][j-1] - l_re; - t11_im = - l_im; - t22_re = T_me[j][j] - l_re; - t22_im = - l_im; - t12 = T_me[j-1][j]; - t21 = T_me[j][j-1]; - - scale = fabs(T_me[j-1][j-1]) + fabs(T_me[j][j]) + - fabs(t12) + fabs(t21) + fabs(l_re) + fabs(l_im); - - det_re = t11_re*t22_re - t11_im*t22_im - t12*t21; - det_im = t11_re*t22_im + t11_im*t22_re; - magdet = det_re*det_re+det_im*det_im; - if ( sqrt(magdet) < MACHEPS*scale ) - { - det_re = MACHEPS*scale; - magdet = det_re*det_re+det_im*det_im; - } - invdet_re = det_re/magdet; - invdet_im = - det_im/magdet; - tmp_val1_re = t22_re*val1_re-t22_im*val1_im-t12*val2_re; - tmp_val1_im = t22_im*val1_re+t22_re*val1_im-t12*val2_im; - tmp_val2_re = t11_re*val2_re-t11_im*val2_im-t21*val1_re; - tmp_val2_im = t11_im*val2_re+t11_re*val2_im-t21*val1_im; - tmp1_re->ve[j-1] = invdet_re*tmp_val1_re - - invdet_im*tmp_val1_im; - tmp1_im->ve[j-1] = invdet_im*tmp_val1_re + - invdet_re*tmp_val1_im; - tmp1_re->ve[j] = invdet_re*tmp_val2_re - - invdet_im*tmp_val2_im; - tmp1_im->ve[j] = invdet_im*tmp_val2_re + - invdet_re*tmp_val2_im; - j -= 2; - } - else - { - t11_re = T_me[j][j] - l_re; - t11_im = - l_im; - magdet = t11_re*t11_re + t11_im*t11_im; - scale = fabs(T_me[j][j]) + fabs(l_re); - if ( sqrt(magdet) < MACHEPS*scale ) - { - t11_re = MACHEPS*scale; - magdet = t11_re*t11_re + t11_im*t11_im; - } - invdet_re = t11_re/magdet; - invdet_im = - t11_im/magdet; - /* printf("checkpoint F\n"); */ - val1_re = tmp1_re->ve[j] - - __ip__(&(tmp1_re->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint G\n"); */ - val1_im = 
tmp1_im->ve[j] - - __ip__(&(tmp1_im->ve[j+1]),&(T->me[j][j+1]),limit-j); - /* printf("checkpoint H\n"); */ - tmp1_re->ve[j] = invdet_re*val1_re - invdet_im*val1_im; - tmp1_im->ve[j] = invdet_im*val1_re + invdet_re*val1_im; - j -= 1; - } - } - - norm = v_norm_inf(tmp1_re) + v_norm_inf(tmp1_im); - sv_mlt(1/norm,tmp1_re,tmp1_re); - if ( l_im != 0.0 ) - sv_mlt(1/norm,tmp1_im,tmp1_im); - mv_mlt(Q,tmp1_re,tmp2_re); - if ( l_im != 0.0 ) - mv_mlt(Q,tmp1_im,tmp2_im); - if ( l_im != 0.0 ) - norm = sqrt(in_prod(tmp2_re,tmp2_re)+in_prod(tmp2_im,tmp2_im)); - else - norm = v_norm2(tmp2_re); - sv_mlt(1/norm,tmp2_re,tmp2_re); - if ( l_im != 0.0 ) - sv_mlt(1/norm,tmp2_im,tmp2_im); - - if ( l_im != 0.0 ) - { - if ( ! X_im ) - error(E_NULL,"schur_vecs"); - set_col(X_re,i,tmp2_re); - set_col(X_im,i,tmp2_im); - sv_mlt(-1.0,tmp2_im,tmp2_im); - set_col(X_re,i+1,tmp2_re); - set_col(X_im,i+1,tmp2_im); - i += 2; - } - else - { - set_col(X_re,i,tmp2_re); - if ( X_im != MNULL ) - set_col(X_im,i,tmp1_im); /* zero vector */ - i += 1; - } - } - - return X_re; -} - -#endif - diff --git a/src/mesch/zsolve.c b/src/mesch/zsolve.c deleted file mode 100755 index 58c1b51fd9..0000000000 --- a/src/mesch/zsolve.c +++ /dev/null @@ -1,301 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. -** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -/* - Matrix factorisation routines to work with the other matrix files. - Complex case -*/ - -static char rcsid[] = "zsolve.c,v 1.1 1997/12/04 17:56:17 hines Exp"; - -#include -#include "zmatrix2.h" -#include - - -#define is_zero(z) ((z).re == 0.0 && (z).im == 0.0 ) - -/* Most matrix factorisation routines are in-situ unless otherwise specified */ - -/* zUsolve -- back substitution with optional over-riding diagonal - -- can be in-situ but doesn't need to be */ -ZVEC *zUsolve(matrix,b,out,diag) -ZMAT *matrix; -ZVEC *b, *out; -double diag; -{ - u_int dim /* , j */; - int i, i_lim; - complex **mat_ent, *mat_row, *b_ent, *out_ent, *out_col, sum; - - if ( matrix==ZMNULL || b==ZVNULL ) - error(E_NULL,"zUsolve"); - dim = min(matrix->m,matrix->n); - if ( b->dim < dim ) - error(E_SIZES,"zUsolve"); - if ( out==ZVNULL || out->dim < dim ) - out = zv_resize(out,matrix->n); - mat_ent = matrix->me; b_ent = b->ve; out_ent = out->ve; - - for ( i=dim-1; i>=0; i-- ) - if ( ! 
is_zero(b_ent[i]) ) - break; - else - out_ent[i].re = out_ent[i].im = 0.0; - i_lim = i; - - for ( i = i_lim; i>=0; i-- ) - { - sum = b_ent[i]; - mat_row = &(mat_ent[i][i+1]); - out_col = &(out_ent[i+1]); - sum = zsub(sum,__zip__(mat_row,out_col,i_lim-i,Z_NOCONJ)); - /****************************************************** - for ( j=i+1; j<=i_lim; j++ ) - sum -= mat_ent[i][j]*out_ent[j]; - sum -= (*mat_row++)*(*out_col++); - ******************************************************/ - if ( diag == 0.0 ) - { - if ( is_zero(mat_ent[i][i]) ) - error(E_SING,"zUsolve"); - else - /* out_ent[i] = sum/mat_ent[i][i]; */ - out_ent[i] = zdiv(sum,mat_ent[i][i]); - } - else - { - /* out_ent[i] = sum/diag; */ - out_ent[i].re = sum.re / diag; - out_ent[i].im = sum.im / diag; - } - } - - return (out); -} - -/* zLsolve -- forward elimination with (optional) default diagonal value */ -ZVEC *zLsolve(matrix,b,out,diag) -ZMAT *matrix; -ZVEC *b,*out; -double diag; -{ - u_int dim, i, i_lim /* , j */; - complex **mat_ent, *mat_row, *b_ent, *out_ent, *out_col, sum; - - if ( matrix==ZMNULL || b==ZVNULL ) - error(E_NULL,"zLsolve"); - dim = min(matrix->m,matrix->n); - if ( b->dim < dim ) - error(E_SIZES,"zLsolve"); - if ( out==ZVNULL || out->dim < dim ) - out = zv_resize(out,matrix->n); - mat_ent = matrix->me; b_ent = b->ve; out_ent = out->ve; - - for ( i=0; im,U->n); - if ( b->dim < dim ) - error(E_SIZES,"zUAsolve"); - out = zv_resize(out,U->n); - U_me = U->me; b_ve = b->ve; out_ve = out->ve; - - for ( i=0; idim); - /* MEM_COPY(&(b_ve[i_lim]),&(out_ve[i_lim]), - (dim-i_lim)*sizeof(complex)); */ - MEMCOPY(&(b_ve[i_lim]),&(out_ve[i_lim]),dim-i_lim,complex); - } - - if ( diag == 0.0 ) - { - for ( ; im,A->n); - if ( b->dim < dim ) - error(E_SIZES,"zDsolve"); - x = zv_resize(x,A->n); - - dim = b->dim; - for ( i=0; ime[i][i]) ) - error(E_SING,"zDsolve"); - else - x->ve[i] = zdiv(b->ve[i],A->me[i][i]); - - return (x); -} - -/* zLAsolve -- back substitution with optional over-riding diagonal - using the LOWER triangular part of matrix - -- can be in-situ but doesn't need to be */ -ZVEC *zLAsolve(L,b,out,diag) -ZMAT *L; -ZVEC *b, *out; -double diag; -{ - u_int dim; - int i, i_lim; - complex **L_me, *b_ve, *out_ve, tmp; - Real invdiag; - - if ( ! L || ! b ) - error(E_NULL,"zLAsolve"); - dim = min(L->m,L->n); - if ( b->dim < dim ) - error(E_SIZES,"zLAsolve"); - out = zv_resize(out,L->n); - L_me = L->me; b_ve = b->ve; out_ve = out->ve; - - for ( i=dim-1; i>=0; i-- ) - if ( ! is_zero(b_ve[i]) ) - break; - i_lim = i; - - if ( b != out ) - { - __zzero__(out_ve,out->dim); - /* MEM_COPY(b_ve,out_ve,(i_lim+1)*sizeof(complex)); */ - MEMCOPY(b_ve,out_ve,i_lim+1,complex); - } - - if ( diag == 0.0 ) - { - for ( ; i>=0; i-- ) - { - tmp = zconj(L_me[i][i]); - if ( is_zero(tmp) ) - error(E_SING,"zLAsolve"); - out_ve[i] = zdiv(out_ve[i],tmp); - tmp.re = - out_ve[i].re; - tmp.im = - out_ve[i].im; - __zmltadd__(out_ve,L_me[i],tmp,i,Z_CONJ); - } - } - else - { - invdiag = 1.0/diag; - for ( ; i>=0; i-- ) - { - out_ve[i].re *= invdiag; - out_ve[i].im *= invdiag; - tmp.re = - out_ve[i].re; - tmp.im = - out_ve[i].im; - __zmltadd__(out_ve,L_me[i],tmp,i,Z_CONJ); - } - } - - return (out); -} diff --git a/src/mesch/zvecop.c b/src/mesch/zvecop.c deleted file mode 100755 index 8f2efd824b..0000000000 --- a/src/mesch/zvecop.c +++ /dev/null @@ -1,511 +0,0 @@ -#include <../../nrnconf.h> - -/************************************************************************** -** -** Copyright (C) 1993 David E. Steward & Zbigniew Leyk, all rights reserved. 
-** -** Meschach Library -** -** This Meschach Library is provided "as is" without any express -** or implied warranty of any kind with respect to this software. -** In particular the authors shall not be liable for any direct, -** indirect, special, incidental or consequential damages arising -** in any way from use of the software. -** -** Everyone is granted permission to copy, modify and redistribute this -** Meschach Library, provided: -** 1. All copies contain this copyright notice. -** 2. All modified copies shall carry a notice stating who -** made the last modification and the date of such modification. -** 3. No charge is made for this software or works derived from it. -** This clause shall not be construed as constraining other software -** distributed on the same medium as this software, nor is a -** distribution fee considered a charge. -** -***************************************************************************/ - - -#include -#include "matrix.h" -#include "zmatrix.h" -static char rcsid[] = "zvecop.c,v 1.1 1997/12/04 17:56:19 hines Exp"; - - - -/* _zin_prod -- inner product of two vectors from i0 downwards - -- flag != 0 means compute sum_i a[i]*.b[i]; - -- flag == 0 means compute sum_i a[i].b[i] */ -complex _zin_prod(a,b,i0,flag) -ZVEC *a,*b; -u_int i0, flag; -{ - u_int limit; - - if ( a==ZVNULL || b==ZVNULL ) - error(E_NULL,"_zin_prod"); - limit = min(a->dim,b->dim); - if ( i0 > limit ) - error(E_BOUNDS,"_zin_prod"); - - return __zip__(&(a->ve[i0]),&(b->ve[i0]),(int)(limit-i0),flag); -} - -/* zv_mlt -- scalar-vector multiply -- may be in-situ */ -ZVEC *zv_mlt(scalar,vector,out) -complex scalar; -ZVEC *vector,*out; -{ - /* u_int dim, i; */ - /* complex *out_ve, *vec_ve; */ - - if ( vector==ZVNULL ) - error(E_NULL,"zv_mlt"); - if ( out==ZVNULL || out->dim != vector->dim ) - out = zv_resize(out,vector->dim); - if ( scalar.re == 0.0 && scalar.im == 0.0 ) - return zv_zero(out); - if ( scalar.re == 1.0 && scalar.im == 0.0 ) - return zv_copy(vector,out); - - __zmlt__(vector->ve,scalar,out->ve,(int)(vector->dim)); - - return (out); -} - -/* zv_add -- vector addition -- may be in-situ */ -ZVEC *zv_add(vec1,vec2,out) -ZVEC *vec1,*vec2,*out; -{ - u_int dim; - - if ( vec1==ZVNULL || vec2==ZVNULL ) - error(E_NULL,"zv_add"); - if ( vec1->dim != vec2->dim ) - error(E_SIZES,"zv_add"); - if ( out==ZVNULL || out->dim != vec1->dim ) - out = zv_resize(out,vec1->dim); - dim = vec1->dim; - __zadd__(vec1->ve,vec2->ve,out->ve,(int)dim); - - return (out); -} - -/* zv_mltadd -- scalar/vector multiplication and addition - -- out = v1 + scale.v2 */ -ZVEC *zv_mltadd(v1,v2,scale,out) -ZVEC *v1,*v2,*out; -complex scale; -{ - /* register u_int dim, i; */ - /* complex *out_ve, *v1_ve, *v2_ve; */ - - if ( v1==ZVNULL || v2==ZVNULL ) - error(E_NULL,"zv_mltadd"); - if ( v1->dim != v2->dim ) - error(E_SIZES,"zv_mltadd"); - if ( scale.re == 0.0 && scale.im == 0.0 ) - return zv_copy(v1,out); - if ( scale.re == 1.0 && scale.im == 0.0 ) - return zv_add(v1,v2,out); - - if ( v2 != out ) - { - tracecatch(out = zv_copy(v1,out),"zv_mltadd"); - - /* dim = v1->dim; */ - __zmltadd__(out->ve,v2->ve,scale,(int)(v1->dim),0); - } - else - { - tracecatch(out = zv_mlt(scale,v2,out),"zv_mltadd"); - out = zv_add(v1,out,out); - } - - return (out); -} - -/* zv_sub -- vector subtraction -- may be in-situ */ -ZVEC *zv_sub(vec1,vec2,out) -ZVEC *vec1,*vec2,*out; -{ - /* u_int i, dim; */ - /* complex *out_ve, *vec1_ve, *vec2_ve; */ - - if ( vec1==ZVNULL || vec2==ZVNULL ) - error(E_NULL,"zv_sub"); - if ( vec1->dim != vec2->dim ) - 
error(E_SIZES,"zv_sub"); - if ( out==ZVNULL || out->dim != vec1->dim ) - out = zv_resize(out,vec1->dim); - - __zsub__(vec1->ve,vec2->ve,out->ve,(int)(vec1->dim)); - - return (out); -} - -/* zv_map -- maps function f over components of x: out[i] = f(x[i]) - -- _zv_map sets out[i] = f(x[i],params) */ -ZVEC *zv_map(f,x,out) -#ifdef PROTOYPES_IN_STRUCT -complex (*f)(complex); -#else -complex (*f)(); -#endif -ZVEC *x, *out; -{ - complex *x_ve, *out_ve; - int i, dim; - - if ( ! x || ! f ) - error(E_NULL,"zv_map"); - if ( ! out || out->dim != x->dim ) - out = zv_resize(out,x->dim); - - dim = x->dim; x_ve = x->ve; out_ve = out->ve; - for ( i = 0; i < dim; i++ ) - out_ve[i] = (*f)(x_ve[i]); - - return out; -} - -ZVEC *_zv_map(f,params,x,out) -#ifdef PROTOTYPES_IN_STRUCT -complex (*f)(void *,complex); -#else -complex (*f)(); -#endif -ZVEC *x, *out; -void *params; -{ - complex *x_ve, *out_ve; - int i, dim; - - if ( ! x || ! f ) - error(E_NULL,"_zv_map"); - if ( ! out || out->dim != x->dim ) - out = zv_resize(out,x->dim); - - dim = x->dim; x_ve = x->ve; out_ve = out->ve; - for ( i = 0; i < dim; i++ ) - out_ve[i] = (*f)(params,x_ve[i]); - - return out; -} - -/* zv_lincomb -- returns sum_i a[i].v[i], a[i] real, v[i] vectors */ -ZVEC *zv_lincomb(n,v,a,out) -int n; /* number of a's and v's */ -complex a[]; -ZVEC *v[], *out; -{ - int i; - - if ( ! a || ! v ) - error(E_NULL,"zv_lincomb"); - if ( n <= 0 ) - return ZVNULL; - - for ( i = 1; i < n; i++ ) - if ( out == v[i] ) - error(E_INSITU,"zv_lincomb"); - - out = zv_mlt(a[0],v[0],out); - for ( i = 1; i < n; i++ ) - { - if ( ! v[i] ) - error(E_NULL,"zv_lincomb"); - if ( v[i]->dim != out->dim ) - error(E_SIZES,"zv_lincomb"); - out = zv_mltadd(out,v[i],a[i],out); - } - - return out; -} - - -#ifdef ANSI_C - - -/* zv_linlist -- linear combinations taken from a list of arguments; - calling: - zv_linlist(out,v1,a1,v2,a2,...,vn,an,NULL); - where vi are vectors (ZVEC *) and ai are numbers (complex) -*/ - -ZVEC *zv_linlist(ZVEC *out,ZVEC *v1,complex a1,...) -{ - va_list ap; - ZVEC *par; - complex a_par; - - if ( ! v1 ) - return ZVNULL; - - va_start(ap, a1); - out = zv_mlt(a1,v1,out); - - while ((par = va_arg(ap,ZVEC *))) { /* NULL ends the list*/ - a_par = va_arg(ap,complex); - if (a_par.re == 0.0 && a_par.im == 0.0) continue; - if ( out == par ) - error(E_INSITU,"zv_linlist"); - if ( out->dim != par->dim ) - error(E_SIZES,"zv_linlist"); - - if (a_par.re == 1.0 && a_par.im == 0.0) - out = zv_add(out,par,out); - else if (a_par.re == -1.0 && a_par.im == 0.0) - out = zv_sub(out,par,out); - else - out = zv_mltadd(out,par,a_par,out); - } - - va_end(ap); - return out; -} - - -#elif VARARGS - -/* zv_linlist -- linear combinations taken from a list of arguments; - calling: - zv_linlist(out,v1,a1,v2,a2,...,vn,an,NULL); - where vi are vectors (ZVEC *) and ai are numbers (complex) -*/ -ZVEC *zv_linlist(va_alist) va_dcl -{ - va_list ap; - ZVEC *par, *out; - complex a_par; - - va_start(ap); - out = va_arg(ap,ZVEC *); - par = va_arg(ap,ZVEC *); - if ( ! 
par ) { - va_end(ap); - return ZVNULL; - } - - a_par = va_arg(ap,complex); - out = zv_mlt(a_par,par,out); - - while ((par = va_arg(ap,ZVEC *))) { /* NULL ends the list*/ - a_par = va_arg(ap,complex); - if (a_par.re == 0.0 && a_par.im == 0.0) continue; - if ( out == par ) - error(E_INSITU,"zv_linlist"); - if ( out->dim != par->dim ) - error(E_SIZES,"zv_linlist"); - - if (a_par.re == 1.0 && a_par.im == 0.0) - out = zv_add(out,par,out); - else if (a_par.re == -1.0 && a_par.im == 0.0) - out = zv_sub(out,par,out); - else - out = zv_mltadd(out,par,a_par,out); - } - - va_end(ap); - return out; -} - - -#endif - - - -/* zv_star -- computes componentwise (Hadamard) product of x1 and x2 - -- result out is returned */ -ZVEC *zv_star(x1, x2, out) -ZVEC *x1, *x2, *out; -{ - int i; - Real t_re, t_im; - - if ( ! x1 || ! x2 ) - error(E_NULL,"zv_star"); - if ( x1->dim != x2->dim ) - error(E_SIZES,"zv_star"); - out = zv_resize(out,x1->dim); - - for ( i = 0; i < x1->dim; i++ ) - { - /* out->ve[i] = x1->ve[i] * x2->ve[i]; */ - t_re = x1->ve[i].re*x2->ve[i].re - x1->ve[i].im*x2->ve[i].im; - t_im = x1->ve[i].re*x2->ve[i].im + x1->ve[i].im*x2->ve[i].re; - out->ve[i].re = t_re; - out->ve[i].im = t_im; - } - - return out; -} - -/* zv_slash -- computes componentwise ratio of x2 and x1 - -- out[i] = x2[i] / x1[i] - -- if x1[i] == 0 for some i, then raise E_SING error - -- result out is returned */ -ZVEC *zv_slash(x1, x2, out) -ZVEC *x1, *x2, *out; -{ - int i; - Real r2, t_re, t_im; - complex tmp; - - if ( ! x1 || ! x2 ) - error(E_NULL,"zv_slash"); - if ( x1->dim != x2->dim ) - error(E_SIZES,"zv_slash"); - out = zv_resize(out,x1->dim); - - for ( i = 0; i < x1->dim; i++ ) - { - r2 = x1->ve[i].re*x1->ve[i].re + x1->ve[i].im*x1->ve[i].im; - if ( r2 == 0.0 ) - error(E_SING,"zv_slash"); - tmp.re = x1->ve[i].re / r2; - tmp.im = - x1->ve[i].im / r2; - t_re = tmp.re*x2->ve[i].re - tmp.im*x2->ve[i].im; - t_im = tmp.re*x2->ve[i].im - tmp.im*x2->ve[i].re; - out->ve[i].re = t_re; - out->ve[i].im = t_im; - } - - return out; -} - -/* zv_sum -- returns sum of entries of a vector */ -complex zv_sum(x) -ZVEC *x; -{ - int i; - complex sum; - - if ( ! 
x ) - error(E_NULL,"zv_sum"); - - sum.re = sum.im = 0.0; - for ( i = 0; i < x->dim; i++ ) - { - sum.re += x->ve[i].re; - sum.im += x->ve[i].im; - } - - return sum; -} - -/* px_zvec -- permute vector */ -ZVEC *px_zvec(px,vector,out) -PERM *px; -ZVEC *vector,*out; -{ - u_int old_i, i, size, start; - complex tmp; - - if ( px==PNULL || vector==ZVNULL ) - error(E_NULL,"px_zvec"); - if ( px->size > vector->dim ) - error(E_SIZES,"px_zvec"); - if ( out==ZVNULL || out->dim < vector->dim ) - out = zv_resize(out,vector->dim); - - size = px->size; - if ( size == 0 ) - return zv_copy(vector,out); - - if ( out != vector ) - { - for ( i=0; ipe[i] >= size ) - error(E_BOUNDS,"px_vec"); - else - out->ve[i] = vector->ve[px->pe[i]]; - } - else - { /* in situ algorithm */ - start = 0; - while ( start < size ) - { - old_i = start; - i = px->pe[old_i]; - if ( i >= size ) - { - start++; - continue; - } - tmp = vector->ve[start]; - while ( TRUE ) - { - vector->ve[old_i] = vector->ve[i]; - px->pe[old_i] = i+size; - old_i = i; - i = px->pe[old_i]; - if ( i >= size ) - break; - if ( i == start ) - { - vector->ve[old_i] = tmp; - px->pe[old_i] = i+size; - break; - } - } - start++; - } - - for ( i = 0; i < size; i++ ) - if ( px->pe[i] < size ) - error(E_BOUNDS,"px_vec"); - else - px->pe[i] = px->pe[i]-size; - } - - return out; -} - -/* pxinv_zvec -- apply the inverse of px to x, returning the result in out - -- may NOT be in situ */ -ZVEC *pxinv_zvec(px,x,out) -PERM *px; -ZVEC *x, *out; -{ - u_int i, size; - - if ( ! px || ! x ) - error(E_NULL,"pxinv_zvec"); - if ( px->size > x->dim ) - error(E_SIZES,"pxinv_zvec"); - if ( ! out || out->dim < x->dim ) - out = zv_resize(out,x->dim); - - size = px->size; - if ( size == 0 ) - return zv_copy(x,out); - if ( out != x ) - { - for ( i=0; ipe[i] >= size ) - error(E_BOUNDS,"pxinv_vec"); - else - out->ve[px->pe[i]] = x->ve[i]; - } - else - { /* in situ algorithm --- cheat's way out */ - px_inv(px,px); - px_zvec(px,x,out); - px_inv(px,px); - } - - - return out; -} - -/* zv_rand -- randomise a complex vector; uniform in [0,1)+[0,1)*i */ -ZVEC *zv_rand(x) -ZVEC *x; -{ - if ( ! 
x ) - error(E_NULL,"zv_rand"); - - mrandlist((Real *)(x->ve),2*x->dim); - - return x; -} diff --git a/src/modlunit/carbonmunit.h b/src/modlunit/carbonmunit.h deleted file mode 100644 index 907656a8db..0000000000 --- a/src/modlunit/carbonmunit.h +++ /dev/null @@ -1,9 +0,0 @@ -#include -#include -#include -#include -#pragma once off -#define SYSV 1 -#define MAC 1 - -#define NRNUNIT 1 diff --git a/src/modlunit/extdef.h b/src/modlunit/extdef.h index aa7c8a51e7..36da115736 100644 --- a/src/modlunit/extdef.h +++ b/src/modlunit/extdef.h @@ -8,4 +8,5 @@ "gauss", "normrand", "poisrand", "poisson", "setseed", "scop_random", "boundary", "romberg", "legendre", "invert", "stepforce", "schedule", "set_seed", "nrn_pointing", "state_discontinuity", "net_send", "net_move", "net_event", "nrn_random_play", "at_time", - "nrn_ghk", + "nrn_ghk", "random_negexp", "random_normal", "random_uniform", "random_setseq", "random_setids", + "random_ipick", diff --git a/src/modlunit/init.cpp b/src/modlunit/init.cpp index 9e92331a97..80250d47ab 100644 --- a/src/modlunit/init.cpp +++ b/src/modlunit/init.cpp @@ -79,6 +79,7 @@ static struct { /* Keywords */ {"READ", READ}, {"WRITE", WRITE}, {"RANGE", RANGE}, + {"RANDOM", RANDOM}, {"VALENCE", VALENCE}, {"CHARGE", VALENCE}, {"GLOBAL", GLOBAL}, diff --git a/src/modlunit/io.cpp b/src/modlunit/io.cpp index e8a6449806..93b36a6557 100644 --- a/src/modlunit/io.cpp +++ b/src/modlunit/io.cpp @@ -5,9 +5,6 @@ #include #include "model.h" #include -#if MAC && TARGET_API_MAC_CARBON -#include -#endif #undef METHOD #include "parse1.hpp" Item* lastok; /*should be last token accepted by parser that gives @@ -168,10 +165,6 @@ void diag(const char* s1, const char* s2) { } } Fprintf(stderr, "\n"); -#if MAC && TARGET_API_MAC_CARBON - SIOUXSettings.autocloseonquit = true; - RunApplicationEventLoop(); -#endif exit(1); } @@ -348,7 +341,7 @@ static int file_stack_empty() { /* io.c,v * Revision 1.2 1997/11/24 16:19:09 hines - * modlunit port to MAC (not complete) + * modlunit port to mac (not complete) * * Revision 1.1.1.1 1994/10/12 17:22:48 hines * NEURON 3.0 distribution diff --git a/src/modlunit/list.cpp b/src/modlunit/list.cpp index 843542170d..88bea2d255 100644 --- a/src/modlunit/list.cpp +++ b/src/modlunit/list.cpp @@ -258,30 +258,14 @@ Item* putintoken(const char* s, short type, short toktype) { /* make sure a symb return q; } -#if MAC -#undef HAVE_STDARG_H -#define HAVE_STDARG_H 1 -#endif - -#if HAVE_STDARG_H #include -#else -#include -#endif /* make a list of item pointers: notice that the items themselves remain in whatever list they happen to be in. */ /* usage is q = makelist(n, q1, q2,..., qn); and q is of type LIST and is not in any list */ -Item * -#if HAVE_STDARG_H -makelist(int narg, ...) -{ -#else -makelist(va_dcl va_alist) { - int narg; -#endif +Item* makelist(int narg, ...) { va_list ap; int i; List* l; @@ -291,12 +275,7 @@ makelist(va_dcl va_alist) { ql = newitem(); ql->itemtype = LIST; ql->element = (void*) l; -#if HAVE_STDARG_H va_start(ap, narg); -#else - va_start(ap); - narg = va_arg(ap, int); -#endif for (i = 0; i < narg; i++) { q = va_arg(ap, Item*); append(ql, q); @@ -323,24 +302,13 @@ Item* prepend(Item* ql, Item* q) { /* An item which is an array of item pointers. Note where the size of the array is held. */ -Item * -#if HAVE_STDARG_H -itemarray(int narg, ...) { -#else -itemarray(va_dcl va_alist) { - int narg; -#endif +Item* itemarray(int narg, ...) 
{ va_list ap; int i; Item *ql, *q, **qa; ql = newitem(); -#if HAVE_STDARG_H va_start(ap, narg); -#else - va_start(ap); - narg = va_arg(ap, int); -#endif ql->itemtype = ITEMARRAY; qa = (Item**) emalloc((unsigned) (narg + 1) * sizeof(Item*)); qa++; diff --git a/src/modlunit/macmunit.h b/src/modlunit/macmunit.h deleted file mode 100644 index 8a2e315ec6..0000000000 --- a/src/modlunit/macmunit.h +++ /dev/null @@ -1,11 +0,0 @@ -#pragma precompile_target "modlunit_def.h" -#define MSL_USE_PRECOMPILED_HEADERS 1 -#include -#include -#include -#include -#pragma once off -#define SYSV 1 -#define MAC 1 - -#define NRNUNIT 1 diff --git a/src/modlunit/model.cpp b/src/modlunit/model.cpp index fbdbebbc3d..29005268fc 100644 --- a/src/modlunit/model.cpp +++ b/src/modlunit/model.cpp @@ -49,9 +49,6 @@ extern const char* RCS_version; extern const char* RCS_date; static void openfiles(int, char**); static void debug_item(Item* q, int indent, FILE* file); -#if MAC -#include -#endif int main(int argc, char* argv[]) { /* @@ -59,9 +56,6 @@ int main(int argc, char* argv[]) { * files * We first look for a .mrg file and then a .mod file */ -#if MAC - SIOUXSettings.asktosaveonclose = false; -#endif Fprintf(stderr, "%s %s %s\n", pgm_name, RCS_version, RCS_date); @@ -69,9 +63,6 @@ int main(int argc, char* argv[]) { * lists, etc. */ unit_init(); nrn_unit_init(); -#if MAC - mac_cmdline(&argc, &argv); -#endif openfiles(argc, argv); /* .mrg else .mod, .var, .c */ Fprintf(stderr, "Checking units of %s\n", finname); @@ -99,10 +90,6 @@ int main(int argc, char* argv[]) { /* check unit consistency */ parsepass(3); yyparse(); -#if MAC - printf("Units consistent in %s\n", finname); - SIOUXSettings.autocloseonquit = true; -#endif #if 0 parout(); /* print .var file. * Also #defines which used to be in defs.h @@ -263,7 +250,7 @@ static void debug_item(Item* q, int indent, FILE* file) { * more changes for port to mac of modlunit * * Revision 1.3 1997/11/24 16:19:12 hines - * modlunit port to MAC (not complete) + * modlunit port to mac (not complete) * * Revision 1.2 1997/10/20 14:58:07 hines * modlunit file.mod accepted (ie suffix allowed) diff --git a/src/modlunit/model.h b/src/modlunit/model.h index 049fa73c14..2433436c53 100644 --- a/src/modlunit/model.h +++ b/src/modlunit/model.h @@ -128,6 +128,7 @@ extern List* _LST(Item* q, char* file, int line); #define LOCL 0400000L #define CNVFAC 01000000L #define UFACTOR 02000000L +#define RANGEOBJ 04000000L #define EXPLICIT_DECL 01 /* usage field, variable occurs in input file */ @@ -140,18 +141,11 @@ extern char *inputline(), /* used only by parser to get title line */ const char* unit_str(); extern const char* decode_units(Symbol*); -extern List -#if HAVE_STDARG_H || MAC - *makelist(int narg, ...), - *itemarray(int narg, ...), /* item ITEMARRAY, array of item pointers */ -#else - *makelist(), /* item LIST */ - *itemarray(), /* item ITEMARRAY, array of item pointers */ -#endif - *prepend(), - *newlist(), /* begins new empty list */ - *inputtext(); /* used by parser to get block text from - * VERBATIM and COMMENT */ +extern List *makelist(int narg, ...), + *itemarray(int narg, ...), /* item ITEMARRAY, array of item pointers */ + *prepend(), *newlist(), /* begins new empty list */ + *inputtext(); /* used by parser to get block text from + * VERBATIM and COMMENT */ extern Item *putintoken(const char*s, short type, short), /* construct symbol and store input tokens */ *insertstr(Item*item, const char*str), /* before a known Item */ diff --git a/src/modlunit/nrnunit.cpp b/src/modlunit/nrnunit.cpp 
index 6af59eb7fd..fa71749b68 100644 --- a/src/modlunit/nrnunit.cpp +++ b/src/modlunit/nrnunit.cpp @@ -85,6 +85,12 @@ void nrn_list(Item* qtype, Item* qlist) { point_process = 1; } break; + case RANDOM: + plist = (List**) 0; + ITERATE(q, qlist) { + declare(RANGEOBJ, q, nullptr); + } + break; default: plist = (List**) 0; break; diff --git a/src/modlunit/parse1.ypp b/src/modlunit/parse1.ypp index b69803fd87..6e0e4778b3 100755 --- a/src/modlunit/parse1.ypp +++ b/src/modlunit/parse1.ypp @@ -102,6 +102,7 @@ extern int lexcontext; %type initstmt bablk %token CONDUCTANCE %type conducthint +%token RANDOM /* precedence in expressions--- low to high */ %left OR @@ -602,7 +603,7 @@ fornetcon: FOR_NETCONS '(' arglist ')' while (q1 != netreceive_arglist && q2 != args) { Symbol* s1 = SYM(q1); Symbol* s2 = SYM(q2); - if (s1->u.str) { /* s2 must be nil or same */ + if (s1->u.str) { /* s2 must be nullptr or same */ if (s2->u.str) { if (strcmp(s1->u.str, s2->u.str) != 0) { diag(s1->name, "in FOR_NETCONS arglist does not have same units as corresponding arg in NET_RECEIVE arglist"); @@ -610,7 +611,7 @@ fornetcon: FOR_NETCONS '(' arglist ')' }else{ s2->u.str = s1->u.str; } - }else{ /* s2 must be nil */ + }else{ /* s2 must be nullptr */ if (s2->u.str) { diag(s1->name, "in FOR_NETCONS arglist does not have same units as corresponding arg in NET_RECEIVE arglist"); } @@ -752,6 +753,8 @@ nrnstmt: /*nothing*/ { P1{nrn_list($2,$3);}} | nrnstmt RANGE nrnlist { P1{nrn_list($2, $3);}} + | nrnstmt RANDOM nrnlist + { P1{nrn_list($2, $3);}} | nrnstmt GLOBAL nrnlist { P1{nrn_list($2, $3);}} | nrnstmt POINTER nrnlist diff --git a/src/modlunit/units.cpp b/src/modlunit/units.cpp index 83245793ab..5f67ca1d2b 100644 --- a/src/modlunit/units.cpp +++ b/src/modlunit/units.cpp @@ -12,32 +12,6 @@ #include -/** - The strategy for dynamic units selection between Legacy and modern units - is to maintain two complete tables respectively. Legacy and modern in the - nrnunits.lib.in file are distinquished by, e.g., - @LegacyY@faraday 9.6485309+4 coul - @LegacyN@faraday 96485.3321233100184 coul - The reason for two complete tables, as opposed to a main table and several - short legacy and modern tables, is that units are often defined in terms - of modified units. eg, "R = (k-mole) (joule/degC)" - - Nmodl, via the parser, uses only unit_pop, unit_mag, Unit_push, - install_units, unit_div, and modl_units. - - The issue of unit magnitude arises only when declaring a unit factor as in - the gasconstant (R) above or with the equivalent "name = (unit) -> (unit)" - syntax. 
If the magnitude difers between legacy and modern, then instead of - emitting code like "static double FARADAY = 96485.3;\n" we can emit - #define FARADAY _nrnunit_FARADAY_[_nrnunit_use_legacy_] - static double _nrnunit_FARADAY_[2] = {96485.3321233100184, 96485.3}; -**/ - -/* modlunit can do its thing in the old way */ -#if !defined(NRN_DYNAMIC_UNITS) -#define NRN_DYNAMIC_UNITS 0 -#endif - #ifdef MINGW #include "../mswin/extra/d2upath.cpp" #endif @@ -60,27 +34,13 @@ extern void diag(const char*, const char*); #define NTAB 601 -#if NRN_DYNAMIC_UNITS -#define SUFFIX ".in" -#else -#define SUFFIX "" -#endif - /* if MODLUNIT environment variable not set then look in the following places*/ -#if MAC -static const char* dfile = ":lib:nrnunits.lib" SUFFIX; -#else #if defined(NEURON_DATA_DIR) -static const char* dfile = NEURON_DATA_DIR "/lib/nrnunits.lib" SUFFIX; +static char const* const dfile = NEURON_DATA_DIR "/lib/nrnunits.lib"; #else -static const char* dfile = "/usr/lib/units"; -#endif -#endif -#if MAC -static const char* dfilealt = "::lib:nrnunits.lib" SUFFIX; -#else -static const char* dfilealt = "../../share/lib/nrnunits.lib" SUFFIX; +static char const* const dfile = "/usr/lib/units"; #endif +static char const* const dfilealt = "../../share/lib/nrnunits.lib"; static char* unames[NDIM]; double getflt(); void fperr(int); @@ -108,13 +68,6 @@ static struct table { static char* names; -#if NRN_DYNAMIC_UNITS -static struct dynam { - struct table* table; /* size NTAB */ - char* names; /* size NTAB*10 */ -} dynam[2]; -#endif - static struct prefix { double factor; const char* pname; @@ -144,15 +97,7 @@ static const char* pc; static int Getc(FILE* inp) { if (inp != stdin) { -#if MAC - int c = getc(inp); - if (c == '\r') { - c = '\n'; - } - return c; -#else return getc(inp); -#endif } else if (pc && *pc) { return (int) (*pc++); } else { @@ -346,23 +291,8 @@ static void install_units_help(char* s1, char* s2) /* define s1 as s2 */ unit_pop(); } -static void switch_units(int legacy) { -#if NRN_DYNAMIC_UNITS - table = dynam[legacy].table; - names = dynam[legacy].names; -#endif -} - void install_units(char* s1, char* s2) { -#if NRN_DYNAMIC_UNITS - int i; - for (i = 0; i < 2; ++i) { - switch_units(i); - install_units_help(s1, s2); - } -#else install_units_help(s1, s2); -#endif } void check_num() { @@ -583,20 +513,10 @@ static void units_alloc() { static int units_alloc_called = 0; if (!units_alloc_called) { units_alloc_called = 1; -#if NRN_DYNAMIC_UNITS - for (i = 0; i < 2; ++i) { - dynam[i].table = (struct table*) calloc(NTAB, sizeof(struct table)); - assert(dynam[i].table); - dynam[i].names = (char*) calloc(NTAB * 10, sizeof(char)); - assert(dynam[i].names); - switch_units(i); - } -#else table = (struct table*) calloc(NTAB, sizeof(struct table)); assert(table); names = (char*) calloc(NTAB * 10, sizeof(char)); assert(names); -#endif } } @@ -608,14 +528,7 @@ void modl_units() { unitonflag = 1; if (first) { units_alloc(); -#if NRN_DYNAMIC_UNITS - for (i = 0; i < 2; ++i) { - switch_units(i); - unit_init(); - } -#else unit_init(); -#endif first = 0; } } @@ -631,7 +544,7 @@ void unit_init() { /* note that on mingw, even if MODLUNIT set to /cygdrive/c/... * it ends up here as c:/... and that is good*/ /* printf("MODLUNIT=|%s|\n", s); */ - Sprintf(buf, "%s%s", s, SUFFIX); + Sprintf(buf, "%s", s); if ((inpfile = fopen(buf, "r")) == (FILE*) 0) { diag("Bad MODLUNIT environment variable. Cant open:", buf); } @@ -642,9 +555,9 @@ void unit_init() { if (s) { if (strncmp(s, "/cygdrive/", 10) == 0) { /* /cygdrive/x/... 
to c:/... */ - Sprintf(buf, "%c:%s/lib/nrnunits.lib" SUFFIX, s[10], s + 11); + Sprintf(buf, "%c:%s/lib/nrnunits.lib", s[10], s + 11); } else { - Sprintf(buf, "%s/lib/nrnunits.lib" SUFFIX, s); + Sprintf(buf, "%s/lib/nrnunits.lib", s); } inpfile = fopen(buf, "r"); free(s); @@ -655,7 +568,7 @@ void unit_init() { if ((inpfile = fopen(dfilealt, "r")) == (FILE*) 0) { s = neuronhome(); if (s) { - Sprintf(buf, "%s/lib/nrnunits.lib" SUFFIX, s); + Sprintf(buf, "%s/lib/nrnunits.lib", s); inpfile = fopen(buf, "r"); } } @@ -672,65 +585,6 @@ void unit_init() { unit_stk_clean(); } -#if 0 -void main(argc, argv) -char *argv[]; -{ - register i; - register char *file; - struct unit u1, u2; - double f; - - if(argc>1 && *argv[1]=='-') { - argc--; - argv++; - dumpflg++; - } - file = dfile; - if(argc > 1) - file = argv[1]; - if ((inpfile = fopen(file, "r")) == NULL) { - printf("no table\n"); - exit(1); - } - signal(8, fperr); - units_cpp_init(); - -loop: - fperrc = 0; - printf("you have: "); - if(convr(&u1)) - goto loop; - if(fperrc) - goto fp; -loop1: - printf("you want: "); - if(convr(&u2)) - goto loop1; - for(i=0; i 1) *ucp++ = (u + '0'); - return (2); + return 2; } if (u < 0) - return (1); - return (0); + return 1; + return 0; } int convr(unit* up) { @@ -798,7 +652,7 @@ int convr(unit* up) { if (c == '/') den++; if (c == '\n') - return (err); + return err; goto loop; } *cp++ = c; @@ -831,7 +685,7 @@ int lookup(char* name, unit* up, int den, int c) { c--; goto l1; } - return (0); + return 0; } { const char* cp1{}; @@ -863,14 +717,14 @@ int lookup(char* name, unit* up, int den, int c) { name); diag("Cannot recognize the units: ", name); /* printf("cannot recognize %s\n", name);*/ - return (1); + return 1; } static int equal(const char* c1, const char* c2) { while (*c1++ == *c2) if (*c2++ == 0) - return (1); - return (0); + return 1; + return 0; } void units_cpp_init() { @@ -898,9 +752,6 @@ void units_cpp_init() { l0: c = get(); if (c == 0) { -#if 0 - printf("%d units; %ld bytes\n\n", i, cp-names); -#endif if (dumpflg) for (tp = table; tp < table + NTAB; tp++) { if (tp->name == 0) @@ -919,36 +770,6 @@ void units_cpp_init() { goto l0; } -#if NRN_DYNAMIC_UNITS - if (c == '@') { - /** - Dynamic unit line beginning with @LegacyY@ or @LegacyN@. - If the Y or N does not match the modern or legacy table, skip the - entire line. For a match, just leave file at char after the final '@'. - **/ - int i; - int legacy; - char legstr[7]; - char y_or_n; - for (i = 0; i < 6; ++i) { - legstr[i] = get(); - } - legstr[6] = '\0'; - assert(strcmp(legstr, "Legacy") == 0); - y_or_n = get(); - assert(y_or_n == 'Y' || y_or_n == 'N'); - legacy = (y_or_n == 'Y') ? 
1 : 0; - nrn_assert(get() == '@'); - if (dynam[legacy].table != table) { /* skip the line */ - while (c != '\n' && c != 0) { - c = get(); - } - goto l0; - } - c = get(); - } -#endif - if (c == '\n') goto l0; @@ -1000,18 +821,12 @@ void units_cpp_init() { goto l0; } -#if NRN_DYNAMIC_UNITS -/* Translate string to double using a2f for modern units - to allow consistency with BlueBrain/nmodl -*/ -double modern_getflt() { +double getflt() { int c; char str[100]; char* cp; double d_modern; - assert(table == dynam[0].table); - cp = str; do c = get(); @@ -1040,73 +855,11 @@ double modern_getflt() { *cp = '\0'; d_modern = atof(str); if (c == '|') { - d_modern /= modern_getflt(); + d_modern /= getflt(); return d_modern; } peekc = c; - return (d_modern); -} -#endif /* NRN_DYNAMIC_UNITS */ - -double getflt() { - int c, i, dp; - double d, e; - int f; - -#if NRN_DYNAMIC_UNITS - if (table == dynam[0].table) { - return modern_getflt(); - } -#endif /* NRN_DYNAMIC_UNITS */ - d = 0.; - dp = 0; - do - c = get(); - while (c == ' ' || c == '\t'); - -l1: - if (c >= '0' && c <= '9') { - d = d * 10. + c - '0'; - if (dp) - dp++; - c = get(); - goto l1; - } - if (c == '.') { - dp++; - c = get(); - goto l1; - } - if (dp) - dp--; - if (c == '+' || c == '-') { - f = 0; - if (c == '-') - f++; - i = 0; - c = get(); - while (c >= '0' && c <= '9') { - i = i * 10 + c - '0'; - c = get(); - } - if (f) - i = -i; - dp -= i; - } - e = 1.; - i = dp; - if (i < 0) - i = -i; - while (i--) - e *= 10.; - if (dp < 0) - d *= e; - else - d /= e; - if (c == '|') - return (d / getflt()); - peekc = c; - return (d); + return d_modern; } int get() { @@ -1115,7 +868,7 @@ int get() { /*SUPPRESS 560*/ if ((c = peekc) != 0) { peekc = 0; - return (c); + return c; } c = Getc(inpfile); if (c == '\r') { @@ -1126,9 +879,9 @@ int get() { printf("\n"); exit(0); } - return (0); + return 0; } - return (c); + return c; } struct table* hash_table(const char* name) { @@ -1146,9 +899,9 @@ struct table* hash_table(const char* name) { tp = &table[h]; l0: if (tp->name == 0) - return (tp); + return tp; if (equal(name, tp->name)) - return (tp); + return tp; tp++; if (tp >= table + NTAB) tp = table; @@ -1160,43 +913,10 @@ void fperr(int sig) { fperrc++; } -static double dynam_unit_mag(int legacy, char* u1, char* u2) { - double result; - switch_units(legacy); +void nrnunit_str(char (&buf)[NRN_BUFSIZE], const char* name, const char* u1, const char* u2) { Unit_push(u1); Unit_push(u2); unit_div(); - result = unit_mag(); + Sprintf(buf, "static double %s = %a;\n", name, unit_mag()); unit_pop(); - return result; -} - -void nrnunit_dynamic_str(char (&buf)[NRN_BUFSIZE], const char* name, char* u1, char* u2) { -#if NRN_DYNAMIC_UNITS - - double legacy = dynam_unit_mag(1, u1, u2); - double modern = dynam_unit_mag(0, u1, u2); - Sprintf(buf, - "\n" - "#define %s _nrnunit_%s[_nrnunit_use_legacy_]\n" - "static double _nrnunit_%s[2] = {%a, %g};\n", - name, - name, - name, - modern, - legacy); - -#else - - Unit_push(u1); - Unit_push(u2); - unit_div(); -#if (defined(LegacyFR) && LegacyFR == 1) - Sprintf(buf, "static double %s = %g;\n", name, unit_mag()); -#else - Sprintf(buf, "static double %s = %.12g;\n", name, unit_mag()); -#endif - unit_pop(); - -#endif } diff --git a/src/mswin/extra/d2upath.cpp b/src/mswin/extra/d2upath.cpp index b31ae3b11a..9b9ce5c064 100644 --- a/src/mswin/extra/d2upath.cpp +++ b/src/mswin/extra/d2upath.cpp @@ -9,30 +9,22 @@ the output string should be freed with free() when no longer needed. 
#include #include -char* hoc_dos2cygdrivepath(const char* d, int cygdrive) { +char* hoc_dos2unixpath(const char* d) { /* translate x: and x:/ and x:\, to /cygdrive/x/ */ /* and all backslashes to forward slashes */ /* or, for mingw, just backslashes to forward slashes */ - char* u; char* cp; - int i, j; -#if 0 - u = new char[strlen(d) + 12]; -#else - u = static_cast(malloc(strlen(d) + 12)); + auto* const u = static_cast(malloc(strlen(d) + 12)); assert(u); -#endif - i = j = 0; - if (cygdrive) { - if (d[0] && d[1] == ':') { - strcpy(u, "/cygdrive/"); - i = strlen(u); - u[i++] = d[0]; - j += 2; - u[i++] = '/'; - if (d[j] == '/' || d[j] == '\\') { - j++; - } + int i{}, j{}; + if (d[0] && d[1] == ':') { + strcpy(u, "/cygdrive/"); + i = strlen(u); + u[i++] = d[0]; + j += 2; + u[i++] = '/'; + if (d[j] == '/' || d[j] == '\\') { + j++; } } strcpy(u + i, d + j); @@ -43,11 +35,3 @@ char* hoc_dos2cygdrivepath(const char* d, int cygdrive) { } return u; } - -char* hoc_dos2unixpath(const char* d) { -#if defined(__MINGW32__) - return hoc_dos2cygdrivepath(d, 1); -#else - return hoc_dos2cygdrivepath(d, 1); -#endif -} diff --git a/src/mswin/mwprefix.h b/src/mswin/mwprefix.h index 71f0f8d943..877b31f9ec 100644 --- a/src/mswin/mwprefix.h +++ b/src/mswin/mwprefix.h @@ -1,10 +1,5 @@ /* auto include file for metrowerks codewarrior for all nrn */ -#if __MWERKS__ >= 7 -#define _MSL_DIRENT_H -#else #include -#endif -#define HAVE_LIMITS_H 1 #pragma once off #ifndef __WIN32__ #define __WIN32__ 1 diff --git a/src/mswin/nrnsetupmingw.nsi.in b/src/mswin/nrnsetupmingw.nsi.in index 7036d36aba..48eb1eb295 100644 --- a/src/mswin/nrnsetupmingw.nsi.in +++ b/src/mswin/nrnsetupmingw.nsi.in @@ -137,7 +137,7 @@ Section "Start Menu Shortcuts" CreateShortCut "$SMPROGRAMS\${NEURON}\mknrndll.lnk" "$INSTDIR\bin${binsuffix}\nrniv.exe" "-nopython $uINSTDIR/lib/hoc/mknrndll.hoc" "$INSTDIR\bin${binsuffix}\nmodl2a.ico" 0 CreateShortCut "$SMPROGRAMS\${NEURON}\modlunit.lnk" "$INSTDIR\bin${binsuffix}\nrniv.exe" "-nopython $uINSTDIR/lib/hoc/modlunit.hoc" "$INSTDIR\bin${binsuffix}\nmodl2a.ico" 0 CreateShortCut "$SMPROGRAMS\${NEURON}\Notes.lnk" "notepad.exe" "$INSTDIR\notes.txt" - WriteINIStr "$SMPROGRAMS\${NEURON}\NEURON Home Page.url" "InternetShortcut" "URL" "http://neuron.yale.edu/" + WriteINIStr "$SMPROGRAMS\${NEURON}\NEURON Home Page.url" "InternetShortcut" "URL" "https://nrn.readthedocs.io/" SetOutPath "$INSTDIR\demo" SetOutPath "$INSTDIR" CreateShortCut "$SMPROGRAMS\${NEURON}\bash.lnk" "$launch1" "$launch2" "$INSTDIR\mingw\usr\bin\bash.exe" 0 @@ -157,7 +157,7 @@ Section "Desktop ${NEURON} folder with Shortcuts" CreateShortCut "$DESKTOP\${NEURON}\mknrndll.lnk" "$INSTDIR\bin${binsuffix}\nrniv.exe" "-nopython $uINSTDIR/lib/hoc/mknrndll.hoc" "$INSTDIR\bin${binsuffix}\nmodl2a.ico" 0 CreateShortCut "$DESKTOP\${NEURON}\modlunit.lnk" "$INSTDIR\bin${binsuffix}\nrniv.exe" "-nopython $uINSTDIR/lib/hoc/modlunit.hoc" "$INSTDIR\bin${binsuffix}\nmodl2a.ico" 0 CreateShortCut "$DESKTOP\${NEURON}\Notes.lnk" "notepad.exe" "$INSTDIR\notes.txt" - WriteINIStr "$DESKTOP\${NEURON}\NEURON Home Page.url" "InternetShortcut" "URL" "http://neuron.yale.edu/" + WriteINIStr "$DESKTOP\${NEURON}\NEURON Home Page.url" "InternetShortcut" "URL" "https://nrn.readthedocs.io/" SetOutPath "$INSTDIR\demo" SetOutPath "$INSTDIR" CreateShortCut "$DESKTOP\${NEURON}\bash.lnk" "$launch1" "$launch2" "$INSTDIR\mingw\usr\bin\bash.exe" 0 diff --git a/src/neuron/cache/mechanism_range.hpp b/src/neuron/cache/mechanism_range.hpp new file mode 100644 index 0000000000..751639c6dc --- /dev/null 
+++ b/src/neuron/cache/mechanism_range.hpp @@ -0,0 +1,272 @@ +#pragma once +#include "membfunc.h" +#include "nrn_ansi.h" +#include "nrnoc_ml.h" + +#include + +namespace neuron::cache { +/** + * @brief Call the given method with each dparam index that should be cached for a mechanism. + * + * The callable will be invoked with the largest index first. This is useful if you want to resize a + * container and use the indices as offsets into it. + */ +template +void indices_to_cache(short type, Callable callable) { + auto const pdata_size = nrn_prop_dparam_size_[type]; + auto* const dparam_semantics = memb_func[type].dparam_semantics; + for (int field = pdata_size - 1; field >= 0; --field) { + // Check if the field-th dparam of this mechanism type is an ion variable. See + // hoc_register_dparam_semantics. + auto const sem = dparam_semantics[field]; + // See https://github.com/neuronsimulator/nrn/issues/2312 for discussion of possible + // extensions to caching. + if ((sem > 0 && sem < 1000) || sem == -1 /* area */) { + std::invoke(callable, field); + } + } +} + +/** + * @brief Version of Memb_list for use in performance-critical code. + * + * Unlike Memb_list, this requires that the number of fields is known at compile time. + * This is typically only true in translated MOD file code. The idea is that an instance of this + * class will be created outside a loop over the data vectors and then used inside the loop. + * + * @warning It is the responsibility of the caller to ensure that the model remains sorted beyond + * the lifetime of the MechanismRange instance. + */ +template +struct MechanismRange { + /** + * @brief Construct a MechanismRange from sorted model data. + * @param cache_token Token showing the model data are sorted. + * @param nt Thread that this MechanismRange corresponds to. + * @param ml Range of mechanisms this MechanismRange refers to. + * @param type The type of this mechanism. + * + * This mirrors the signature of the functions (nrn_state, nrn_cur, nrn_init...) that are + * generated in C++ from MOD files. Typically those generated functions immediately create an + * instance of MechanismRange using this constructor. + */ + MechanismRange(neuron::model_sorted_token const& cache_token, + NrnThread&, + Memb_list& ml, + int type) + : MechanismRange{type, ml.get_storage_offset()} { + auto const& ptr_cache = mechanism::_get::_pdata_ptr_cache_data(cache_token, type); + m_pdata_ptrs = ptr_cache.data(); + assert(ptr_cache.size() <= NumDatumFields); + } + + protected: + /** + * @brief Hidden helper constructor used by MechanismRange and MechanismInstance. + */ + MechanismRange(int mech_type, std::size_t offset) + : m_data_ptrs{mechanism::get_data_ptrs(mech_type)} + , m_data_array_dims{mechanism::get_array_dims(mech_type)} + , m_offset{offset} { + assert((mech_type < 0) || + (mechanism::get_field_count(mech_type) == NumFloatingPointFields)); + } + + public: + /** + * @brief Get the range of values for an array RANGE variable. + * @tparam variable The index of the RANGE variable in the mechanism. + * @tparam array_size The array dimension of the RANGE variable. + * @param instance Which mechanism instance to access inside this mechanism range. 
+ */ + template + [[nodiscard]] double* data_array(std::size_t instance) { + static_assert(variable < NumFloatingPointFields); + // assert(array_size == m_data_array_dims[variable]); + return std::next(m_data_ptrs[variable], array_size * (m_offset + instance)); + } + + template + [[nodiscard]] double* data_array_ptr() { + return data_array(0); + } + + /** + * @brief Get a RANGE variable value. + * @tparam variable The index of the RANGE variable in the mechanism. + * @param instance Which mechanism instance to access inside this MechanismRange. + * + * This is only intended for use with non-array RANGE variables, otherwise use @ref data_array. + */ + template + [[nodiscard]] double& fpfield(std::size_t instance) { + return *data_array(instance); + } + + template + [[nodiscard]] double* fpfield_ptr() { + return data_array(0); + } + + /** + * @brief Get a RANGE variable value. + * @param instance Which mechanism instance to access inside this MechanismRange. + * @param ind The index of the RANGE variable in the mechanism. This includes both the + * index of the variable and the index into an array RANGE variable. + */ + [[nodiscard]] double& data(std::size_t instance, container::field_index ind) { + // assert(ind.field < NumFloatingPointFields); + auto const array_dim = m_data_array_dims[ind.field]; + // assert(ind.array_index < array_dim); + return m_data_ptrs[ind.field][array_dim * (m_offset + instance) + ind.array_index]; + } + + /** + * @brief Get a POINTER variable. + * @tparam variable The index of the POINTER variable in the pdata/dparam entries of the + * mechanism. + * @param instance Which mechanism instance to access inside this mechanism range. + */ + template + [[nodiscard]] double* dptr_field(std::size_t instance) { + static_assert(variable < NumDatumFields); + return m_pdata_ptrs[variable][m_offset + instance]; + } + + template + [[nodiscard]] double* const* dptr_field_ptr() { + static_assert(variable < NumDatumFields); + return m_pdata_ptrs[variable] + m_offset; + } + + protected: + /** + * @brief Pointer to a range of pointers to the start of RANGE variable storage. + * + * @c m_data_ptrs[i] is a pointer to the start of the contiguous storage for the + * @f$\texttt{i}^{th}@f$ RANGE variable. + * @see container::detail::field_data::data_ptrs() + */ + double* const* m_data_ptrs{}; + + /** + * @brief Pointer to a range of array dimensions for the RANGE variables in this mechanism. + * + * @c m_data_array_dims[i] is the array dimension of the @f$\texttt{i}^{th}@f$ RANGE variable. + */ + int const* m_data_array_dims{}; + + /** + * @brief Pointer to a range of pointers to the start of POINTER variable caches. + * + * @c m_pdata_ptrs[i][j] is the @c double* corresponding to the @f$\texttt{i}^{th}@f$ @c pdata / + * @c dparam field and the @f$\texttt{j}^{th}@f$ instance of the mechanism in the program. + * @see MechanismInstance::MechanismInstance(Prop*) and @ref nrn_sort_mech_data. + */ + double* const* const* m_pdata_ptrs{}; + + /** + * @brief Offset of this contiguous range of mechanism instances into the global range. + * + * Typically if there is more than one thread in the process then the instances of a particular + * mechanism type will be distributed across multiple NrnThread objects and processed by + * different threads, and the mechanism data will be permuted so that the instances owned by a + * given thread are contiguous. 
In that case the MechanismRange for the 0th thread would have an + * @c m_offset of zero, and the MechanismRange for the next thread would have an @c m_offset of + * the number of instances in the 0th thread. + * + * @see @ref nrn_sort_mech_data. + */ + std::size_t m_offset{}; +}; +/** + * @brief Specialised version of MechanismRange for a single instance. + * + * This is used inside generated code that takes a single mechanism instance (Prop) instead of a + * range of instances (Memb_list). A key feature of methods that take Prop is that they should + * *not* require a call to nrn_ensure_model_data_are_sorted(). This is conceptually fine, as if + * we are only concerned with a single mechanism instance then it doesn't matter where it lives + * in the global storage vectors. In this case, @ref m_dptr_cache contains an array of pointers + * that @ref m_dptr_datums can refer to. + */ +template +struct MechanismInstance: MechanismRange { + /** + * @brief Shorthand for the MechanismRange base class. + */ + using base_type = MechanismRange; + + /** + * @brief Construct from a single mechanism instance. + * @param prop Handle to a single mechanism instance. + */ + MechanismInstance(Prop* prop) + : base_type{_nrn_mechanism_get_type(prop), mechanism::_get::_current_row(prop)} { + if (!prop) { + // grrr...see cagkftab test where setdata is not called(?) and extcall_prop is null(?) + return; + } + indices_to_cache(_nrn_mechanism_get_type(prop), [this, prop](auto field) { + assert(field < NumDatumFields); + auto& datum = _nrn_mechanism_access_dparam(prop)[field]; + m_dptr_cache[field] = datum.template get(); + this->m_dptr_datums[field] = &m_dptr_cache[field]; + }); + this->m_pdata_ptrs = m_dptr_datums.data(); + } + + /** + * @brief Copy constructor. + */ + MechanismInstance(MechanismInstance const& other) { + *this = other; // Implement using copy assignment. + } + + /** + * @brief Copy assignment + * + * This has to be implemented manually because the base class (MechanismInstance) member @ref + * m_pdata_ptrs has to be updated to point at the derived class (MechanismInstance) member @ref + * m_dptr_datums. + */ + MechanismInstance& operator=(MechanismInstance const& other) { + if (this != &other) { + this->m_data_ptrs = other.m_data_ptrs; + this->m_data_array_dims = other.m_data_array_dims; + this->m_offset = other.m_offset; + m_dptr_cache = other.m_dptr_cache; + for (auto i = 0; i < NumDatumFields; ++i) { + m_dptr_datums[i] = &m_dptr_cache[i]; + } + this->m_pdata_ptrs = m_dptr_datums.data(); + } + return *this; + } + + private: + /** + * @brief Cached @c double* values for this instance, calculated from @ref Datum. + */ + std::array m_dptr_cache{}; + + /** + * @brief Pointers to m_dptr_cache needed to satisfy MechanismRange's requirements. + * @invariant @ref m_dptr_datums[i] is equal to &@ref m_dptr_cache[i] for all @c i. + * @invariant @c MechanismInstance::m_pdata_ptrs is equal to @ref m_dptr_datums.%data(). + */ + std::array m_dptr_datums{}; +}; +} // namespace neuron::cache + +namespace neuron::legacy { +/** + * @brief Helper for legacy MOD files that mess with _p in VERBATIM blocks. 
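For orientation, here is a minimal sketch of how code translated from a MOD file might use the MechanismRange documented above. It is illustrative only and not taken from the diff: the field counts (two RANGE variables, one cached dparam entry), the function name nrn_state_example and the names gbar, g and ion_value are assumptions, and the snippet only compiles inside a NEURON source tree where mechanism_range.hpp and types such as Memb_list and NrnThread are available.

#include "neuron/cache/mechanism_range.hpp"

// Hypothetical shape of a generated nrn_state for a mechanism with two RANGE
// variables and one cached pdata field; counts and names are made up.
static void nrn_state_example(neuron::model_sorted_token const& sorted_token,
                              NrnThread& nt,
                              Memb_list& ml,
                              int type) {
    // Mirrors the constructor above: the token proves the model data are sorted.
    neuron::cache::MechanismRange<2, 1> mr{sorted_token, nt, ml, type};
    for (std::size_t i = 0; i < std::size_t(ml.nodecount); ++i) {
        double& gbar = mr.fpfield<0>(i);          // 0th RANGE variable of instance i
        double& g = mr.fpfield<1>(i);             // 1st RANGE variable of instance i
        double* ion_value = mr.dptr_field<0>(i);  // cached 0th dparam entry
        g = gbar * *ion_value;                    // placeholder update
    }
}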
+ */ +template +void set_globals_from_prop(Prop* p, MechInstance& ml, MechRange*& ml_ptr, std::size_t& iml) { + ml = {p}; + ml_ptr = &ml; + iml = 0; +} +} // namespace neuron::legacy diff --git a/src/neuron/cache/model_data.hpp b/src/neuron/cache/model_data.hpp new file mode 100644 index 0000000000..675b9b4097 --- /dev/null +++ b/src/neuron/cache/model_data.hpp @@ -0,0 +1,45 @@ +#pragma once +#include +#include + +#include "neuron/container/memory_usage.hpp" + +// Forward declare Datum +namespace neuron::container { +struct generic_data_handle; +} +using Datum = neuron::container::generic_data_handle; +namespace neuron::cache { +struct Mechanism { + /** + * @brief Raw pointers into pointer data for use during simulation. + * + * pdata_ptr_cache contains pointers to the start of the storage for each pdata variable that is + * flattened into the pdata member of this struct, and nullptr elsewhere. Compared to using + * pdata directly this avoids exposing details such as the container used. + */ + std::vector pdata_ptr_cache{}; + std::vector> pdata{}; // raw pointers for use during simulation + std::vector> pdata_hack{}; // temporary storage used when populating pdata; + // should go away when pdata are SoA +}; +struct Thread { + /** + * @brief Offset into global Node storage for this thread. + */ + std::size_t node_data_offset{}; + /** + * @brief Offsets into global mechanism storage for this thread (one per mechanism) + */ + std::vector mechanism_offset{}; +}; +struct Model { + std::vector thread{}; + std::vector mechanism{}; +}; +extern std::optional model; +} // namespace neuron::cache +namespace neuron::container { +cache::ModelMemoryUsage memory_usage(const std::optional& model); +cache::ModelMemoryUsage memory_usage(const neuron::cache::Model& model); +} // namespace neuron::container diff --git a/src/neuron/container/data_handle.hpp b/src/neuron/container/data_handle.hpp new file mode 100644 index 0000000000..013654dae1 --- /dev/null +++ b/src/neuron/container/data_handle.hpp @@ -0,0 +1,395 @@ +#pragma once +#include "backtrace_utils.h" +#include "neuron/container/non_owning_soa_identifier.hpp" +#include "neuron/model_data_fwd.hpp" + +#include +#include + +namespace neuron::container { +struct do_not_search_t {}; +inline constexpr do_not_search_t do_not_search{}; + +namespace detail { +// 3rd argument ensures the no-op implementation has lower precedence than the one that prints val. +// This implementation avoids passing a T through ... in the no-output-operator case, which is +// important if T is a non-trivial type without an output operator. +template +std::ostream& print_value_impl(std::ostream& os, T const&, ...) { + return os; +} +template +auto print_value_impl(std::ostream& os, T const& val, std::nullptr_t) -> decltype(os << val) { + return os << " val=" << val; +} +template +std::ostream& print_value(std::ostream& os, T const& val) { + return print_value_impl(os, val, nullptr); +} +} // namespace detail + +/** @brief Stable handle to a generic value. + * + * Without this type one can already hold a Node::handle `foo` and call + * something like `foo.v()` to get that Node's voltage in a way that is stable + * against permutations of the underlying data. The data_handle concept is + * intended to be used if we want to erase the detail that the quantity is a + * voltage, or that it belongs to a Node, and simply treat it as a + * floating-point value that we may want to dereference later -- essentially a + * substitute for double*. 
+ * + * Implementation: like Node::handle we can store a std::size_t* that we + * dereference to find out either: + * - the current offset in the underlying container, or + * - that the object being referred to (e.g. a Node) no longer exists + * + * Assuming nothing has been invalidated, we have an offset, a type, and the + * fundamental assumption behind all of this that the underlying data are + * contiguous -- we "just" need to know the address of the start of the + * underlying storage vector without adding specific type information (like + * Node) to this class. The simplest way of doing this is to assume that the + * underlying storage is always std::vector (or a custom allocator that is + * always the same type in neuron::container::*). Note that storing T* or + * span would not work if the underlying storage is reallocated. + * + * @todo Const correctness -- data_handle should be like span: + * data_handle can read + write the value, data_handle + * can only read the value. const applied to the data_handle itself should just + * control whether or not it can be rebound to refer elsewhere. + */ +template +struct data_handle { + data_handle() = default; + + /** @brief Construct a data_handle from a plain pointer. + */ + explicit data_handle(T* raw_ptr) { + // Null pointer -> null handle. + if (!raw_ptr) { + return; + } + // First see if we can find a neuron::container that contains the current + // value of `raw_ptr` and promote it into a container/handle pair. This is + // ugly and inefficient; you should prefer using the other constructor. + auto needle = utils::find_data_handle(raw_ptr); + if (needle) { + *this = std::move(needle); + } else { + // If that didn't work, just save the plain pointer value. This is unsafe + // and should be removed. It is purely meant as an intermediate step, if + // you use it then the guarantees above will be broken. + m_container_or_raw_ptr = raw_ptr; + } + } + + /** + * @brief Create a data_handle wrapping the raw T* + * + * Unlike the constructor taking T*, this does *not* attempt to promote raw pointers to modern + * data_handles. + */ + data_handle(do_not_search_t, T* raw_ptr) + : m_container_or_raw_ptr{raw_ptr} {} + + /** + * @brief Get a data handle to a different element of the same array variable. + * + * Given an array variable a[N], this method allows a handle to a[i] to yield a handle to a[j] + * within the same logical row. If the handle is wrapping a raw pointer T*, the shift is applied + * to that raw pointer. + */ + [[nodiscard]] data_handle next_array_element(int shift = 1) const { + if (refers_to_a_modern_data_structure()) { + int const new_array_index{m_array_index + shift}; + if (new_array_index < 0 || new_array_index >= m_array_dim) { + std::ostringstream oss; + oss << *this << " next_array_element(" << shift << "): out of range"; + throw std::runtime_error(oss.str()); + } + return {m_offset, + static_cast(m_container_or_raw_ptr), + m_array_dim, + new_array_index}; + } else { + return {do_not_search, static_cast(m_container_or_raw_ptr) + shift}; + } + } + + /** + * @brief Query whether this data handle is in "modern" mode. + * @return true if the handle was created as a permutation-stable handle to an soa<...> data + * structure, otherwise false. + * + * Note that this does *not* mean that the handle is still valid. The referred-to row and/or + * column may have gone away in the meantime. 
+ */ + [[nodiscard]] bool refers_to_a_modern_data_structure() const { + return bool{m_offset} || m_offset.was_once_valid(); + } + + // TODO a const-ness cleanup. It should be possible to get + // data_handle from a view into a frozen container, even though it + // isn't possible to get std::vector& from a frozen container. And + // data_handle should forbid writing to the data value. + data_handle(non_owning_identifier_without_container offset, + T* const* container, + int array_dim, + int array_index) + : m_offset{std::move(offset)} + , m_container_or_raw_ptr{const_cast(container)} + , m_array_dim{array_dim} + , m_array_index{array_index} {} + + [[nodiscard]] explicit operator bool() const { + if (bool{m_offset}) { + // valid, modern identifier (i.e. row is valid) + return container_data(); // also check if the column is valid + } else if (m_offset.was_once_valid()) { + // once-valid, modern. no longer valid + return false; + } else { + // null or raw pointer + return m_container_or_raw_ptr; + } + } + + /** Query whether this generic handle points to a value from the `Tag` field + * of the given container. + */ + template + [[nodiscard]] bool refers_to(Container const& container) const { + static_assert(Container::template has_tag_v); + if (bool{m_offset} || m_offset.was_once_valid()) { + // basically in modern mode (possibly the entry we refer to has + // died) + return container.template is_storage_pointer(container_data()); + } else { + // raw-ptr mode or null + return false; + } + } + + /** + * @brief Get the current logical row number. + */ + [[nodiscard]] std::size_t current_row() const { + assert(refers_to_a_modern_data_structure()); + assert(m_offset); + return m_offset.current_row(); + } + + private: + // Try and cover the different operator* and operator T* cases with/without + // const in a more composable way + [[nodiscard]] T* raw_ptr() { + return static_cast(m_container_or_raw_ptr); + } + [[nodiscard]] T const* raw_ptr() const { + return static_cast(m_container_or_raw_ptr); + } + [[nodiscard]] T* container_data() { + return *static_cast(m_container_or_raw_ptr); + } + [[nodiscard]] T const* container_data() const { + return *static_cast(m_container_or_raw_ptr); + } + template + [[nodiscard]] static auto get_ptr_helper(This& this_ref) { + if (this_ref.m_offset.has_always_been_null()) { + // null or raw pointer + return this_ref.raw_ptr(); + } + if (this_ref.m_offset) { + // valid, modern mode *identifier*; i.e. we know what offset into a vector we're + // supposed to be looking at. It's also possible that the vector doesn't exist anymore, + // either because the whole soa<...> container was deleted, or because an optional field + // was toggled off in an existing soa<...> container. In that case, the base pointer + // will be null. 
+ if (auto* const base_ptr = this_ref.container_data(); base_ptr) { + // the array still exists + return base_ptr + this_ref.m_array_dim * this_ref.m_offset.current_row() + + this_ref.m_array_index; + } + // the vector doesn't exist anymore => return nullptr + return decltype(this_ref.raw_ptr()){nullptr}; + } + // no longer valid, modern mode + return decltype(this_ref.raw_ptr()){nullptr}; + } + + public: + [[nodiscard]] T& operator*() { + auto* const ptr = get_ptr_helper(*this); + if (ptr) { + return *ptr; + } else { + std::ostringstream oss; + oss << *this << " attempt to dereference [T& operator*]"; + throw std::runtime_error(oss.str()); + } + } + + [[nodiscard]] T const& operator*() const { + auto* const ptr = get_ptr_helper(*this); + if (ptr) { + return *ptr; + } else { + std::ostringstream oss; + oss << *this << " attempt to dereference [T const& operator*]"; + throw std::runtime_error(oss.str()); + } + } + + [[nodiscard]] explicit operator T*() { + return get_ptr_helper(*this); + } + + [[nodiscard]] explicit operator T const *() const { + return get_ptr_helper(*this); + } + + friend std::ostream& operator<<(std::ostream& os, data_handle const& dh) { + os << "data_handle<" << cxx_demangle(typeid(T).name()) << ">{"; + if (auto const valid = dh.m_offset; valid || dh.m_offset.was_once_valid()) { + auto* const container_data = dh.container_data(); + if (auto const maybe_info = utils::find_container_info(container_data)) { + if (!maybe_info->container().empty()) { + os << "cont=" << maybe_info->container() << ' '; + } + // the printout will show the logical row number, but we have the physical size. + // these are different in case of array variables. convert the size to a logical + // one, but add some printout showing what we did + auto size = maybe_info->size(); + assert(dh.m_array_dim >= 1); + assert(dh.m_array_index < dh.m_array_dim); + assert(size % dh.m_array_dim == 0); + size /= dh.m_array_dim; + os << maybe_info->field(); + if (dh.m_array_dim > 1) { + os << '[' << dh.m_array_index << '/' << dh.m_array_dim << ']'; + } + os << ' ' << dh.m_offset << '/' << size; + } else { + os << "cont=" << (container_data ? "unknown " : "deleted ") << dh.m_offset + << "/unknown"; + } + // print the value if it exists and has an output operator + if (valid) { + // if the referred-to *column* was deleted but the referred-to *row* is still valid, + // valid == true but ptr == nullptr. + if (auto* const ptr = get_ptr_helper(dh); ptr) { + detail::print_value(os, *ptr); + } + } + } else if (dh.m_container_or_raw_ptr) { + os << "raw=" << dh.m_container_or_raw_ptr; + } else { + os << dh.m_offset; + } + return os << '}'; + } + + // TODO should a "modern" handle that has become invalid compare equal to a + // null handle that was never valid? Perhaps yes, as both evaluate to + // boolean false, but their string representations are different. + [[nodiscard]] friend bool operator==(data_handle const& lhs, data_handle const& rhs) { + return lhs.m_offset == rhs.m_offset && + lhs.m_container_or_raw_ptr == rhs.m_container_or_raw_ptr && + lhs.m_array_dim == rhs.m_array_dim && lhs.m_array_index == rhs.m_array_index; + } + + [[nodiscard]] friend bool operator!=(data_handle const& lhs, data_handle const& rhs) { + return !(lhs == rhs); + } + + /** + * @brief Get the identifier used by this handle. + * + * This is likely to only be useful for the (hopefully temporary) method + * neuron::container::notify_when_handle_dies. 
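The comments above describe both the modern, permutation-stable mode and the legacy raw-pointer mode of data_handle. As a small, hedged sketch of the legacy mode only (it assumes a NEURON build tree to compile and link; the stack variable and values are illustrative), a handle built with do_not_search never refers to a modern soa container and behaves like a checked double*:

#include "neuron/container/data_handle.hpp"
#include <cassert>
#include <iostream>

int main() {
    double v{42.0};
    // Wrap a plain pointer without trying to promote it to a stable handle.
    neuron::container::data_handle<double> h{neuron::container::do_not_search, &v};
    assert(!h.refers_to_a_modern_data_structure());
    assert(bool{h});         // non-null
    *h = 43.0;               // dereference like a double*
    std::cout << h << '\n';  // prints data_handle<double>{raw=...}
}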
+ */ + [[nodiscard]] non_owning_identifier_without_container identifier() const { + return m_offset; + } + + private: + friend struct generic_data_handle; + friend struct std::hash; + non_owning_identifier_without_container m_offset{}; // basically std::size_t* + // If m_offset is/was valid for a modern container, this is a pointer to a value containing the + // start of the underlying contiguous storage (i.e. the return value of std::vector::data()) + // otherwise it is possibly-null T* + void* m_container_or_raw_ptr{}; + // These are needed for "modern" handles to array variables, where the offset + // yielded by m_offset needs to be scaled/shifted by an array dimension/index + // before being applied to m_container_or_raw_ptr + int m_array_dim{1}, m_array_index{}; +}; + +/** + * @brief Explicit specialisation data_handle. + * + * This is convenient as it allows void* to be stored in generic_data_handle. + * The "modern style" data handles that hold a reference to a container and a way of determining an + * offset into that container do not make sense with a void value type, so this only supports the + * "legacy" mode where a data handle wraps a plain pointer. + */ +template <> +struct data_handle { + data_handle() = default; + data_handle(void* raw_ptr) + : m_raw_ptr{raw_ptr} {} + data_handle(do_not_search_t, void* raw_ptr) + : m_raw_ptr{raw_ptr} {} + [[nodiscard]] bool refers_to_a_modern_data_structure() const { + return false; + } + explicit operator bool() const { + return m_raw_ptr; + } + explicit operator void*() { + return m_raw_ptr; + } + explicit operator void const *() const { + return m_raw_ptr; + } + friend std::ostream& operator<<(std::ostream& os, data_handle const& dh) { + return os << "data_handle{raw=" << dh.m_raw_ptr << '}'; + } + friend bool operator==(data_handle const& lhs, data_handle const& rhs) { + return lhs.m_raw_ptr == rhs.m_raw_ptr; + } + + private: + friend struct generic_data_handle; + friend struct std::hash>; + void* m_raw_ptr; +}; + +} // namespace neuron::container + +// Enable data_handle as a key type in std::unordered_map +template +struct std::hash> { + std::size_t operator()(neuron::container::data_handle const& s) const noexcept { + static_assert(sizeof(std::size_t) == sizeof(T const*)); + if (s.m_offset || s.m_offset.was_once_valid()) { + // The hash should not include the current row number, but rather the + // std::size_t* that is dereferenced to *get* the current row number, + // and which container this generic value lives in. + return std::hash{}( + s.m_offset) ^ + reinterpret_cast(s.m_container_or_raw_ptr); + } else { + return reinterpret_cast(s.m_container_or_raw_ptr); + } + } +}; + +template <> +struct std::hash> { + std::size_t operator()(neuron::container::data_handle const& s) const noexcept { + static_assert(sizeof(std::size_t) == sizeof(void*)); + return reinterpret_cast(s.m_raw_ptr); + } +}; diff --git a/src/neuron/container/generic_data_handle.hpp b/src/neuron/container/generic_data_handle.hpp new file mode 100644 index 0000000000..218ba8f332 --- /dev/null +++ b/src/neuron/container/generic_data_handle.hpp @@ -0,0 +1,318 @@ +#pragma once +#include "backtrace_utils.h" +#include "neuron/container/data_handle.hpp" +#include "neuron/container/non_owning_soa_identifier.hpp" + +#include +#include +#include + +namespace neuron::container { +/** + * @brief Non-template stable handle to a generic value. 
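Because std::hash is specialised for data_handle at the end of the header above, a handle can be used directly as an unordered associative key. A small sketch; the map and the label values are hypothetical.

#include "neuron/container/data_handle.hpp"
#include <string>
#include <unordered_map>
#include <utility>

using neuron::container::data_handle;

// Remember a user-chosen label for a value, keyed on a permutation-stable handle.
std::unordered_map<data_handle<double>, std::string> labels;

void remember(data_handle<double> const& dh, std::string name) {
    labels[dh] = std::move(name);  // uses std::hash<data_handle<double>> and the friend operator==
}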
+ * + * This is a type-erased version of data_handle, with the additional feature + * that it can store values of POD types no larger than a pointer (typically int + * and float). It stores (at runtime) the type of the value it contains, and is + * therefore type-safe, but this increases sizeof(generic_data_handle) by 50% so + * it may be prudent to view this as useful for validation/debugging but not + * something to become too dependent on. + * + * There are several states that instances of this class can be in: + * - null, no value is contained, any type can be assigned without any type + * mismatch error (m_type=null, m_container=null, m_offset=null) + * - wrapping an instance of a small, trivial type T (m_type=&typeid(T), + * m_container=the_value, m_offset=null) + * - wrapping a data handle (m_type=&typeid(T*), m_container=the_container, + * m_offset=ptr_to_row) + * + * @todo Consider whether this should be made more like std::any (with a maximum + * 2*sizeof(void*) and a promise never to allocate memory dynamically) so + * it actually has a data_handle subobject. Presumably that would mean + * data_handle would need to have a trivial destructor. This might make + * it harder in future to have some vector_of_generic_data_handle type + * that hoists out the pointer-to-container and typeid parts that should + * be the same for all rows. + */ +struct generic_data_handle { + private: + // The exact criteria could be refined, it is definitely not possible to + // store types with non-trivial destructors. + template + static constexpr bool can_be_stored_literally_v = + std::is_trivial_v && !std::is_pointer_v && sizeof(T) <= sizeof(void*); + + public: + /** @brief Construct a null data handle. + */ + generic_data_handle() = default; + + /** @brief Construct a null data handle. + */ + generic_data_handle(std::nullptr_t) {} + + /** + * @brief Construct a generic data handle that holds a small literal value. + * + * This is explicit to avoid things like operator<<(ostream&, generic_data_handle const&) being + * considered when printing values like size_t. + */ + template , int> = 0> + explicit generic_data_handle(T value) + : m_type{&typeid(T)} { + std::memcpy(&m_container, &value, sizeof(T)); + } + + /** + * @brief Assign a small literal value to a generic data handle. + * + * This is important when generic_data_handle is used as the Datum type for "pointer" variables + * in MOD files. + */ + template , int> = 0> + generic_data_handle& operator=(T value) { + return *this = generic_data_handle{value}; + } + + /** + * @brief Store a pointer inside this generic data handle. + * + * Explicit handling of pointer types (with redirection via data_handle) ensures that + * `some_generic_handle = some_ptr_to_T` promotes the raw `T*` to a modern `data_handle` that + * is stable to permutation. + */ + template , int> = 0> + generic_data_handle& operator=(T value) { + return *this = generic_data_handle{data_handle>{value}}; + } + + template + generic_data_handle(do_not_search_t dns, T* raw_ptr) + : generic_data_handle{data_handle{dns, raw_ptr}} {} + + /** + * @brief Wrap a data_handle in a generic data handle. + * + * Note that data_handle is always wrapping a raw void* and is never + * in "modern" mode + */ + template , int> = 0> + generic_data_handle(data_handle const& handle) + : m_container{handle.m_raw_ptr} + , m_type{&typeid(T*)} { + static_assert(std::is_same_v); + } + + /** + * @brief Wrap a data_handle in a generic data handle. 
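A sketch of the two assignment paths just described: a small trivial value is stored literally inside the handle, while assigning a raw pointer promotes it (via data_handle) to a permutation-stable reference if the pointer can be found in the model. The variable names and the value 42 are made up.

#include "neuron/container/generic_data_handle.hpp"

using neuron::container::generic_data_handle;

void datum_sketch(double* raw) {     // `raw` is a hypothetical pointer into model data
    generic_data_handle d{};         // typeless null
    d = 42;                          // stores a literal int (it fits inside a pointer)
    int i = d.get<int>();            // reads the literal back; get<double>() would throw

    generic_data_handle p{};
    p = raw;                         // promoted to data_handle<double> if `raw` is found
    bool modern = p.refers_to_a_modern_data_structure();
    (void) i;
    (void) modern;
}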
+ */ + template , int> = 0> + generic_data_handle(data_handle const& handle) + : m_offset{handle.m_offset} + , m_container{handle.m_container_or_raw_ptr} + , m_type{&typeid(T*)} + , m_array_dim{handle.m_array_dim} + , m_array_index{handle.m_array_index} { + static_assert(!std::is_same_v); + } + + /** + * @brief Create data_handle from a generic data handle. + * + * The conversion will succeed, yielding a null data_handle, if the + * generic data handle is null. If the generic data handle is not null then + * the conversion will succeed if the generic data handle actually holds a + * data_handle or a literal T*. + * + * It might be interesting in future to explore dropping m_type in optimised + * builds, in which case we should aim to avoid predicating important logic + * on exceptions thrown by this function. + */ + template + explicit operator data_handle() const { + // Either the type has to match or the generic_data_handle needs to have + // never been given a type + if (!m_type) { + // A (typeless / default-constructed) null generic_data_handle can + // be converted to any (null) data_handle. + return {}; + } + if (typeid(T*) != *m_type) { + throw_error(" cannot be converted to data_handle<" + cxx_demangle(typeid(T).name()) + + ">"); + } + if (m_offset.has_always_been_null()) { + // This is a data handle in backwards-compatibility mode, wrapping a + // raw pointer of type T*, or a null handle that has always been null (as opposed to a + // handle that became null). Passing do_not_search prevents the data_handle + // constructor from trying to find the raw pointer in the NEURON data structures. + return {do_not_search, static_cast(m_container)}; + } + // Nothing after here is reachable with T=void, as data_handle is always either null + // or in backwards-compatibility mode. the branch structure has been chosen to try and + // minimise compiler warnings and maximise reported coverage... + if constexpr (!std::is_same_v) { + if (!m_offset.was_once_valid()) { + // A real and still-valid data handle. This cannot be instantiated with T=void + // because data_handle does not have a 4-argument constructor. + assert(m_container); + return {m_offset, static_cast(m_container), m_array_dim, m_array_index}; + } + } + // Reaching here should mean T != void && was_once_valid == true, i.e. this used to be a + // valid data handle, but it has since been invalidated. Invalid data handles never become + // valid again, so we can safely produce a "fully null" data_handle. + return {}; + } + + /** @brief Explicit conversion to any T. + * + * It might be interesting in future to explore dropping m_type in + * optimised builds, in which case we should aim to avoid predicating + * important logic on exceptions thrown by this function. + * + * Something like static_cast(generic_handle) will work both if + * the Datum holds a literal double* and if it holds a data_handle. + * + * @todo Consider conversion to bool and whether this means not-null or to + * obtain a literal, wrapped bool value + */ + template + T get() const { + if constexpr (std::is_pointer_v) { + // If T=U* (i.e. 
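Converting back to a typed handle uses the explicit conversion operator above; if the stored type does not match, it throws, so holds<T>() can be used to check first. A sketch, with hypothetical function names.

#include "neuron/container/data_handle.hpp"
#include "neuron/container/generic_data_handle.hpp"

using namespace neuron::container;

data_handle<double> as_double(generic_data_handle const& gdh) {
    // succeeds if gdh is null, wraps a literal double*, or wraps a data_handle<double>
    return static_cast<data_handle<double>>(gdh);
}

bool wraps_double(generic_data_handle const& gdh) {
    return gdh.holds<double*>();  // check the stored type instead of relying on the throw
}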
T is a pointer type) then we might be in modern + // mode, go via data_handle + return static_cast(static_cast>>(*this)); + } else { + // Getting a literal value saved in m_container + static_assert(can_be_stored_literally_v, + "generic_data_handle can only hold non-pointer types that are trivial " + "and smaller than a pointer"); + if (!m_offset.has_always_been_null()) { + throw_error(" conversion to " + cxx_demangle(typeid(T).name()) + + " not possible for a handle [that was] in modern mode"); + } + if (typeid(T) != *m_type) { + throw_error(" does not hold a literal value of type " + + cxx_demangle(typeid(T).name())); + } + T ret{}; + std::memcpy(&ret, &m_container, sizeof(T)); + return ret; + } + } + + // Defined elsewhere to optimise compile times. + friend std::ostream& operator<<(std::ostream& os, generic_data_handle const& dh); + + /** @brief Check if this handle refers to the specific type. + * + * This could be a small, trivial type (e.g. T=int) or a pointer type (e.g. + * T=double*). holds() == true could either indicate that a + * data_handle is held or that a literal double* is held. + * + * It might be interesting in future to explore dropping m_type in + * optimised builds, in which case we should aim to avoid predicating + * important logic on this function. + */ + template + [[nodiscard]] bool holds() const { + return m_type && typeid(T) == *m_type; + } + + /** @brief Check if this handle contains a data_handle or just a literal. + * + * This should match + * static_cast>(generic_handle).refers_to_a_modern_data_structure() + * if T is correct. This will return true if we hold a data_handle that + * refers to a since-deleted row. + */ + [[nodiscard]] bool refers_to_a_modern_data_structure() const { + return !m_offset.has_always_been_null(); + } + + /** @brief Return the demangled name of the type this handle refers to. + * + * If the handle contains data_handle, this will be T*. If a literal + * value or raw pointer is being wrapped, that possibly-pointer type will + * be returned. + * + * It might be interesting in future to explore dropping m_type in + * optimised builds, in which case we should aim to avoid predicating + * important logic on this function. + */ + [[nodiscard]] std::string type_name() const { + return m_type ? cxx_demangle(m_type->name()) : "typeless_null"; + } + + /** @brief Obtain a reference to the literal value held by this handle. + * + * Storing literal values is incompatible with storing data_handle. If + * the handle stores data_handle then calling this method throws an + * exception. If the handle is null, this sets the stored type to be T and + * returns a reference to it. If the handle already holds a literal value + * of type T then a reference to it is returned. + * + * Note that, unlike converting to double*, literal_value() will + * fail if the handle contains data_handle, as in that case there + * is no persistent double* that could be referred to. + * + * It might be interesting in future to explore dropping m_type in + * optimised builds, in which case we should aim to avoid predicating + * important logic on exceptions thrown by this function. + */ + template + [[nodiscard]] T& literal_value() { + if (!m_offset.has_always_been_null()) { + throw_error("::literal_value<" + cxx_demangle(typeid(T).name()) + + "> cannot be called on a handle [that was] in modern mode"); + } else { + // This is a data handle in backwards-compatibility mode, wrapping a + // raw pointer, or a null data handle. 
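literal_value<T>() returns a writable reference to the stored literal, and on a typeless null handle it also fixes the stored type. This is how Datum-like fields can be written before they are ever read; the sketch below uses made-up values.

#include "neuron/container/generic_data_handle.hpp"

using neuron::container::generic_data_handle;

void literal_sketch() {
    generic_data_handle d{};           // typeless null
    d.literal_value<double>() = 4.2;   // fixes the stored type to double and writes the value
    double x = d.get<double>();        // reads the same value back
    // d.literal_value<int>() would now throw: the handle holds a literal double
    (void) x;
}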
Using raw_ptr() on a typeless_null + // (default-constructed) handle turns it into a legacy handle-to-T. + if (!m_type) { + m_type = &typeid(T); + } else if (typeid(T) != *m_type) { + throw_error(" does not hold a literal value of type " + + cxx_demangle(typeid(T).name())); + } + return *reinterpret_cast(&m_container); + } + } + + private: + [[noreturn]] void throw_error(std::string message) const { + std::ostringstream oss{}; + oss << *this << std::move(message); + throw std::runtime_error(std::move(oss).str()); + } + // Offset into the underlying storage container. If this handle is holding a + // literal value, such as a raw pointer, then this will be null. + non_owning_identifier_without_container m_offset{}; + // T* const* for the T encoded in m_type if m_offset is non-null, + // otherwise a literal value is stored in this space. + void* m_container{}; + // Pointer to typeid(T) for the wrapped type + std::type_info const* m_type{}; + // Extra information required for data_handle to point at array variables + int m_array_dim{1}, m_array_index{}; +}; + +namespace utils { +namespace detail { +/** + * @brief Try and promote a generic_data_handle wrapping a raw pointer. + * + * If the raw pointer can be found in the model data structures, a "modern" permutation-stable + * handle to it will be returned. If it cannot be found, a null generic_data_handle will be + * returned. + */ +[[nodiscard]] generic_data_handle promote_or_clear(generic_data_handle); +} // namespace detail +// forward declared in model_data_fwd.hpp +template +[[nodiscard]] data_handle find_data_handle(T* ptr) { + return static_cast>(detail::promote_or_clear({do_not_search, ptr})); +} +} // namespace utils +} // namespace neuron::container diff --git a/src/neuron/container/mechanism.hpp b/src/neuron/container/mechanism.hpp new file mode 100644 index 0000000000..5f92e32be0 --- /dev/null +++ b/src/neuron/container/mechanism.hpp @@ -0,0 +1,135 @@ +#pragma once +#include "neuron/container/data_handle.hpp" +#include "neuron/container/view_utils.hpp" + +#include +#include +#include +#include + +namespace neuron::container::Mechanism { +struct Variable { + std::string name{}; + int array_size{1}; +}; +namespace field { +/** @brief Catch-all for floating point per-instance variables in the MOD file. + * + * @todo Update the code generation so we get some hh_data = soa type instead of fudging things this way. + */ +struct FloatingPoint { + FloatingPoint(std::vector var_info) + : m_var_info{std::move(var_info)} {} + /** + * @brief How many copies of this column should be created? + * + * Typically this is the number of different RANGE variables in a mechanism. + */ + [[nodiscard]] std::size_t num_variables() const { + return m_var_info.size(); + } + + [[nodiscard]] Variable const& info(std::size_t i) const { + return m_var_info.at(i); + } + + /** + * @brief What is the array dimension of the i-th copy of this column? + * + * Typically this is 1 for a normal RANGE variable and larger for an array RANGE variable. + */ + [[nodiscard]] int array_dimension(int i) const { + return info(i).array_size; + } + + [[nodiscard]] const char* name(int i) const { + return info(i).name.c_str(); + } + + using type = double; + + private: + std::vector m_var_info{}; +}; +} // namespace field + +/** + * @brief Base class defining the public API of Mechanism handles. + * @tparam Identifier The concrete owning/non-owning identifier type. + * + * This allows the same struct-like accessors (v(), ...) 
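The FloatingPoint tag above is constructed from one Variable per RANGE variable. A sketch of how a mechanism with one scalar and one array variable might describe its columns; the names "gbar" and "state" and the dimensions are invented for illustration.

#include "neuron/container/mechanism.hpp"

#include <utility>
#include <vector>

namespace Mech = neuron::container::Mechanism;

Mech::field::FloatingPoint make_fields() {
    std::vector<Mech::Variable> vars{{"gbar", 1}, {"state", 3}};
    Mech::field::FloatingPoint fp{std::move(vars)};
    // fp.num_variables() == 2, fp.array_dimension(1) == 3, fp.name(0) == "gbar"
    return fp;
}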
to be + * used on all of the different types of objects that represent a single Node: + * - owning_handle: stable over permutations of underlying data, manages + * lifetime of a row in the underlying storage. Only null when in moved-from + * state. + * - handle: stable over permutations of underlying data, produces runtime error + * if it is dereferenced after the corresponding owning_handle has gone out of + * scope. Can be null. + */ +template +struct handle_interface: handle_base { + using base_type = handle_base; + using base_type::base_type; + + /** + * @brief Return the number of floating point fields accessible via fpfield. + */ + [[nodiscard]] int num_fpfields() const { + return this->template get_tag().num_variables(); + } + + /** + * @brief Get the sum of array dimensions of floating point fields. + */ + [[nodiscard]] int fpfields_size() const { + auto* const prefix_sums = + this->underlying_storage().template get_array_dim_prefix_sums(); + auto const num_fields = num_fpfields(); + return prefix_sums[num_fields - 1]; + } + + /** + * @brief Return the array size of the given field. + */ + [[nodiscard]] int fpfield_dimension(int field_index) const { + if (auto const num_fields = num_fpfields(); field_index >= num_fields) { + throw std::runtime_error("fpfield #" + std::to_string(field_index) + " out of range (" + + std::to_string(num_fields) + ")"); + } + auto* const array_dims = + this->underlying_storage().template get_array_dims(); + return array_dims[field_index]; + } + + [[nodiscard]] field::FloatingPoint::type& fpfield(int field_index, int array_index = 0) { + return this->underlying_storage().fpfield(this->current_row(), field_index, array_index); + } + + [[nodiscard]] field::FloatingPoint::type const& fpfield(int field_index, + int array_index = 0) const { + return this->underlying_storage().fpfield(this->current_row(), field_index, array_index); + } + + /** @brief Return a data_handle to a floating point value. + */ + [[nodiscard]] data_handle fpfield_handle(int field_index, + int array_index = 0) { + return this->underlying_storage().fpfield_handle(this->id(), field_index, array_index); + } + + friend std::ostream& operator<<(std::ostream& os, handle_interface const& handle) { + os << handle.underlying_storage().name() << '{' << handle.id() << '/' + << handle.underlying_storage().size(); + auto const num = handle.num_fpfields(); + for (auto i = 0; i < num; ++i) { + os << " fpfield[" << i << "]{"; + for (auto j = 0; j < handle.fpfield_dimension(i); ++j) { + os << " " << handle.fpfield(i, j); + } + os << " }"; + } + return os << '}'; + } +}; +} // namespace neuron::container::Mechanism diff --git a/src/neuron/container/mechanism_data.hpp b/src/neuron/container/mechanism_data.hpp new file mode 100644 index 0000000000..89fb7e13fc --- /dev/null +++ b/src/neuron/container/mechanism_data.hpp @@ -0,0 +1,59 @@ +#pragma once +#include "neuron/container/mechanism.hpp" +#include "neuron/container/soa_container.hpp" + +#include + +namespace neuron::container::Mechanism { +/** + * @brief Underlying storage for all instances of a particular Mechanism. + * + * To mitigate Python wheel ABI issues, a basic set of methods are defined in .cpp code that is + * compiled as part of NEURON. + */ +struct storage: soa { + using base_type = soa; + // Defined in .cpp to avoid instantiating base_type constructors too often. + storage(short mech_type, std::string name, std::vector floating_point_fields = {}); + /** + * @brief Access floating point values. 
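Given a Mechanism handle, the accessors just defined are enough to walk every RANGE value of one instance. A sketch; `mech` is assumed to be a valid handle obtained elsewhere.

#include "neuron/container/mechanism_data.hpp"

#include <iostream>

void dump_instance(neuron::container::Mechanism::handle const& mech) {
    for (int field = 0; field < mech.num_fpfields(); ++field) {
        for (int k = 0; k < mech.fpfield_dimension(field); ++k) {
            std::cout << mech.fpfield(field, k) << ' ';
        }
    }
    std::cout << '\n';
}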
+ */ + [[nodiscard]] double& fpfield(std::size_t instance, int field, int array_index = 0); + /** + * @brief Access floating point values. + */ + [[nodiscard]] double const& fpfield(std::size_t instance, int field, int array_index = 0) const; + /** + * @brief Access floating point values. + */ + [[nodiscard]] data_handle fpfield_handle(non_owning_identifier_without_container id, + int field, + int array_index = 0); + /** + * @brief The name of this mechanism. + */ + [[nodiscard]] std::string_view name() const; + /** + * @brief The type of this mechanism. + */ + [[nodiscard]] short type() const; + /** + * @brief Pretty printing for mechanism data structures. + */ + friend std::ostream& operator<<(std::ostream& os, storage const& data); + + private: + short m_mech_type{}; + std::string m_mech_name{}; +}; + +/** + * @brief Non-owning handle to a Mechanism instance. + */ +using handle = handle_interface>; + +/** + * @brief Owning handle to a Mechanism instance. + */ +using owning_handle = handle_interface>; +} // namespace neuron::container::Mechanism diff --git a/src/neuron/container/memory_usage.hpp b/src/neuron/container/memory_usage.hpp new file mode 100644 index 0000000000..33c3cfa5db --- /dev/null +++ b/src/neuron/container/memory_usage.hpp @@ -0,0 +1,226 @@ +#pragma once + +#include +#include +#include +#include +#include + +namespace neuron::container { +/** @brief Size and capacity in bytes. */ +struct VectorMemoryUsage { + VectorMemoryUsage() = default; + VectorMemoryUsage(size_t size, size_t capacity) + : size(size) + , capacity(capacity) {} + + /** Compute the memory requirements of the `std::vector`. + * + * Note, this returns the size and capacity of the memory allocated by + * the `std::vector`. If the element allocate memory, that memory + * isn't included. + * + * Essentially, + * + * size = vec.size() * sizeof(T); + * capacity = vec.capacity() * sizeof(T); + */ + template + VectorMemoryUsage(const std::vector& vec) + : size(vec.size() * sizeof(T)) + , capacity(vec.capacity() * sizeof(T)) {} + + /// @brief Number of bytes used. + size_t size{}; + + /// @brief Number of bytes allocated. + size_t capacity{}; + + const VectorMemoryUsage& operator+=(const VectorMemoryUsage& other) { + size += other.size; + capacity += other.capacity; + + return *this; + } +}; + + +/** @brief Memory usage of a storage/soa container. */ +struct StorageMemoryUsage { + /// @brief The memory usage of the heavy data in a soa. + VectorMemoryUsage heavy_data{}; + + /// @brief The memory usage for the stable identifiers in a soa. + VectorMemoryUsage stable_identifiers{}; + + const StorageMemoryUsage& operator+=(const StorageMemoryUsage& other) { + heavy_data += other.heavy_data; + stable_identifiers += other.stable_identifiers; + + return *this; + } + + VectorMemoryUsage compute_total() const { + auto total = heavy_data; + total += stable_identifiers; + + return total; + } +}; + +/** @brief Memory usage of a `neuron::Model`. */ +struct ModelMemoryUsage { + /// @brief The memory usage of the nodes related data. + StorageMemoryUsage nodes{}; + + /// @brief The memory usage of all mechanisms. + StorageMemoryUsage mechanisms{}; + + const ModelMemoryUsage& operator+=(const ModelMemoryUsage& other) { + nodes += other.nodes; + mechanisms += other.mechanisms; + + return *this; + } + + VectorMemoryUsage compute_total() const { + auto total = nodes.compute_total(); + total += mechanisms.compute_total(); + + return total; + } +}; + +namespace cache { +/** @brief Memory usage of a `neuron::cache::Model`. 
*/ +struct ModelMemoryUsage { + /** @brief Memory usage required for NRN threads. */ + VectorMemoryUsage threads{}; + + /** @brief Memory usage related to caching mechanisms. */ + VectorMemoryUsage mechanisms{}; + + const ModelMemoryUsage& operator+=(const ModelMemoryUsage& other) { + threads += other.threads; + mechanisms += other.mechanisms; + + return *this; + } + + VectorMemoryUsage compute_total() const { + auto total = threads; + total += mechanisms; + + return total; + } +}; +} // namespace cache + +/** @brief Overall SoA datastructures related memory usage. */ +struct MemoryUsage { + ModelMemoryUsage model{}; + cache::ModelMemoryUsage cache_model{}; + VectorMemoryUsage stable_pointers{}; + + const MemoryUsage& operator+=(const MemoryUsage& other) { + model += other.model; + cache_model += other.cache_model; + stable_pointers += other.stable_pointers; + + return *this; + } + + VectorMemoryUsage compute_total() const { + auto total = model.compute_total(); + total += cache_model.compute_total(); + total += stable_pointers; + + return total; + } +}; + +struct MemoryUsageSummary { + /** @brief Data that are part of the algorithm. */ + size_t required{}; + + /** @brief Any memory that's (currently) required to run NEURON. + * + * This includes things like the live stable identifiers in each `soa`, the + * `cache::Model` and similar things that are needed to implement NEURON + * correctly, but are not required by the simulation. + * + * This category covers memory that needed to solve a computer science + * problem rather than a neuroscience problem. Hence, this category + * could potentially be optimized. It's not obvious how much this category + * can be optimized. + */ + size_t convenient{}; + + /** @brief Wasted memory due to the difference of `size` and `capacity`. */ + size_t oversized{}; + + /** @brief Essentially leaked memory. + * + * The current implementation doesn't know when it's safe to delete stable + * identifiers. Hence, when the owning identifier is deallocated the stable + * identifier is kept alive and leaked into a global collector. + */ + size_t leaked{}; + + MemoryUsageSummary(const MemoryUsage& memory_usage) { + add(memory_usage.model); + add(memory_usage.cache_model); + add(leaked, memory_usage.stable_pointers); + } + + private: + void add(size_t& accumulator, const VectorMemoryUsage& increment) { + oversized += increment.capacity - increment.size; + accumulator += increment.size; + } + + void add(const StorageMemoryUsage& increment) { + add(required, increment.heavy_data); + add(convenient, increment.stable_identifiers); + } + + void add(const ModelMemoryUsage& model) { + add(model.nodes); + add(model.mechanisms); + } + + void add(const cache::ModelMemoryUsage& model) { + add(convenient, model.mechanisms); + add(convenient, model.threads); + } +}; + +/** @brief */ +struct MemoryStats { + MemoryUsage total; +}; + +/** @brief Gather memory usage of this process. */ +MemoryUsage local_memory_usage(); + +/** @brief Utility to format memory as a human readable string. + * + * Note, this is currently tailored to it's use in `format_memory_usage` + * and is therefore very rigid in it's padding. Generalize when needed. + * + * @internal + */ +std::string format_memory(size_t bytes); + +/** @brief Aligned, human readable representation of `memory_usage`. + * + * @internal + */ +std::string format_memory_usage(const VectorMemoryUsage& memory_usage); + +/** @brief Create a string representation of `usage`. 
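A sketch of how these bookkeeping types compose: VectorMemoryUsage can be built directly from a std::vector, and MemoryUsageSummary buckets a whole MemoryUsage report into required/convenient/oversized/leaked. The vector size is illustrative, not a measurement.

#include "neuron/container/memory_usage.hpp"

#include <vector>

using namespace neuron::container;

void memory_sketch() {
    std::vector<double> v(1000);
    VectorMemoryUsage vec_usage{v};              // size = v.size()*sizeof(double),
                                                 // capacity = v.capacity()*sizeof(double)
    MemoryUsage total = local_memory_usage();    // gather this process' SoA-related usage
    MemoryUsageSummary summary{total};           // required / convenient / oversized / leaked
    print_memory_usage(total);
    (void) vec_usage;
    (void) summary;
}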
*/ +std::string format_memory_usage(const MemoryUsage& usage); + +void print_memory_usage(const MemoryUsage& usage); + +} // namespace neuron::container diff --git a/src/neuron/container/node.hpp b/src/neuron/container/node.hpp new file mode 100644 index 0000000000..8d7c30e2a1 --- /dev/null +++ b/src/neuron/container/node.hpp @@ -0,0 +1,256 @@ +#pragma once +#include "membdef.h" // DEF_vrest +#include "neuron/container/data_handle.hpp" +#include "neuron/container/view_utils.hpp" + +#include +#include +#include +#include + +namespace neuron::container::Node { +namespace field { + +/** + * @brief Above-diagonal element in node equation. + */ +struct AboveDiagonal { + using type = double; +}; + +/** + * @brief Area in um^2 but see treeset.cpp. + */ +struct Area { + using type = double; + constexpr type default_value() const { + return 100.; + } +}; + +/** + * @brief Below-diagonal element in node equation. + */ +struct BelowDiagonal { + using type = double; +}; + +/** @brief Membrane potential. + */ +struct Voltage { + using type = double; + constexpr type default_value() const { + return DEF_vrest; + } +}; + +/** + * @brief Diagonal element in node equation. + */ +struct Diagonal { + using type = double; +}; + +/** + * @brief Field for fast_imem calculation. + */ +struct FastIMemSavD { + static constexpr bool optional = true; + using type = double; +}; + +/** + * @brief Field for fast_imem calculation. + */ +struct FastIMemSavRHS { + static constexpr bool optional = true; + using type = double; +}; + +struct RHS { + using type = double; +}; + +} // namespace field + +/** + * @brief Base class defining the public API of Node handles. + * @tparam Identifier The concrete owning/non-owning identifier type. + * + * This allows the same struct-like accessors (v(), ...) to be + * used on all of the different types of objects that represent a single Node: + * - owning_handle: stable over permutations of underlying data, manages + * lifetime of a row in the underlying storage. Only null when in moved-from + * state. + * - handle: stable over permutations of underlying data, produces runtime error + * if it is dereferenced after the corresponding owning_handle has gone out of + * scope. Can be null. + */ +template +struct handle_interface: handle_base { + using base_type = handle_base; + using base_type::base_type; + /** + * @brief Return the above-diagonal element. + */ + [[nodiscard]] field::AboveDiagonal::type& a() { + return this->template get(); + } + + /** + * @brief Return the above-diagonal element. + */ + [[nodiscard]] field::AboveDiagonal::type const& a() const { + return this->template get(); + } + + /** @brief Return the area. + */ + [[nodiscard]] field::Area::type& area() { + return this->template get(); + } + + /** @brief Return the area. + */ + [[nodiscard]] field::Area::type const& area() const { + return this->template get(); + } + + /** + * @brief This is a workaround for area sometimes being a macro. + * @todo Remove those macros once and for all. + */ + [[nodiscard]] field::Area::type& area_hack() { + return area(); + } + + /** + * @brief This is a workaround for area sometimes being a macro. + * @todo Remove those macros once and for all. + */ + [[nodiscard]] field::Area::type const& area_hack() const { + return area(); + } + + /** @brief Return a data_handle to the area. + */ + [[nodiscard]] data_handle area_handle() { + return this->template get_handle(); + } + + /** + * @brief Return the below-diagonal element. 
+ */ + [[nodiscard]] field::BelowDiagonal::type& b() { + return this->template get(); + } + + /** + * @brief Return the below-diagonal element. + */ + [[nodiscard]] field::BelowDiagonal::type const& b() const { + return this->template get(); + } + + /** + * @brief Return the diagonal element. + */ + [[nodiscard]] field::Diagonal::type& d() { + return this->template get(); + } + + /** + * @brief Return the diagonal element. + */ + [[nodiscard]] field::Diagonal::type const& d() const { + return this->template get(); + } + + /** + * @brief Return the membrane potential. + */ + [[nodiscard]] field::Voltage::type& v() { + return this->template get(); + } + + /** + * @brief Return the membrane potential. + */ + [[nodiscard]] field::Voltage::type const& v() const { + return this->template get(); + } + + /** + * @brief Return a handle to the membrane potential. + */ + [[nodiscard]] data_handle v_handle() { + return this->template get_handle(); + } + + /** + * @brief This is a workaround for v sometimes being a macro. + * @todo Remove those macros once and for all. + */ + [[nodiscard]] field::Voltage::type& v_hack() { + return v(); + } + + /** + * @brief This is a workaround for v sometimes being a macro. + * @todo Remove those macros once and for all. + */ + [[nodiscard]] field::Voltage::type const& v_hack() const { + return v(); + } + + /** + * @brief Return the right hand side of the Hines solver. + */ + [[nodiscard]] field::RHS::type& rhs() { + return this->template get(); + } + + /** + * @brief Return the right hand side of the Hines solver. + */ + [[nodiscard]] field::RHS::type const& rhs() const { + return this->template get(); + } + + /** + * @brief Return a handle to the right hand side of the Hines solver. + */ + [[nodiscard]] data_handle rhs_handle() { + return this->template get_handle(); + } + + [[nodiscard]] field::FastIMemSavRHS::type& sav_d() { + return this->template get(); + } + + [[nodiscard]] field::FastIMemSavRHS::type const& sav_d() const { + return this->template get(); + } + + [[nodiscard]] field::FastIMemSavRHS::type& sav_rhs() { + return this->template get(); + } + + [[nodiscard]] field::FastIMemSavRHS::type const& sav_rhs() const { + return this->template get(); + } + [[nodiscard]] data_handle sav_rhs_handle() { + return this->template get_handle(); + } + + friend std::ostream& operator<<(std::ostream& os, handle_interface const& handle) { + if (handle.id()) { + return os << "Node{" << handle.id() << '/' << handle.underlying_storage().size() + << " v=" << handle.v() << " area=" << handle.area() << " a=" << handle.a() + << " b=" << handle.b() << " d=" << handle.d() << '}'; + } else { + return os << "Node{null}"; + } + } +}; +} // namespace neuron::container::Node diff --git a/src/neuron/container/node_data.hpp b/src/neuron/container/node_data.hpp new file mode 100644 index 0000000000..ac53aa5269 --- /dev/null +++ b/src/neuron/container/node_data.hpp @@ -0,0 +1,41 @@ +#pragma once +#include "membdef.h" +#include "neuron/container/node.hpp" +#include "neuron/container/soa_container.hpp" + +namespace neuron::container::Node { +/** @brief Underlying storage for all Nodes. + */ +struct storage: soa { + [[nodiscard]] std::string_view name() const { + return {}; + } +}; + +/** + * @brief Non-owning handle to a Node. + */ +using handle = handle_interface>; + +/** + * @brief Owning handle to a Node. + */ +struct owning_handle: handle_interface> { + using base_type = handle_interface>; + using base_type::base_type; + /** + * @brief Get a non-owning handle from an owning handle. 
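A sketch using the Node accessors above; `node` is assumed to be a valid owning handle obtained elsewhere, and the values written are illustrative.

#include "neuron/container/node_data.hpp"

#include <iostream>

void node_sketch(neuron::container::Node::owning_handle& node) {
    node.v() = -65.0;            // write the membrane potential
    node.area() = 100.0;         // same as field::Area::default_value()
    auto vh = node.v_handle();   // data_handle<double>, stable to permutations of the storage
    std::cout << node << " v via handle = " << *vh << '\n';
}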
+ */ + [[nodiscard]] handle non_owning_handle() { + return non_owning_identifier{&underlying_storage(), id()}; + } +}; +} // namespace neuron::container::Node diff --git a/src/neuron/container/non_owning_soa_identifier.hpp b/src/neuron/container/non_owning_soa_identifier.hpp new file mode 100644 index 0000000000..18722269b1 --- /dev/null +++ b/src/neuron/container/non_owning_soa_identifier.hpp @@ -0,0 +1,142 @@ +#pragma once +#include +#include +#include +#include +#include +#include + +namespace neuron::container { +/** + * @brief Struct used to index SoAoS data, such as array range variables. + */ +struct field_index { + int field{}, array_index{}; +}; + +inline constexpr std::size_t invalid_row = std::numeric_limits::max(); + +/** + * @brief A non-owning permutation-stable identifier for an entry in a container. + * + * The container type is not specified. This is essentially a wrapper for + * std::size_t* that avoids using that naked type in too many places. + */ +struct non_owning_identifier_without_container { + /** + * @brief Create a null identifier. + */ + non_owning_identifier_without_container() = default; + + non_owning_identifier_without_container(const non_owning_identifier_without_container& other) = + default; + non_owning_identifier_without_container(non_owning_identifier_without_container&& other) = + default; + + non_owning_identifier_without_container& operator=( + const non_owning_identifier_without_container&) = default; + non_owning_identifier_without_container& operator=(non_owning_identifier_without_container&&) = + default; + + ~non_owning_identifier_without_container() = default; + + + /** + * @brief Does the identifier refer to a valid entry? + * + * The row can be invalid because the identifier was always null, or it can + * have become invalid if the relevant entry was deleted after the + * identifier was created. + */ + [[nodiscard]] explicit operator bool() const { + return m_ptr && (*m_ptr != invalid_row); + } + + /** + * @brief What is the current row? + * + * The returned value is invalidated by any deletions from the underlying + * container, and by any permutations of the underlying container. + */ + [[nodiscard]] std::size_t current_row() const { + assert(m_ptr); + auto const value = *m_ptr; + assert(value != invalid_row); + return value; + } + + /** + * @brief Did the identifier use to refer to a valid entry? + */ + [[nodiscard]] bool was_once_valid() const { + return m_ptr && *m_ptr == invalid_row; + } + + /** + * @brief Has the identifier always been null. + * + * has_always_been_null() --> "null" + * !has_always_been_null && was_once_valid --> "died" + * !has_always_been_null && !was_once_valid --> "row=X" + */ + [[nodiscard]] bool has_always_been_null() const { + return !m_ptr; + } + + friend std::ostream& operator<<(std::ostream& os, + non_owning_identifier_without_container const& id) { + if (!id.m_ptr) { + return os << "null"; + } else if (*id.m_ptr == invalid_row) { + return os << "died"; + } else { + return os << "row=" << *id.m_ptr; + } + } + + /** @brief Test if two handles are both null or refer to the same valid row. 
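The three states listed above (null, died, valid row) can be distinguished with the query methods; a small classification helper as a sketch, given an identifier obtained e.g. from data_handle::identifier().

#include "neuron/container/non_owning_soa_identifier.hpp"

#include <string>

using neuron::container::non_owning_identifier_without_container;

std::string classify(non_owning_identifier_without_container const& id) {
    if (id) {
        return "row=" + std::to_string(id.current_row());  // valid entry
    }
    if (id.was_once_valid()) {
        return "died";  // the entry was deleted after the identifier was created
    }
    return "null";      // has_always_been_null()
}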
+ */ + friend bool operator==(non_owning_identifier_without_container lhs, + non_owning_identifier_without_container rhs) { + return lhs.m_ptr == rhs.m_ptr || (!lhs && !rhs); + } + + friend bool operator!=(non_owning_identifier_without_container lhs, + non_owning_identifier_without_container rhs) { + return !(lhs == rhs); + } + + friend bool operator<(non_owning_identifier_without_container lhs, + non_owning_identifier_without_container rhs) { + return lhs.m_ptr < rhs.m_ptr; + } + + protected: + // Needed to convert owning_identifier to non_owning_identifier + template + friend struct owning_identifier; + + template + friend struct soa; + friend struct std::hash; + explicit non_owning_identifier_without_container(std::shared_ptr ptr) + : m_ptr{std::move(ptr)} {} + void set_current_row(std::size_t row) { + assert(m_ptr); + *m_ptr = row; + } + + explicit non_owning_identifier_without_container(size_t row) + : m_ptr(std::make_shared(row)) {} + + private: + std::shared_ptr m_ptr{}; +}; +} // namespace neuron::container +template <> +struct std::hash { + std::size_t operator()( + neuron::container::non_owning_identifier_without_container const& h) noexcept { + return reinterpret_cast(h.m_ptr.get()); + } +}; diff --git a/src/neuron/container/soa_container.hpp b/src/neuron/container/soa_container.hpp new file mode 100644 index 0000000000..08591c9b32 --- /dev/null +++ b/src/neuron/container/soa_container.hpp @@ -0,0 +1,1624 @@ +#pragma once +#include "backtrace_utils.h" +#include "memory_usage.hpp" +#include "neuron/container/data_handle.hpp" +#include "neuron/container/generic_data_handle.hpp" +#include "neuron/container/soa_identifier.hpp" + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace neuron::container { +namespace detail { +template +inline constexpr bool type_in_pack_v = std::disjunction_v...>; +// https://stackoverflow.com/a/67106430 +template +inline constexpr bool are_types_unique_v = + (!std::is_same_v && ...) 
&& are_types_unique_v; +template +inline constexpr bool are_types_unique_v = true; +// https://stackoverflow.com/a/18063608 +template +struct index_of_type_helper; +template +struct index_of_type_helper> { + static constexpr std::size_t value = 0; +}; +template +struct index_of_type_helper> { + static constexpr std::size_t value = 1 + index_of_type_helper>::value; +}; +template +inline constexpr std::size_t index_of_type_v = []() { + constexpr bool Ts_are_unique = are_types_unique_v; + constexpr bool T_is_in_Ts = type_in_pack_v; + static_assert(Ts_are_unique, + "index_of_type_v assumes there are no duplicates in Ts..."); + static_assert(T_is_in_Ts, "index_of_type_v assumes that T occurs in Ts..."); + // make the error message better by avoiding instantiating index_of_type_helper if the + // assertions fail + if constexpr (Ts_are_unique && T_is_in_Ts) { + return index_of_type_helper>::value; + } else { + return std::numeric_limits::max(); // unreachable without hitting + // static_assert + } +}(); + +// Detect if a type T has a non-static member function called default_value +template +inline constexpr bool has_default_value_v = false; +template +inline constexpr bool + has_default_value_v().default_value())>> = true; + +// Get the array dimension for a given field within a given tag, or 1 if the array_dimension +// function is not defined in the tag type +template +auto get_array_dimension(T const& t, std::nullptr_t /* higher precedence than the fallback case */) + -> decltype(t.array_dimension(), 0) { + return t.array_dimension(); +} +template +auto get_array_dimension(T const& t, int i) -> decltype(t.array_dimension(i), 0) { + return t.array_dimension(i); +} +template +int get_array_dimension(T const&, ...) { + return 1; +} + +// Detect if a type T has a non-static member function called num_variables(). +template +struct has_num_variables: std::false_type {}; +template +struct has_num_variables().num_variables())>> + : std::true_type {}; +template +inline constexpr bool has_num_variables_v = has_num_variables::value; + +template +size_t get_num_variables(T const& t) { + if constexpr (has_num_variables_v) { + return t.num_variables(); + } else { + return 1; + } +} + +// Get the value of a static member variable called optional, or false if it doesn't exist. +template +struct optional: std::false_type {}; +template +struct optional> { + constexpr static bool value = T::optional; +}; +template +inline constexpr bool optional_v = optional::value; + +enum struct FieldImplementation { + AlwaysSingle, // field always exists -> std::vector + OptionalSingle, // field exists 0 or 1 time -> std::vector that might be skipped + RuntimeVariable // field is duplicated a number of times that is set at runtime -> + // std::vector> +}; + +template +inline constexpr FieldImplementation field_impl_v = + (has_num_variables_v ? FieldImplementation::RuntimeVariable + : (optional_v ? FieldImplementation::OptionalSingle + : FieldImplementation::AlwaysSingle)); + +// Get a name for a given field within a given tag +template +auto get_name_impl(Tag const& tag, int field_index, std::nullptr_t) + -> decltype(static_cast(tag.name(field_index)), std::string()) { + return tag.name(field_index); +} + +template +std::string get_name_impl(Tag const& tag, int field_index, ...) 
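The field classification above can be checked at compile time. A sketch with three hypothetical tag types, one per FieldImplementation value; only the members that the detection traits look for are defined.

#include "neuron/container/soa_container.hpp"

#include <cstddef>

namespace detail = neuron::container::detail;

struct PlainTag {                // always exactly one column
    using type = double;
};
struct OptionalTag {             // zero or one columns, toggled at runtime
    static constexpr bool optional = true;
    using type = double;
};
struct RuntimeTag {              // N columns, N chosen at runtime
    using type = double;
    std::size_t num_variables() const { return 3; }
};

static_assert(detail::field_impl_v<PlainTag> == detail::FieldImplementation::AlwaysSingle);
static_assert(detail::field_impl_v<OptionalTag> == detail::FieldImplementation::OptionalSingle);
static_assert(detail::field_impl_v<RuntimeTag> == detail::FieldImplementation::RuntimeVariable);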
{ + auto ret = cxx_demangle(typeid(Tag).name()); + if (field_index >= 0) { + ret.append(1, '#'); + ret.append(std::to_string(field_index)); + } + constexpr std::string_view prefix{"neuron::container::"}; + if (std::string_view{ret}.substr(0, prefix.size()) == prefix) { + ret.erase(0, prefix.size()); + } + return ret; +} + +/** + * @brief Get the nicest available name for the field_index-th instance of Tag. + * + * This should elegantly handle field_index == -1 (=> the tag doesn't get have num_variables()) and + * field_index being out of range. + */ +template +auto get_name(Tag const& tag, int field_index) { + if constexpr (has_num_variables_v) { + if (field_index >= 0 && field_index < tag.num_variables()) { + // use tag.name(field_index) if it's available, otherwise fall back + return get_name_impl(tag, field_index, nullptr); + } + } + // no num_variables() or invalid field_index, use the fallback + return get_name_impl(tag, field_index, 1 /* does not match nullptr */); +} + +struct index_column_tag { + using type = non_owning_identifier_without_container; +}; + +/** + * @brief Check if the given range is a permutation of the first N integers. + * @return true if the permutation is trivial, false otherwise. + * + * A trivial permutation is one where i == range[i] for all i. An empty range + * is classed as trivial. + */ +template +bool check_permutation_vector(Rng const& range, std::size_t size) { + if (range.size() != size) { + throw std::runtime_error("invalid permutation vector: wrong size"); + } + bool trivial{true}; + std::vector seen(size, false); + for (auto i = 0ul; i < size; ++i) { + auto const val = range[i]; + trivial = trivial && (i == val); + if (!(val >= 0 && val < size)) { + throw std::runtime_error("invalid permutation vector: value out of range"); + } + if (seen[val]) { + throw std::runtime_error("invalid permutation vector: repeated value " + + std::to_string(val)); + } + seen[val] = true; + } + return trivial; +} + +enum struct may_cause_reallocation { Yes, No }; + +/** @brief Defer deleting pointers to deallocated memory. + * + * The address of a pointer to the underlying storage of `field_data` can + * escape the container. When deallocating the container the memory is + * deallocated but the pointer to the storage location is "leaked" into this + * vector. + */ +extern std::vector* defer_delete_storage; +VectorMemoryUsage compute_defer_delete_storage_size(); + +/** + * @brief Storage for safe deletion of soa<...> containers. + * + * This is intended to prevent deleting an instance of a soa<...>-based container from invalidating + * any existing data handles, by keeping certain (small) values alive. Deleting these containers is + * probably not common (e.g. deleting a mechanism type), and only small bookkeeping-related values + * have to be kept alive. Generally defer_delete_storage is non-null for the lifetime of the top + * level Model structure, and the Model destructor deallocates (using delete[]) the pointers that + * are stored inside defer_delete_storage. 
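A sketch of how the permutation check above behaves; the vectors are made up, and the helper lives in the detail namespace of this header.

#include "neuron/container/soa_container.hpp"

#include <cstddef>
#include <vector>

void permutation_sketch() {
    using neuron::container::detail::check_permutation_vector;

    std::vector<std::size_t> identity{0, 1, 2};
    bool trivial = check_permutation_vector(identity, 3);  // true: i == range[i] for every i

    std::vector<std::size_t> rotated{2, 0, 1};
    trivial = check_permutation_vector(rotated, 3);         // false, but a valid permutation

    // A wrong size, an out-of-range value, or a repeated value (e.g. {0, 0, 2}) makes it throw.
    (void) trivial;
}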
+ */ +template +void defer_delete(std::unique_ptr data) { + static_assert(std::is_trivially_destructible_v, "defer_delete does not call destructors"); + if (data && defer_delete_storage) { + defer_delete_storage->push_back(data.release()); + } +} + +template +struct field_data { + static_assert(impl == FieldImplementation::AlwaysSingle || + impl == FieldImplementation::OptionalSingle); + using data_type = typename Tag::type; + static_assert(!has_num_variables_v); + field_data(Tag tag) + : m_tag{std::move(tag)} + , m_array_dim{get_array_dimension(m_tag)} { + if constexpr (impl == FieldImplementation::AlwaysSingle) { + m_data_ptr = std::make_unique(1); + } + } + + ~field_data() { + // An unknown number of data_handle in the wild may be holding references to m_data_ptr + defer_delete(std::move(m_data_ptr)); + } + + /** + * @brief Return a reference to the tag instance. + */ + Tag const& tag() const { + return m_tag; + } + + template + Callable for_each_vector(Callable callable) { + if constexpr (impl == FieldImplementation::OptionalSingle) { + if (!m_data_ptr) { + // inactive, optional field + return callable; + } + } + callable(m_tag, m_storage, -1, m_array_dim); + if constexpr (might_reallocate == may_cause_reallocation::Yes) { + m_data_ptr[0] = m_storage.data(); + } + + return callable; + } + + template + Callable for_each_vector(Callable callable) const { + if constexpr (impl == FieldImplementation::OptionalSingle) { + if (!m_data_ptr) { + // inactive, optional field + return callable; + } + } + callable(m_tag, m_storage, -1, m_array_dim); + + return callable; + } + + [[nodiscard]] bool active() const { + static_assert(impl == FieldImplementation::OptionalSingle); + return bool{m_data_ptr}; + } + + void set_active(bool enable, std::size_t size) { + static_assert(impl == FieldImplementation::OptionalSingle); + if (enable == active()) { + return; + } + if (enable) { + // make sure the storage is allocated + the right size + full of default values + assert(m_storage.empty()); // it should be starting off empty + if constexpr (has_default_value_v) { + m_storage.resize(size * m_array_dim, m_tag.default_value()); + } else { + m_storage.resize(size * m_array_dim); + } + m_data_ptr = std::make_unique(1); + m_data_ptr[0] = m_storage.data(); + } else { + // clear + free the storage + m_storage.clear(); + m_storage.shrink_to_fit(); + // data handles may be holding pointers to m_data_ptr (which is the reason for the + // deferred deletion); signal to them that they are no longer valid by writing nullptr + // here + m_data_ptr[0] = nullptr; + defer_delete(std::move(m_data_ptr)); + } + } + + [[nodiscard]] data_type* const* data_ptrs() const { + return m_data_ptr.get(); + } + + [[nodiscard]] int const* array_dims() const { + return &m_array_dim; + } + + [[nodiscard]] int const* array_dim_prefix_sums() const { + return &m_array_dim; + } + + private: + /** + * @brief Tag type instance. + * + * An instance of @c soa contains an instance of @c field_data for each tag type in its @c + * Tags... pack. The instance of the tag type contains the metadata about the field it + * represents, and @c field_data adds the actual data for that field. For example, with @c Tag = + * @c Node::field::Voltage, which represents the voltage in a given Node, @c m_tag is just an + * empty type that defines the @c data_type and default value of voltages. + */ + Tag m_tag; + + /** + * @brief Storage for the data associated with @c Tag. + * + * This is one of the "large" data arrays holding the model data. 
Because this specialisation of + * @c field_data is for @c Tag types that @b don't have @c num_variables() members, such as @c + * Node::field::Voltage, there is exactly one vector per instance of @c field_data. Because the + * fields in @c Node::storage all have array dimension 1, in that case the size of this vector + * is the number of Node instances in the program. + */ + std::vector m_storage; + + /** + * @brief Storage where we maintain an up-to-date cache of @c m_storage.data(). + * @invariant @c m_storage.data() is equal to @c m_data_ptr. + * @see field_data::m_data_ptrs %for the motivation. + * + * This is declared as an array (of size 1) to simplify the implementation of defer_delete. + * For FieldImplementation::OptionalSingle then whether or not this is null encodes whether + * or not the field is active. For FieldImplementation::AlwaysSingle it is never null. + */ + std::unique_ptr m_data_ptr; + + /** + * @brief Array dimension of the data associated with @c Tag. + * @invariant @c m_array_dim is equal to @c m_tag.array_dimension(), if that function exists, + * or 1. + * @see field_data::m_array_dims %for the motivation. + */ + int m_array_dim; +}; + +/** + * @brief Storage manager for a tag type that implements num_variables(). + * + * An illustrative example is that this is responsible for the storage associated with floating + * point mechanism data, where the number of fields is set at runtime via num_variables. + * + * As well as owning the actual storage containers, this type maintains two spans of values that + * can be used by other types, in particular neuron::cache::MechanismRange: + * - array_dims() returns a pointer to the first element of a num_variables()-sized range holding + * the array dimensions of the variables. + * - array_dim_prefix_sums() returns a pointer to the first element of a num_variables()-sized + * range holding the prefix sum over the array dimensions (i.e. if array_dims() returns [1, 2, 1] + * then array_dim_prefix_sums() returns [1, 3, 4]). + * - data_ptrs() returns a pointer to the first element of a num_variables()-sized range holding + * pointers to the start of the storage associated with each variable (i.e. the result of calling + * data() on the underlying vector). + * + * This is a helper type for use by neuron::container::soa and it should not be used directly. + */ +template +struct field_data { + using data_type = typename Tag::type; + static_assert(has_num_variables_v); + field_data(Tag tag) + : m_tag{std::move(tag)} + , m_storage{m_tag.num_variables()} + , m_data_ptrs{std::make_unique(m_tag.num_variables())} { + update_data_ptr_storage(); + auto const num = m_tag.num_variables(); + m_array_dims.reserve(num); + m_array_dim_prefix_sums.reserve(num); + for (auto i = 0; i < m_tag.num_variables(); ++i) { + m_array_dims.push_back(get_array_dimension(m_tag, i)); + m_array_dim_prefix_sums.push_back( + (m_array_dim_prefix_sums.empty() ? 0 : m_array_dim_prefix_sums.back()) + + m_array_dims.back()); + } + } + + ~field_data() { + // An unknown number of data_handle in the wild may be holding references to m_data_ptrs + defer_delete(std::move(m_data_ptrs)); + } + + /** + * @brief Return a reference to the tag instance. + */ + Tag const& tag() const { + return m_tag; + } + + /** + * @brief Return a pointer to an array of array dimensions for this tag. + * + * This avoids indirection via the tag type instances. 
Because array dimensions are not + * permitted to change, this is guaranteed to remain valid as long as the underlying soa<...> + * container does. This is mainly intended for use in neuron::cache::MechanismRange and friends. + */ + [[nodiscard]] int const* array_dims() const { + return m_array_dims.data(); + } + + /** + * @brief Return a pointer to an array of the prefix sum of array dimensions for this tag. + */ + [[nodiscard]] int const* array_dim_prefix_sums() const { + return m_array_dim_prefix_sums.data(); + } + + [[nodiscard]] int check_array_dim(int field_index, int array_index) const { + assert(field_index >= 0); + assert(array_index >= 0); + if (auto const num_fields = m_tag.num_variables(); field_index >= num_fields) { + throw std::runtime_error(get_name(m_tag, field_index) + "/" + + std::to_string(num_fields) + ": out of range"); + } + auto const array_dim = m_array_dims[field_index]; + if (array_index >= array_dim) { + throw std::runtime_error(get_name(m_tag, field_index) + ": index " + + std::to_string(array_index) + " out of range"); + } + return array_dim; + } + + /** + * @brief Return a pointer to an array of data pointers for this tag. + * + * This array is guaranteed to be kept up to date when the actual storage is re-allocated. + * This is mainly intended for use in neuron::cache::MechanismRange and friends. + */ + [[nodiscard]] data_type* const* data_ptrs() const { + return m_data_ptrs.get(); + } + + /** + * @brief Invoke the given callable for each vector. + * + * @tparam might_reallocate Might the callable cause reallocation of the vector it is given? + * @param callable A callable to invoke. + */ + template + Callable for_each_vector(Callable callable) { + for (auto i = 0; i < m_storage.size(); ++i) { + callable(m_tag, m_storage[i], i, m_array_dims[i]); + } + if constexpr (might_reallocate == may_cause_reallocation::Yes) { + update_data_ptr_storage(); + } + + return callable; + } + + /** + * @brief Invoke the given callable for each vector. + * + * @param callable A callable to invoke. + */ + template + Callable for_each_vector(Callable callable) const { + for (auto i = 0; i < m_storage.size(); ++i) { + callable(m_tag, m_storage[i], i, m_array_dims[i]); + } + + return callable; + } + + // TODO actually use this + // TODO use array_dim_prefix_sums + [[nodiscard]] field_index translate_legacy_index(int legacy_index) const { + int total{}; + auto const num_fields = m_tag.num_variables(); + for (auto field = 0; field < num_fields; ++field) { + auto const array_dim = m_array_dims[field]; + if (legacy_index < total + array_dim) { + auto const array_index = legacy_index - total; + return {field, array_index}; + } + total += array_dim; + } + throw std::runtime_error("could not translate legacy index " + + std::to_string(legacy_index)); + } + + private: + void update_data_ptr_storage() { + std::transform(m_storage.begin(), m_storage.end(), m_data_ptrs.get(), [](auto& vec) { + return vec.data(); + }); + } + /** + * @brief Tag type instance. + * + * An instance of @c soa contains an instance of @c field_data for each tag type in its @c + * Tags... pack. The instance of the tag type contains the metadata about the field it + * represents, and @c field_data adds the actual data for that field. For example, with @c Tag = + * @c Mechanism::field::FloatingPoint, which represents RANGE variables in mechanisms, @c m_tag + * holds the names and array dimensions of the RANGE variables. + */ + Tag m_tag; + + /** + * @brief Storage for the data associated with @c Tag. 
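A worked example of the legacy-index translation mentioned above, re-implemented as a free function purely for illustration (the real logic is a member of field_data); the array dimensions {1, 3, 1} are hypothetical.

#include "neuron/container/non_owning_soa_identifier.hpp"  // neuron::container::field_index

#include <stdexcept>
#include <vector>

neuron::container::field_index translate(std::vector<int> const& array_dims, int legacy_index) {
    int total{};
    for (int field = 0; field < static_cast<int>(array_dims.size()); ++field) {
        if (legacy_index < total + array_dims[field]) {
            return {field, legacy_index - total};
        }
        total += array_dims[field];
    }
    throw std::runtime_error("could not translate legacy index");
}

// With array_dims = {1, 3, 1} (a scalar, an array of 3, another scalar):
//   legacy index 0 -> {field=0, array_index=0}
//   legacy index 2 -> {field=1, array_index=1}
//   legacy index 4 -> {field=2, array_index=0}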
+ * + * These are the "large" data arrays holding the model data. Because this specialisation of @c + * field_data is for @c Tag types that @b do have @c num_variables() members, such as @c + * Mechanism::field::FloatingPoint, there is an outer vector with this dimension. + * + * @invariant @c m_storage.size() is equal to @c m_tag.num_variables() + * + * For Mechanism data, this size is equal to the number of RANGE variables, while + * @c m_storage[i].size() is (assuming an array dimension of 1) the number of instances (in this + * case of the given Mechanism type) that exist in the program. + */ + std::vector> m_storage; + + /** + * @brief Storage where we maintain an up-to-date cache of .data() pointers from m_storage. + * @invariant @c m_data_ptrs contains @c m_storage.size() elements + * @invariant @c m_storage[i].data() is equal to @c m_data_ptrs[i] for all @c i. + * + * This is useful because it allows @c data_handle to store something like @c T** instead of + * having to store something like @c std::vector*, which avoids hardcoding unnecessary + * details about the allocator and so on, and allows @c cache::MechanismRange to similarly have + * a C-like interface. Because @c data_handle remembers references to this, we cannot free + * it when the container is destroyed (e.g. when a mechanism type is deleted). + */ + std::unique_ptr m_data_ptrs; + + /** + * @brief Array dimensions of the data associated with @c Tag. + * @invariant @c m_storage.size() is equal to @c m_array_dims.size() + * @invariant @c m_array_dims[i] is equal to @c m_tag.array_dimension(i), if that function + * exists, or otherwise 1, for all @c i + * + * Similar to @c m_data_ptrs, this allows the array dimensions to be communicated simply across + * a C-like interface. + */ + std::vector m_array_dims; + + /** + * @brief Prefix sum over array dimensions for the data associated with @c Tag. + * @invariant @c m_storage.size() is equal to @c m_array_dim_prefix_sums.size() + * @invariant @c m_array_dim_prefix_sums[i] is equal to the sum of @c m_array_dims[0] .. @c + * m_array_dims[i] for all @c i. + * @todo This could be used to more efficiently convert legacy indices. + * + * This is mainly useful for logic that aids the transition from AoS to SoAoS format in NEURON. + * For example, the size of the old @c _p vectors in NEURON was @c + * m_array_dim_prefix_sums.back(), the sum over all array dimensions, which is generally larger + * than @c m_tag.num_variables(). 
+ */ + std::vector m_array_dim_prefix_sums; +}; + +struct storage_info_impl: utils::storage_info { + std::string_view container() const override { + return m_container; + } + std::string_view field() const override { + return m_field; + } + std::size_t size() const override { + return m_size; + } + std::string m_container{}, m_field{}; + std::size_t m_size{}; +}; + +class AccumulateMemoryUsage { + public: + void operator()(detail::index_column_tag const& indices, + std::vector const& vec, + int field_index, + int array_dim) { + auto element_size = sizeof(detail::index_column_tag::type); + + m_usage.stable_identifiers.size = vec.size() * (sizeof(vec[0]) + sizeof(size_t*)); + m_usage.stable_identifiers.capacity = m_usage.stable_identifiers.size + + (vec.capacity() - vec.size()) * sizeof(vec[0]); + } + + template + void operator()(Tag const& tag, + std::vector const& vec, + int field_index, + int array_dim) { + m_usage.heavy_data += VectorMemoryUsage(vec); + } + + StorageMemoryUsage usage() { + return m_usage; + } + + private: + StorageMemoryUsage m_usage; +}; + + +} // namespace detail + +/** + * @brief Token whose lifetime manages the frozen state of a container. + * + * Because this cannot be defaulted constructed or moved, it cannot reach an + * empty/invalid state. + */ +template +struct state_token { + /** + * @brief Copy a token, incrementing the frozen count of the underlying container. + */ + constexpr state_token(state_token const& other) + : m_container{other.m_container} { + assert(m_container); + m_container->increase_frozen_count(); + } + + /** + * @brief Copy assignment. + * + * Explicitly deleted to avoid an implicit version with the wrong semantics. + */ + constexpr state_token& operator=(state_token const&) = delete; + + /** + * @brief Destroy a token, decrementing the frozen count of the underlying container. + */ + ~state_token() { + assert(m_container); + m_container->decrease_frozen_count(); + } + + private: + template + friend struct soa; + constexpr state_token(Container& container) + : m_container{&container} {} + Container* m_container{}; +}; + +/** + * @brief Utility for generating SOA data structures. + * @headerfile neuron/container/soa_container.hpp + * @tparam Storage Name of the actual storage type derived from soa<...>. + * @tparam Tags Parameter pack of tag types that define the columns + * included in the container. Types may not be repeated. + * + * This CRTP base class is used to implement the ~global SOA storage structs + * that hold (so far) Node and Mechanism data. Ownership of rows in these + * structs is managed via instances of the owning identifier type @ref + * neuron::container::owning_identifier instantiated with Storage, and + * non-owning reference to rows in the data structures are managed via instances + * of the @ref neuron::container::non_owning_identifier template instantiated + * with Storage. These identifiers are typically wrapped in a + * data-structure-specific (i.e. Node- or Mechanism-specific) interface type + * that provides data-structure-specific accessors and methods to obtain actual + * data values and more generic handle types such as @ref + * neuron::container::data_handle and @ref + * neuron::container::generic_data_handle. + */ +template +struct soa { + /** + * @brief Construct with default-constructed tag type instances. + */ + soa() + : soa(Tags{}...) {} + + /** + * @brief Construct with specific tag instances. 
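// Illustrative sketch, not part of the original change: how a concrete storage type is
// intended to derive from this CRTP base. The tag type field::Foo and the type
// toy_storage below are hypothetical stand-ins; the real instantiations are
// Node::storage and Mechanism::storage.
//   namespace example {
//   namespace field {
//   struct Foo {
//       using type = double;  // element type of the column defined by this tag
//   };
//   }  // namespace field
//   struct toy_storage: neuron::container::soa<toy_storage, field::Foo> {
//       // name() is what find_container_info() reports for pointers into this container
//       std::string_view name() const { return "example::toy_storage"; }
//   };
//   }  // namespace example
// Rows are created and destroyed through owning identifiers rather than push_back():
//   example::toy_storage data;
//   neuron::container::owning_identifier<example::toy_storage> row{data};  // data.size() == 1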
+ * + * This is useful if the tag types are not empty, for example if the number + * of times a column is duplicated is a runtime value. + */ + soa(Tags... tag_instances) + : m_data{std::move(tag_instances)...} {} + + /** + * @brief @ref soa is not movable + * + * This is to make it harder to accidentally invalidate pointers-to-storage + * in handles. + */ + soa(soa&&) = delete; + + /** + * @brief @ref soa is not copiable + * + * This is partly to make it harder to accidentally invalidate + * pointers-to-storage in handles, and partly because it could be very + * expensive so it might be better to be more explicit. + */ + soa(soa const&) = delete; + + /** + * @brief @ref soa is not move assignable + * + * For the same reason it isn't movable. + */ + soa& operator=(soa&&) = delete; + + /** + * @brief @ref soa is not copy assignable + * + * For the same reasons it isn't copy constructible + */ + soa& operator=(soa const&) = delete; + + /** + * @brief Get the size of the container. + * + * Note that this is not thread-safe if the container is not frozen, i.e. + * you should either hold a token showing the container is frozen, or you + * should ensure that no non-const operations on this container are being + * executed concurrently. + */ + [[nodiscard]] std::size_t size() const { + // Check our various std::vector members are still the same size as each + // other. This check could be omitted in release builds... + auto const check_size = m_indices.size(); + for_each_vector( + [check_size](auto const& tag, auto const& vec, int field_index, int array_dim) { + auto const size = vec.size(); + assert(size % array_dim == 0); + assert(size / array_dim == check_size); + }); + return check_size; + } + + /** + * @brief Test if the container is empty. + * + * Note that this is not thread-safe if the container is not frozen, i.e. + * you should either hold a token showing the container is frozen, or you + * should ensure that no non-const operations on this container are being + * executed concurrently. + */ + [[nodiscard]] bool empty() const { + auto const result = m_indices.empty(); + for_each_vector([result](auto const& tag, auto const& vec, int field_index, int array_dim) { + assert(vec.empty() == result); + }); + return result; + } + + void shrink_to_fit() { + if (m_frozen_count) { + throw_error("shrink() called on a frozen structure"); + } + for_each_vector( + [](auto const& tag, auto& vec, int field_index, int array_dim) { + vec.shrink_to_fit(); + }); + } + + private: + /** + * @brief Remove the @f$i^{\text{th}}@f$ row from the container. + * + * This is currently implemented by swapping the last element into position + * @f$i@f$ (if those are not the same element) and reducing the size by one. + * Iterators to the last element and the deleted element will be + * invalidated. + */ + void erase(std::size_t i) { + // Lock access to m_frozen_count and m_sorted. + std::lock_guard _{m_mut}; + if (m_frozen_count) { + throw_error("erase() called on a frozen structure"); + } + mark_as_unsorted_impl(); + auto const old_size = size(); + assert(i < old_size); + if (i != old_size - 1) { + // Swap ranges of size array_dim at logical positions `i` and `old_size - 1` in each + // vector + for_each_vector( + [i](auto const& tag, auto& vec, int field_index, int array_dim) { + ::std::swap_ranges(::std::next(vec.begin(), i * array_dim), + ::std::next(vec.begin(), (i + 1) * array_dim), + ::std::prev(vec.end(), array_dim)); + }); + // Tell the new entry at `i` that its index is `i` now. 
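// Illustrative worked example, not part of the original change: erase() above deletes
// row i by swapping in the last logical row. In a container of size 5, erase(2) swaps
// the array_dim-sized sub-ranges of rows 2 and 4 in every column, tells the identifier
// that used to track row 4 that it now lives at row 2 (the set_current_row(i) call
// just below), and then shrinks every column by one logical row, so size() becomes 4.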
+ m_indices[i].set_current_row(i); + } + for_each_vector( + [new_size = old_size - 1](auto const& tag, auto& vec, int field_index, int array_dim) { + vec.resize(new_size * array_dim); + }); + } + + friend struct state_token; + friend struct owning_identifier; + + static_assert(detail::are_types_unique_v, "All tag types should be unique"); + template + static constexpr std::size_t tag_index_v = detail::index_of_type_v; + + /** + * @brief Apply the given function to non-const versions of all vectors. + * + * The callable has a signature compatible with: + * + * void callable(const Tag& tag, + * std::vector& v, + * int field_index, + * int array_dim) + * + * where `array_dim` is the array dimensions of the field `field_index`. + * + * @tparam might_reallocate Might the callable trigger reallocation of the vectors? + * @param callable Callable to invoke on each vector. + * + * If might_allocate is true then the "cached" values of .data() for each vector will be + * updated. + */ + template + Callable for_each_vector(Callable callable) { + // might_reallocate is not relevant for m_indices because we do not expose the location of + // its storage, so it doesn't matter whether or not this triggers reallocation + callable(detail::index_column_tag{}, m_indices, -1, 1); + + return for_each_tag_vector_impl(callable); + } + + template + Callable for_each_tag_vector_impl(Callable callable) { + auto tmp_callable = + std::get>(m_data).template for_each_vector(callable); + return for_each_tag_vector_impl(tmp_callable); + } + + template + Callable for_each_tag_vector_impl(Callable callable) { + return callable; + } + + /** + * @brief Apply the given function to const-qualified versions of all vectors. + * + * The callable has a signature compatible with: + * + * void callable(const Tag& tag, + * const std::vector& v, + * int field_index, + * int array_dim) + * + * where `array_dim` is the array dimensions of the field `field_index`. + * + * Because of the const qualification this cannot cause reallocation and trigger updates of + * pointers inside m_data, so no might_reallocate parameter is needed. + */ + template + Callable for_each_vector(Callable callable) const { + callable(detail::index_column_tag{}, m_indices, -1, 1); + return for_each_tag_vector_impl(callable); + } + + template + Callable for_each_tag_vector_impl(Callable callable) const { + Callable tmp_callable = std::get>(m_data).template for_each_vector( + callable); + return for_each_tag_vector_impl(tmp_callable); + } + + template + Callable for_each_tag_vector_impl(Callable callable) const { + return callable; + } + + + /** + * @brief Record that a state_token was copied. + * + * This should only be called from the copy constructor of a state_token, + * so m_frozen_count should already be non-zero. + */ + void increase_frozen_count() { + std::lock_guard _{m_mut}; + assert(m_frozen_count); + ++m_frozen_count; + } + + /** + * @brief Flag that the storage is no longer frozen. + * + * This is called from the destructor of state_token. + */ + void decrease_frozen_count() { + std::lock_guard _{m_mut}; + assert(m_frozen_count); + --m_frozen_count; + } + + public: + /** + * @brief Return type of issue_frozen_token() + */ + using frozen_token_type = state_token; + + /** + * @brief Create a token guaranteeing the container is in "frozen" state. + * + * This does *not* modify the "sorted" flag on the container. + * + * The token type is copy constructible but not default constructible. 
+ * There is no need to check if a given instance of the token type is + * "valid"; if a token is held then the container is guaranteed to be + * frozen. + * + * The tokens returned by this function are reference counted; the + * container will be frozen for as long as any token is alive. + * + * Methods such as apply_reverse_permutation() take a non-const reference + * to one of these tokens. This is because a non-const token referring to a + * container with a token reference count of exactly one has an elevated + * status: the holder can lend it out to methods such as + * apply_reverse_permutation() to authorize specific pointer-invaliding + * operations. This is useful for implementing methods such as + * nrn_ensure_model_data_are_sorted() in a thread-safe way. + * + * This method can be called from multiple threads, but note that doing so + * can have surprising effects w.r.t. the elevated status mentioned in the + * previous paragraph. + * + * It is user-defined precisely what "sorted" means, but the soa<...> class + * makes some guarantees: + * - if the container is frozen, no pointers to elements in the underlying + * storage will be invalidated -- attempts to do so will throw or abort. + * - if the container is not frozen, it will remain flagged as sorted until + * a potentially-pointer-invalidating operation (insertion, deletion) + * occurs, or mark_as_unsorted() is called. + * To mark a container as "sorted", apply an explicit permutation to it. + * + * Note that "frozen" refers to the storage layout, not to the stored value, + * meaning that values inside a frozen container can still be modified -- + * "frozen" is not "runtime const". + * + * @todo A future extension could be to preserve the sorted flag until + * pointers are actually, not potentially, invalidated. + */ + [[nodiscard]] frozen_token_type issue_frozen_token() { + // Lock access to m_frozen_count and m_sorted. + std::lock_guard _{m_mut}; + // Increment the reference count, marking the container as frozen. + ++m_frozen_count; + // Return a token that calls decrease_frozen_count() at the end of its lifetime + return frozen_token_type{static_cast(*this)}; + } + + /** + * @brief Tell the container it is sorted. + * @param write_token Non-const token demonstrating the caller is the only + * token owner. + * + * The meaning of being sorted is externally defined, so we should give + * external code the opportunity to say that the current order is OK. This + * probably only makes sense if the external code simply doesn't care about + * the ordering at all for some reason. This avoids having to construct a + * trivial permutation vector to achieve the same thing. + */ + void mark_as_sorted(frozen_token_type& write_token) { + // Lock access to m_frozen_count and m_sorted. + std::lock_guard _{m_mut}; + if (m_frozen_count != 1) { + throw_error("mark_as_sorted() given a token that was not the only valid one"); + } + m_sorted = true; + } + + /** + * @brief Tell the container it is no longer sorted. + * + * The meaning of being sorted is externally defined, and it is possible + * that some external change to an input of the (external) algorithm + * defining the sort order can mean that the data are no longer considered + * sorted, even if nothing has actually changed inside this container. + * + * This method can only be called if the container is not frozen. + */ + void mark_as_unsorted() { + // Lock access to m_frozen_count and m_sorted. 
+ std::lock_guard _{m_mut}; + mark_as_unsorted_impl(); + } + + /** + * @brief Set the callback that is invoked when the container becomes unsorted. + * + * This is invoked by mark_as_unsorted() and when a container operation + * (insertion, permutation, deletion) causes the container to transition + * from being sorted to being unsorted. + * + * This method is not thread-safe. + */ + void set_unsorted_callback(std::function unsorted_callback) { + m_unsorted_callback = std::move(unsorted_callback); + } + + /** + * @brief Query if the underlying vectors are still "sorted". + * + * See the documentation of issue_frozen_token() for an explanation of what + * this means. You most likely only want to call this method while holding + * a token guaranteeing that the container is frozen, and therefore that + * the sorted-status is fixed. + */ + [[nodiscard]] bool is_sorted() const { + return m_sorted; + } + + /** + * @brief Permute the SoA-format data using an arbitrary range of integers. + * @param permutation The reverse permutation vector to apply. + * @return A token guaranteeing the frozen + sorted state of the container + * after the permutation was applied. + * + * This will fail if the container is frozen. + */ + template + frozen_token_type apply_reverse_permutation(Arg&& permutation) { + auto token = issue_frozen_token(); + apply_reverse_permutation(std::forward(permutation), token); + return token; + } + + /** + * @brief Permute the SoA-format data using an arbitrary range of integers. + * @param permutation The reverse permutation vector to apply. + * @param token A non-const token demonstrating that the caller is the only + * party that is forcing the container to be frozen, and (non-const) + * that they are authorised to transfer that status into this method + */ + template + void apply_reverse_permutation(Range permutation, frozen_token_type& sorted_token) { + // Check that the given vector is a valid permutation of length size(). + // The existence of `sorted_token` means that my_size cannot become + // invalid, even though we don't hold `m_mut` yet. + std::size_t const my_size{size()}; + bool const is_trivial{detail::check_permutation_vector(permutation, my_size)}; + // Lock access to m_frozen_count and m_sorted. + std::lock_guard _{m_mut}; + // Applying a permutation in general invalidates indices, so it is + // forbidden if the structure is frozen, and it leaves the structure + // unsorted. We therefore require that the frozen count is 1, which + // corresponds to the `sorted_token` argument to this function being + // the only active token. + if (m_frozen_count != 1) { + throw_error( + "apply_reverse_permutation() given a token that was not the only valid one"); + } + if (!is_trivial) { + // Now we apply the reverse permutation in `permutation` to all of the columns in the + // container. This is the algorithm from boost::algorithm::apply_reverse_permutation. 
+ for (std::size_t i = 0; i < my_size; ++i) { + while (i != permutation[i]) { + using ::std::swap; + auto const next = permutation[i]; + for_each_vector( + [i, next](auto const& tag, auto& vec, auto field_index, auto array_dim) { + // swap the i-th and next-th array_dim-sized sub-ranges of vec + ::std::swap_ranges(::std::next(vec.begin(), i * array_dim), + ::std::next(vec.begin(), (i + 1) * array_dim), + ::std::next(vec.begin(), next * array_dim)); + }); + swap(permutation[i], permutation[next]); + } + } + // update the indices in the container + for (auto i = 0ul; i < my_size; ++i) { + m_indices[i].set_current_row(i); + } + // If the container was previously marked sorted, and we have just + // applied a non-trivial permutation to it, then we need to call the + // callback if it exists (to invalidate any caches based on the old + // sort order). + if (m_sorted && m_unsorted_callback) { + m_unsorted_callback(); + } + } + // In any case, applying a permutation leaves the container in sorted + // state. The caller made an explicit choice about which element should + // live where, which is as much as we can hope for. + m_sorted = true; + } + + + private: + /** + * @brief Set m_sorted = false and execute the callback. + * @note The *caller* is expected to hold m_mut when this is called. + */ + template + void mark_as_unsorted_impl() { + if (m_frozen_count) { + // Currently you can only obtain a frozen container by calling + // issue_frozen_token(), which explicitly guarantees that the + // container will remain sorted for the lifetime of the returned + // token. + throw_error("mark_as_unsorted() called on a frozen structure"); + } + // Only execute the callback if we're transitioning from sorted to + // or if this was an explicit mark_as_unsorted() call + bool const execute_callback{m_sorted || !internal}; + m_sorted = false; + if (execute_callback && m_unsorted_callback) { + m_unsorted_callback(); + } + } + + /** + * @brief Create a new entry and return an identifier that owns it. + * + * Calling this method increases size() by one. Destroying (modulo move + * operations) the returned identifier, which has the semantics of a + * unique_ptr, decreases size() by one. + * + * Note that this has different semantics to standard library container + * methods such as emplace_back(), push_back(), insert() and so on. Because + * the returned identifier manages the lifetime of the newly-created entry, + * discarding the return value will cause the new entry to immediately be + * deleted. + * + * This is a low-level call that is useful for the implementation of the + * owning_identifier template. The returned owning identifier is typically + * wrapped inside an owning handle type that adds data-structure-specific + * methods (e.g. v(), v_handle() for a Node). + */ + [[nodiscard]] owning_identifier acquire_owning_identifier() { + // Lock access to m_frozen_count and m_sorted. + std::lock_guard _{m_mut}; + if (m_frozen_count) { + throw_error("acquire_owning_identifier() called on a frozen structure"); + } + // The .emplace_back() methods we are about to call can trigger + // reallocation and, therefore, invalidation of pointers. At present, + // "sorted" is defined to mean that pointers have not been invalidated. 
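// Illustrative sketch, not part of the original change: the intended interplay of
// issue_frozen_token() and apply_reverse_permutation() documented above, where
// `storage` is any soa<...>-derived container and `perm` is a valid reverse
// permutation of length storage.size():
//   auto token = storage.issue_frozen_token();                  // frozen count: 0 -> 1
//   storage.apply_reverse_permutation(std::move(perm), token);  // requires count == 1
//   assert(storage.is_sorted());
//   // While `token` (or any copy of it) is alive, erase() and
//   // acquire_owning_identifier() on `storage` throw; when the last token is
//   // destroyed the container is unfrozen again.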
+ // There are two reasonable changes that could be made here: + // - possibly for release builds, we could only mark unsorted if a + // reallocation *actually* happens + // - "sorted" could be defined to mean that indices have not been + // invalidated -- adding a new entry to the end of the container + // never invalidates indices + mark_as_unsorted_impl(); + // Append to all of the vectors + auto const old_size = size(); + for_each_vector( + [](auto const& tag, auto& vec, auto field_index, auto array_dim) { + using Tag = ::std::decay_t; + if constexpr (detail::has_default_value_v) { + vec.insert(vec.end(), array_dim, tag.default_value()); + } else { + vec.insert(vec.end(), array_dim, {}); + } + }); + // Important that this comes after the m_frozen_count check + owning_identifier index{static_cast(*this), old_size}; + // Update the pointer-to-row-number in m_indices so it refers to the + // same thing as index + m_indices.back() = static_cast(index); + return index; + } + + public: + /** + * @brief Get a non-owning identifier to the offset-th entry. + * + * This method should only be called if either: there is only one thread, + * or if a frozen token is held. + */ + [[nodiscard]] non_owning_identifier at(std::size_t offset) const { + return {const_cast(static_cast(this)), m_indices[offset]}; + } + + /** + * @brief Get the instance of the given tag type. + * @tparam Tag The tag type, which must be a member of the @c Tags... pack. + * @return Const reference to the given tag type instance. + * + * For example, if this is called on the @c Node::storage then @c Tag would be something like @c + * Node::field::Area, @c Node::field::RHS or @c Node::field::Voltage, which are empty types that + * serve to define the default values and types of those quantities. + * + * At the time of writing the other possibility is that this is called on an instance of @c + * Mechanism::storage, in which case @c Tag must (currently) be @c + * Mechanism::field::FloatingPoint. This stores the names and array dimensions of the RANGE + * variables in the mechanism (MOD file), which are only known at runtime. + */ + template + [[nodiscard]] constexpr Tag const& get_tag() const { + return std::get>(m_data).tag(); + } + + template + static constexpr bool has_tag_v = detail::type_in_pack_v; + + /** + * @brief Get the offset-th element of the column named by Tag. + * + * Because this is returning a single value, it is permitted even when the + * container is frozen. The container being frozen means that operations + * that would invalidate iterators/pointers are forbidden, not that actual + * data values cannot change. Note that if the container is not frozen then + * care should be taken in a multi-threaded environment, as `offset` could + * be invalidated by operations performed by other threads (that would fail + * if the container were frozen). + */ + template + [[nodiscard]] typename Tag::type& get(std::size_t offset) { + static_assert(has_tag_v); + static_assert(!detail::has_num_variables_v); + auto& field_data = std::get>(m_data); + if constexpr (detail::field_impl_v == detail::FieldImplementation::OptionalSingle) { + if (!field_data.active()) { + std::lock_guard _{m_mut}; + throw_error("get(offset) called for a disabled optional field"); + } + } + return field_data.data_ptrs()[0][offset]; + } + + /** + * @brief Get the offset-th element of the column named by Tag. 
+ * + * If the container is not frozen then care should be taken in a + * multi-threaded environment, as `offset` could be invalidated by + * operations performed by other threads (that would fail if the container + * were frozen). + */ + template + [[nodiscard]] typename Tag::type const& get(std::size_t offset) const { + static_assert(has_tag_v); + static_assert(!detail::has_num_variables_v); + auto const& field_data = std::get>(m_data); + if constexpr (detail::field_impl_v == detail::FieldImplementation::OptionalSingle) { + if (!field_data.active()) { + std::lock_guard _{m_mut}; + throw_error("get(offset) const called for a disabled optional field"); + } + } + return field_data.data_ptrs()[0][offset]; + } + + /** + * @brief Get a handle to the given element of the column named by Tag. + * + * This is not intended to be called from multi-threaded code, and might + * suffer from race conditions if the status of optional fields was being + * modified concurrently. + */ + template + [[nodiscard]] data_handle get_handle( + non_owning_identifier_without_container id, + int array_index = 0) const { + static_assert(has_tag_v); + static_assert(!detail::has_num_variables_v); + auto const& field_data = std::get>(m_data); + // If Tag is an optional field and that field is disabled, return a null handle. + if constexpr (detail::optional_v) { + if (!field_data.active()) { + return {}; + } + } + auto const array_dim = field_data.array_dims()[0]; + assert(array_dim > 0); + assert(array_index >= 0); + assert(array_index < array_dim); + return {std::move(id), field_data.data_ptrs(), array_dim, array_index}; + } + + /** + * @brief Get a handle to the given element of the field_index-th column named by Tag. + * + * This is not intended to be called from multi-threaded code. + */ + template + [[nodiscard]] data_handle get_field_instance_handle( + non_owning_identifier_without_container id, + int field_index, + int array_index = 0) const { + static_assert(has_tag_v); + static_assert(detail::has_num_variables_v); + auto const array_dim = std::get>(m_data).check_array_dim(field_index, + array_index); + return {std::move(id), + std::get>(m_data).data_ptrs() + field_index, + array_dim, + array_index}; + } + + /** + * @brief Get the offset-th element of the field_index-th instance of the column named by Tag. + * + * Put differently: + * - offset: index of a mechanism instance + * - field_index: index of a RANGE variable inside a mechanism + * - array_index: offset inside an array RANGE variable + * + * Because this is returning a single value, it is permitted even when the + * container is frozen. The container being frozen means that operations + * that would invalidate iterators/pointers are forbidden, not that actual + * data values cannot change. Note that if the container is not frozen then + * care should be taken in a multi-threaded environment, as `offset` could + * be invalidated by operations performed by other threads (that would fail + * if the container were frozen). + */ + template + typename Tag::type& get_field_instance(std::size_t offset, + int field_index, + int array_index = 0) { + auto const array_dim = std::get>(m_data).check_array_dim(field_index, + array_index); + return std::get>(m_data) + .data_ptrs()[field_index][offset * array_dim + array_index]; + } + + /** + * @brief Get the offset-th element of the field_index-th instance of the column named by Tag. 
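// Illustrative sketch, not part of the original change: how a mechanism RANGE variable
// element is reached through the accessors above, assuming `mech` is a
// Mechanism::storage instance and using the Mechanism::field::FloatingPoint tag
// mentioned in the documentation:
//   using Field = neuron::container::Mechanism::field::FloatingPoint;
//   // offset: mechanism instance, field_index: which RANGE variable,
//   // array_index: element within an array RANGE variable
//   double& value = mech.get_field_instance<Field>(offset, field_index, array_index);
//   // equivalent storage location: data_ptrs()[field_index][offset * array_dim + array_index]
// Scalar Node fields use the single-column accessor instead:
//   double& v = node_data.get<neuron::container::Node::field::Voltage>(offset);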
+ * + * If the container is not frozen then care should be taken in a + * multi-threaded environment, as `offset` could be invalidated by + * operations performed by other threads (that would fail if the container + * were frozen). + */ + template + typename Tag::type const& get_field_instance(std::size_t offset, + int field_index, + int array_index = 0) const { + auto const array_dim = std::get>(m_data).check_array_dim(field_index, + array_index); + return std::get>(m_data) + .data_ptrs()[field_index][offset * array_dim + array_index]; + } + + /** + * @brief Get the offset-th identifier. + * + * If the container is not frozen then care should be taken in a + * multi-threaded environment, as `offset` could be invalidated by + * operations performed by other threads (that would fail if the container + * were frozen). + */ + [[nodiscard]] non_owning_identifier_without_container get_identifier(std::size_t offset) const { + return m_indices.at(offset); + } + + /** + * @brief Return a permutation-stable handle if ptr is inside us. + * @todo Check const-correctness. Presumably a const version would return + * data_handle, which would hold a pointer-to-const for the + * container? + * + * This is not intended to be called from multi-threaded code if the + * container is not frozen. + */ + [[nodiscard]] neuron::container::generic_data_handle find_data_handle( + neuron::container::generic_data_handle input_handle) const { + bool done{false}; + neuron::container::generic_data_handle handle{}; + for_each_vector([this, &done, &handle, &input_handle]( + auto const& tag, auto const& vec, int field_index, int array_dim) { + using Tag = ::std::decay_t; + if constexpr (!std::is_same_v) { + using Data = typename Tag::type; + if (done) { + // Short circuit + return; + } + if (vec.empty()) { + // input_handle can't point into an empty vector + return; + } + if (!input_handle.holds()) { + // input_handle can't point into a vector of the wrong type + return; + } + auto* const ptr = input_handle.get(); + if (ptr < vec.data() || ptr >= std::next(vec.data(), vec.size())) { + return; + } + auto const physical_row = ptr - vec.data(); + assert(physical_row < vec.size()); + // This pointer seems to live inside this container. This is all a bit fragile. + int const array_index = physical_row % array_dim; + int const logical_row = physical_row / array_dim; + handle = neuron::container::data_handle{ + at(logical_row), + std::get>(m_data).data_ptrs() + std::max(field_index, 0), + array_dim, + array_index}; + assert(handle.refers_to_a_modern_data_structure()); + done = true; // generic_data_handle doesn't convert to bool + } + }); + return handle; + } + + /** + * @brief Query whether the given pointer-to-vector is the one associated to Tag. + * @todo Fix this for tag types with num_variables()? + * + * This is used so that one can ask a data_handle if it refers to a + * particular field in a particular container. It is not intended to be + * called from multi-threaded code if the container is not frozen. + */ + template + [[nodiscard]] bool is_storage_pointer(typename Tag::type const* ptr) const { + static_assert(has_tag_v); + static_assert(!detail::has_num_variables_v); + auto* const data_ptrs = std::get>(m_data).data_ptrs(); + if constexpr (detail::optional_v) { + // if the field is optional and disabled, data_ptrs is null + if (!data_ptrs) { + return false; + } + } + return ptr == data_ptrs[0]; + } + + /** + * @brief Check if `cont` refers to a field in this container. 
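// Illustrative worked example, not part of the original change: the pointer-to-handle
// translation above in numbers. If the input handle points into a column whose
// array_dim is 3, at physical offset 7 from vec.data(), then
//   array_index = 7 % 3 = 1 and logical_row = 7 / 3 = 2,
// so the returned data_handle refers to array element 1 of logical row 2. Because it is
// built from at(logical_row) and the cached data_ptrs(), it remains valid across later
// permutations and reallocations of the container.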
+ * + * This is not intended to be called from multi-threaded code if the + * container is not frozen. + */ + [[nodiscard]] std::unique_ptr find_container_info(void const* cont) const { + std::unique_ptr opt_info; + if (!cont) { + return opt_info; + } + for_each_vector([cont, + &opt_info, + this](auto const& tag, auto const& vec, int field_index, int array_dim) { + if (opt_info) { + // Short-circuit + return; + } + if (vec.data() != cont) { + // This isn't the right vector + return; + } + // We found the right container/tag combination! Populate the + // information struct. + auto impl_ptr = std::make_unique(); + auto& info = *impl_ptr; + info.m_container = static_cast(*this).name(); + info.m_field = detail::get_name(tag, field_index); + info.m_size = vec.size(); + assert(info.m_size % array_dim == 0); + opt_info = std::move(impl_ptr); + }); + return opt_info; + } + + /** + * @brief Get a pointer to a range of pointers that always point to the start of the contiguous + * storage. + */ + template + [[nodiscard]] typename Tag::type* const* get_data_ptrs() const { + return std::get>(m_data).data_ptrs(); + } + + /** + * @brief Get a pointer to an array holding the array dimensions of the fields associated with + * this tag. + */ + template + [[nodiscard]] int const* get_array_dims() const { + return std::get>(m_data).array_dims(); + } + + template + [[nodiscard]] int get_array_dims(int field_index) const { + assert(field_index < get_num_variables()); + return get_array_dims()[field_index]; + } + + template + [[nodiscard]] size_t get_num_variables() const { + return detail::get_num_variables(get_tag()); + } + + /** + * @brief Get a pointer to an array holding the prefix sum of array dimensions for this tag. + */ + template + [[nodiscard]] int const* get_array_dim_prefix_sums() const { + return std::get>(m_data).array_dim_prefix_sums(); + } + + template + [[nodiscard]] field_index translate_legacy_index(int legacy_index) const { + return std::get>(m_data).translate_legacy_index(legacy_index); + } + + /** + * @brief Query whether the field associated with the given tag is active. + */ + template + [[nodiscard]] bool field_active() const { + static_assert(detail::optional_v, + "field_active can only be called with tag types for optional fields"); + return std::get>(m_data).active(); + } + + /** + * @brief Enable/disable the fields associated with the given tags. + */ + template + void set_field_status(bool enabled) { + static_assert((detail::optional_v && ...), + "set_field_status can only be called with tag types for optional fields"); + auto const size = m_indices.size(); + (std::get>(m_data).set_active(enabled, size), ...); + } + + StorageMemoryUsage memory_usage() const { + detail::AccumulateMemoryUsage accumulator; + auto accumulated = for_each_vector(accumulator); + + return accumulated.usage(); + } + + private: + /** + * @brief Throw an exception with a pretty prefix. + * @note The *caller* is expected to hold m_mut when this is called. + */ + [[noreturn]] void throw_error(std::string_view message) const { + std::ostringstream oss; + oss << cxx_demangle(typeid(Storage).name()) << "[frozen_count=" << m_frozen_count + << ",sorted=" << std::boolalpha << m_sorted << "]: " << message; + throw std::runtime_error(std::move(oss).str()); + } + + /** + * @brief Mutex to protect m_frozen_count and m_sorted. + * + * The frozen tokens are used to detect, possibly concurrent, use of + * incompatible operations, such as sorting while erasing rows. 
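// Illustrative sketch, not part of the original change: expected use of the
// optional-field API above, with a hypothetical optional tag field::Diam on a
// Node-like storage `node_data`:
//   node_data.set_field_status<field::Diam>(true);   // enable/allocate the column
//   if (node_data.field_active<field::Diam>()) {
//       // get<field::Diam>(offset) and get_handle<field::Diam>(id) are usable here
//   }
//   node_data.set_field_status<field::Diam>(false);  // get_handle() now returns a null handle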
All + * operations that modify the structure of the container must happen + * sequentially. + * + * To prevent a different thread from obtaining a frozen token while this + * thread is modifying structure of the container, this thread should lock + * `m_mut`. Likewise, any thread obtaining a frozen token, should acquire a + * lock on `m_mut` to ensure that there are no concurrent operations that + * require sequential access to the container. + * + * By following this pattern the thread knows that the conditions related + * to sorted-ness and froze-ness of the container are valid for the entire + * duration of the operation (== member function of this class). + * + * Note, enforcing proper sequencing of operations is left to the calling + * code. However, this mutex enforces the required thread-safety to be able + * to detect invalid concurrent access patterns. + */ + mutable std::mutex m_mut{}; + + /** + * @brief Flag for issue_frozen_token(), mark_as_unsorted() and is_sorted(). + */ + bool m_sorted{false}; + + /** + * @brief Reference count for tokens guaranteeing the container is frozen. + */ + std::size_t m_frozen_count{}; + + /** + * @brief Pointers to identifiers that record the current physical row. + */ + std::vector m_indices{}; + + /** + * @brief Collection of data columns. + * + * If the tag implements a num_variables() method then it is duplicated a + * runtime-determined number of times and get_field_instance(i) returns + * the i-th element of the outer vector (of length num_variables()) of + * vectors. If is no num_variables() method then the outer vector can be + * omitted and get() returns a vector of values. + */ + std::tuple>...> m_data{}; + + /** + * @brief Callback that is invoked when the container becomes unsorted. + */ + std::function m_unsorted_callback{}; +}; + + +template +StorageMemoryUsage memory_usage(const soa& data) { + return data.memory_usage(); +} + +} // namespace neuron::container diff --git a/src/neuron/container/soa_identifier.hpp b/src/neuron/container/soa_identifier.hpp new file mode 100644 index 0000000000..0402e3a0aa --- /dev/null +++ b/src/neuron/container/soa_identifier.hpp @@ -0,0 +1,207 @@ +#pragma once +#include "backtrace_utils.h" +#include "memory_usage.hpp" +#include "neuron/container/non_owning_soa_identifier.hpp" + +#include +#include +#include +#include +#include +#include +#include + +namespace neuron::container { +/** + * @brief A non-owning permutation-stable identifier for a entry in a container. + * @tparam Storage The type of the referred-to container. This might be a type + * derived from @ref neuron::container::soa<...>, or a plain + * container like std::vector in the case of @ref + * neuron::container::data_handle. + * + * A (non-owning) handle to that row combines an instance of that class with an + * interface that is specific to Storage. non_owning_identifier wraps + * non_owning_identifier_without_container so as to provide the same interface + * as owning_identifier. + */ +template +struct non_owning_identifier: non_owning_identifier_without_container { + non_owning_identifier(Storage* storage, non_owning_identifier_without_container id) + : non_owning_identifier_without_container{std::move(id)} + , m_storage{storage} {} + + /** + * @brief Return a reference to the container in which this entry lives. + */ + Storage& underlying_storage() { + assert(m_storage); + return *m_storage; + } + + /** + * @brief Return a const reference to the container in which this entry lives. 
+ */ + Storage const& underlying_storage() const { + assert(m_storage); + return *m_storage; + } + + using storage_type = Storage; + + private: + Storage* m_storage; +}; + +namespace detail { +void notify_handle_dying(non_owning_identifier_without_container); +} + +/** + * @brief An owning permutation-stable identifier for a entry in a container. + * @tparam Storage The type of the referred-to container, which is expected to + * be a type derived from @ref neuron::container::soa<...> + * + * This identifier has owning semantics, meaning that when it is destroyed the + * corresponding entry in the container is deleted. + */ +template +struct owning_identifier { + /** + * @brief Create a non-null owning identifier by creating a new entry. + */ + owning_identifier(Storage& storage) + : owning_identifier(storage.acquire_owning_identifier()) {} + + owning_identifier(const owning_identifier&) = delete; + owning_identifier(owning_identifier&& other) noexcept + : m_ptr(std::move(other.m_ptr)) + , m_data_ptr(other.m_data_ptr) { + other.m_data_ptr = nullptr; + } + + owning_identifier& operator=(const owning_identifier&) = delete; + owning_identifier& operator=(owning_identifier&& other) { + destroy(); + + m_ptr = std::move(other.m_ptr); + + m_data_ptr = other.m_data_ptr; + other.m_data_ptr = nullptr; + + return *this; + } + + ~owning_identifier() { + destroy(); + } + + /** + * @brief Return a reference to the container in which this entry lives. + */ + Storage& underlying_storage() { + return *m_data_ptr; + } + + /** + * @brief Return a const reference to the container in which this entry lives. + */ + Storage const& underlying_storage() const { + return *m_data_ptr; + } + + [[nodiscard]] operator non_owning_identifier() const { + return {m_data_ptr, m_ptr}; + } + + [[nodiscard]] operator non_owning_identifier_without_container() const { + return static_cast>(*this); + } + + /** + * @brief What is the current row? + * + * The returned value is invalidated by any deletions from the underlying + * container, and by any permutations of the underlying container. + */ + [[nodiscard]] std::size_t current_row() const { + assert(m_ptr); + return m_ptr.current_row(); + } + + friend std::ostream& operator<<(std::ostream& os, owning_identifier const& oi) { + return os << "owning " << non_owning_identifier{oi}; + } + + using storage_type = Storage; + + private: + owning_identifier() = default; + + void destroy() { + if (!m_ptr) { + // Nothing to be done. + return; + } + + assert(m_data_ptr); + auto& data_container = *m_data_ptr; + + // We should still be a valid reference at this point. + assert(m_ptr); + assert(m_ptr.current_row() < data_container.size()); + + // Prove that the bookkeeping works. + assert(data_container.at(m_ptr.current_row()) == m_ptr); + + bool terminate = false; + // Delete the corresponding row from `data_container` + try { + data_container.erase(m_ptr.current_row()); + } catch (std::exception const& e) { + // Cannot throw from unique_ptr release/reset/destructor, this + // is the best we can do. Most likely what has happened is + // something like: + // auto const read_only_token = node_data.issue_frozen_token(); + // list_of_nodes.pop_back(); + // which tries to delete a row from a container in read-only mode. + std::cerr << "neuron::container::owning_identifier<" + << cxx_demangle(typeid(Storage).name()) + << "> destructor could not delete from the underlying storage: " << e.what() + << " [" << cxx_demangle(typeid(e).name()) + << "]. This is not recoverable, aborting." 
<< std::endl; + terminate = true; + } + if (terminate) { + std::terminate(); + } + // We don't know how many people know the pointer `p`, so write a sentinel + // value to it and transfer ownership "elsewhere". + m_ptr.set_current_row(invalid_row); + + // This is to provide compatibility with NEURON's old nrn_notify_when_double_freed and + // nrn_notify_when_void_freed methods. + detail::notify_handle_dying(m_ptr); + } + + + non_owning_identifier_without_container m_ptr{}; + Storage* m_data_ptr{}; + + template + friend struct soa; + void set_current_row(std::size_t new_row) { + assert(m_ptr); + m_ptr.set_current_row(new_row); + } + /** + * @brief Create a non-null owning identifier that owns the given row. + * + * This is used inside + * neuron::container::soa<...>::acquire_owning_identifier() and should not + * be used without great care. + */ + owning_identifier(Storage& storage, std::size_t row) + : m_ptr(row) + , m_data_ptr(&storage) {} +}; +} // namespace neuron::container diff --git a/src/neuron/container/view_utils.hpp b/src/neuron/container/view_utils.hpp new file mode 100644 index 0000000000..619d03186e --- /dev/null +++ b/src/neuron/container/view_utils.hpp @@ -0,0 +1,147 @@ +#pragma once +#include // std::size_t +#include // std::as_const, std::move + +namespace neuron::container { +/** + * @brief Base class for neuron::container::soa<...> handles. + * @tparam Identifier Identifier type used for this handle. This encodes both + * the referred-to type (Node, Mechanism, ...) and the + * ownership semantics (owning, non-owning). The instance of + * this type manages both the pointer-to-row and + * pointer-to-storage members of the handle. + * + * This provides some common methods that are neither specific to a particular data + * structure (Node, Mechanism, ...) nor specific to whether or not the handle + * has owning semantics or not. Methods that are specific to the data type (e.g. + * Node) belong in the interface template for that type (e.g. Node::interface). + * Methods that are specific to the owning/non-owning semantics belong in the + * generic templates non_owning_identifier and owning_identifier. + * + * The typical way these components fit together is: + * + * Node::identifier = non_owning_identifier + * Node::owning_identifier = owning_identifier + * Node::handle = Node::interface + * inherits from: handle_base + * Node::owning_handle = Node::interface + * inherits from handle_base + * + * Where the "identifier" types should be viewed as an implementation detail and + * the handle types as user-facing. + */ +template +struct handle_base { + /** + * @brief Construct a handle from an identifier. + */ + handle_base(Identifier identifier) + : m_identifier{std::move(identifier)} {} + + /** + * @brief Return current offset in the underlying storage where this object lives. + */ + [[nodiscard]] std::size_t current_row() const { + return m_identifier.current_row(); + } + + /** + * @brief Obtain a lightweight identifier of the current entry. + * + * The return type is essentially std::size_t* -- it does not contain a + * pointer/reference to the actual storage container. + */ + [[nodiscard]] non_owning_identifier_without_container id() const { + return m_identifier; + } + + /** + * @brief This is a workaround for id sometimes being a macro. + * @todo Remove those macros once and for all. + */ + [[nodiscard]] auto id_hack() const { + return id(); + } + + /** + * @brief Obtain a reference to the storage this handle refers to. 
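// Illustrative sketch, not part of the original change: lifetime semantics of the
// identifier types defined in soa_identifier.hpp above, for any soa<...>-derived
// `Storage` such as Node::storage:
//   auto const before = storage.size();
//   {
//       neuron::container::owning_identifier<Storage> row{storage};   // appends one row
//       neuron::container::non_owning_identifier<Storage> ref = row;  // weak reference
//       assert(storage.size() == before + 1);
//       assert(ref.current_row() == row.current_row());
//   }  // `row` destroyed here: the entry is erased again and storage.size() == before
// The handle_base template below wraps such an identifier and adds the generic
// current_row()/underlying_storage()/get<Tag>() plumbing shared by Node::handle and
// friends.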
+ */ + auto& underlying_storage() { + return m_identifier.underlying_storage(); + } + + /** + * @brief Obtain a const reference to the storage this handle refers to. + */ + auto const& underlying_storage() const { + return m_identifier.underlying_storage(); + } + + protected: + /** + * @brief Get a data_handle referring to the given field inside this handle. + * @tparam Tag Tag type of the field we want a data_handle to. + * + * This is used to implement methods like area_handle() and v_handle() in + * the interface templates. + * + * @todo const cleanup -- should there be a const version returning + * data_handle? + */ + template + [[nodiscard]] auto get_handle() { + return underlying_storage().template get_handle(this->id()); + } + + /** + * @brief Get a data_handle referring to the (runtime) field_index-th + * copy of a given (static) field. + * @tparam Tag Tag type of the set of fields the from which the + * field_index-th one is being requested. + + * @todo Const cleanup as above for the zero-argument version. + */ + template + [[nodiscard]] auto get_handle(int field_index, int array_offset = 0) { + return underlying_storage().template get_field_instance_handle(this->id(), + field_index, + array_offset); + } + template + [[nodiscard]] auto& get() { + return underlying_storage().template get(current_row()); + } + template + [[nodiscard]] auto const& get() const { + return underlying_storage().template get(current_row()); + } + /** + * @brief Get the instance of the given tag type from underlying storage. + * @tparam Tag The tag type. + * @return Const reference to the given tag type instance inside the ~global storage struct. + * + * This is a thin wrapper that calls @c soa::get_tag on the storage container (currently an + * instance of @c Node::storage or @c Mechanism::storage) that this handle refers to an instance + * inside. + */ + template + [[nodiscard]] constexpr Tag const& get_tag() const { + return underlying_storage().template get_tag(); + } + template + [[nodiscard]] auto& get(int field_index, int array_offset = 0) { + return underlying_storage().template get_field_instance(current_row(), + field_index, + array_offset); + } + template + [[nodiscard]] auto const& get(int field_index, int array_offset = 0) const { + return underlying_storage().template get_field_instance(current_row(), + field_index, + array_offset); + } + + private: + Identifier m_identifier; +}; +} // namespace neuron::container diff --git a/src/neuron/model_data.hpp b/src/neuron/model_data.hpp new file mode 100644 index 0000000000..9d71d80074 --- /dev/null +++ b/src/neuron/model_data.hpp @@ -0,0 +1,214 @@ +#pragma once +#include "neuron/cache/model_data.hpp" +#include "neuron/container/mechanism_data.hpp" +#include "neuron/container/memory_usage.hpp" +#include "neuron/container/node_data.hpp" +#include "neuron/model_data_fwd.hpp" + +#include +#include + +namespace neuron { +/** @brief Top-level structure. + * + * This level of indirection (as opposed to, for example, the Node data being a + * global variable in its own right) will give us more control over + * construction/destruction/... of different parts of the model data. + */ +struct Model { + Model(); // defined in container.cpp + ~Model(); // defined in container.cpp + + /** @brief Access the structure containing the data of all Nodes. + */ + container::Node::storage& node_data() { + return m_node_data; + } + + /** @brief Access the structure containing the data of all Nodes. 
+ */ + container::Node::storage const& node_data() const { + return m_node_data; + } + + /** @brief Apply a function to each non-null Mechanism. + */ + template + void apply_to_mechanisms(Callable const& callable) { + for (auto type = 0; type < m_mech_data.size(); ++type) { + if (!m_mech_data[type]) { + continue; + } + callable(*m_mech_data[type]); + } + } + + template + void apply_to_mechanisms(Callable const& callable) const { + for (auto type = 0; type < m_mech_data.size(); ++type) { + if (!m_mech_data[type]) { + continue; + } + callable(*m_mech_data[type]); + } + } + + /** + * @brief Create a structure to hold the data of a new Mechanism. + */ + template + container::Mechanism::storage& add_mechanism(int type, Args&&... args) { + if (type >= m_mech_data.capacity()) { + m_mech_data.reserve(2 * m_mech_data.capacity()); + } + if (type >= m_mech_data.size()) { + m_mech_data.resize(type + 1); + } + if (m_mech_data[type]) { + throw std::runtime_error("add_mechanism(" + std::to_string(type) + + "): storage already exists"); + } + m_mech_data[type] = + std::make_unique(type, std::forward(args)...); + set_unsorted_callback(*m_mech_data[type]); + return *m_mech_data[type]; + } + + /** @brief Destroy the structure holding the data of a particular mechanism. + */ + void delete_mechanism(int type) { + if (type >= m_mech_data.size() || !m_mech_data[type]) { + return; + } + if (auto const size = m_mech_data[type]->size(); size > 0) { + throw std::runtime_error("delete_mechanism(" + std::to_string(type) + + "): refusing to delete storage that still hosts " + + std::to_string(size) + " instances"); + } + m_mech_data[type].reset(); + } + + /** @brief Get the structure holding the data of a particular Mechanism. + */ + container::Mechanism::storage& mechanism_data(int type) { + return mechanism_data_impl(type); + } + + /** @brief Get the structure holding the data of a particular Mechanism. + */ + container::Mechanism::storage const& mechanism_data(int type) const { + return mechanism_data_impl(type); + } + + [[nodiscard]] std::size_t mechanism_storage_size() const { + return m_mech_data.size(); + } + + [[nodiscard]] bool is_valid_mechanism(int type) const { + return 0 <= type && type < mechanism_storage_size() && m_mech_data[type]; + } + + /** + * @brief Find some metadata about the given container. + * + * The argument type will typically be a T* that contains the result of calling .data() on some + * vector in the global model data structure. + */ + [[nodiscard]] std::unique_ptr find_container_info( + void const* cont) const; + + void shrink_to_fit() { + m_node_data.shrink_to_fit(); + apply_to_mechanisms([](auto& mech_data) { mech_data.shrink_to_fit(); }); + } + + private: + container::Mechanism::storage& mechanism_data_impl(int type) const { + if (0 <= type && type >= m_mech_data.size()) { + throw std::runtime_error("mechanism_data(" + std::to_string(type) + + "): type out of range"); + } + const auto& data_ptr = m_mech_data[type]; + if (!data_ptr) { + throw std::runtime_error("mechanism_data(" + std::to_string(type) + + "): data for type was null"); + } + + return *data_ptr; + } + + + void set_unsorted_callback(container::Mechanism::storage& mech_data); + + /** @brief One structure for all Nodes. + */ + container::Node::storage m_node_data; + + /** @brief Storage for mechanism-specific data. + * + * Each element is allocated on the heap so that reallocating this vector + * does not invalidate pointers to container::Mechanism::storage. 
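// Illustrative sketch, not part of the original change: typical access patterns for the
// Model structure above, using the neuron::model() accessor defined later in this
// header; `type` stands for a hypothetical mechanism type index.
//   auto& m = neuron::model();
//   container::Node::storage& nodes = m.node_data();
//   if (m.is_valid_mechanism(type)) {
//       container::Mechanism::storage& mech = m.mechanism_data(type);
//   }
//   m.apply_to_mechanisms([](auto& mech_data) {
//       // invoked once for every non-null Mechanism::storage
//   });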
+ */ + std::vector> m_mech_data{}; + + /** + * @brief Backing storage for defer_delete helper. + */ + std::vector m_ptrs_for_deferred_deletion{}; +}; + +struct model_sorted_token { + model_sorted_token(cache::Model& cache, + container::Node::storage::frozen_token_type node_data_token_) + : node_data_token{std::move(node_data_token_)} + , m_cache{cache} {} + [[nodiscard]] cache::Model& cache() { + return m_cache; + } + [[nodiscard]] cache::Model const& cache() const { + return m_cache; + } + void set_cache(cache::Model&& cache) { + m_cache = cache; + } + [[nodiscard]] cache::Mechanism& mech_cache(std::size_t i) { + return cache().mechanism.at(i); + } + [[nodiscard]] cache::Mechanism const& mech_cache(std::size_t i) const { + return cache().mechanism.at(i); + } + [[nodiscard]] cache::Thread& thread_cache(std::size_t i) { + return cache().thread.at(i); + } + [[nodiscard]] cache::Thread const& thread_cache(std::size_t i) const { + return cache().thread.at(i); + } + container::Node::storage::frozen_token_type node_data_token; + std::vector mech_data_tokens{}; + + private: + std::reference_wrapper m_cache; +}; + +namespace detail { +// Defining this inline seems to lead to duplicate copies when we dlopen +// libnrnmech.so , so we define it explicitly in container.cpp as part of +// libnrniv.so +extern Model model_data; +} // namespace detail + +/** @brief Access the global Model instance. + * + * Just to be going on with. Needs more thought about who actually holds/owns + * the structures that own the SOA data. Could use a static local if we need to + * control/defer when this is constructed. + */ +inline Model& model() { + return detail::model_data; +} + +namespace container { +neuron::container::ModelMemoryUsage memory_usage(const Model& model); +} + +} // namespace neuron diff --git a/src/neuron/model_data_fwd.hpp b/src/neuron/model_data_fwd.hpp new file mode 100644 index 0000000000..b724dcece1 --- /dev/null +++ b/src/neuron/model_data_fwd.hpp @@ -0,0 +1,38 @@ +#pragma once +#include +#include + +namespace neuron { +struct Model; +inline Model& model(); +namespace container { +template +struct data_handle; +namespace utils { +template +[[nodiscard]] data_handle find_data_handle(T* ptr); + +/** + * @brief Interface for obtaining information about model data containers. + * + * This indirection via an abstract interface helps reduce the ABI surface between translated MOD + * file code and the rest of the library. + */ +struct storage_info { + virtual ~storage_info() = default; + virtual std::string_view container() const = 0; + virtual std::string_view field() const = 0; + virtual std::size_t size() const = 0; +}; + +/** @brief Try and find a helpful name for a container. + * + * In practice this can be expected to be work for the structures that can be + * discovered from neuron::model(), and not for anything else. If no + * information about the container can be found the returned std::optional will + * not contain a value. 
+ */ +[[nodiscard]] std::unique_ptr find_container_info(void const*); +} // namespace utils +} // namespace container +} // namespace neuron diff --git a/src/neuronmusic/nrnmusic.cpp b/src/neuronmusic/nrnmusic.cpp index d91f10bc18..e3fa55cda2 100644 --- a/src/neuronmusic/nrnmusic.cpp +++ b/src/neuronmusic/nrnmusic.cpp @@ -9,6 +9,7 @@ #include "netcvode.h" #include "multicore.h" #include "nrnmusic.h" +#include "nrnpy.h" #include "netpar.h" #include @@ -22,9 +23,6 @@ void nrnmusic_injectlist(void* vp, double tt); void nrnmusic_inject(void* port, int gindex, double tt); void nrnmusic_spikehandle(void* vport, double tt, int gindex); -extern Object* (*nrnpy_p_po2ho)(PyObject*); -extern PyObject* (*nrnpy_p_ho2po)(Object*); -extern Object* hoc_new_object(Symbol*, void*); extern NetCvode* net_cvode_instance; MUSIC::Setup* nrnmusic_setup; @@ -169,8 +167,8 @@ NRNMUSIC::EventInputPort* NRNMUSIC::publishEventInput(std::string id) { PyObject* NRNMUSIC::EventInputPort::index2target(int gi, PyObject* ptarget) { // analogous to pc.gid_connect - assert(nrnpy_p_po2ho); - Object* target = (*nrnpy_p_po2ho)(ptarget); + assert(neuron::python::methods.po2ho); + Object* target = neuron::python::methods.po2ho(ptarget); if (!is_point_process(target)) { hoc_execerror("target arg must be a Point_process", 0); } @@ -182,7 +180,7 @@ PyObject* NRNMUSIC::EventInputPort::index2target(int gi, PyObject* ptarget) { } // nrn_assert (gi_table.count(gi) == 0); if (gi_table->count(gi) == 0) { - ps = new PreSyn(NULL, NULL, NULL); + ps = new PreSyn({}, {}, {}); net_cvode_instance->psl_append(ps); (*gi_table)[gi] = ps; ps->gid_ = -2; @@ -193,8 +191,8 @@ PyObject* NRNMUSIC::EventInputPort::index2target(int gi, PyObject* ptarget) { NetCon* nc = new NetCon(ps, target); Object* o = hoc_new_object(nrn_netcon_sym(), nc); nc->obj_ = o; - assert(nrnpy_p_ho2po); - PyObject* po = (*nrnpy_p_ho2po)(o); + assert(neuron::python::methods.ho2po); + PyObject* po = neuron::python::methods.ho2po(o); // printf("index2target %d %s\n", gi, hoc_object_name(target)); return po; } diff --git a/src/nmodl/carbonnmodl.h b/src/nmodl/carbonnmodl.h deleted file mode 100644 index acc8a1ffc5..0000000000 --- a/src/nmodl/carbonnmodl.h +++ /dev/null @@ -1,7 +0,0 @@ -#include -#include -#include -#include -#pragma once off -#define SYSV 1 -#define MAC 1 diff --git a/src/nmodl/deriv.cpp b/src/nmodl/deriv.cpp index b3d3273fa4..4071ef0e63 100644 --- a/src/nmodl/deriv.cpp +++ b/src/nmodl/deriv.cpp @@ -50,7 +50,7 @@ void solv_diffeq(Item* qsol, if (method && strcmp(method->name, "cnexp") == 0) { Sprintf(buf, " %s();\n", fun->name); replacstr(qsol, buf); - Sprintf(buf, " %s(_p, _ppvar, _thread, _nt);\n", fun->name); + Sprintf(buf, " %s(_threadargs_);\n", fun->name); vectorize_substitute(qsol, buf); return; } @@ -124,7 +124,8 @@ void solv_diffeq(Item* qsol, Strcpy(deriv2_advance, ""); } Sprintf(buf, - "%s %s%s(_ninits, %d, _slist%d, _dlist%d, _p, &%s, %s, %s, &_temp%d%s);\n%s", + "%s %s%s(_ninits, %d, _slist%d, _dlist%d, neuron::scopmath::row_view{_ml, _iml}, " + "&%s, %s, %s, &_temp%d%s);\n%s", deriv1_advance, ssprefix, method->name, @@ -138,9 +139,11 @@ void solv_diffeq(Item* qsol, maxerr_str, deriv2_advance); } else { + // examples of ssprefix + method->name: sparse, _ss_sparse Sprintf(buf, - "%s%s(&_sparseobj%d, %d, _slist%d, _dlist%d, _p, &%s, %s, %s\ -,&_coef%d, _linmat%d);\n", + "%s%s(&_sparseobj%d, %d, _slist%d, _dlist%d, " + "neuron::scopmath::row_view{_ml, _iml}, &%s, " + "%s, %s, &_coef%d, _linmat%d);\n", ssprefix, method->name, listnum, @@ -155,8 +158,10 @@ void 
solv_diffeq(Item* qsol, } replacstr(qsol, buf); if (method->subtype & DERF) { /* derivimplicit */ + // derivimplicit_thread Sprintf(buf, - "%s %s%s_thread(%d, _slist%d, _dlist%d, _p, %s, _p, _ppvar, _thread, _nt);\n%s", + "%s %s%s_thread(%d, _slist%d, _dlist%d, neuron::scopmath::row_view{_ml, _iml}, %s, " + "_ml, _iml, _ppvar, _thread, _nt);\n%s", deriv1_advance, ssprefix, method->name, @@ -168,20 +173,21 @@ void solv_diffeq(Item* qsol, vectorize_substitute(qsol, buf); } else { /* kinetic */ if (vectorize) { - Sprintf( - buf, - "%s%s_thread(&(_thread[_spth%d].literal_value()), %d, _slist%d, _dlist%d, _p, &%s, %s, %s\ -, _linmat%d, _p, _ppvar, _thread, _nt);\n", - ssprefix, - method->name, - listnum, - numeqn, - listnum, - listnum, - indepsym->name, - dindepname, - fun->name, - listnum); + // sparse_thread, _ss_sparse_thread + Sprintf(buf, + "%s%s_thread(&(_thread[_spth%d].literal_value()), %d, " + "_slist%d, _dlist%d, neuron::scopmath::row_view{_ml, _iml}, " + "&%s, %s, %s, _linmat%d, _threadargs_);\n", + ssprefix, + method->name, + listnum, + numeqn, + listnum, + listnum, + indepsym->name, + dindepname, + fun->name, + listnum); vectorize_substitute(qsol, buf); } } @@ -190,7 +196,7 @@ void solv_diffeq(Item* qsol, " if (secondorder) {\n" " int _i;\n" " for (_i = 0; _i < %d; ++_i) {\n" - " _p[_slist%d[_i]] += dt*_p[_dlist%d[_i]];\n" + " _ml->data(_iml, _slist%d[_i]) += dt*_ml->data(_iml, _dlist%d[_i]);\n" " }}\n", numeqn, listnum, @@ -465,14 +471,15 @@ void massagederiv(Item* q1, Item* q2, Item* q3, Item* q4) { Lappendsym(massage_list_, SYM(q2)); /* all this junk is still in the intoken list */ - Sprintf(buf, "static int %s(_threadargsproto_);\n", SYM(q2)->name); + Sprintf(buf, "static int %s(_internalthreadargsproto_);\n", SYM(q2)->name); Linsertstr(procfunc, buf); replacstr(q1, "\nstatic int"); q = insertstr(q3, "() {_reset=0;\n"); derfun = SYM(q2); vectorize_substitute(q, - "(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {int " - "_reset=0; int error = 0;\n"); + "(_internalthreadargsproto_) {\n" + " int _reset=0;\n" + " int error = 0;\n"); if (derfun->subtype & DERF && derfun->u.i) { diag("DERIVATIVE merging not implemented", (char*) 0); @@ -517,24 +524,24 @@ is not allowed on the left hand side."); if (s->subtype & ARRAY) { int dim = s->araydim; Sprintf(buf, - "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = %s_columnindex + _i;", + "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = {%s_columnindex, _i};", dim, numlist, count, state->name); Lappendstr(initlist, buf); Sprintf(buf, - " _dlist%d[%d+_i] = %s_columnindex + _i;}\n", + " _dlist%d[%d+_i] = {%s_columnindex, _i};}\n", numlist, count, name_forderiv(indx + 1)); Lappendstr(initlist, buf); count += dim; } else { - Sprintf(buf, "_slist%d[%d] = %s_columnindex;", numlist, count, state->name); + Sprintf(buf, "_slist%d[%d] = {%s_columnindex, 0};", numlist, count, state->name); Lappendstr(initlist, buf); Sprintf(buf, - " _dlist%d[%d] = %s_columnindex;\n", + " _dlist%d[%d] = {%s_columnindex, 0};\n", numlist, count, name_forderiv(indx + 1)); @@ -547,7 +554,12 @@ is not allowed on the left hand side."); diag("DERIVATIVE contains no derivatives", (char*) 0); } derfun->used = count; - Sprintf(buf, "static int _slist%d[%d], _dlist%d[%d];\n", numlist, count, numlist, count); + Sprintf(buf, + "static neuron::container::field_index _slist%d[%d], _dlist%d[%d];\n", + numlist, + count, + numlist, + count); Linsertstr(procfunc, buf); Lappendstr(procfunc, "\n/*CVODE*/\n"); @@ -556,9 +568,7 @@ is not allowed on the left hand side."); { Item* qq = procfunc->prev; 
copyitems(q1->next, q4, procfunc->prev); - vectorize_substitute( - qq->next, - "(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {int _reset = 0;"); + vectorize_substitute(qq->next, "(_internalthreadargsproto_) {int _reset = 0;"); vectorize_scan_for_func(qq->next, procfunc); } lappendstr(procfunc, "return _reset;\n}\n"); @@ -569,8 +579,7 @@ is not allowed on the left hand side."); Item* qextra = q1->next->next->next->next; Sprintf(buf, "static int _ode_matsol%d", numlist); Lappendstr(procfunc, buf); - vectorize_substitute(lappendstr(procfunc, "() {\n"), - "(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {\n"); + vectorize_substitute(lappendstr(procfunc, "() {\n"), "(_internalthreadargsproto_) {\n"); qq = procfunc->next; cvode_cnexp_possible = 1; ITERATE(q, cvode_diffeq_list) { @@ -657,17 +666,19 @@ if (_deriv%d_advance) {\n", q = insertsym(q4, sp); eqnqueue(q); Sprintf(buf, - "_p[_dlist%d[_id]] - (_p[_slist%d[_id]] - _savstate%d[_id])/d%s;\n", + "_ml->data(_iml, _dlist%d[_id]) - (_ml->data(_iml, _slist%d[_id]) - " + "_savstate%d[_id])/d%s;\n", numlist, numlist, numlist, indepsym->name); Insertstr(q4, buf); - Sprintf(buf, - "}else{\n_dlist%d[++_counte] = _p[_slist%d[_id]] - _savstate%d[_id];}}}\n", - numlist + 1, - numlist, - numlist); + Sprintf( + buf, + "}else{\n_dlist%d[++_counte] = _ml->data(_iml, _slist%d[_id]) - _savstate%d[_id];}}}\n", + numlist + 1, + numlist, + numlist); Insertstr(q4, buf); } else { ITERATE(q, deriv_state_list) { @@ -680,7 +691,9 @@ if (_deriv%d_advance) {\n", q = mixed_eqns(q2, q3, q4); /* numlist now incremented */ if (deriv_implicit) { Sprintf(buf, - "{int _id; for(_id=0; _id < %d; _id++) { _savstate%d[_id] = _p[_slist%d[_id]];}}\n", + "for(int _id=0; _id < %d; _id++) {\n" + " _savstate%d[_id] = _ml->data(_iml, _slist%d[_id]);\n" + "}\n", count, derfun->u.i, derfun->u.i); @@ -829,7 +842,7 @@ each has been saved. So we know if the translation is possible. 
int cvode_cnexp_success(Item* q1, Item* q2) { Item *q, *q3, *q4, *qeq; if (cvode_cnexp_possible) { - /* convert Method to nil and the type of the block to + /* convert Method to nullptr and the type of the block to PROCEDURE */ SYM(cvode_cnexp_solve->next)->name = stralloc("cnexp", 0); remove(deriv_imp_list->next); @@ -887,8 +900,7 @@ int cvode_cnexp_success(Item* q1, Item* q2) { Item* qq = procfunc->prev; copyitems(q1, q2, procfunc->prev); /* more or less redundant with massagederiv */ - vectorize_substitute(qq->next->next, - "(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {"); + vectorize_substitute(qq->next->next, "(_internalthreadargsproto_) {"); vectorize_scan_for_func(qq->next->next, procfunc); } lappendstr(procfunc, " return 0;\n}\n"); diff --git a/src/nmodl/init.cpp b/src/nmodl/init.cpp index 35b0cf6715..b5c89c3c33 100644 --- a/src/nmodl/init.cpp +++ b/src/nmodl/init.cpp @@ -91,6 +91,7 @@ static struct { /* Keywords */ {"MUTEXLOCK", NRNMUTEXLOCK}, {"MUTEXUNLOCK", NRNMUTEXUNLOCK}, {"REPRESENTS", REPRESENTS}, + {"RANDOM", RANDOM}, {0, 0}}; /* @@ -153,6 +154,17 @@ static const char* extdef5[] = {/* the extdef names that are not threadsafe */ #include "extdef5.h" 0}; +/* random to nrnran123 functions */ +std::map extdef_rand = { + {"random_setseq", "nrnran123_setseq"}, + {"random_setids", "nrnran123_setids"}, + {"random_uniform", "nrnran123_uniform"}, + {"random_negexp", "nrnran123_negexp"}, + {"random_normal", "nrnran123_normal"}, + {"random_ipick", "nrnran123_ipick"}, + {"random_dpick", "nrnran123_dblpick"}, +}; + List *constructorfunc, *destructorfunc; void init() { @@ -197,6 +209,10 @@ void init() { assert(s); s->subtype |= EXTDEF5; } + for (auto it: extdef_rand) { + s = install(it.first.c_str(), NAME); + s->subtype = EXTDEF_RANDOM; + } intoken = newlist(); initfunc = newlist(); modelfunc = newlist(); diff --git a/src/nmodl/io.cpp b/src/nmodl/io.cpp index 741d7cbbfa..88110a1b36 100644 --- a/src/nmodl/io.cpp +++ b/src/nmodl/io.cpp @@ -8,9 +8,6 @@ #include #include "modl.h" #include -#if MAC && TARGET_API_MAC_CARBON -#include -#endif #undef METHOD #include "parse1.hpp" #if defined(_WIN32) @@ -252,10 +249,6 @@ void diag(const char* s1, const char* s2) { } } Fprintf(stderr, "\n"); -#if MAC && TARGET_API_MAC_CARBON - SIOUXSettings.autocloseonquit = true; - RunApplicationEventLoop(); -#endif exit(1); } @@ -452,49 +445,3 @@ static int file_stack_empty() { } return (filestack->next == filestack); } - -/* adapted from : gist@jonathonreinhart/mkdir_p.c */ -int mkdir_p(const char* path) { - const size_t len = strlen(path); - char mypath[PATH_MAX]; - char* p; - - errno = 0; - - /* copy string so its mutable */ - if (len > sizeof(mypath) - 1) { - fprintf(stderr, "Output directory path too long\n"); - return -1; - } - - strcpy(mypath, path); - - /* iterate the string */ - for (p = mypath + 1; *p; p++) { - if (*p == '/') { - /* temporarily truncate */ - *p = '\0'; - -#if defined(_WIN32) - if (_mkdir(mypath) != 0) { -#else - if (mkdir(mypath, S_IRWXU) != 0) { -#endif - if (errno != EEXIST) - return -1; - } - *p = '/'; - } - } - -#if defined(_WIN32) - if (_mkdir(mypath) != 0) { -#else - if (mkdir(mypath, S_IRWXU) != 0) { -#endif - if (errno != EEXIST) - return -1; - } - - return 0; -} diff --git a/src/nmodl/kinetic.cpp b/src/nmodl/kinetic.cpp index 7b4da39090..b3f457355d 100644 --- a/src/nmodl/kinetic.cpp +++ b/src/nmodl/kinetic.cpp @@ -280,16 +280,15 @@ void massagekinetic(Item* q1, Item* q2, Item* q3, Item* q4) /*KINETIC NAME stmtl fun->u.i = numlist; 
vectorize_substitute(linsertstr(procfunc, "();\n"), - "(void* _so, double* _rhs, _threadargsproto_);\n"); + "(void* _so, double* _rhs, _internalthreadargsproto_);\n"); Sprintf(buf, "static int %s", SYM(q2)->name); linsertstr(procfunc, buf); + replacstr(q1, "\nstatic int"); qv = insertstr(q3, "()\n"); if (vectorize) { kin_vect1(q1, q2, q4); - vectorize_substitute(qv, - "(void* _so, double* _rhs, double* _p, Datum* _ppvar, Datum* _thread, " - "NrnThread* _nt)\n"); + vectorize_substitute(qv, "(void* _so, double* _rhs, _internalthreadargsproto_)\n"); } qv = insertstr(q3, "{_reset=0;\n"); Sprintf(buf, "{int _reset=0;\n"); @@ -375,7 +374,8 @@ void massagekinetic(Item* q1, Item* q2, Item* q3, Item* q4) /*KINETIC NAME stmtl } fun->used = count; Sprintf(buf, - "static int _slist%d[%d], _dlist%d[%d]; static double *_temp%d;\n", + "static neuron::container::field_index _slist%d[%d], _dlist%d[%d]; static double " + "*_temp%d;\n", numlist, count, numlist, @@ -474,7 +474,10 @@ void kinetic_intmethod(Symbol* fun, const char* meth) { Fprintf(stderr, "%s method ignores conservation\n", meth); } ncons = 0; - Sprintf(buf, "{int _i; for(_i=0;_i<%d;_i++) _p[_dlist%d[_i]] = 0.0;}\n", nstate, fun->u.i); + Sprintf(buf, + "{int _i; for(_i=0;_i<%d;_i++) _ml->data(_iml, _dlist%d[_i]) = 0.0;}\n", + nstate, + fun->u.i); /*goes near beginning of block*/ #if Glass fixrlst(rlst); @@ -487,7 +490,7 @@ void kinetic_intmethod(Symbol* fun, const char* meth) { if (rlst->capacity[i][0]) { if (rlst->symorder[i]->subtype & ARRAY) { Sprintf(buf, - "for (_i=0; _i < %d; _i++) { _p[_dlist%d[_i + %d]] /= %s;}\n", + "for (_i=0; _i < %d; _i++) { _ml->data(_iml, _dlist%d[_i + %d]) /= %s;}\n", rlst->symorder[i]->araydim, fun->u.i, rlst->symorder[i]->varnum, @@ -495,7 +498,7 @@ void kinetic_intmethod(Symbol* fun, const char* meth) { Insertstr(rlst->endbrace, buf); } else { Sprintf(buf, - "_p[_dlist%d[%d]] /= %s;\n", + "_ml->data(_iml, _dlist%d[%d]) /= %s;\n", fun->u.i, rlst->symorder[i]->varnum, rlst->capacity[i]); @@ -723,7 +726,7 @@ for(_i=%d;_i<%d;_i++){\n", NOT_CVODE_FLAG { Sprintf(buf, "\ - _RHS%d(_i) = -_dt1*(_p[_slist%d[_i]] - _p[_dlist%d[_i]]);\n\ + _RHS%d(_i) = -_dt1*(_ml->data(_iml, _slist%d[_i]) - _ml->data(_iml, _dlist%d[_i]));\n\ _MATELM%d(_i, _i) = _dt1;\n", fun->u.i, fun->u.i, @@ -734,7 +737,7 @@ for(_i=%d;_i<%d;_i++){\n", CVODE_FLAG { Sprintf(buf, "\ - _RHS%d(_i) = _dt1*(_p[_dlist%d[_i]]);\n\ + _RHS%d(_i) = _dt1*(_ml->data(_iml, _dlist%d[_i]));\n\ _MATELM%d(_i, _i) = _dt1;\n", fun->u.i, fun->u.i, @@ -1085,19 +1088,19 @@ void kinlist(Symbol* fun, Rlist* rlst) { if (s->subtype & ARRAY) { int dim = s->araydim; Sprintf(buf, - "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = %s_columnindex + _i;", + "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = {%s_columnindex, _i};", dim, fun->u.i, s->varnum, s->name); qv = lappendstr(initlist, buf); Sprintf( - buf, " _dlist%d[%d+_i] = D%s_columnindex + _i;}\n", fun->u.i, s->varnum, s->name); + buf, " _dlist%d[%d+_i] = {D%s_columnindex, _i};}\n", fun->u.i, s->varnum, s->name); qv = lappendstr(initlist, buf); } else { - Sprintf(buf, "_slist%d[%d] = %s_columnindex;", fun->u.i, s->varnum, s->name); + Sprintf(buf, "_slist%d[%d] = {%s_columnindex, 0};", fun->u.i, s->varnum, s->name); qv = lappendstr(initlist, buf); - Sprintf(buf, " _dlist%d[%d] = D%s_columnindex;\n", fun->u.i, s->varnum, s->name); + Sprintf(buf, " _dlist%d[%d] = {D%s_columnindex, 0};\n", fun->u.i, s->varnum, s->name); qv = lappendstr(initlist, buf); } s->used = 0; @@ -1362,8 +1365,9 @@ void cvode_kinetic(Item* qsol, Symbol* fun, int numeqn, int 
listnum) { Sprintf(buf, "static int _ode_spec%d() {_reset=0;{\n", fun->u.i); Lappendstr(procfunc, buf); Sprintf(buf, - "static int _ode_spec%d(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) " - "{int _reset=0;{\n", + "static int _ode_spec%d(_internalthreadargsproto_) {\n" + " int _reset=0;\n" + " {\n", fun->u.i); vectorize_substitute(procfunc->prev, buf); copyitems(cvode_sbegin, cvode_send, procfunc->prev); @@ -1372,8 +1376,8 @@ void cvode_kinetic(Item* qsol, Symbol* fun, int numeqn, int listnum) { Sprintf(buf, "static int _ode_matsol%d() {_reset=0;{\n", fun->u.i); Lappendstr(procfunc, buf); Sprintf(buf, - "static int _ode_matsol%d(void* _so, double* _rhs, double* _p, Datum* _ppvar, Datum* " - "_thread, NrnThread* _nt) {int _reset=0;{\n", + "static int _ode_matsol%d(void* _so, double* _rhs, _internalthreadargsproto_) {int " + "_reset=0;{\n", fun->u.i); vectorize_substitute(procfunc->prev, buf); cvode_flag = 1; diff --git a/src/nmodl/macnmodl.h b/src/nmodl/macnmodl.h deleted file mode 100644 index 8b7d99a986..0000000000 --- a/src/nmodl/macnmodl.h +++ /dev/null @@ -1,9 +0,0 @@ -#pragma precompile_target "nmodl_def.h" -#define MSL_USE_PRECOMPILED_HEADERS 1 -#include -#include -#include -#include -#pragma once off -#define SYSV 1 -#define MAC 1 diff --git a/src/nmodl/modl.cpp b/src/nmodl/modl.cpp index fcb33d2805..9523c7a2a1 100644 --- a/src/nmodl/modl.cpp +++ b/src/nmodl/modl.cpp @@ -28,17 +28,16 @@ /* the first arg may also be a file.mod (containing the .mod suffix)*/ -#include +#include -#if MAC -#include -#endif -#if HAVE_STDLIB_H #include -#endif #include "modl.h" #include +#include +#include + +namespace fs = std::filesystem; FILE *fin, /* input file descriptor for filename.mod */ /* or file2 from the second argument */ @@ -60,11 +59,10 @@ int nmodl_text = 1; List* filetxtlist; extern int yyparse(); -extern int mkdir_p(const char*); extern int vectorize; extern int numlist; -extern char* nmodl_version_; +extern const char* nmodl_version_; extern int usederivstatearray; /*SUPPRESS 763*/ @@ -72,58 +70,22 @@ static const char* pgm_name = "nmodl"; extern const char* RCS_version; extern const char* RCS_date; -static struct option long_options[] = {{"version", no_argument, 0, 'v'}, - {"help", no_argument, 0, 'h'}, - {"outdir", required_argument, 0, 'o'}, - {0, 0, 0, 0}}; - -static void show_options(char** argv) { - fprintf(stderr, "Source to source compiler from NMODL to C++\n"); - fprintf(stderr, "Usage: %s [options] Inputfile\n", argv[0]); - fprintf(stderr, "Options:\n"); - fprintf(stderr, - "\t-o | --outdir directory where output files will be written\n"); - fprintf(stderr, "\t-h | --help print this message\n"); - fprintf(stderr, "\t-v | --version print version number\n"); -} - -static void openfiles(char* given_filename, char* output_dir); +static void openfiles(const char* given_filename, const char* output_dir); int main(int argc, char** argv) { - int option = -1; - int option_index = 0; - char* output_dir = NULL; - - if (argc < 2) { - show_options(argv); - exit(1); - } - - while ((option = getopt_long(argc, argv, ":vho:", long_options, &option_index)) != -1) { - switch (option) { - case 'v': - printf("%s\n", nmodl_version_); - exit(0); + std::string output_dir{}; + std::string inputfile{}; - case 'o': - output_dir = strdup(optarg); - break; + CLI::App app{"Source to source compiler from NMODL to C++"}; + app.add_option("-o,--outdir", output_dir, "directory where output files will be written"); + app.set_version_flag("-v,--version", nmodl_version_, "print version number"); + 
app.set_help_flag("-h,--help", "print this message"); + app.add_option("Inputfile", inputfile)->required(); + app.allow_extras(); - case 'h': - show_options(argv); - exit(0); + CLI11_PARSE(app, argc, argv); - case ':': - fprintf(stderr, "%s: option '-%c' requires an argument\n", argv[0], optopt); - exit(-1); - - case '?': - default: - fprintf(stderr, "%s: invalid option `-%c' \n", argv[0], optopt); - exit(-1); - } - } - if ((argc - optind) > 1) { + if (!app.remaining().empty()) { fprintf(stderr, "%s: Warning several input files specified on command line but only one will be " "processed\n", @@ -132,17 +94,12 @@ int main(int argc, char** argv) { filetxtlist = newlist(); -#if MAC - SIOUXSettings.asktosaveonclose = false; - Fprintf(stderr, "%s %s %s\n", pgm_name, RCS_version, RCS_date); -#endif - init(); /* keywords into symbol table, initialize * lists, etc. */ - std::strncpy(finname, argv[optind], sizeof(finname)); - - openfiles(finname, output_dir); /* .mrg else .mod, .var, .c */ + std::strcpy(finname, inputfile.c_str()); + openfiles(inputfile.c_str(), + output_dir.empty() ? nullptr : output_dir.c_str()); /* .mrg else .mod, .var, .c */ IGNORE(yyparse()); /* * At this point all blocks are fully processed except the kinetic @@ -252,16 +209,12 @@ int main(int argc, char** argv) { yyunput(ilint); yyoutput(ilint); } -#endif -#if MAC - printf("Done\n"); - SIOUXSettings.autocloseonquit = true; #endif free(modprefix); /* allocated in openfiles below */ return 0; } -static void openfiles(char* given_filename, char* output_dir) { +static void openfiles(const char* given_filename, const char* output_dir) { char s[NRN_BUFSIZE]; char output_filename[NRN_BUFSIZE]; @@ -290,7 +243,9 @@ static void openfiles(char* given_filename, char* output_dir) { } } if (output_dir) { - if (mkdir_p(output_dir) != 0) { + try { + fs::create_directories(output_dir); + } catch (...) 
{ fprintf(stderr, "Can't create output directory %s\n", output_dir); exit(1); } diff --git a/src/nmodl/modl.h b/src/nmodl/modl.h index be6bb5763b..0da34de44f 100644 --- a/src/nmodl/modl.h +++ b/src/nmodl/modl.h @@ -2,12 +2,10 @@ #include "wrap_sprintf.h" #include -#if HAVE_STRING_H #include -#else -#include -#endif #include +#include +#include /** * \dir @@ -159,6 +157,8 @@ typedef struct Symbol { } Symbol; #define SYM0 (Symbol*) 0 +extern std::map extdef_rand; + /* * this is convenient way to get the element pointer if you know what type * the item is @@ -199,13 +199,14 @@ typedef struct Symbol { #define EXTDEF 0100000 #define LINF 0200000 #define UNITDEF 0400000L -#define EXTDEF2 01000000L /* functions that can take array or function name arguments */ -#define nmodlCONST 02000000L /* constants that do not appear in .var file */ -#define EXTDEF3 04000000L /* get two extra reset arguments at beginning */ -#define INTGER 010000000L /* must be cast to double in expr */ -#define EXTDEF4 020000000L /* get extra NrnThread* arg at beginning */ -#define EXTDEF5 040000000L /* not threadsafe from the extdef list */ -#define EXPLICIT_DECL 01 /* usage field, variable occurs in input file */ +#define EXTDEF2 01000000L /* functions that can take array or function name arguments */ +#define nmodlCONST 02000000L /* constants that do not appear in .var file */ +#define EXTDEF3 04000000L /* get two extra reset arguments at beginning */ +#define INTGER 010000000L /* must be cast to double in expr */ +#define EXTDEF4 020000000L /* get extra NrnThread* arg at beginning */ +#define EXTDEF5 040000000L /* not threadsafe from the extdef list */ +#define EXTDEF_RANDOM 0600000000L /* functions that can be used with RANDOM type */ +#define EXPLICIT_DECL 01 /* usage field, variable occurs in input file */ #define NRNEXTRN 01 /* t, dt, celsius, etc. 
*/ @@ -223,6 +224,10 @@ typedef struct Symbol { #define NRNPOINTER 04000 #define IONCONC 010000 #define NRNBBCOREPOINTER 020000 +#define NMODLRANDOM 040000 +// Implicit ion concentration variable that has been added so we can call nrn_wrote_conc, but which +// is not used in the MOD file +#define IONCONC_IMPLICIT 040000 extern char *emalloc(unsigned), /* malloc with out of space checking */ @@ -326,6 +331,5 @@ extern Item* qlint; #endif using neuron::Sprintf; - void verbatim_adjust(char* q); /** @} */ // end of hoc_functions diff --git a/src/nmodl/netrec_discon.cpp b/src/nmodl/netrec_discon.cpp index 4711915303..4666bf0356 100644 --- a/src/nmodl/netrec_discon.cpp +++ b/src/nmodl/netrec_discon.cpp @@ -201,15 +201,15 @@ static void general_discon_adjust(Item* varname, Item* equal, Item* expr, Item* " double __primary_delta = (%s) - __state;\n" " double __dtsav = dt;\n" " for (__i = 0; __i < __neq; ++__i) {\n" - " _p[_dlist%d[__i]] = 0.0;\n" + " _ml->data(_iml, _dlist%d[__i]) = 0.0;\n" " }\n" - " _p[_dlist%d[%d]] = __primary_delta;\n" + " _ml->data(_iml, _dlist%d[%d]) = __primary_delta;\n" " dt *= 0.5;\n" "%s%s" " _ode_matsol_instance%d(_threadargs_);\n" " dt = __dtsav;\n" " for (__i = 0; __i < __neq; ++__i) {\n" - " _p[_slist%d[__i]] += _p[_dlist%d[__i]];\n" + " _ml->data(_iml, _slist%d[__i]) += _ml->data(_iml, _dlist%d[__i]);\n" " }\n" " } else {\n", neq, diff --git a/src/nmodl/nmodlfunc.h b/src/nmodl/nmodlfunc.h index 2f979161f7..da7ac20659 100644 --- a/src/nmodl/nmodlfunc.h +++ b/src/nmodl/nmodlfunc.h @@ -138,4 +138,10 @@ void netrec_asgn(Item* varname, Item* equal, Item* expr, Item* lastok); void netrec_discon(); char* items_as_string(Item* begin, Item* last); /* does not include last */ int slist_search(int listnum, Symbol* s); -void nrnunit_dynamic_str(char (&buf)[NRN_BUFSIZE], const char* name, char* unit1, char* unit2); +void nrnunit_str(char (&buf)[NRN_BUFSIZE], const char* name, const char* unit1, const char* unit2); + +// help know if setdata required to call FUNCTION or PROCEDURE +void check_range_in_func(Symbol*); +void set_inside_func(Symbol*); +void func_needs_setdata(); +void hocfunc_setdata_item(Symbol*, Item*); diff --git a/src/nmodl/noccout.cpp b/src/nmodl/noccout.cpp index a2ceef6b93..f95ed6e315 100644 --- a/src/nmodl/noccout.cpp +++ b/src/nmodl/noccout.cpp @@ -5,9 +5,7 @@ #include "parse1.hpp" #include "symbol.h" -#define CACHEVEC 1 - -extern char* nmodl_version_; +extern const char* nmodl_version_; #define P(arg) fputs(arg, fcout) List *procfunc, *initfunc, *modelfunc, *termfunc, *initlist, *firstlist; @@ -53,42 +51,16 @@ static void ext_vdef() { } if (electrode_current) { P("#if EXTRACELLULAR\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" if (_nd->_extnode) {\n"); - P(" _v = NODEV(_nd) +_nd->_extnode->_v[0];\n"); + P(" _nd = _ml_arg->_nodelist[_iml];\n"); + P(" if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n"); + P(" _v = NODEV(_nd) + _extnode->_v[0];\n"); P(" }else\n"); P("#endif\n"); P(" {\n"); -#if CACHEVEC == 0 - P(" _v = NODEV(_nd);\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" _v = VEC_V(_ni[_iml]);\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" _v = NODEV(_nd);\n"); - P(" }\n"); -#endif - + P(" _v = _vec_v[_ni[_iml]];\n"); P(" }\n"); } else { -#if CACHEVEC == 0 - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" _v = NODEV(_nd);\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" _v = VEC_V(_ni[_iml]);\n"); - P(" }else\n"); - P("#endif\n"); - 
P(" {\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" _v = NODEV(_nd);\n"); - P(" }\n"); -#endif + P(" _v = _vec_v[_ni[_iml]];\n"); } } @@ -113,10 +85,18 @@ void c_out() { P("#undef PI\n"); P("#define nil 0\n"); P("#define _pval pval\n"); // due to some old models using _pval + P("// clang-format on\n"); P("#include \"md1redef.h\"\n"); - P("#include \"section.h\"\n"); + P("#include \"section_fwd.hpp\"\n"); P("#include \"nrniv_mf.h\"\n"); P("#include \"md2redef.h\"\n"); + P("// clang-format off\n"); + P("#include \"neuron/cache/mechanism_range.hpp\"\n"); + P("#include \n"); + + /* avoid clashes with mech names */ + P("using std::size_t;\n"); + P("static auto& std_cerr_stream = std::cerr;\n"); printlist(defs_list); printlist(firstlist); @@ -164,14 +144,16 @@ void c_out() { Fflush(fcout); /* generation of initmodel interface */ - P("\nstatic void nrn_init(NrnThread* _nt, Memb_list* _ml, int _type){\n"); - P("Node *_nd; double _v; int* _ni; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); + P("\nstatic void nrn_init(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type){\n"); + P("Node *_nd; double _v; int* _ni; int _cntml;\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto* const _vec_v = _nt->node_voltage_storage();\n"); + P("_ml = &_lmr;\n"); // update global _ml + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); // use global _iml + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); if (debugging_ && net_receive_) { P(" _tsav = -1e20;\n"); } @@ -205,14 +187,18 @@ void c_out() { well as make sure all currents accumulated properly (currents list) */ if (brkpnt_exists) { - P("\nstatic void nrn_cur(NrnThread* _nt, Memb_list* _ml, int _type){\n"); - P("Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); + P("\nstatic void nrn_cur(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type){\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto const _vec_rhs = _nt->node_rhs_storage();\n"); + P("auto const _vec_sav_rhs = _nt->node_sav_rhs_storage();\n"); + P("auto const _vec_v = _nt->node_voltage_storage();\n"); + P("Node *_nd; int* _ni; double _rhs, _v; int _cntml;\n"); + P("_ml = &_lmr;\n"); // update global _ml + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); // global _iml + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); ext_vdef(); if (currents->next != currents) { printlist(get_ion_variables(0)); @@ -223,7 +209,7 @@ void c_out() { fprintf(fcout, "if (cvode_active_) { %s(); }\n", cvode_nrn_cur_solve_->name); } if (currents->next != currents) { - P(" _g = _nrn_current(_v + .001);\n"); + P(" auto const _g_local = _nrn_current(_v + .001);\n"); printlist(begin_dion_stmt()); if (state_discon_list_) { P(" state_discon_flag_ = 1; _rhs = _nrn_current(_v); state_discon_flag_ = 0;\n"); @@ -231,7 +217,7 @@ void c_out() { P(" _rhs = _nrn_current(_v);\n"); } 
printlist(end_dion_stmt(".001")); - P(" _g = (_g - _rhs)/.001;\n"); + P(" _g = (_g_local - _rhs)/.001;\n"); /* set the ion variable values */ printlist(set_ion_variables(0)); if (point_process) { @@ -239,106 +225,68 @@ void c_out() { P(" _rhs *= 1.e2/(_nd_area);\n"); } if (electrode_current) { -#if CACHEVEC == 0 - P(" NODERHS(_nd) += _rhs;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_RHS(_ni[_iml]) += _rhs;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODERHS(_nd) += _rhs;\n"); + P(" _vec_rhs[_ni[_iml]] += _rhs;\n"); + P(" if (_vec_sav_rhs) {\n"); + P(" _vec_sav_rhs[_ni[_iml]] += _rhs;\n"); P(" }\n"); - P(" if (_nt->_nrn_fast_imem) { _nt->_nrn_fast_imem->_nrn_sav_rhs[_ni[_iml]] += " - "_rhs; }\n"); -#endif P("#if EXTRACELLULAR\n"); - P(" if (_nd->_extnode) {\n"); - P(" *_nd->_extnode->_rhs[0] += _rhs;\n"); + P(" if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n"); + P(" *_extnode->_rhs[0] += _rhs;\n"); P(" }\n"); P("#endif\n"); } else { -#if CACHEVEC == 0 - P(" NODERHS(_nd) -= _rhs;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_RHS(_ni[_iml]) -= _rhs;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODERHS(_nd) -= _rhs;\n"); - P(" }\n"); -#endif + P(" _vec_rhs[_ni[_iml]] -= _rhs;\n"); } } P(" \n}}\n"); /* for the classic breakpoint block, nrn_cur computed the conductance, _g, and now the jacobian calculation merely returns that */ - P("\nstatic void nrn_jacob(NrnThread* _nt, Memb_list* _ml, int _type){\n"); + P("\nstatic void nrn_jacob(_nrn_model_sorted_token const& _sorted_token, NrnThread* " + "_nt, Memb_list* _ml_arg, int _type) {\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto const _vec_d = _nt->node_d_storage();\n"); + P("auto const _vec_sav_d = _nt->node_sav_d_storage();\n"); + P("auto* const _ml = &_lmr;\n"); P("Node *_nd; int* _ni; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml];\n"); if (electrode_current) { - P(" _nd = _ml->_nodelist[_iml];\n"); -#if CACHEVEC == 0 - P(" NODED(_nd) -= _g;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_D(_ni[_iml]) -= _g;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODED(_nd) -= _g;\n"); + P(" _nd = _ml_arg->_nodelist[_iml];\n"); + P(" _vec_d[_ni[_iml]] -= _g;\n"); + P(" if (_vec_sav_d) {\n"); + P(" _vec_sav_d[_ni[_iml]] -= _g;\n"); P(" }\n"); - P(" if (_nt->_nrn_fast_imem) { _nt->_nrn_fast_imem->_nrn_sav_d[_ni[_iml]] -= _g; }\n"); -#endif P("#if EXTRACELLULAR\n"); - P(" if (_nd->_extnode) {\n"); - P(" *_nd->_extnode->_d[0] += _g;\n"); + P(" if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n"); + P(" *_extnode->_d[0] += _g;\n"); P(" }\n"); P("#endif\n"); } else { -#if CACHEVEC == 0 - P(" NODED(_nd) += _g;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_D(_ni[_iml]) += _g;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" NODED(_nd) += _g;\n"); - P(" }\n"); -#endif + P(" _vec_d[_ni[_iml]] += _g;\n"); } P(" \n}}\n"); } /* nrnstate list contains the EQUATION solve statement so this advances states by dt */ - P("\nstatic void nrn_state(NrnThread* _nt, Memb_list* _ml, int _type){\n"); + P("\nstatic void 
nrn_state(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type){\n"); if (nrnstate || currents->next == currents) { - P("Node *_nd; double _v = 0.0; int* _ni; int _iml, _cntml;\n"); + P("Node *_nd; double _v = 0.0; int* _ni; int _cntml;\n"); if (dtsav_for_nrn_state && nrnstate) { P("double _dtsav = dt;\n" "if (secondorder) { dt *= 0.5; }\n"); } - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto* const _vec_v = _nt->node_voltage_storage();\n"); + P("_ml = &_lmr;\n"); // update global _ml + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); // use the global _iml + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); + P(" _nd = _ml_arg->_nodelist[_iml];\n"); ext_vdef(); P(" v=_v;\n{\n"); printlist(get_ion_variables(1)); @@ -508,9 +456,9 @@ static void funcdec() { narg = s->varnum; if (vectorize) { if (narg) { - Fprintf(fcout, "_threadargsprotocomma_ "); + Fprintf(fcout, "_internalthreadargsprotocomma_ "); } else { - Fprintf(fcout, "_threadargsproto_"); + Fprintf(fcout, "_internalthreadargsproto_"); } } /*loop over argcount and add ,double */ @@ -535,10 +483,13 @@ void c_out_vectorize() { P("#undef PI\n"); P("#define nil 0\n"); P("#define _pval pval\n"); // due to some old models using _pval + P("// clang-format off\n"); P("#include \"md1redef.h\"\n"); - P("#include \"section.h\"\n"); + P("#include \"section_fwd.hpp\"\n"); P("#include \"nrniv_mf.h\"\n"); P("#include \"md2redef.h\"\n"); + P("// clang-format on\n"); + P("#include \"neuron/cache/mechanism_range.hpp\"\n"); printlist(defs_list); printlist(firstlist); P("static int _reset;\n"); @@ -566,7 +517,7 @@ void c_out_vectorize() { /* Initialization function must always be present */ - P("\nstatic void initmodel(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {\n int " + P("\nstatic void initmodel(_internalthreadargsproto_) {\n int " "_i; double _save;"); P("{\n"); initstates(); @@ -575,17 +526,19 @@ void c_out_vectorize() { Fflush(fcout); /* generation of initmodel interface */ - P("\nstatic void nrn_init(NrnThread* _nt, Memb_list* _ml, int _type){\n"); - P("double* _p; Datum* _ppvar; Datum* _thread;\n"); + P("\nstatic void nrn_init(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type){\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto* const _vec_v = _nt->node_voltage_storage();\n"); + P("auto* const _ml = &_lmr;\n"); + P("Datum* _ppvar; Datum* _thread;\n"); P("Node *_nd; double _v; int* _ni; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("_thread = _ml->_thread;\n"); + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("_thread = _ml_arg->_thread;\n"); /*check_tables();*/ P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); check_tables(); if (debugging_ && net_receive_) { P(" _tsav = -1e20;\n"); @@ -597,18 +550,19 @@ void c_out_vectorize() { P(" v = _v;\n"); } printlist(get_ion_variables(1)); - P(" initmodel(_p, 
_ppvar, _thread, _nt);\n"); + P(" initmodel(_threadargs_);\n"); printlist(set_ion_variables(2)); P("}\n"); P("}\n"); /* standard modl EQUATION without solve computes current */ if (!conductance_) { - P("\nstatic double _nrn_current(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt, " - "double _v){double _current=0.;v=_v;"); + P("\nstatic double _nrn_current(_internalthreadargsprotocomma_ " + "double _v) {\n" + "double _current=0.; v=_v;\n"); if (cvode_nrn_current_solve_) { fprintf(fcout, - "if (cvode_active_) { %s(_p, _ppvar, _thread, _nt); }\n", + "if (cvode_active_) { %s(_threadargs_); }\n", cvode_nrn_current_solve_->name); } P("{"); @@ -632,16 +586,20 @@ void c_out_vectorize() { well as make sure all currents accumulated properly (currents list) */ if (brkpnt_exists) { - P("\nstatic void nrn_cur(NrnThread* _nt, Memb_list* _ml, int _type) {\n"); - P("double* _p; Datum* _ppvar; Datum* _thread;\n"); + P("\nstatic void nrn_cur(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type) {\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto const _vec_rhs = _nt->node_rhs_storage();\n"); + P("auto const _vec_sav_rhs = _nt->node_sav_rhs_storage();\n"); + P("auto const _vec_v = _nt->node_voltage_storage();\n"); + P("auto* const _ml = &_lmr;\n"); + P("Datum* _ppvar; Datum* _thread;\n"); P("Node *_nd; int* _ni; double _rhs, _v; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("_thread = _ml->_thread;\n"); + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("_thread = _ml_arg->_thread;\n"); P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); ext_vdef(); if (currents->next != currents) { printlist(get_ion_variables(0)); @@ -650,7 +608,7 @@ void c_out_vectorize() { } if (cvode_nrn_cur_solve_) { fprintf(fcout, - "if (cvode_active_) { %s(_p, _ppvar, _thread, _nt); }\n", + "if (cvode_active_) { %s(_threadargs_); }\n", cvode_nrn_cur_solve_->name); } if (currents->next != currents) { @@ -660,16 +618,16 @@ void c_out_vectorize() { printlist(set_ion_variables(0)); P(" }\n"); } else { - P(" _g = _nrn_current(_p, _ppvar, _thread, _nt, _v + .001);\n"); + P(" auto const _g_local = _nrn_current(_threadargscomma_ _v + .001);\n"); printlist(begin_dion_stmt()); if (state_discon_list_) { P(" state_discon_flag_ = 1; _rhs = _nrn_current(_v); state_discon_flag_ = " "0;\n"); } else { - P(" _rhs = _nrn_current(_p, _ppvar, _thread, _nt, _v);\n"); + P(" _rhs = _nrn_current(_threadargscomma_ _v);\n"); } printlist(end_dion_stmt(".001")); - P(" _g = (_g - _rhs)/.001;\n"); + P(" _g = (_g_local - _rhs)/.001;\n"); /* set the ion variable values */ printlist(set_ion_variables(0)); } /* end of not conductance */ @@ -678,88 +636,48 @@ void c_out_vectorize() { P(" _rhs *= 1.e2/(_nd_area);\n"); } if (electrode_current) { -#if CACHEVEC == 0 - P(" NODERHS(_nd) += _rhs;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_RHS(_ni[_iml]) += _rhs;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODERHS(_nd) += _rhs;\n"); + P(" _vec_rhs[_ni[_iml]] += _rhs;\n"); + P(" if (_vec_sav_rhs) {\n"); + P(" _vec_sav_rhs[_ni[_iml]] += _rhs;\n"); P(" }\n"); - P(" if (_nt->_nrn_fast_imem) { _nt->_nrn_fast_imem->_nrn_sav_rhs[_ni[_iml]] += " - "_rhs; }\n"); -#endif P("#if EXTRACELLULAR\n"); - P(" if (_nd->_extnode) 
{\n"); - P(" *_nd->_extnode->_rhs[0] += _rhs;\n"); + P(" if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n"); + P(" *_extnode->_rhs[0] += _rhs;\n"); P(" }\n"); P("#endif\n"); } else { -#if CACHEVEC == 0 - P(" NODERHS(_nd) -= _rhs;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_RHS(_ni[_iml]) -= _rhs;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODERHS(_nd) -= _rhs;\n"); - P(" }\n"); -#endif + P(" _vec_rhs[_ni[_iml]] -= _rhs;\n"); } } P(" \n}\n"); P(" \n}\n"); /* for the classic breakpoint block, nrn_cur computed the conductance, _g, and now the jacobian calculation merely returns that */ - P("\nstatic void nrn_jacob(NrnThread* _nt, Memb_list* _ml, int _type) {\n"); - P("double* _p; Datum* _ppvar; Datum* _thread;\n"); + P("\nstatic void nrn_jacob(_nrn_model_sorted_token const& _sorted_token, NrnThread* " + "_nt, Memb_list* _ml_arg, int _type) {\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto const _vec_d = _nt->node_d_storage();\n"); + P("auto const _vec_sav_d = _nt->node_sav_d_storage();\n"); + P("auto* const _ml = &_lmr;\n"); + P("Datum* _ppvar; Datum* _thread;\n"); P("Node *_nd; int* _ni; int _iml, _cntml;\n"); - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("_thread = _ml->_thread;\n"); + P("_ni = _ml_arg->_nodeindices;\n"); + P("_cntml = _ml_arg->_nodecount;\n"); + P("_thread = _ml_arg->_thread;\n"); P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml];\n"); if (electrode_current) { - P(" _nd = _ml->_nodelist[_iml];\n"); -#if CACHEVEC == 0 - P(" NODED(_nd) -= _g;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_D(_ni[_iml]) -= _g;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" NODED(_nd) -= _g;\n"); + P(" _nd = _ml_arg->_nodelist[_iml];\n"); + P(" _vec_d[_ni[_iml]] -= _g;\n"); + P(" if (_vec_sav_d) {\n"); + P(" _vec_sav_d[_ni[_iml]] -= _g;\n"); P(" }\n"); - P(" if (_nt->_nrn_fast_imem) { _nt->_nrn_fast_imem->_nrn_sav_d[_ni[_iml]] -= _g; }\n"); -#endif P("#if EXTRACELLULAR\n"); - P(" if (_nd->_extnode) {\n"); - P(" *_nd->_extnode->_d[0] += _g;\n"); + P(" if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n"); + P(" *_extnode->_d[0] += _g;\n"); P(" }\n"); P("#endif\n"); } else { -#if CACHEVEC == 0 - P(" NODED(_nd) += _g;\n"); -#else - P("#if CACHEVEC\n"); - P(" if (use_cachevec) {\n"); - P(" VEC_D(_ni[_iml]) += _g;\n"); - P(" }else\n"); - P("#endif\n"); - P(" {\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); - P(" NODED(_nd) += _g;\n"); - P(" }\n"); -#endif + P(" _vec_d[_ni[_iml]] += _g;\n"); } P(" \n}\n"); P(" \n}\n"); @@ -767,22 +685,24 @@ void c_out_vectorize() { /* nrnstate list contains the EQUATION solve statement so this advances states by dt */ - P("\nstatic void nrn_state(NrnThread* _nt, Memb_list* _ml, int _type) {\n"); + P("\nstatic void nrn_state(_nrn_model_sorted_token const& _sorted_token, NrnThread* _nt, " + "Memb_list* _ml_arg, int _type) {\n"); + P("_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n"); + P("auto* const _vec_v = _nt->node_voltage_storage();\n"); + P("auto* const _ml = &_lmr;\n"); if (nrnstate || currents->next == currents) { - P("double* _p; Datum* _ppvar; Datum* _thread;\n"); - P("Node *_nd; double _v = 0.0; int* _ni; int _iml, _cntml;\n"); + P("Datum* _ppvar; Datum* _thread;\n"); + P("Node *_nd; double _v = 0.0; int* _ni;\n"); if (dtsav_for_nrn_state && nrnstate) { 
P("double _dtsav = dt;\n" "if (secondorder) { dt *= 0.5; }\n"); } - P("#if CACHEVEC\n"); - P(" _ni = _ml->_nodeindices;\n"); - P("#endif\n"); - P("_cntml = _ml->_nodecount;\n"); - P("_thread = _ml->_thread;\n"); - P("for (_iml = 0; _iml < _cntml; ++_iml) {\n"); - P(" _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n"); - P(" _nd = _ml->_nodelist[_iml];\n"); + P("_ni = _ml_arg->_nodeindices;\n"); + P("size_t _cntml = _ml_arg->_nodecount;\n"); + P("_thread = _ml_arg->_thread;\n"); + P("for (size_t _iml = 0; _iml < _cntml; ++_iml) {\n"); + P(" _ppvar = _ml_arg->_pdata[_iml];\n"); + P(" _nd = _ml_arg->_nodelist[_iml];\n"); ext_vdef(); P(" v=_v;\n{\n"); printlist(get_ion_variables(1)); @@ -809,7 +729,6 @@ void c_out_vectorize() { */ /* initlists() is called once to setup slist and dlist pointers */ P("\nstatic void _initlists(){\n"); - P(" double _x; double* _p = &_x;\n"); P(" int _i; static int _first = 1;\n"); P(" if (!_first) return;\n"); printlist(initlist); diff --git a/src/nmodl/nocpout.cpp b/src/nmodl/nocpout.cpp index 3dfc254d0b..67a2d85672 100644 --- a/src/nmodl/nocpout.cpp +++ b/src/nmodl/nocpout.cpp @@ -1,4 +1,5 @@ #include <../../nmodlconf.h> + /* /local/src/master/nrn/src/nmodl/nocpout.c,v 4.1 1997/08/30 20:45:28 hines Exp */ /* @@ -61,8 +62,15 @@ directly by hoc. #include "modl.h" #include "parse1.hpp" + +#include +#include // std::back_inserter #include +#include +#include +#ifdef HAVE_UNISTD_H #include +#endif #define GETWD(buf) getcwd(buf, NRN_BUFSIZE) int vectorize = 1; @@ -118,7 +126,9 @@ static List* rangeparm; static List* rangedep; static List* rangestate; static List* nrnpointers; -static List* uip; /* void _update_ion_pointer(Datum* _ppvar){...} text */ +static List* nmodlrandoms; +static List* nrn_mech_inst_destruct_list; +static int num_random_vars = 0; static char suffix[256]; static const char* rsuffix; /* point process range and functions don't have suffix*/ static char* mechname; @@ -137,7 +147,7 @@ int iondef(int*); void ion_promote(Item*); static int ppvar_cnt; static List* ppvar_semantics_; -static void ppvar_semantics(int, const char*); +static void ppvar_semantics(int, const char* semantics, const char* name, const char* type); static int for_netcons_; /* number of FOR_NETCONS statements */ static Item* net_init_q1_; static Item* net_init_q2_; @@ -147,10 +157,8 @@ static List* ba_list_; List* state_discon_list_; int cvode_not_allowed; static int cvode_emit, cvode_ieq_index; -static int cond_index; static int tqitem_index; static int watch_index; -static int cvode_index; static List* ion_synonym; int debugging_; int net_receive_; @@ -175,6 +183,8 @@ static Item* net_send_delivered_; /* location for if flag is 1 then clear the pvarcount indexes pointers to variables such as ena */ static int varcount, parraycount; +static std::vector> ppvar_data_field_strings; +static std::vector data_field_strings; void nrninit() { currents = newlist(); @@ -187,10 +197,11 @@ void nrninit() { debugging_ = 1; thread_cleanup_list = newlist(); thread_mem_init_list = newlist(); + nmodlrandoms = newlist(); } void parout() { - int i, j, ioncount, pointercount, gind, emit_check_table_thread; + int i, ioncount, pointercount, gind, emit_check_table_thread; Item *q, *q1; Symbol *s, *sion; double d1, d2; @@ -221,6 +232,9 @@ void parout() { } else { Sprintf(suffix, "_%s", mechname); } + + func_needs_setdata(); // Do FUNCTION/PROCEDURE need prior call to setdata. 
+ if (artificial_cell && vectorize && (thread_data_index || toplocal_)) { fprintf(stderr, "Notice: ARTIFICIAL_CELL models that would require thread specific data are not " @@ -278,18 +292,22 @@ void parout() { if (vectorize) { Lappendstr(defs_list, "\n\ -#define _threadargscomma_ _p, _ppvar, _thread, _nt,\n\ -#define _threadargsprotocomma_ double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt,\n\ -#define _threadargs_ _p, _ppvar, _thread, _nt\n\ -#define _threadargsproto_ double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt\n\ +#define _threadargscomma_ _ml, _iml, _ppvar, _thread, _nt,\n\ +#define _threadargsprotocomma_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt,\n\ +#define _internalthreadargsprotocomma_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt,\n\ +#define _threadargs_ _ml, _iml, _ppvar, _thread, _nt\n\ +#define _threadargsproto_ Memb_list* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt\n\ +#define _internalthreadargsproto_ _nrn_mechanism_cache_range* _ml, size_t _iml, Datum* _ppvar, Datum* _thread, NrnThread* _nt\n\ "); } else { Lappendstr(defs_list, "\n\ #define _threadargscomma_ /**/\n\ #define _threadargsprotocomma_ /**/\n\ +#define _internalthreadargsprotocomma_ /**/\n\ #define _threadargs_ /**/\n\ #define _threadargsproto_ /**/\n\ +#define _internalthreadargsproto_ /**/\n\ "); } Lappendstr(defs_list, @@ -300,20 +318,22 @@ void parout() { /*SUPPRESS 765*/\n\ "); Lappendstr(defs_list, "extern double *hoc_getarg(int);\n"); - if (vectorize) { - Sprintf(buf, "/* Thread safe. No static _p or _ppvar. */\n"); - } else { - Sprintf(buf, "static double *_p; static Datum *_ppvar;\n"); - } - Lappendstr(defs_list, buf); nrndeclare(); varcount = parraycount = 0; declare_p(); + // iondef defined _nrn_mechanism_cache_range ioncount = iondef(&pointercount); /* first is _nd_area if point process */ - Lappendstr(defs_list, - "\n#if MAC\n#if !defined(v)\n#define v _mlhv\n#endif\n#if !defined(h)\n#define h " - "_mlhh\n#endif\n#endif\n"); + if (vectorize) { + Sprintf(buf, "/* Thread safe. No static _ml, _iml or _ppvar. */\n"); + } else { + Sprintf(buf, + "static _nrn_mechanism_cache_instance _ml_real{nullptr};\n" + "static _nrn_mechanism_cache_range *_ml{&_ml_real};\n" + "static size_t _iml{0};\n" + "static Datum *_ppvar;\n"); + } + Lappendstr(defs_list, buf); Lappendstr(defs_list, "static int hoc_nrnpointerindex = "); if (pointercount) { q = nrnpointers->next; @@ -325,17 +345,15 @@ void parout() { /*above modified to also count and define pointers*/ if (vectorize) { - Lappendstr(defs_list, "static Datum* _extcall_thread;\n static Prop* _extcall_prop;\n"); + Lappendstr(defs_list, "static _nrn_mechanism_std_vector _extcall_thread;\n"); } -#if 0 - Lappendstr(defs_list, "/* static variables special to NEURON */\n"); - SYMLISTITER { - if (SYM(q)->nrntype & NRNSTATIC) { - Sprintf(buf, "static double %s;\n", SYM(q)->name); - Lappendstr(defs_list, buf); - } - } -#endif + if (!point_process) { + Lappendstr(defs_list, "static Prop* _extcall_prop;\n"); + Lappendstr(defs_list, + "/* _prop_id kind of shadows _extcall_prop to allow validity checking. 
*/\n"); + Lappendstr(defs_list, "static _nrn_non_owning_id_without_container _prop_id{};\n"); + } + Lappendstr(defs_list, "/* external NEURON variables */\n"); SYMLISTITER { s = SYM(q); @@ -371,11 +389,9 @@ void parout() { Lappendstr(defs_list, "static int _mechtype;\n\ extern void _nrn_cacheloop_reg(int, int);\n\ -extern void hoc_register_prop_size(int, int, int);\n\ extern void hoc_register_limits(int, HocParmLimits*);\n\ extern void hoc_register_units(int, HocParmUnits*);\n\ extern void nrn_promote(Prop*, int, int);\n\ -extern Memb_func* memb_func;\n\ "); if (nmodl_text) { @@ -411,10 +427,14 @@ extern Memb_func* memb_func;\n\ /* function to set up _p and _ppvar */ Lappendstr(defs_list, "extern void _nrn_setdata_reg(int, void(*)(Prop*));\n"); Lappendstr(defs_list, "static void _setdata(Prop* _prop) {\n"); - if (vectorize) { + if (!point_process) { Lappendstr(defs_list, "_extcall_prop = _prop;\n"); - } else { - Lappendstr(defs_list, "_p = _prop->param; _ppvar = _prop->dparam;\n"); + Lappendstr(defs_list, "_prop_id = _nrn_get_prop_id(_prop);\n"); + } + if (!vectorize) { + Lappendstr(defs_list, + "neuron::legacy::set_globals_from_prop(_prop, _ml_real, _ml, _iml);\n" + "_ppvar = _nrn_mechanism_access_dparam(_prop);\n"); } Lappendstr(defs_list, "}\n"); @@ -450,6 +470,7 @@ extern Memb_func* memb_func;\n\ Sprintf(buf, "{\"setdata_%s\", _hoc_setdata},\n", mechname); Lappendstr(defs_list, buf); } + SYMLISTITER { s = SYM(q); if ((s->subtype & (FUNCT | PROCED)) && s->name[0] != '_') { @@ -459,6 +480,30 @@ extern Memb_func* memb_func;\n\ } Lappendstr(defs_list, "{0, 0}\n};\n"); + /* Direct Python call wrappers to density mechanism functions. */ + if (!point_process) { + Lappendstr(defs_list, + "\n/* Direct Python call wrappers to density mechanism functions.*/\n"); + SYMLISTITER { + s = SYM(q); + if ((s->subtype & (FUNCT | PROCED)) && s->name[0] != '_') { + Sprintf(buf, "static double _npy_%s(Prop*);\n", s->name, s->name); + Lappendstr(defs_list, buf); + } + } + Lappendstr(defs_list, + "\n" + "static NPyDirectMechFunc npy_direct_func_proc[] = {\n"); + SYMLISTITER { + s = SYM(q); + if ((s->subtype & (FUNCT | PROCED)) && s->name[0] != '_') { + Sprintf(buf, "{\"%s\", _npy_%s},\n", s->name, s->name); + Lappendstr(defs_list, buf); + } + } + Lappendstr(defs_list, "{0, 0}\n};\n"); + } + /* FUNCTION's are now global so callable from other models */ /* change name to namesuffix. 
This propagates everywhere except to hoc_name*/ @@ -480,9 +525,9 @@ extern Memb_func* memb_func;\n\ Lappendstr(defs_list, buf); if (vectorize && !s->no_threadargs) { if (s->varnum) { - Lappendstr(defs_list, "_threadargsprotocomma_"); + Lappendstr(defs_list, "_internalthreadargsprotocomma_"); } else { - Lappendstr(defs_list, "_threadargsproto_"); + Lappendstr(defs_list, "_internalthreadargsproto_"); } } for (j = 0; j < s->varnum; ++j) { @@ -515,11 +560,11 @@ extern Memb_func* memb_func;\n\ } } Sprintf(buf, - " _thread[%d] = (double*)ecalloc(%d, sizeof(double));\n", + " _thread[%d] = {neuron::container::do_not_search, new double[%d]{}};\n", thread_data_index, cnt); lappendstr(thread_mem_init_list, buf); - Sprintf(buf, " free(_thread[%d].get());\n", thread_data_index); + Sprintf(buf, " delete[] _thread[%d].get();\n", thread_data_index); lappendstr(thread_cleanup_list, buf); cnt = 0; ITERATE(q, toplocal_) { @@ -573,15 +618,18 @@ extern Memb_func* memb_func;\n\ thread_data_index); Lappendstr(defs_list, buf); Sprintf(buf, - " if (_thread1data_inuse) {_thread[_gth] = (double*)ecalloc(%d, " - "sizeof(double));\n }else{\n _thread[_gth] = &_thread1data[0]; " - "_thread1data_inuse = 1;\n }\n", + "if (_thread1data_inuse) {\n" + " _thread[_gth] = {neuron::container::do_not_search, new double[%d]{}};\n" + "} else {\n" + " _thread[_gth] = {neuron::container::do_not_search, _thread1data};\n" + " _thread1data_inuse = 1;\n" + "}\n", gind); lappendstr(thread_mem_init_list, buf); lappendstr(thread_cleanup_list, - " if (_thread[_gth].get() == &_thread1data[0]) {\n " + " if (_thread[_gth].get() == _thread1data) {\n " "_thread1data_inuse = 0;\n " - "}else{\n free(_thread[_gth].get());\n }\n"); + "}else{\n delete[] _thread[_gth].get();\n }\n"); ++thread_data_index; } gind = 0; @@ -687,12 +735,13 @@ extern Memb_func* memb_func;\n\ Lappendstr(defs_list, "{0, 0, 0}\n};\n"); Lappendstr(defs_list, "static double _sav_indep;\n"); if (ba_index_ > 0) { - Lappendstr( - defs_list, - "static void _ba1(Node*_nd, double* _pp, Datum* _ppd, Datum* _thread, NrnThread* _nt)"); + Lappendstr(defs_list, + "static void _ba1(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, " + "Memb_list* _ml, size_t _iml, _nrn_model_sorted_token const&)"); for (i = 2; i <= ba_index_; ++i) { Sprintf(buf, - ", _ba%d(Node*_nd, double* _pp, Datum* _ppd, Datum* _thread, NrnThread* _nt)", + ", _ba%d(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, Memb_list* " + "_ml, size_t _iml, _nrn_model_sorted_token const&)", i); Lappendstr(defs_list, buf); } @@ -704,24 +753,36 @@ extern Memb_func* memb_func;\n\ /*declaration of the range variables names to HOC */ Lappendstr( defs_list, - "static void nrn_alloc(Prop*);\nstatic void nrn_init(NrnThread*, Memb_list*, int);\nstatic void nrn_state(NrnThread*, Memb_list*, int);\n\ -"); + "static void nrn_alloc(Prop*);\n" + "static void nrn_init(_nrn_model_sorted_token const&, NrnThread*, Memb_list*, int);\n" + "static void nrn_state(_nrn_model_sorted_token const&, NrnThread*, Memb_list*, int);\n"); if (brkpnt_exists) { - Lappendstr(defs_list, - "static void nrn_cur(NrnThread*, Memb_list*, int);\nstatic void " - "nrn_jacob(NrnThread*, Memb_list*, int);\n"); + Lappendstr( + defs_list, + "static void nrn_cur(_nrn_model_sorted_token const&, NrnThread*, Memb_list*, int);\n" + "static void nrn_jacob(_nrn_model_sorted_token const&, NrnThread*, " + "Memb_list*, int);\n"); } /* count the number of pointers needed */ - ppvar_cnt = ioncount + diamdec + pointercount + areadec; + num_random_vars = 0; + ITERATE(q, 
nmodlrandoms) { + num_random_vars++; + } + ppvar_cnt = ioncount + diamdec + pointercount + num_random_vars + areadec; if (net_send_seen_) { tqitem_index = ppvar_cnt; - ppvar_semantics(ppvar_cnt, "netsend"); + ppvar_semantics( + ppvar_cnt, + "netsend", + "_tqitem", + "void*" /* TQItem* really, but that's not defined in translated MOD file code */); ppvar_cnt++; } if (watch_seen_) { watch_index = ppvar_cnt; for (i = 0; i < watch_seen_; ++i) { - ppvar_semantics(i + ppvar_cnt, "watch"); + // TODO: improve type safety by not using void* here + ppvar_semantics(i + ppvar_cnt, "watch", "_watch_array", "void*"); } ppvar_cnt += watch_seen_; Sprintf(buf, "\n#define _watch_array _ppvar + %d", watch_index); @@ -735,7 +796,8 @@ extern Memb_func* memb_func;\n\ if (for_netcons_) { Sprintf(buf, "\n#define _fnc_index %d\n", ppvar_cnt); Lappendstr(defs_list, buf); - ppvar_semantics(ppvar_cnt, "fornetcon"); + // TODO: improve type safety by not using void* here + ppvar_semantics(ppvar_cnt, "fornetcon", "_fnc_index", "void*"); ppvar_cnt += 1; } if (point_process) { @@ -745,7 +807,8 @@ extern Memb_func* memb_func;\n\ } if (watch_seen_) { Sprintf(buf, - " if (_prop) { _nrn_free_watch(_prop->dparam, %d, %d);}\n", + " if (_prop) { _nrn_free_watch(_nrn_mechanism_access_dparam(_prop), " + "%d, %d);}\n", watch_index, watch_seen_); Lappendstr(defs_list, buf); @@ -753,14 +816,15 @@ extern Memb_func* memb_func;\n\ if (for_netcons_) { Sprintf(buf, " if (_prop) { " - "_nrn_free_fornetcon(&(_prop->dparam[_fnc_index].literal_value()));}\n"); + "_nrn_free_fornetcon(&(_nrn_mechanism_access_dparam(_prop)[_fnc_index]" + ".literal_value()));}\n"); Lappendstr(defs_list, buf); } Lappendstr(defs_list, " destroy_point_process(_vptr);\n}\n"); } if (cvode_emit) { cvode_ieq_index = ppvar_cnt; - ppvar_semantics(ppvar_cnt, "cvodeieq"); + ppvar_semantics(ppvar_cnt, "cvodeieq", "_cvode_ieq", "int"); ppvar_cnt++; } cvode_emit_interface(); @@ -846,21 +910,36 @@ static const char *_mechanism[] = {\n\ q = q->next->next->next; } + Item* before_nrn_alloc = lappendstr(defs_list, "\n"); + Lappendstr(defs_list, - "\n\ -extern Prop* need_memb(Symbol*);\n\n\ -static void nrn_alloc(Prop* _prop) {\n\ - Prop *prop_ion;\n\ - double *_p; Datum *_ppvar;\n\ -"); + "\n" + "extern Prop* need_memb(Symbol*);\n" + "static void nrn_alloc(Prop* _prop) {\n" + " Prop *prop_ion{};\n" + " Datum *_ppvar{};\n"); if (point_process) { Lappendstr(defs_list, - " if (nrn_point_prop_) {\n\ - _prop->_alloc_seq = nrn_point_prop_->_alloc_seq;\n\ - _p = nrn_point_prop_->param;\n\ - _ppvar = nrn_point_prop_->dparam;\n }else{\n"); + " if (nrn_point_prop_) {\n" + " _nrn_mechanism_access_alloc_seq(_prop) = " + "_nrn_mechanism_access_alloc_seq(nrn_point_prop_);\n" + " _ppvar = _nrn_mechanism_access_dparam(nrn_point_prop_);\n" + " } else {\n"); + } + // need to fill _prop->dparam before calling _nrn_mechanism_cache_range(Prop*) + if (ppvar_cnt) { + Sprintf(buf, " _ppvar = nrn_prop_datum_alloc(_mechtype, %d, _prop);\n", ppvar_cnt); + Lappendstr(defs_list, buf); + Lappendstr(defs_list, " _nrn_mechanism_access_dparam(_prop) = _ppvar;\n"); } - Sprintf(buf, " _p = nrn_prop_data_alloc(_mechtype, %d, _prop);\n", parraycount); + // seems that even in the old code and with vectorize == false that the global _p, _ppvar were + // shadowed, so don't worry about shadowing the global _ml and _iml here + Sprintf(buf, + " _nrn_mechanism_cache_instance _ml_real{_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n" + " assert(_nrn_mechanism_get_num_vars(_prop) == %d);\n", + 
parraycount); Lappendstr(defs_list, buf); Lappendstr(defs_list, " /*initialize range parameters*/\n"); ITERATE(q, rangeparm) { @@ -875,34 +954,27 @@ static void nrn_alloc(Prop* _prop) {\n\ if (point_process) { Lappendstr(defs_list, " }\n"); } - Lappendstr(defs_list, "\t_prop->param = _p;\n"); - Sprintf(buf, "\t_prop->param_size = %d;\n", parraycount); + Sprintf(buf, "\t assert(_nrn_mechanism_get_num_vars(_prop) == %d);\n", parraycount); Lappendstr(defs_list, buf); if (ppvar_cnt) { - if (point_process) { - Lappendstr(defs_list, " if (!nrn_point_prop_) {\n"); - } - Sprintf(buf, " _ppvar = nrn_prop_datum_alloc(_mechtype, %d, _prop);\n", ppvar_cnt); - Lappendstr(defs_list, buf); - if (point_process) { - Lappendstr(defs_list, " }\n"); - } - Lappendstr(defs_list, "\t_prop->dparam = _ppvar;\n"); + Lappendstr(defs_list, "\t_nrn_mechanism_access_dparam(_prop) = _ppvar;\n"); Lappendstr(defs_list, "\t/*connect ionic variables to this model*/\n"); } if (diamdec) { Sprintf(buf, "prop_ion = need_memb(_morphology_sym);\n"); Lappendstr(defs_list, buf); - Sprintf(buf, "\t_ppvar[%d] = &prop_ion->param[0]; /* diam */\n", ioncount + pointercount), + Sprintf(buf, + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, 0); /* diam */\n", + ioncount + pointercount), Lappendstr(defs_list, buf); - ppvar_semantics(ioncount + pointercount, "diam"); + ppvar_semantics(ioncount + pointercount, "diam", "diam", "double*"); } if (areadec) { Sprintf(buf, - "\t_ppvar[%d] = &nrn_alloc_node_->_area; /* diam */\n", + "\t_ppvar[%d] = _nrn_mechanism_get_area_handle(nrn_alloc_node_);\n", ioncount + pointercount + diamdec), Lappendstr(defs_list, buf); - ppvar_semantics(ioncount + pointercount + diamdec, "area"); + ppvar_semantics(ioncount + pointercount + diamdec, "area", "area", "double*"); } if (point_process) { @@ -917,7 +989,7 @@ static void nrn_alloc(Prop* _prop) {\n\ Sprintf(buf, "prop_ion = need_memb(_%s_sym);\n", sion->name); Lappendstr(defs_list, buf); if (ldifuslist) { - Sprintf(buf, " _type_i%s = prop_ion->_type;\n", sion->name); + Sprintf(buf, " _type_i%s = _nrn_mechanism_get_type(prop_ion);\n", sion->name); lappendstr(defs_list, buf); } ion_promote(q); @@ -925,7 +997,7 @@ static void nrn_alloc(Prop* _prop) {\n\ ITERATE(q1, LST(q)) { SYM(q1)->nrntype |= NRNIONFLAG; Sprintf(buf, - "\t_ppvar[%d] = &prop_ion->param[%d]; /* %s */\n", + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, %d); /* %s */\n", ioncount++, iontype(SYM(q1)->name, sion->name), SYM(q1)->name); @@ -939,7 +1011,7 @@ static void nrn_alloc(Prop* _prop) {\n\ SYM(q1)->nrntype &= ~NRNIONFLAG; } else { Sprintf(buf, - "\t_ppvar[%d] = &prop_ion->param[%d]; /* %s */\n", + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, %d); /* %s */\n", ioncount++, itype, SYM(q1)->name); @@ -948,7 +1020,8 @@ static void nrn_alloc(Prop* _prop) {\n\ if (itype == IONCUR) { dcurdef = 1; Sprintf(buf, - "\t_ppvar[%d] = &prop_ion->param[%d]; /* _ion_di%sdv */\n", + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, %d); /* " + "_ion_di%sdv */\n", ioncount++, IONDCUR, sion->name); @@ -960,7 +1033,14 @@ static void nrn_alloc(Prop* _prop) {\n\ } if (need_style) { Sprintf(buf, - "\t_ppvar[%d] = &(prop_ion->dparam[0].literal_value()); /* " + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, %d); // erev %s\n", + ioncount++, + IONEREV, + sion->name); + Lappendstr(defs_list, buf); + Sprintf(buf, + "\t_ppvar[%d] = {neuron::container::do_not_search, " + "&(_nrn_mechanism_access_dparam(prop_ion)[0].literal_value())}; /* " "iontype for %s */\n", ioncount++, 
sion->name); @@ -968,33 +1048,58 @@ static void nrn_alloc(Prop* _prop) {\n\ } q = q->next; if (!dcurdef && ldifuslist) { - Sprintf(buf, - "\t_ppvar[%d] = &prop_ion->param[%d]; /* _ion_di%sdv */\n", - ioncount++, - IONDCUR, - sion->name); + Sprintf( + buf, + "\t_ppvar[%d] = _nrn_mechanism_get_param_handle(prop_ion, %d); /* _ion_di%sdv */\n", + ioncount++, + IONDCUR, + sion->name); Lappendstr(defs_list, buf); } } + + // I've put all the nrn_mech_inst_destruct here with nmodlrandoms allocation. + // Refactor if ever things other than nmodlrandoms need it. + nrn_mech_inst_destruct_list = newlist(); + ITERATE(q, nmodlrandoms) { + Sprintf(buf, "_p_%s = (void*)nrnran123_newstream();\n", SYM(q)->name); + Lappendstr(defs_list, buf); + Sprintf(buf, "nrnran123_deletestream(%s);\n", SYM(q)->name); + Lappendstr(nrn_mech_inst_destruct_list, buf); + } + if (nrn_mech_inst_destruct_list != nrn_mech_inst_destruct_list->next) { + auto& list = nrn_mech_inst_destruct_list; + // registration just means adding to nrn_mech_inst_destruct + Lappendstr(defs_list, "nrn_mech_inst_destruct[_mechtype] = _mech_inst_destruct;\n"); + // boilerplate for _mech_inst_destruct + Linsertstr(list, + "\nstatic void _mech_inst_destruct(Prop* _prop) {\n" + " Datum* _ppvar = _nrn_mechanism_access_dparam(_prop);\n"); + Lappendstr(list, "}\n"); + movelist(list->next, list->prev, procfunc); + // need a forward declaration before nrn_alloc. + insertstr(before_nrn_alloc, "\nstatic void _mech_inst_destruct(Prop* _prop);\n"); + } + if (constructorfunc->next != constructorfunc) { Lappendstr(defs_list, "if (!nrn_point_prop_) {_constructor(_prop);}\n"); if (vectorize) { Lappendstr(procfunc, - "\n\ -static void _constructor(Prop* _prop) {\n\ - double* _p; Datum* _ppvar; Datum* _thread;\n\ - _thread = (Datum*)0;\n\ - _p = _prop->param; _ppvar = _prop->dparam;\n\ -{\n\ -"); + "\n" + "static void _constructor(Prop* _prop) {\n" + " _nrn_mechanism_cache_instance _ml_real{_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n" + " Datum *_ppvar{_nrn_mechanism_access_dparam(_prop)}, *_thread{};\n" + " {\n"); } else { Lappendstr(procfunc, - "\n\ -static void _constructor(Prop* _prop) {\n\ - _p = _prop->param; _ppvar = _prop->dparam;\n\ -{\n\ -"); + "\n" + "static void _constructor(Prop* _prop) {\n" + " neuron::legacy::set_globals_from_prop(_prop, _ml_real, _ml, _iml);\n" + " _ppvar = _nrn_mechanism_access_dparam(_prop);\n" + " {\n"); } movelist(constructorfunc->next, constructorfunc->prev, procfunc); Lappendstr(procfunc, "\n}\n}\n"); @@ -1046,9 +1151,6 @@ static void _constructor(Prop* _prop) {\n\ if (vectorize && thread_cleanup_list->next != thread_cleanup_list) { Lappendstr(defs_list, "static void _thread_cleanup(Datum*);\n"); } - if (uip) { - lappendstr(defs_list, "static void _update_ion_pointer(Datum*);\n"); - } if (use_bbcorepointer) { lappendstr(defs_list, "static void bbcore_write(double*, int*, int*, int*, _threadargsproto_);\n"); @@ -1065,7 +1167,7 @@ static void _constructor(Prop* _prop) {\n\ "\ extern Symbol* hoc_lookup(const char*);\n\ extern void _nrn_thread_reg(int, int, void(*)(Datum*));\n\ -extern void _nrn_thread_table_reg(int, void(*)(double*, Datum*, Datum*, NrnThread*, int));\n\ +void _nrn_thread_table_reg(int, nrn_thread_table_check_t);\n\ extern void hoc_register_tolerance(int, HocStateTolerance*, Symbol***);\n\ extern void _cvode_abstol( Symbol**, double*, int);\n\n\ "); @@ -1115,18 +1217,20 @@ extern void _cvode_abstol( Symbol**, double*, int);\n\n\ Lappendstr(defs_list, buf); } if (vectorize && 
thread_data_index) { - Sprintf(buf, - " _extcall_thread = (Datum*)ecalloc(%d, sizeof(Datum));\n", - thread_data_index); + Sprintf(buf, " _extcall_thread.resize(%d);\n", thread_data_index); Lappendstr(defs_list, buf); if (thread_mem_init_list->next != thread_mem_init_list) { - Lappendstr(defs_list, " _thread_mem_init(_extcall_thread);\n"); + Lappendstr(defs_list, " _thread_mem_init(_extcall_thread.data());\n"); if (gind) { Lappendstr(defs_list, " _thread1data_inuse = 0;\n"); } } } Lappendstr(defs_list, "_mechtype = nrn_get_mechtype(_mechanism[1]);\n"); + if (!point_process) { + Lappendstr(defs_list, + " hoc_register_npy_direct(_mechtype, npy_direct_func_proc);\n"); + } lappendstr(defs_list, " _nrn_setdata_reg(_mechtype, _setdata);\n"); if (vectorize && thread_mem_init_list->next != thread_mem_init_list) { lappendstr(defs_list, " _nrn_thread_reg(_mechtype, 1, _thread_mem_init);\n"); @@ -1134,9 +1238,6 @@ extern void _cvode_abstol( Symbol**, double*, int);\n\n\ if (vectorize && thread_cleanup_list->next != thread_cleanup_list) { lappendstr(defs_list, " _nrn_thread_reg(_mechtype, 0, _thread_cleanup);\n"); } - if (uip) { - lappendstr(defs_list, " _nrn_thread_reg(_mechtype, 2, _update_ion_pointer);\n"); - } if (emit_check_table_thread) { lappendstr(defs_list, " _nrn_thread_table_reg(_mechtype, _check_table_thread);\n"); } @@ -1148,6 +1249,22 @@ extern void _cvode_abstol( Symbol**, double*, int);\n\n\ lappendstr(defs_list, "#if NMODL_TEXT\n register_nmodl_text_and_filename(_mechtype);\n#endif\n"); } + std::sort(ppvar_data_field_strings.begin(), ppvar_data_field_strings.end()); + std::transform(ppvar_data_field_strings.begin(), + ppvar_data_field_strings.end(), + std::back_inserter(data_field_strings), + [](auto const& pair) { return pair.second; }); + std::string register_data_fields{" _nrn_mechanism_register_data_fields("}; + auto const prefix_length = register_data_fields.size() + + 1 /* defs_list handling adds this */; + register_data_fields.append("_mechtype"); + for (auto const& data_field_str: data_field_strings) { + register_data_fields.append(",\n"); + register_data_fields.append(prefix_length, ' '); + register_data_fields.append(data_field_str); + } + register_data_fields.append(");\n"); + lappendstr(defs_list, register_data_fields.c_str()); Sprintf(buf, " hoc_register_prop_size(_mechtype, %d, %d);\n", parraycount, ppvar_cnt); Lappendstr(defs_list, buf); if (watch_seen_) { @@ -1227,8 +1344,8 @@ located in a section and is not associated with an integrator\n"); insertstr(ITM(q), " \ #if EXTRACELLULAR\n\ -if (_nd->_extnode) {\n\ - v = NODEV(_nd) +_nd->_extnode->_v[0];\n\ +if (auto* const _extnode = _nrn_mechanism_access_extnode(_nd); _extnode) {\n\ + v = NODEV(_nd) + _extnode->_v[0];\n\ }else\n\ #endif\n\ {\n\ @@ -1240,13 +1357,13 @@ if (_nd->_extnode) {\n\ lst = get_ion_variables(0); if (lst->next != lst->prev) { move(lst->next, lst->prev, ITM(q)); - freelist((List**) lst); + freelist(&lst); } q = q->next; lst = set_ion_variables(0); if (lst->next != lst->prev) { move(lst->next, lst->prev, ITM(q)); - freelist((List**) lst); + freelist(&lst); } q = q->next; Sprintf(buf, "\thoc_reg_ba(_mechtype, _ba%d, %s);\n", i, STR(q)); @@ -1254,12 +1371,16 @@ if (_nd->_extnode) {\n\ } if (ldifuslist) { Lappendstr(defs_list, "\thoc_register_ldifus1(_difusfunc);\n"); - Linsertstr(defs_list, "static void _difusfunc(ldifusfunc2_t, NrnThread*);\n"); + // don't use _nrn_model_sorted_token here because this is being inserted at the start of + // defs_list, before _nrn_model_sorted_token is defined + 
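    // A minimal sketch of the consequence, assuming the forward declaration really does land
    // ahead of the alias block (both quoted lines appear elsewhere in this change; their
    // relative placement in the generated .cpp is illustrative only):
    //
    //   static void _difusfunc(ldifusfunc2_t, neuron::model_sorted_token const&, NrnThread&);
    //   ...
    //   using _nrn_model_sorted_token = neuron::model_sorted_token;
    //
    // i.e. the declaration pushed to the front of defs_list must spell out the underlying type
    // because the convenience alias is only introduced by a later block.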
Linsertstr(defs_list, + "static void _difusfunc(ldifusfunc2_t, neuron::model_sorted_token const&, " + "NrnThread&);\n"); } } /* end of not "nothing" */ Lappendstr(defs_list, - "\ - hoc_register_var(hoc_scdoub, hoc_vdoub, hoc_intfunc);\n"); + "\n" + " hoc_register_var(hoc_scdoub, hoc_vdoub, hoc_intfunc);\n"); { char buf1[NRN_BUFSIZE]; char* pf{}; @@ -1287,26 +1408,23 @@ if (_nd->_extnode) {\n\ move(thread_cleanup_list->next, thread_cleanup_list->prev, procfunc); Lappendstr(procfunc, "}\n"); } - if (uip) { - move(uip->next, uip->prev, procfunc); - } if (destructorfunc->next != destructorfunc) { if (vectorize) { Lappendstr(procfunc, - "\n\ -static void _destructor(Prop* _prop) {\n\ - double* _p; Datum* _ppvar; Datum* _thread;\n\ - _thread = (Datum*)0;\n\ - _p = _prop->param; _ppvar = _prop->dparam;\n\ -{\n\ -"); + "\n" + "static void _destructor(Prop* _prop) {\n" + " _nrn_mechanism_cache_instance _ml_real{_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n" + " Datum *_ppvar{_nrn_mechanism_access_dparam(_prop)}, *_thread{};\n" + " {\n"); } else { Lappendstr(procfunc, - "\n\ -static void _destructor(Prop* _prop) {\n\ - _p = _prop->param; _ppvar = _prop->dparam;\n\ -{\n\ -"); + "\n" + "static void _destructor(Prop* _prop) {\n" + " neuron::legacy::set_globals_from_prop(_prop, _ml_real, _ml, _iml);\n" + " _ppvar = _nrn_mechanism_access_dparam(_prop);\n" + " {\n"); } movelist(destructorfunc->next, destructorfunc->prev, procfunc); Lappendstr(procfunc, "\n}\n}\n"); @@ -1342,11 +1460,44 @@ void check_ion_vars_as_constant(char* ion_name, const List* ion_var_list) { } } +static void check_sufficient_ion_read_statements(std::string const& ion_name, + List* read_variables, + List* write_variables) { + auto const have_type = [ion_name, read_variables, write_variables](int type) { + for (auto* const ion_var_list: {read_variables, write_variables}) { + Item* var; + ITERATE(var, ion_var_list) { + const Symbol* var_sym = SYM(var); + if (iontype(var_sym->name, const_cast(ion_name.c_str())) == type) { + return true; + } + } + } + return false; + }; + auto const add_readion = [read_variables](std::string name) { + auto* const sym = install(name.c_str(), NAME); + sym->nrntype |= IONCONC; + sym->nrntype |= IONCONC_IMPLICIT; + lappendsym(read_variables, sym); + }; + bool const have_ionin{have_type(IONIN)}, have_ionout{have_type(IONOUT)}; + if (have_ionin && !have_ionout) { + add_readion(ion_name + "o"); + } else if (have_ionout && !have_ionin) { + add_readion(ion_name + "i"); + } +} // check semantics of read & write variables from USEION statements void check_useion_variables() { const Item* ion_var; ITERATE(ion_var, useion) { + // with SoA data then if we emit any calls for nrn_wrote_conc then we need explicit READ + // statements for all arguments + check_sufficient_ion_read_statements(SYM(ion_var)->name, + LST(ion_var->next), + LST(ion_var->next->next)); // read variables check_ion_vars_as_constant(SYM(ion_var)->name, LST(ion_var->next)); // write variables @@ -1399,12 +1550,18 @@ void ldifusreg() { q = q->next; dfdcur = STR(q); ++n; - Sprintf(buf, - "static void* _difspace%d;\nextern double nrn_nernst_coef(int);\n\ -static double _difcoef%d(int _i, double* _p, Datum* _ppvar, double* _pdvol, double* _pdfcdc, Datum* _thread, NrnThread* _nt) {\n \ - *_pdvol = ", - n, - n); + Sprintf( + buf, + "static void* _difspace%d;\n" + "extern double nrn_nernst_coef(int);\n" + "static double _difcoef%d(int _i, Memb_list* _ml_arg, size_t _iml, Datum* _ppvar, " + "double* _pdvol, double* _pdfcdc, Datum* 
_thread, NrnThread* _nt, " + "_nrn_model_sorted_token const& _sorted_token) {\n" + " _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _ml_arg->_type()};\n" + " auto* const _ml = &_lmr;\n" + " *_pdvol = ", + n, + n); lappendstr(procfunc, buf); for (q1 = qvexp; q1 != qb2; q1 = q1->next) { lappenditem(procfunc, q1); @@ -1423,9 +1580,11 @@ static double _difcoef%d(int _i, double* _p, Datum* _ppvar, double* _pdvol, doub for (q1 = qdexp; q1 != qb1; q1 = q1->next) { lappenditem(procfunc, q1); } - lappendstr(procfunc, ";\n}\n"); + lappendstr(procfunc, ";\nreturn 0;\n}\n"); } - lappendstr(procfunc, "static void _difusfunc(ldifusfunc2_t _f, NrnThread* _nt) {int _i;\n"); + lappendstr(procfunc, + "static void _difusfunc(ldifusfunc2_t _f, _nrn_model_sorted_token const& " + "sorted_token, NrnThread& _nt) {int _i;\n"); n = 0; ITERATE(q, ldifuslist) { s = SYM(q); @@ -1444,26 +1603,13 @@ static double _difcoef%d(int _i, double* _p, Datum* _ppvar, double* _pdvol, doub ++n; if (s->subtype & ARRAY) { -#if MAC - Sprintf(buf, - " for (_i=0; _i < %d; ++_i) mac_difusfunc(_f, _mechtype, _difcoef%d, " - "&_difspace%d, _i, ", - s->araydim, - n, - n); -#else Sprintf(buf, " for (_i=0; _i < %d; ++_i) (*_f)(_mechtype, _difcoef%d, &_difspace%d, _i, ", s->araydim, n, n); -#endif } else { -#if MAC - Sprintf(buf, " mac_difusfunc(_f,_mechtype, _difcoef%d, &_difspace%d, 0, ", n, n); -#else Sprintf(buf, " (*_f)(_mechtype, _difcoef%d, &_difspace%d, 0, ", n, n); -#endif } lappendstr(procfunc, buf); @@ -1476,14 +1622,13 @@ static double _difcoef%d(int _i, double* _p, Datum* _ppvar, double* _pdvol, doub Sprintf(buf, "%d, %d", s->varnum, d->varnum); } lappendstr(procfunc, buf); - lappendstr(procfunc, ", _nt);\n"); + lappendstr(procfunc, ", sorted_token, _nt);\n"); } lappendstr(procfunc, "}\n"); } int decode_limits(Symbol* sym, double* pg1, double* pg2) { int i; - double d1; if (sym->subtype & PARM) { char* cp; int n; @@ -1507,7 +1652,6 @@ int decode_limits(Symbol* sym, double* pg1, double* pg2) { int decode_tolerance(Symbol* sym, double* pg1) { int i; - double d1; if (sym->subtype & STAT) { char* cp; int n; @@ -1632,11 +1776,20 @@ static void var_count(Symbol* s) { defs_h(s); s->used = varcount++; s->varnum = parraycount; + std::string field{"_nrn_mechanism_field{\""}; + field.append(s->name); + field.append(1, '"'); if (s->subtype & ARRAY) { - parraycount += s->araydim; - } else { - parraycount++; - } + field.append(", "); + field.append(std::to_string(s->araydim)); + } + // **ATTENTION** in AoS NEURON then parraycount was incremented by s->araydim if the variable + // was an array. In SoA NEURON this is not done; the array dimension is communicated separately. 
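    // A minimal sketch of what this means for the generated code, assuming a hypothetical
    // mechanism with a scalar RANGE variable gmax and an array STATE m[3] (names invented for
    // illustration; the emitted strings follow defs_h()/var_count() and the registration code
    // in this change, exact spacing may differ):
    //
    //   #define gmax _ml->template fpfield<0>(_iml)
    //   #define m _ml->template data_array<1, 3>(_iml)
    //   ...
    //   _nrn_mechanism_register_data_fields(_mechtype,
    //                                       _nrn_mechanism_field<double>{"gmax"} /* 0 */,
    //                                       _nrn_mechanism_field<double>{"m", 3} /* 1 */);
    //
    // so parraycount / number_of_floating_point_variables counts variables (2 here), not
    // doubles (4 here): the array dimension travels in the field descriptor rather than in the
    // column offsets.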
+ ++parraycount; + field.append("} /* "); + field.append(std::to_string(s->varnum)); + field.append(" */"); + data_field_strings.push_back(std::move(field)); } void defs_h(Symbol* s) { @@ -1644,15 +1797,18 @@ void defs_h(Symbol* s) { if (s->subtype & ARRAY) { Sprintf(buf, - "#define %s (_p + %d)\n#define %s_columnindex %d\n", + "#define %s _ml->template data_array<%d, %d>(_iml)\n" + "#define %s_columnindex %d\n", s->name, parraycount, + s->araydim, s->name, parraycount); q = lappendstr(defs_list, buf); } else { Sprintf(buf, - "#define %s _p[%d]\n#define %s_columnindex %d\n", + "#define %s _ml->template fpfield<%d>(_iml)\n" + "#define %s_columnindex %d\n", s->name, parraycount, s->name, @@ -1719,6 +1875,17 @@ void nrn_list(Item* q1, Item* q2) { } use_bbcorepointer = 1; break; + case RANDOM: + for (q = q1->next; q != q2->next; q = q->next) { + Symbol* s = SYM(q); + if (s->type != NAME || s->subtype || s->nrntype) { + diag(s->name, " cannot be redeclared as RANDOM"); + } + s->nrntype |= NRNNOTP | EXTDEF_RANDOM; + s->type = RANDOMVAR; + } + plist = &nmodlrandoms; + break; } if (plist) { if (!*plist) { @@ -1736,14 +1903,18 @@ void bablk(int ba, int type, Item* q1, Item* q2) { if (!ba_list_) { ba_list_ = newlist(); } - Sprintf( - buf, - "static void _ba%d(Node*_nd, double* _pp, Datum* _ppd, Datum* _thread, NrnThread* _nt) ", - ++ba_index_); + Sprintf(buf, + "static void _ba%d(Node*_nd, Datum* _ppd, Datum* _thread, NrnThread* _nt, Memb_list* " + "_ml_arg, size_t _iml, _nrn_model_sorted_token const& _sorted_token) ", + ++ba_index_); insertstr(q1, buf); q = q1->next; - vectorize_substitute(insertstr(q, ""), "double* _p; Datum* _ppvar;"); - qv = insertstr(q, "_p = _pp; _ppvar = _ppd;\n"); + vectorize_substitute(insertstr(q, ""), "Datum* _ppvar;"); + qv = insertstr(q, + "_nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, " + "_ml_arg->_type()}; auto* const " + "_ml = &_lmr;\n"); + qv = insertstr(q, "_ppvar = _ppd;\n"); movelist(qb, q2, procfunc); ba = (ba == BEFORE) ? 10 : 20; /* BEFORE or AFTER */ @@ -1988,6 +2159,29 @@ void declare_p() { s = ifnew_install("_tsav"); var_count(s); } + linsertstr(defs_list, + "namespace {\n" + "template \n" + "using _nrn_mechanism_std_vector = std::vector;\n" + "using _nrn_model_sorted_token = neuron::model_sorted_token;\n" + "using _nrn_mechanism_cache_range = " + "neuron::cache::MechanismRange;\n" + "using _nrn_mechanism_cache_instance = " + "neuron::cache::MechanismInstance;\n" + "using _nrn_non_owning_id_without_container = " + "neuron::container::non_owning_identifier_without_container;\n" + "template \n" + "using _nrn_mechanism_field = neuron::mechanism::field;\n" + "template \n" + "void _nrn_mechanism_register_data_fields(Args&&... args) {\n" + " neuron::mechanism::register_data_fields(std::forward(args)...);\n" + "}\n" + "}\n") + ->itemtype = VERBATIM; + Sprintf(buf, "static constexpr auto number_of_floating_point_variables = %d;\n", parraycount); + linsertstr(defs_list, buf)->itemtype = VERBATIM; } List* set_ion_variables(int block) @@ -2042,20 +2236,18 @@ List* set_ion_variables(int block) another variable pointing to the ionstyle */ if (block == 2 && qconc) { - int ic = iontype(SYM(qconc)->name, in); - if (ic == IONIN) { - ic = 1; - } else if (ic == IONOUT) { - ic = 2; - } else { - assert(0); - } - /* first arg is just for the charge, second is pointer to erev, third ard is the style*/ + int const ic = iontype(SYM(qconc)->name, in); + assert(ic == IONIN || ic == IONOUT); + // first arg is just for the charge, last arg is the style. 
the old + // code with a single double* as a 2nd parameter was problematic as + // it implicitly assumed AoS format; now we require that explicit + // names are defined for erev and the internal/external concentrations Sprintf(buf, - " nrn_wrote_conc(_%s_sym, (&(_ion_%s)) - %d, _style_%s);\n", + " nrn_wrote_conc(_%s_sym, _ion_%s_erev, _ion_%si, _ion_%so, _style_%s);\n", + in, + in, + in, in, - SYM(qconc)->name, - ic, in); Lappendstr(l, buf); } @@ -2075,6 +2267,9 @@ List* get_ion_variables(int block) ITERATE(q, useion) { q = q->next; ITERATE(q1, LST(q)) { + if (SYM(q1)->nrntype & IONCONC_IMPLICIT) { + continue; + } if (block == 2 && (SYM(q1)->nrntype & IONCONC) && (SYM(q1)->subtype & STAT)) { continue; } @@ -2088,6 +2283,9 @@ List* get_ion_variables(int block) } q = q->next; ITERATE(q1, LST(q)) { + if (SYM(q1)->nrntype & IONCONC_IMPLICIT) { + continue; + } if (block == 2 && (SYM(q1)->nrntype & IONCONC) && (SYM(q1)->subtype & STAT)) { continue; } @@ -2117,38 +2315,36 @@ int iondef(int* p_pointercount) { ioncount = 0; if (point_process) { ioncount = 2; - q = lappendstr(defs_list, "#define _nd_area *_ppvar[0].get()\n"); + q = lappendstr(defs_list, "#define _nd_area *_ml->dptr_field<0>(_iml)\n"); q->itemtype = VERBATIM; - ppvar_semantics(0, "area"); - ppvar_semantics(1, "pntproc"); + ppvar_semantics(0, "area", "_nd_area", "double*"); + ppvar_semantics(1, "pntproc", "_pntproc" /* I made this up*/, "Point_process*"); } ITERATE(q, useion) { int dcurdef = 0; - if (!uip) { - uip = newlist(); - lappendstr(uip, "extern void nrn_update_ion_pointer(Symbol*, Datum*, int, int);\n"); - lappendstr(uip, "static void _update_ion_pointer(Datum* _ppvar) {\n"); - } need_style = 0; sion = SYM(q); Sprintf(ionname, "%s_ion", sion->name); q = q->next; ITERATE(q1, LST(q)) { SYM(q1)->nrntype |= NRNIONFLAG; - Sprintf(buf, - "#define _ion_%s *(_ppvar[%d].get())\n", - SYM(q1)->name, - ioncount); + std::string name{"_ion_"}; + name.append(SYM(q1)->name); + Sprintf( + buf, + "#define %s *(_ml->dptr_field<%d>(_iml))\n" + "#define _p%s static_cast>(_ppvar[%d])\n", + name.c_str(), + ioncount, + name.c_str(), + ioncount); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; - Sprintf(buf, - " nrn_update_ion_pointer(_%s_sym, _ppvar, %d, %d);\n", - sion->name, - ioncount, - iontype(SYM(q1)->name, sion->name)); - lappendstr(uip, buf); SYM(q1)->ioncount_ = ioncount; - ppvar_semantics(ioncount, ionname); + ppvar_semantics(ioncount, + ionname, + ("_ion_" + std::string{SYM(q1)->name}).c_str(), + "double*"); ioncount++; } q = q->next; @@ -2156,37 +2352,32 @@ int iondef(int* p_pointercount) { if (SYM(q1)->nrntype & NRNIONFLAG) { SYM(q1)->nrntype &= ~NRNIONFLAG; } else { + std::string name{"_ion_"}; + name.append(SYM(q1)->name); Sprintf(buf, - "#define _ion_%s *_ppvar[%d].get()\n", - SYM(q1)->name, + "#define %s *(_ml->dptr_field<%d>(_iml))\n" + "#define _p%s " + "static_cast>(_ppvar[%d])\n", + name.c_str(), + ioncount, + name.c_str(), ioncount); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; - Sprintf(buf, - " nrn_update_ion_pointer(_%s_sym, _ppvar, %d, %d);\n", - sion->name, - ioncount, - iontype(SYM(q1)->name, sion->name)); - lappendstr(uip, buf); SYM(q1)->ioncount_ = ioncount; - ppvar_semantics(ioncount, ionname); + ppvar_semantics(ioncount, ionname, name.c_str(), "double*"); ioncount++; } it = iontype(SYM(q1)->name, sion->name); if (it == IONCUR) { dcurdef = 1; - Sprintf(buf, - "#define _ion_di%sdv\t*_ppvar[%d].get()\n", - sion->name, - ioncount); + std::string name{"_ion_di"}; + name.append(sion->name); + 
name.append("dv"); + Sprintf(buf, "#define %s *(_ml->dptr_field<%d>(_iml))\n", name.c_str(), ioncount); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; - Sprintf(buf, - " nrn_update_ion_pointer(_%s_sym, _ppvar, %d, 4);\n", - sion->name, - ioncount); - lappendstr(uip, buf); - ppvar_semantics(ioncount, ionname); + ppvar_semantics(ioncount, ionname, name.c_str(), "double*"); ioncount++; } if (it == IONIN || it == IONOUT) { /* would have wrote_ion_conc */ @@ -2194,24 +2385,36 @@ int iondef(int* p_pointercount) { } } if (need_style) { - Sprintf(buf, "#define _style_%s\t*_ppvar[%d].get()\n", sion->name, ioncount); + // Need to be able to explicitly reference this when calling + // nrn_wrote_conc, the old code navigated to this value via pointer + // arithmetic that is not valid now the mechanism data are stored in + // SOA format + std::string name{"_ion_"}; + name.append(sion->name); + name.append("_erev"); + Sprintf(buf, "#define %s *_ml->dptr_field<%d>(_iml)\n", name.c_str(), ioncount); + q2 = lappendstr(defs_list, buf); + q2->itemtype = VERBATIM; + ppvar_semantics(ioncount, ionname, name.c_str(), "double*"); + ioncount++; + std::string stylename{"_style_"}; + stylename.append(sion->name); + Sprintf(buf, "#define %s\t*_ppvar[%d].get()\n", stylename.c_str(), ioncount); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; Sprintf(buf, "#%s", ionname); - ppvar_semantics(ioncount, buf); + ppvar_semantics(ioncount, buf, stylename.c_str(), "int*"); ioncount++; } q = q->next; if (!dcurdef && ldifuslist) { - Sprintf(buf, "#define _ion_di%sdv\t*_ppvar[%d].get()\n", sion->name, ioncount); + std::string name{"_ion_di"}; + name.append(sion->name); + name.append("dv"); + Sprintf(buf, "#define %s *_ml->dptr_field<%d>(_iml)\n", name.c_str(), ioncount); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; - Sprintf(buf, - " nrn_update_ion_pointer(_%s_sym, _ppvar, %d, 4);\n", - sion->name, - ioncount); - lappendstr(uip, buf); - ppvar_semantics(ioncount, ionname); + ppvar_semantics(ioncount, ionname, name.c_str(), "double*"); ioncount++; } } @@ -2233,15 +2436,45 @@ int iondef(int* p_pointercount) { q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; if (sion->nrntype & NRNPOINTER) { - ppvar_semantics(ioncount + *p_pointercount, "pointer"); + ppvar_semantics(ioncount + *p_pointercount, "pointer", sion->name, "double*"); } else { - ppvar_semantics(ioncount + *p_pointercount, "bbcorepointer"); + ppvar_semantics(ioncount + *p_pointercount, "bbcorepointer", sion->name, "double*"); } (*p_pointercount)++; } + // print all RANDOM variables + num_random_vars = 0; + ITERATE(q, nmodlrandoms) { + num_random_vars++; + } + if (num_random_vars) { + Sprintf(buf, "\n //RANDOM variables \n"); + lappendstr(defs_list, buf); + + int index = 0; + ITERATE(q, nmodlrandoms) { + Symbol* s = SYM(q); + Sprintf(buf, + "#define %s (nrnran123_State*)_ppvar[%d].get()\n", + s->name, + ioncount + *p_pointercount + index); + lappendstr(defs_list, buf); + Sprintf(buf, + "#define _p_%s _ppvar[%d].literal_value()\n", + s->name, + ioncount + *p_pointercount + index); + lappendstr(defs_list, buf); + ppvar_semantics(ioncount + *p_pointercount + index, "random", s->name, "void*"); + index++; + } + lappendstr(defs_list, "\n"); + } + if (diamdec) { /* must be last */ - Sprintf(buf, "#define diam *_ppvar[%d].get()\n", ioncount + *p_pointercount); + Sprintf(buf, + "#define diam *_ppvar[%d].get()\n", + ioncount + *p_pointercount + num_random_vars); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; } /* notice that ioncount 
is not incremented */ @@ -2249,23 +2482,35 @@ int iondef(int* p_pointercount) { procedures must be redone */ Sprintf(buf, "#define area *_ppvar[%d].get()\n", - ioncount + *p_pointercount + diamdec); + ioncount + *p_pointercount + num_random_vars + diamdec); q2 = lappendstr(defs_list, buf); q2->itemtype = VERBATIM; } /* notice that ioncount is not incremented */ - if (uip) { - lappendstr(uip, "}\n"); - } + Sprintf(buf, + "static constexpr auto number_of_datum_variables = %d;\n", + ioncount + *p_pointercount + num_random_vars + diamdec + areadec); + linsertstr(defs_list, buf)->itemtype = VERBATIM; return ioncount; } -void ppvar_semantics(int i, const char* name) { +void ppvar_semantics(int i, const char* semantics, const char* name, const char* type) { Item* q; if (!ppvar_semantics_) { ppvar_semantics_ = newlist(); } - q = Lappendstr(ppvar_semantics_, const_cast(name)); // TODO - ugly but ok for now + q = Lappendstr(ppvar_semantics_, const_cast(semantics)); // TODO - ugly but ok for now q->itemtype = (short) i; + std::string field{"_nrn_mechanism_field<"}; + field.append(type); + field.append(">{\""); + field.append(name); + field.append("\", \""); + field.append(semantics); + field.append("\"} /* "); + field.append(std::to_string(i)); + field.append(" */"); + // track the index because ppvar_semantics(...) is not necessarily called in order + ppvar_data_field_strings.emplace_back(i, std::move(field)); } List* begin_dion_stmt() { @@ -2451,7 +2696,7 @@ static void cvode_conc_map() { if (SYM(q1)->nrntype & IONCONC) { if ((SYM(q1)->subtype & STAT)) { sindex = slist_search(cvode_num_, SYM(q1)); - Sprintf(buf, "\t_pv[%d] = &(_ion_%s);\n", sindex, SYM(q1)->name); + Sprintf(buf, "\t_pv[%d] = _p_ion_%s;\n", sindex, SYM(q1)->name); lappendstr(procfunc, buf); } else { /* not a STATE but WRITE it*/ /*its got to have an assignment in a SOLVE block and that assignment @@ -2502,17 +2747,21 @@ printf("|%s||%s||%s|\n",STR(q3), s, buf); } void out_nt_ml_frag(List* p) { - vectorize_substitute(lappendstr(p, " Datum* _thread;\n"), - " double* _p; Datum* _ppvar; Datum* _thread;\n"); + vectorize_substitute(lappendstr(p, ""), " Datum* _ppvar;\n"); + vectorize_substitute(lappendstr(p, ""), " size_t _iml;"); + vectorize_substitute(lappendstr(p, ""), " _nrn_mechanism_cache_range* _ml;"); Lappendstr(p, - " Node* _nd; double _v; int _iml, _cntml;\n\ - _cntml = _ml->_nodecount;\n\ - _thread = _ml->_thread;\n\ - for (_iml = 0; _iml < _cntml; ++_iml) {\n\ - _p = _ml->_data[_iml]; _ppvar = _ml->_pdata[_iml];\n\ - _nd = _ml->_nodelist[_iml];\n\ - v = NODEV(_nd);\n\ -"); + " Node* _nd{};\n" + " double _v{};\n" + " int _cntml;\n" + " _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml_arg, _type};\n" + " _ml = &_lmr;\n" + " _cntml = _ml_arg->_nodecount;\n" + " Datum *_thread{_ml_arg->_thread};\n" + " for (_iml = 0; _iml < _cntml; ++_iml) {\n" + " _ppvar = _ml_arg->_pdata[_iml];\n" + " _nd = _ml_arg->_nodelist[_iml];\n" + " v = NODEV(_nd);\n"); } void cvode_emit_interface() { @@ -2531,9 +2780,9 @@ static int _ode_count(int _type){ hoc_execerror(\"%s\", \"cannot be used with CV Lappendstr(defs_list, "\n\ static int _ode_count(int);\n\ -static void _ode_map(int, double**, double**, double*, Datum*, double*, int);\n\ -static void _ode_spec(NrnThread*, Memb_list*, int);\n\ -static void _ode_matsol(NrnThread*, Memb_list*, int);\n\ +static void _ode_map(Prop*, int, neuron::container::data_handle*, neuron::container::data_handle*, double*, int);\n\ +static void _ode_spec(_nrn_model_sorted_token const&, NrnThread*, Memb_list*, 
int);\n\ +static void _ode_matsol(_nrn_model_sorted_token const&, NrnThread*, Memb_list*, int);\n\ "); Sprintf(buf, "\n\ @@ -2547,14 +2796,15 @@ static int _ode_count(int _type){ return %d;}\n", cvode_proced_emit(); } else { Lappendstr(procfunc, - "\nstatic void _ode_spec(NrnThread* _nt, Memb_list* _ml, int _type) {\n"); + "\nstatic void _ode_spec(_nrn_model_sorted_token const& _sorted_token, " + "NrnThread* _nt, Memb_list* _ml_arg, int _type) {\n"); out_nt_ml_frag(procfunc); lst = get_ion_variables(1); if (lst->next->itemtype) movelist(lst->next, lst->prev, procfunc); Sprintf(buf, " _ode_spec%d", cvode_num_); Lappendstr(procfunc, buf); - vectorize_substitute(lappendstr(procfunc, "();\n"), "(_p, _ppvar, _thread, _nt);\n"); + vectorize_substitute(lappendstr(procfunc, "();\n"), "(_threadargs_);\n"); lst = set_ion_variables(1); if (lst->next->itemtype) movelist(lst->next, lst->prev, procfunc); @@ -2562,18 +2812,16 @@ static int _ode_count(int _type){ return %d;}\n", Lappendstr(procfunc, "\n\ -static void _ode_map(int _ieq, double** _pv, double** _pvdot, double* _pp, Datum* _ppd, double* _atol, int _type) {"); - vectorize_substitute(lappendstr(procfunc, "\n"), - "\n\ - double* _p; Datum* _ppvar;\n"); +static void _ode_map(Prop* _prop, int _ieq, neuron::container::data_handle* _pv, neuron::container::data_handle* _pvdot, double* _atol, int _type) {"); + vectorize_substitute(lappendstr(procfunc, "\n"), "\n Datum* _ppvar;\n"); Sprintf(buf, - "\ - int _i; _p = _pp; _ppvar = _ppd;\n\ - _cvode_ieq = _ieq;\n\ - for (_i=0; _i < %d; ++_i) {\n\ - _pv[_i] = _pp + _slist%d[_i]; _pvdot[_i] = _pp + _dlist%d[_i];\n\ - _cvode_abstol(_atollist, _atol, _i);\n\ - }\n", + " _ppvar = _nrn_mechanism_access_dparam(_prop);\n" + " _cvode_ieq = _ieq;\n" + " for (int _i=0; _i < %d; ++_i) {\n" + " _pv[_i] = _nrn_mechanism_get_param_handle(_prop, _slist%d[_i]);\n" + " _pvdot[_i] = _nrn_mechanism_get_param_handle(_prop, _dlist%d[_i]);\n" + " _cvode_abstol(_atollist, _atol, _i);\n" + " }\n", cvode_neq_, cvode_num_, cvode_num_); @@ -2583,40 +2831,46 @@ static void _ode_map(int _ieq, double** _pv, double** _pvdot, double* _pp, Datum cvode_conc_map(); Lappendstr(procfunc, "}\n"); if (ion_synonym) { - Lappendstr(defs_list, "static void _ode_synonym(int, double**, Datum**);\n"); + Lappendstr(defs_list, + "static void _ode_synonym(_nrn_model_sorted_token const&, " + "NrnThread&, Memb_list&, int);\n"); Lappendstr(procfunc, - "\ -static void _ode_synonym(int _cnt, double** _pp, Datum** _ppd) {"); - vectorize_substitute(lappendstr(procfunc, "\n"), - "\n\ - double* _p; Datum* _ppvar;\n"); + "static void _ode_synonym(_nrn_model_sorted_token const& " + "_sorted_token, NrnThread& _nt, Memb_list& _ml_arg, int _type) {\n"); Lappendstr(procfunc, - "\ - int _i; \n\ - for (_i=0; _i < _cnt; ++_i) {_p = _pp[_i]; _ppvar = _ppd[_i];\n"); + "_nrn_mechanism_cache_range _lmr{_sorted_token, _nt, _ml_arg, _type};\n" + "auto* const _ml = &_lmr;\n" + "auto const _cnt = _ml_arg._nodecount;\n" + "for (int _iml = 0; _iml < _cnt; ++_iml) {\n" + " Datum* _ppvar = _ml_arg._pdata[_iml];\n"); movelist(ion_synonym->next, ion_synonym->prev, procfunc); - Lappendstr(procfunc, "}}\n"); + Lappendstr(procfunc, " }\n}\n"); } - Sprintf(buf, "static void _ode_matsol_instance%d(_threadargsproto_);\n", cvode_num_); + Sprintf(buf, + "static void _ode_matsol_instance%d(_internalthreadargsproto_);\n", + cvode_num_); Lappendstr(defs_list, buf); - Sprintf(buf, "\nstatic void _ode_matsol_instance%d(_threadargsproto_) {\n", cvode_num_); + Sprintf(buf, + "\nstatic void 
_ode_matsol_instance%d(_internalthreadargsproto_) {\n", + cvode_num_); Lappendstr(procfunc, buf); if (cvode_fun_->subtype == KINF) { int i = cvode_num_; - Sprintf( - buf, - "_cvode_sparse(&_cvsparseobj%d, %d, _dlist%d, _p, _ode_matsol%d, &_coef%d);\n", - i, - cvode_neq_, - i, - i, - i); + Sprintf(buf, + "_cvode_sparse(&_cvsparseobj%d, %d, _dlist%d, " + "neuron::scopmath::row_view{_ml, _iml}, " + "_ode_matsol%d, &_coef%d);\n", + i, + cvode_neq_, + i, + i, + i); Lappendstr(procfunc, buf); Sprintf(buf, "_cvode_sparse_thread(&(_thread[_cvspth%d].literal_value()), %d, " - "_dlist%d, _p, " - "_ode_matsol%d, _p, _ppvar, _thread, _nt);\n", + "_dlist%d, neuron::scopmath::row_view{_ml, _iml}, _ode_matsol%d, " + "_threadargs_);\n", i, cvode_neq_, i, @@ -2625,12 +2879,12 @@ static void _ode_synonym(int _cnt, double** _pp, Datum** _ppd) {"); } else { Sprintf(buf, "_ode_matsol%d", cvode_num_); Lappendstr(procfunc, buf); - vectorize_substitute(lappendstr(procfunc, "();\n"), - "(_p, _ppvar, _thread, _nt);\n"); + vectorize_substitute(lappendstr(procfunc, "();\n"), "(_threadargs_);\n"); } Lappendstr(procfunc, "}\n"); Lappendstr(procfunc, - "\nstatic void _ode_matsol(NrnThread* _nt, Memb_list* _ml, int _type) {\n"); + "\nstatic void _ode_matsol(_nrn_model_sorted_token const& _sorted_token, " + "NrnThread* _nt, Memb_list* _ml_arg, int _type) {\n"); out_nt_ml_frag(procfunc); lst = get_ion_variables(1); if (lst->next->itemtype) @@ -2688,8 +2942,8 @@ void cvode_interface(Symbol* fun, int num, int neq) { } Sprintf(buf, "\n\ -static int _ode_spec%d(_threadargsproto_);\n\ -/*static int _ode_matsol%d(_threadargsproto_);*/\n\ +static int _ode_spec%d(_internalthreadargsproto_);\n\ +/*static int _ode_matsol%d(_internalthreadargsproto_);*/\n\ ", num, num); @@ -2710,19 +2964,15 @@ void cvode_rw_cur(char (&b)[NRN_BUFSIZE]) { since it may compute some aspect of the current */ Item *q, *q1; int type; - Symbol* sion; b[0] = '\0'; ITERATE(q, useion) { - sion = SYM(q); q = q->next; ITERATE(q1, LST(q)) { type = SYM(q1)->nrntype; if ((type & NRNCURIN) && (type & NRNCUROUT)) { if (!cvode_not_allowed && cvode_emit) { if (vectorize) { - Sprintf(b, - "if (_nt->_vcv) { _ode_spec%d(_p, _ppvar, _thread, _nt); }\n", - cvode_num_); + Sprintf(b, "if (_nt->_vcv) { _ode_spec%d(_threadargs_); }\n", cvode_num_); } else { Sprintf(b, "if (_nt->_vcv) { _ode_spec%d(); }\n", cvode_num_); } @@ -2738,7 +2988,7 @@ void cvode_rw_cur(char (&b)[NRN_BUFSIZE]) { void net_receive(Item* qarg, Item* qp1, Item* qp2, Item* qstmt, Item* qend) { Item *q, *q1; Symbol* s; - int i, b; + int i; char snew[256]; if (net_receive_) { diag("Only one NET_RECEIVE block allowed", (char*) 0); @@ -2762,12 +3012,18 @@ void net_receive(Item* qarg, Item* qp1, Item* qp2, Item* qstmt, Item* qend) { } net_send_delivered_ = qstmt; q = insertstr(qstmt, "\n{"); - vectorize_substitute(q, "\n{ double* _p; Datum* _ppvar; Datum* _thread; NrnThread* _nt;\n"); + vectorize_substitute(q, "\n{ Prop* _p; Datum* _ppvar; Datum* _thread; NrnThread* _nt;\n"); if (watch_seen_) { insertstr(qstmt, " int _watch_rm = 0;\n"); } - q = insertstr(qstmt, " _p = _pnt->_prop->param; _ppvar = _pnt->_prop->dparam;\n"); - vectorize_substitute(insertstr(q, ""), " _thread = (Datum*)0; _nt = (NrnThread*)_pnt->_vnt;"); + vectorize_substitute( + insertstr(qstmt, + " neuron::legacy::set_globals_from_prop(_pnt->_prop, _ml_real, _ml, _iml);\n"), + " _nrn_mechanism_cache_instance _ml_real{_pnt->_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n"); + q = insertstr(qstmt, " _ppvar = 
_nrn_mechanism_access_dparam(_pnt->_prop);\n"); + vectorize_substitute(insertstr(q, ""), " _thread = nullptr; _nt = (NrnThread*)_pnt->_vnt;"); if (debugging_) { if (0) { insertstr(qstmt, " assert(_tsav <= t); _tsav = t;"); @@ -2852,14 +3108,14 @@ void net_receive(Item* qarg, Item* qp1, Item* qp2, Item* qstmt, Item* qend) { void net_init(Item* qinit, Item* qp2) { /* qinit=INITIAL { stmtlist qp2=} */ replacstr(qinit, "\nstatic void _net_init(Point_process* _pnt, double* _args, double _lflag)"); - Sprintf(buf, " _p = _pnt->_prop->param; _ppvar = _pnt->_prop->dparam;\n"); + Sprintf(buf, " _ppvar = _nrn_mechanism_access_dparam(_pnt->_prop);\n"); vectorize_substitute(insertstr(qinit->next->next, buf), - "\ - double* _p = _pnt->_prop->param;\n\ - Datum* _ppvar = _pnt->_prop->dparam;\n\ - Datum* _thread = (Datum*)0;\n\ - NrnThread* _nt = (NrnThread*)_pnt->_vnt;\n\ -"); + " _nrn_mechanism_cache_instance _ml_real{_pnt->_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n" + " Datum* _ppvar = _nrn_mechanism_access_dparam(_pnt->_prop);\n" + " Datum* _thread = (Datum*)0;\n" + " NrnThread* _nt = (NrnThread*)_pnt->_vnt;\n"); if (net_init_q1_) { diag("NET_RECEIVE block can contain only one INITIAL block", (char*) 0); } @@ -2933,7 +3189,6 @@ void chk_global_state() { } void conductance_hint(int blocktype, Item* q1, Item* q2) { - Item* q; if (blocktype != BREAKPOINT) { diag("CONDUCTANCE can only appear in BREAKPOINT block", (char*) 0); } @@ -2984,3 +3239,148 @@ Symbol* breakpoint_current(Symbol* s) { } return s; } + +// Determine if setdata is required to call FUNCTION or PROCEDURE +// setdata is required if RANGE var used. For safety, also VERBATIM. +// Deal with nested calls, via maintaining a list for each func. +// Note that the nest can be recursive and called function may not +// yet be defined til entire text is processed. + +#include +#include + +struct Info { + std::unordered_set func_calls; + bool need_setdata{false}; + bool is_being_looked_at{false}; // avoid recursion loops + Item* q{nullptr}; // To be modified if need_setdata. +}; + +static std::unordered_map funcs; +static Symbol* in_func_; + +void check_range_in_func(Symbol* s) { + if (in_func_) { + // If s is a RANGE variable or nullptr (VERBATIM) + // then mark the current function as needing setdata + // If s is FUNCTION or PROCEDURE, then add to list + Info& i = funcs[in_func_]; + if (!s) { // VERBATIM + i.need_setdata = true; + } else if (s->nrntype & (NRNRANGE | NRNPOINTER)) { + i.need_setdata = true; + } else if (s->usage & FUNCT) { + i.func_calls.insert(s); + } + } +} + +void set_inside_func(Symbol* s) { + in_func_ = s; + if (s) { + assert(funcs.count(s) == 0); + funcs[s] = {}; + } +} + +// Make sure need_setdata is properly marked for all funcs. +// I.e on entry, only ones marked are those that use RANGE or VERBATIM. +// Need to recursively look through func_calls but watch out for loops. +// If there are no RANGE then VERBATIM is ok and set all need_setdata to false. + +static bool check_func(Symbol* s); // recursive + +void func_needs_setdata() { + if (suffix[0] == '\0') { + return; + } + for (auto& f: funcs) { + f.second.is_being_looked_at = false; + } + + // if there are no RANGE then set all need_setdata to false. 
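    // Worked example of what "needing setdata" means downstream, assembled from the format
    // strings in this change (mechanism suffix hh and FUNCTION alpha are hypothetical; exact
    // layout of the generated wrapper is illustrative). If alpha reads a RANGE variable, or
    // calls another FUNCTION/PROCEDURE that does, its HOC wrapper gains a validity check
    // before any data access:
    //
    //   static void _hoc_alpha(void) {
    //     double _r;
    //     Datum* _ppvar; Datum* _thread; NrnThread* _nt;
    //     if(!_prop_id) {
    //       hoc_execerror("No data for alpha_hh. Requires prior call to setdata_hh"
    //                     " and that the specified mechanism instance still be in existence.",
    //                     NULL);
    //     }
    //     Prop* _local_prop = _extcall_prop;
    //     ...
    //   }
    //
    // whereas a function that (transitively) touches no RANGE variable keeps the permissive
    // "Prop* _local_prop = _prop_id ? _extcall_prop : nullptr;" prologue instead.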
+ bool norange{true}; + Item* q; + int i; + SYMLISTITER { + Symbol* s = SYM(q); + if (s->type == NAME && s->nrntype & (NRNRANGE | NRNPOINTER)) { + norange = false; + break; + } + } + if (norange) { + for (auto& f: funcs) { + f.second.need_setdata = false; + } + } + + for (auto& f: funcs) { + check_func(f.first); + } + for (auto& f: funcs) { // update the hocfunc item if need_setdata + auto& q = f.second.q; + if (q && f.second.need_setdata) { + // error if not valid id + Symbol* s = f.first; + Sprintf(buf, + "\n" + " if(!_prop_id) {\n" + " hoc_execerror(\"" + "No data for %s_%s. Requires prior call to setdata_%s" + " and that the specified mechanism instance still be in existence.\"," + " NULL);\n", + s->name, + mechname, + mechname); + insertstr(q, buf); + if (vectorize) { + insertstr(q, + " }\n" + " Prop* _local_prop = _extcall_prop;\n"); + } else { + // ensure current instance matches _extcall_prop + insertstr(q, + " } else {\n" + " _setdata(_extcall_prop);\n" + " }\n"); + } + + } else if (q) { + if (vectorize) { + // if id not valid then _local_prop must be nullptr + // because of later _ppvar = _local_prop ? ... + insertstr(q, "\n Prop* _local_prop = _prop_id ? _extcall_prop : nullptr;\n"); + } + } + } +} + +static bool check_func(Symbol* s) { // recursive + if (funcs.count(s) == 0) { + return false; + } + Info& i = funcs[s]; + if (i.need_setdata) { + return true; + } + if (i.is_being_looked_at) { + return false; + } + i.is_being_looked_at = true; + for (auto& s1: i.func_calls) { + if (check_func(s1)) { + i.need_setdata = true; + return true; + } + } + return false; +} + +// If the function needs setdata, then q can be changed to +// perform the check on _extcall_prop +// Not called for POINT_PROCESS functions. +void hocfunc_setdata_item(Symbol* s, Item* q) { + auto& i = funcs[s]; + i.q = q; +} diff --git a/src/nmodl/parsact.cpp b/src/nmodl/parsact.cpp index 918d6bdb4f..79e8b96584 100644 --- a/src/nmodl/parsact.cpp +++ b/src/nmodl/parsact.cpp @@ -142,14 +142,13 @@ void depinstall(int type, Item* qs, int makeconst, const char* abstol) { - char buf[NRN_BUFSIZE], *pstr; + char buf[NRN_BUFSIZE]; int c; if (!type && strlen(abstol) > 0) { printf("abstol = |%s|\n", abstol); diag(n->name, "tolerance can be specified only for a STATE"); } - pstr = n->u.str; /* make it work even if recursive */ if (n->u.str == (char*) 0) Lappendsym(syminorder, n); if (type) { @@ -218,19 +217,18 @@ static int func_arg_examine(Item* qpar, Item* qend) { } void vectorize_scan_for_func(Item* q1, Item* q2) { - Item *q, *qq; - int b; + Item* q; return; for (q = q1; q != q2; q = q->next) { if (q->itemtype == SYMBOL) { Symbol* s = SYM(q); - if ((s->usage & FUNCT) && !(s->subtype & (EXTDEF))) { + if ((s->usage & FUNCT) && !(s->subtype & (EXTDEF | EXTDEF_RANDOM))) { if (q->next->itemtype == SYMBOL && strcmp(SYM(q->next)->name, "(") == 0) { int b = func_arg_examine(q->next, q2); if (b == 0) { /* no args */ - vectorize_substitute(q->next, "(_p, _ppvar, _thread, _nt"); + vectorize_substitute(q->next, "(_threadargs_"); } else if (b == 1) { /* real args */ - vectorize_substitute(q->next, "(_p, _ppvar, _thread, _nt,"); + vectorize_substitute(q->next, "(_threadargscomma_"); } /* else no _p.._nt already there */ } } @@ -240,10 +238,10 @@ void vectorize_scan_for_func(Item* q1, Item* q2) { void defarg(Item* q1, Item* q2) /* copy arg list and define as doubles */ { - Item *q3, *q; + Item* q; if (q1->next == q2) { - vectorize_substitute(insertstr(q2, ""), "_threadargsproto_"); + vectorize_substitute(insertstr(q2, ""), 
"_internalthreadargsproto_"); return; } for (q = q1->next; q != q2; q = q->next) { @@ -251,7 +249,7 @@ void defarg(Item* q1, Item* q2) /* copy arg list and define as doubles */ insertstr(q, "double"); } } - vectorize_substitute(insertstr(q1->next, ""), "_threadargsprotocomma_"); + vectorize_substitute(insertstr(q1->next, ""), "_internalthreadargsprotocomma_"); } void lag_stmt(Item* q1, int blocktype) /* LAG name1 BY name2 */ @@ -373,17 +371,23 @@ int check_tables_threads(List* p) { Item* q; if (check_table_thread_list) { ITERATE(q, check_table_thread_list) { - Sprintf(buf, "\nstatic void %s(double*, Datum*, Datum*, NrnThread*);", STR(q)); + Sprintf(buf, "\nstatic void %s(_internalthreadargsproto_);", STR(q)); lappendstr(p, buf); } lappendstr(p, - "\nstatic void _check_table_thread(double* _p, Datum* _ppvar, Datum* _thread, " - "NrnThread* _nt, int _type) {\n"); + "\n" + "static void _check_table_thread(_threadargsprotocomma_ int _type, " + "_nrn_model_sorted_token const& _sorted_token) {\n" + " _nrn_mechanism_cache_range _lmr{_sorted_token, *_nt, *_ml, _type};\n" + " {\n" + " auto* const _ml = &_lmr;\n"); ITERATE(q, check_table_thread_list) { - Sprintf(buf, " %s(_p, _ppvar, _thread, _nt);\n", STR(q)); + Sprintf(buf, " %s(_threadargs_);\n", STR(q)); lappendstr(p, buf); } - lappendstr(p, "}\n"); + lappendstr(p, + " }\n" + "}\n"); return 1; } return 0; @@ -415,7 +419,7 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { } Sprintf(buf, "_check_%s();\n", fname); q = lappendstr(check_table_statements, buf); - Sprintf(buf, "_check_%s(_p, _ppvar, _thread, _nt);\n", fname); + Sprintf(buf, "_check_%s(_threadargs_);\n", fname); vectorize_substitute(q, buf); /*checking*/ if (type == FUNCTION1) { @@ -448,13 +452,13 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { fsym->subtype |= FUNCT; Sprintf(buf, "static double _n_%s(double);\n", fname); q = linsertstr(procfunc, buf); - Sprintf(buf, "static double _n_%s(_threadargsprotocomma_ double _lv);\n", fname); + Sprintf(buf, "static double _n_%s(_internalthreadargsprotocomma_ double _lv);\n", fname); vectorize_substitute(q, buf); } else { fsym->subtype |= PROCED; Sprintf(buf, "static void _n_%s(double);\n", fname); q = linsertstr(procfunc, buf); - Sprintf(buf, "static void _n_%s(_threadargsprotocomma_ double _lv);\n", fname); + Sprintf(buf, "static void _n_%s(_internalthreadargsprotocomma_ double _lv);\n", fname); vectorize_substitute(q, buf); } fsym->usage |= FUNCT; @@ -474,9 +478,7 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { vectorize_substitute(q, ""); Sprintf(buf, "static void _check_%s() {\n", fname); q = lappendstr(procfunc, buf); - Sprintf(buf, - "static void _check_%s(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt) {\n", - fname); + Sprintf(buf, "static void _check_%s(_internalthreadargsproto_) {\n", fname); vectorize_substitute(q, buf); Lappendstr(procfunc, " static int _maktable=1; int _i, _j, _ix = 0;\n"); Lappendstr(procfunc, " double _xi, _tmax;\n"); @@ -523,13 +525,13 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { s = SYM(q); Sprintf(buf, " _t_%s[_i] = _f_%s(_x);\n", s->name, fname); Lappendstr(procfunc, buf); - Sprintf(buf, " _t_%s[_i] = _f_%s(_p, _ppvar, _thread, _nt, _x);\n", s->name, fname); + Sprintf(buf, " _t_%s[_i] = _f_%s(_threadargscomma_ _x);\n", s->name, fname); vectorize_substitute(procfunc->prev, buf); } } else { Sprintf(buf, " _f_%s(_x);\n", fname); Lappendstr(procfunc, buf); - Sprintf(buf, " 
_f_%s(_p, _ppvar, _thread, _nt, _x);\n", fname); + Sprintf(buf, " _f_%s(_threadargscomma_ _x);\n", fname); vectorize_substitute(procfunc->prev, buf); ITERATE(q, table) { s = SYM(q); @@ -560,31 +562,25 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { /*declaration*/ if (type == FUNCTION1) { #define GLOBFUNC 1 -#if !GLOBFUNC - Lappendstr(procfunc, "static int"); -#endif Lappendstr(procfunc, "double"); } else { Lappendstr(procfunc, "static int"); } Sprintf(buf, "%s(double %s){", fname, arg->name); Lappendstr(procfunc, buf); - Sprintf(buf, - "%s(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double %s) {", - fname, - arg->name); + Sprintf(buf, "%s(_internalthreadargsprotocomma_ double %s) {", fname, arg->name); vectorize_substitute(procfunc->prev, buf); /* check the table */ Sprintf(buf, "_check_%s();\n", fname); q = lappendstr(procfunc, buf); - Sprintf(buf, "\n#if 0\n_check_%s(_p, _ppvar, _thread, _nt);\n#endif\n", fname); + Sprintf(buf, "\n#if 0\n_check_%s(_threadargs_);\n#endif\n", fname); vectorize_substitute(q, buf); if (type == FUNCTION1) { Lappendstr(procfunc, "return"); } Sprintf(buf, "_n_%s(%s);\n", fname, arg->name); Lappendstr(procfunc, buf); - Sprintf(buf, "_n_%s(_p, _ppvar, _thread, _nt, %s);\n", fname, arg->name); + Sprintf(buf, "_n_%s(_threadargscomma_ %s);\n", fname, arg->name); vectorize_substitute(procfunc->prev, buf); if (type != FUNCTION1) { Lappendstr(procfunc, "return 0;\n"); @@ -599,10 +595,7 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { } Sprintf(buf, "_n_%s(double %s){", fname, arg->name); Lappendstr(procfunc, buf); - Sprintf(buf, - "_n_%s(double* _p, Datum* _ppvar, Datum* _thread, NrnThread* _nt, double %s){", - fname, - arg->name); + Sprintf(buf, "_n_%s(_internalthreadargsprotocomma_ double %s){", fname, arg->name); vectorize_substitute(procfunc->prev, buf); Lappendstr(procfunc, "int _i, _j;\n"); Lappendstr(procfunc, "double _xi, _theta;\n"); @@ -614,7 +607,7 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { } Sprintf(buf, "_f_%s(%s);", fname, arg->name); Lappendstr(procfunc, buf); - Sprintf(buf, "_f_%s(_p, _ppvar, _thread, _nt, %s);", fname, arg->name); + Sprintf(buf, "_f_%s(_threadargscomma_ %s);", fname, arg->name); vectorize_substitute(procfunc->prev, buf); if (type != FUNCTION1) { Lappendstr(procfunc, "return;"); @@ -624,7 +617,7 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { /* table lookup */ Sprintf(buf, "_xi = _mfac_%s * (%s - _tmin_%s);\n", fname, arg->name, fname); Lappendstr(procfunc, buf); - Lappendstr(procfunc, "if (isnan(_xi)) {\n"); + Lappendstr(procfunc, "if (std::isnan(_xi)) {\n"); if (type == FUNCTION1) { Lappendstr(procfunc, " return _xi; }\n"); } else { @@ -738,41 +731,67 @@ void table_massage(List* tablist, Item* qtype, Item* qname, List* arglist) { freelist(&to); } -void hocfunchack(Symbol* n, Item* qpar1, Item* qpar2, int hack) { - extern int point_process; +extern int point_process; + +// Original hocfunchack modified to handle _npy_name definitions. +static void funchack(Symbol* n, bool ishoc, int hack) { Item* q; int i; Item* qp = 0; if (point_process) { Sprintf(buf, "\nstatic double _hoc_%s(void* _vptr) {\n double _r;\n", n->name); - } else { + } else if (ishoc) { Sprintf(buf, "\nstatic void _hoc_%s(void) {\n double _r;\n", n->name); + } else { // _npy_... 
+ Sprintf(buf, + "\nstatic double _npy_%s(Prop* _prop) {\n" + " double _r{0.0};\n", + n->name); } Lappendstr(procfunc, buf); vectorize_substitute(lappendstr(procfunc, ""), - "\ - double* _p; Datum* _ppvar; Datum* _thread; NrnThread* _nt;\n\ -"); + "Datum* _ppvar; Datum* _thread; NrnThread* _nt;\n"); if (point_process) { - vectorize_substitute(lappendstr(procfunc, " _hoc_setdata(_vptr);\n"), - "\ - _p = ((Point_process*)_vptr)->_prop->param;\n\ - _ppvar = ((Point_process*)_vptr)->_prop->dparam;\n\ - _thread = _extcall_thread;\n\ - _nt = (NrnThread*)((Point_process*)_vptr)->_vnt;\n\ -"); - } else { - vectorize_substitute(lappendstr(procfunc, ""), - "\ - if (_extcall_prop) {_p = _extcall_prop->param; _ppvar = _extcall_prop->dparam;}else{ _p = (double*)0; _ppvar = (Datum*)0; }\n\ - _thread = _extcall_thread;\n\ - _nt = nrn_threads;\n\ -"); + Lappendstr(procfunc, + " auto* const _pnt = static_cast(_vptr);\n" + " auto* const _p = _pnt->_prop;\n" + " if (!_p) {\n" + " hoc_execerror(\"POINT_PROCESS data instance not valid\", NULL);\n" + " }\n"); + q = lappendstr(procfunc, " _setdata(_p);\n"); + vectorize_substitute(q, + " _nrn_mechanism_cache_instance _ml_real{_p};\n" + " auto* const _ml = &_ml_real;\n" + " size_t const _iml{};\n" + " _ppvar = _nrn_mechanism_access_dparam(_p);\n" + " _thread = _extcall_thread.data();\n" + " _nt = static_cast(_pnt->_vnt);\n"); + } else if (ishoc) { + hocfunc_setdata_item(n, lappendstr(procfunc, "")); + vectorize_substitute( + lappendstr(procfunc, ""), + "_nrn_mechanism_cache_instance _ml_real{_local_prop};\n" + "auto* const _ml = &_ml_real;\n" + "size_t const _iml{};\n" + "_ppvar = _local_prop ? _nrn_mechanism_access_dparam(_local_prop) : nullptr;\n" + "_thread = _extcall_thread.data();\n" + "_nt = nrn_threads;\n"); + } else { // _npy_... + q = lappendstr(procfunc, + " neuron::legacy::set_globals_from_prop(_prop, _ml_real, _ml, _iml);\n" + " _ppvar = _nrn_mechanism_access_dparam(_prop);\n"); + vectorize_substitute(q, + "_nrn_mechanism_cache_instance _ml_real{_prop};\n" + "auto* const _ml = &_ml_real;\n" + "size_t const _iml{};\n" + "_ppvar = _nrn_mechanism_access_dparam(_prop);\n" + "_thread = _extcall_thread.data();\n" + "_nt = nrn_threads;\n"); } if (n == last_func_using_table) { qp = lappendstr(procfunc, ""); - Sprintf(buf, "\n#if 1\n _check_%s(_p, _ppvar, _thread, _nt);\n#endif\n", n->name); + Sprintf(buf, "\n#if 1\n _check_%s(_threadargs_);\n#endif\n", n->name); vectorize_substitute(qp, buf); } if (n->subtype & FUNCT) { @@ -790,30 +809,51 @@ void hocfunchack(Symbol* n, Item* qpar1, Item* qpar2, int hack) { Lappendstr(procfunc, ","); } } - if (point_process) { + if (point_process || !ishoc) { Lappendstr(procfunc, ");\n return(_r);\n}\n"); - } else + } else if (ishoc) { Lappendstr(procfunc, ");\n hoc_retpushx(_r);\n}\n"); + } if (i) { - vectorize_substitute(qp, "_p, _ppvar, _thread, _nt,"); + vectorize_substitute(qp, "_threadargscomma_"); } else if (!hack) { - vectorize_substitute(qp, "_p, _ppvar, _thread, _nt"); + vectorize_substitute(qp, "_threadargs_"); } } +void hocfunchack(Symbol* n, Item* qpar1, Item* qpar2, int hack) { + funchack(n, true, hack); +} + +static void npyfunc(Symbol* n, int hack) { // supports seg.mech.n(...) + if (point_process) { + return; + } // direct calls from python from hocfunchack + funchack(n, false, hack); // direct calls from python via _npy_.... wrapper. 
+} + void hocfunc(Symbol* n, Item* qpar1, Item* qpar2) /*interface between modl and hoc for proc and func */ { /* Hack prevents FUNCTION_TABLE bug of 'double table_name()' extra args replacing the double in 'double name(...) */ hocfunchack(n, qpar1, qpar2, 0); + // wrapper for direct call from python + npyfunc(n, 0); // shares most of hocfunchack code (factored out). } /* ARGSUSED */ void vectorize_use_func(Item* qname, Item* qpar1, Item* qexpr, Item* qpar2, int blocktype) { Item* q; - if (SYM(qname)->subtype & EXTDEF) { + if (SYM(qname)->subtype & (EXTDEF | EXTDEF_RANDOM)) { if (strcmp(SYM(qname)->name, "nrn_pointing") == 0) { + // TODO: this relies on undefined behaviour in C++. &*foo is not + // guaranteed to be equivalent to foo if foo is null. See + // https://stackoverflow.com/questions/51691273/is-null-well-defined-in-c, + // https://en.cppreference.com/w/cpp/language/operator_member_access#Built-in_address-of_operator + // also confirms that the special case here in C does not apply to + // C++. All of that said, neither GCC nor Clang even produces a + // warning and it seems to work. Insertstr(qpar1->next, "&"); } else if (strcmp(SYM(qname)->name, "state_discontinuity") == 0) { if (blocktype == NETRECEIVE) { @@ -874,23 +914,16 @@ void vectorize_use_func(Item* qname, Item* qpar1, Item* qexpr, Item* qpar2, int } else { diag("net_move", "only allowed in NET_RECEIVE block"); } + } else if (SYM(qname)->subtype & EXTDEF_RANDOM) { + replacstr(qname, extdef_rand[SYM(qname)->name]); } return; } -#if 1 if (qexpr) { q = insertstr(qpar1->next, "_threadargscomma_"); } else { q = insertstr(qpar1->next, "_threadargs_"); } -#else - q = insertstr(qpar1->next, ""); - if (qexpr) { - vectorize_substitute(q, "_p, _ppvar, _thread, _nt,"); - } else { - vectorize_substitute(q, "_p, _ppvar, _thread, _nt"); - } -#endif } @@ -934,6 +967,7 @@ void function_table(Symbol* s, Item* qpar1, Item* qpar2, Item* qb1, Item* qb2) / Sprintf(buf, "\nstatic void* _ptable_%s = (void*)0;\n", s->name); linsertstr(procfunc, buf); hocfunchack(t, q1, q2, 1); + npyfunc(t, 1); } void watchstmt(Item* par1, Item* dir, Item* par2, Item* flag, int blocktype) { @@ -945,10 +979,17 @@ void watchstmt(Item* par1, Item* dir, Item* par2, Item* flag, int blocktype) { } Sprintf(buf, "\nstatic double _watch%d_cond(Point_process* _pnt) {\n", watch_seen_); lappendstr(procfunc, buf); - vectorize_substitute(lappendstr(procfunc, ""),(char*)"\tdouble* _p; Datum* _ppvar; Datum* _thread; NrnThread* _nt;\n\t_thread= (Datum*)0; _nt = (NrnThread*)_pnt->_vnt;\n"); + vectorize_substitute(lappendstr(procfunc, ""), + " Datum* _ppvar; Datum* _thread{};\n" + " NrnThread* _nt{static_cast(_pnt->_vnt)};\n"); Sprintf(buf, - "\t_p = _pnt->_prop->param; _ppvar = _pnt->_prop->dparam;\n\tv = " - "NODEV(_pnt->node);\n return "); + " auto* const _prop = _pnt->_prop;\n" + " _nrn_mechanism_cache_instance _ml_real{_prop};\n" + " auto* const _ml = &_ml_real;\n" + " size_t _iml{};\n" + " _ppvar = _nrn_mechanism_access_dparam(_prop);\n" + " v = NODEV(_pnt->node);\n" + " return "); lappendstr(procfunc, buf); movelist(par1, par2, procfunc); movelist(dir->next, par2, procfunc); diff --git a/src/nmodl/parse1.ypp b/src/nmodl/parse1.ypp index 342b354049..03c7108c46 100755 --- a/src/nmodl/parse1.ypp +++ b/src/nmodl/parse1.ypp @@ -111,6 +111,7 @@ static int nr_argcnt_, argcnt_; /* for matching number of args in NET_RECEIVE %type neuronblk nrnuse nrnlist valence initstmt bablk optontology %token CONDUCTANCE %type conducthint +%token RANDOM RANDOMVAR /* precedence in expressions--- 
low to high */ %left OR @@ -186,6 +187,11 @@ Name: NAME SYM($1) = checklocal(SYM($1)); /* it was a bug when this was done to the lookahead token in lex */ } + | RANDOMVAR error { + std::string s{SYM($1)->name}; + s += " RANDOM var can only be used as the first arg of a random_ function"; + myerr(s.c_str()); + } ; declare: parmblk | indepblk | depblk | stateblk | neuronblk | unitblk | constblk @@ -228,7 +234,9 @@ limits: /*nothing*/ } ; name: Name + {check_range_in_func(SYM($1));} | PRIME + {check_range_in_func(SYM($1));} ; number: NUMBER {lastok = $1;} | '-' NUMBER @@ -397,6 +405,7 @@ ostmt: fromstmt | conducthint | VERBATIM {inblock(SYM($1)->name); + check_range_in_func(nullptr); replacstr($1, "\n/*VERBATIM*/\n"); if (!assert_threadsafe && !saw_verbatim_) { fprintf(stderr, "Notice: VERBATIM blocks are not thread safe\n"); @@ -609,6 +618,7 @@ funccall: NAME '(' { if (SYM($1)->subtype & EXTDEF2) { extdef2 = 1;}} exprlist ')' {lastok = $5; SYM($1)->usage |= FUNCT; + check_range_in_func(SYM($1)); if (SYM($1)->subtype & EXTDEF2) { extdef2 = 0;} if (SYM($1)->subtype & EXTDEF3) { add_reset_args($2);} if (SYM($1)->subtype & EXTDEF4) { add_nrnthread_arg($2);} @@ -618,10 +628,21 @@ fprintf(stderr, "Notice: %s is not thread safe\n", SYM($1)->name); vectorize = 0; } } + Item* arg = $2->next; + if (SYM($1)->subtype & EXTDEF_RANDOM) { + if (arg == $2 || arg->itemtype != SYMBOL || SYM(arg)->type != RANDOMVAR) { + diag(SYM($1)->name, " must have RANDOM var as first argument"); + } + }else{ + if (arg != $2 && arg->itemtype == SYMBOL && SYM(arg)->type == RANDOMVAR) { + diag(SYM($1)->name, " cannot have RANDOM var as an argument"); + } + } vectorize_use_func($1,$2,$4,$5,blocktype); } ; exprlist: /*nothing*/{$$ = ITEM0;} + | RANDOMVAR | expr | STRING | exprlist ',' expr @@ -707,7 +728,7 @@ functableblk: FUNCTION_TABLE NAME '(' arglist ')' units } ; funcblk: FUNCTION1 NAME '(' arglist ')' units - {IGNORE(copylocal(SYM($2)));} + {IGNORE(copylocal(SYM($2))); set_inside_func(SYM($2));} stmtlist '}' /* boilerplate added to form double function(){...} Note all arguments have prefix _l */ @@ -727,6 +748,7 @@ funcblk: FUNCTION1 NAME '(' arglist ')' units SYM($2)->subtype |= FUNCT; SYM($2)->usage |= FUNCT; hocfunc(s, $3, $5); + set_inside_func(nullptr); poplocal(); freelist(&$4);} ; arglist: /*nothing*/ {pushlocal(); $$ = LIST0; argcnt_ = 0;} @@ -741,20 +763,21 @@ arglist1: name units ++argcnt_; } ; -procedblk: PROCEDURE NAME '(' arglist ')' units stmtlist '}' +procedblk: PROCEDURE NAME '(' arglist ')' units {set_inside_func(SYM($2));} stmtlist '}' {Symbol *s = SYM($2); s->u.i = 0; /* avoid objectcenter warning if solved */ s->varnum = argcnt_; /* allow proper number of "double" in prototype */ table_massage(table_list, $1, $2, $4); freelist(&table_list); replacstr($1, "\nstatic int "); defarg($3, $5); - Insertstr($8, " return 0;"); - movelist($1, $8, procfunc); + Insertstr($9, " return 0;"); + movelist($1, $9, procfunc); if (SYM($2)->subtype & PROCED) { diag(SYM($2)->name, " declared as PROCEDURE twice"); } SYM($2)->subtype |= PROCED; SYM($2)->usage |= FUNCT; hocfunc(s, $3, $5); + set_inside_func(nullptr); poplocal(); freelist(&$4);} ; netrecblk: NETRECEIVE '(' arglist ')' @@ -924,13 +947,13 @@ factordef: NAME '=' real unit | NAME '=' unit unit { SYM($1)->subtype |= nmodlCONST; - nrnunit_dynamic_str(buf, SYM($1)->name, $3, $4); + nrnunit_str(buf, SYM($1)->name, $3, $4); Lappendstr(firstlist, buf); } | NAME '=' unit '-' GT unit { SYM($1)->subtype |= nmodlCONST; - nrnunit_dynamic_str(buf, SYM($1)->name, $3, $6); + 
nrnunit_str(buf, SYM($1)->name, $3, $6); Lappendstr(firstlist, buf); } | error {myerr("Unit factor syntax: examples:\n\ @@ -1004,6 +1027,8 @@ nrnstmt: /*nothing*/ { nrn_list($2, $3);} | nrnstmt THREADSAFE { assert_threadsafe = 1; } + | nrnstmt RANDOM nrnlist + { nrn_list($2, $3);} ; nrnuse: USEION NAME READ nrnlist valence optontology {nrn_use($2, $4, ITEM0, $5);} diff --git a/src/nmodl/simultan.cpp b/src/nmodl/simultan.cpp index 68072cd075..a946a334cc 100644 --- a/src/nmodl/simultan.cpp +++ b/src/nmodl/simultan.cpp @@ -14,7 +14,8 @@ void solv_nonlin(Item* qsol, Symbol* fun, Symbol* method, int numeqn, int listnu // added so that method->name != "newton" then those methods may need to be modified as newton // was Sprintf(buf, - "%s<%d>(_slist%d, _p, %s_wrapper_returning_int, _dlist%d);\n", + "%s<%d>(_slist%d, neuron::scopmath::row_view{_ml, _iml}, %s_wrapper_returning_int, " + "_dlist%d);\n", method->name, numeqn, listnum, @@ -27,8 +28,11 @@ void solv_nonlin(Item* qsol, Symbol* fun, Symbol* method, int numeqn, int listnu } void solv_lineq(Item* qsol, Symbol* fun, Symbol* method, int numeqn, int listnum) { + // examples of method->name: simeq Sprintf(buf, - " 0; %s();\n error = %s(%d, _coef%d, _p, _slist%d);\n", + " 0;\n" + " %s();\n" + " error = %s(%d, _coef%d, neuron::scopmath::row_view{_ml, _iml}, _slist%d);\n", fun->name, method->name, numeqn, @@ -95,14 +99,14 @@ int nonlin_common(Item* q4) /* used by massagenonlin() and mixed_eqns() */ int dim = s->araydim; using_array = 1; Sprintf(buf, - "for(_i=0;_i<%d;_i++){\n _slist%d[%d+_i] = %s_columnindex + _i;}\n", + "for(_i=0;_i<%d;_i++){\n _slist%d[%d+_i] = {%s_columnindex, _i};}\n", dim, numlist, counts, s->name); counts += dim; } else { - Sprintf(buf, "_slist%d[%d] = %s_columnindex;\n", numlist, counts, s->name); + Sprintf(buf, "_slist%d[%d] = {%s_columnindex, 0};\n", numlist, counts, s->name); counts++; } Lappendstr(initlist, buf); @@ -148,13 +152,13 @@ int nonlin_common(Item* q4) /* used by massagenonlin() and mixed_eqns() */ } freeqnqueue(); Sprintf(buf, - "static int _slist%d[%d]; static double _dlist%d[%d];\n", + "static neuron::container::field_index _slist%d[%d]; static double _dlist%d[%d];\n", numlist, counts, numlist, counts); q = linsertstr(procfunc, buf); - Sprintf(buf, "static int _slist%d[%d];\n", numlist, counts); + Sprintf(buf, "static neuron::container::field_index _slist%d[%d];\n", numlist, counts); vectorize_substitute(q, buf); return counts; } @@ -186,16 +190,18 @@ Item* mixed_eqns(Item* q2, Item* q3, Item* q4) /* name, '{', '}' */ counts); vectorize_substitute(q, buf); Insertstr(q3, "if (!_recurse) {\n _recurse = 1;\n"); + // olupton 2023-01-19: this code does not appear to be covered by the test suite Sprintf(buf, - "error = newton<%d>(_slist%d, _p, %s, _dlist%d);\n", + "error = newton<%d>(_slist%d, neuron::scopmath::row_view{_ml, _iml}, %s, _dlist%d);\n", counts, numlist, SYM(q2)->name, numlist); qret = insertstr(q3, buf); Sprintf(buf, - "error = nrn_newton_thread(_newtonspace%d, %d,_slist%d, _p, " - "%s, _dlist%d, _p, _ppvar, _thread, _nt);\n", + "error = nrn_newton_thread(_newtonspace%d, %d, _slist%d, " + "neuron::scopmath::row_view{_ml, _iml}, %s, _dlist%d, _ml," + " _iml, _ppvar, _thread, _nt);\n", numlist - 1, counts, numlist, @@ -259,14 +265,14 @@ void lin_state_term(Item* q1, Item* q2) /* term last*/ int dim = statsym->araydim; using_array = 1; Sprintf(buf, - "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = %s_columnindex + _i;}\n", + "for(_i=0;_i<%d;_i++){_slist%d[%d+_i] = {%s_columnindex, _i};}\n", dim, numlist, 
nstate, statsym->name); nstate += dim; } else { - Sprintf(buf, "_slist%d[%d] = %s_columnindex;\n", numlist, nstate, statsym->name); + Sprintf(buf, "_slist%d[%d] = {%s_columnindex, 0};\n", numlist, nstate, statsym->name); nstate++; } Lappendstr(initlist, buf); @@ -344,7 +350,11 @@ void massage_linblk(Item* q1, Item* q2, Item* q3, Item* q4) /* LINEAR NAME stmtl #endif } linblk->used = nstate; - Sprintf(buf, "static int _slist%d[%d];static double **_coef%d;\n", numlist, nstate, numlist); + Sprintf(buf, + "static neuron::container::field_index _slist%d[%d];static double **_coef%d;\n", + numlist, + nstate, + numlist); Linsertstr(procfunc, buf); Sprintf(buf, "\n#define _RHS%d(arg) _coef%d[arg][%d]\n", numlist, numlist, nstate); Linsertstr(procfunc, buf); diff --git a/src/nmodl/solve.cpp b/src/nmodl/solve.cpp index 6fffb3a161..9051da0f49 100644 --- a/src/nmodl/solve.cpp +++ b/src/nmodl/solve.cpp @@ -94,7 +94,11 @@ void solvequeue(Item* qName, Item* qMethod, int blocktype) /*solve NAME [using M lq = lappendsym(solvq, SYM0); LST(lq) = errstmt; Sprintf(buf, - "if(error){fprintf(stderr,\"%s\\n\"); nrn_complain(_p); abort_run(error);}\n", + "if(error){\n" + " std_cerr_stream << \"%s\\n\";\n" + " std_cerr_stream << _ml << ' ' << _iml << '\\n';\n" + " abort_run(error);\n" + "}\n", current_line()); insertstr(errstmt, buf); } @@ -245,7 +249,7 @@ void solvhandler() { } Sprintf(buf, " %s();\n", fun->name); replacstr(qsol, buf); - Sprintf(buf, "{ %s(_p, _ppvar, _thread, _nt); }\n", fun->name); + Sprintf(buf, "{ %s(_threadargs_); }\n", fun->name); vectorize_substitute(qsol, buf); break; #endif diff --git a/src/nrncvode/bbtqueue.cpp b/src/nrncvode/bbtqueue.cpp index 8fb20e8018..ce31f5c72a 100644 --- a/src/nrncvode/bbtqueue.cpp +++ b/src/nrncvode/bbtqueue.cpp @@ -1,9 +1,9 @@ // balanced binary tree queue implemented by Michael Hines TQItem::TQItem() { - left_ = nil; - right_ = nil; - parent_ = nil; + left_ = nullptr; + right_ = nullptr; + parent_ = nullptr; } TQItem::~TQItem() { @@ -22,8 +22,8 @@ static void deleteitem(TQItem* i) { if (i->right_) { deleteitem(i->right_); } - i->left_ = nil; - i->right_ = nil; + i->left_ = nullptr; + i->right_ = nullptr; tpool_->free(i); } @@ -91,8 +91,8 @@ TQueue::TQueue() { if (!tpool_) { tpool_ = new TQItemPool(1000); } - root_ = nil; - least_ = nil; + root_ = nullptr; + least_ = nullptr; #if COLLECT_TQueue_STATISTICS nmove = ninsert = nrem = nleast = nbal = ncmplxrem = 0; @@ -124,7 +124,7 @@ void TQueue::check(const char* mes) { if (root_) { root_->t_iterate(chk, 0); } - errmess_ = nil; + errmess_ = nullptr; #endif } @@ -170,7 +170,7 @@ void TQueue::new_least() { assert(b->left_ == least_); least_ = b; } else { - least_ = nil; + least_ = nullptr; } } } @@ -315,15 +315,15 @@ void TQueue::remove1(TQItem* i) { (*child)->parent_ = p; } else { root_ = i->right_; - root_->parent_ = nil; + root_->parent_ = nullptr; } } else { // a leaf // printf("removing leaf %g\n", i->t_); if (p) { - *child = nil; + *child = nullptr; } else { - root_ = nil; + root_ = nullptr; } } if (doweight) { @@ -332,9 +332,9 @@ void TQueue::remove1(TQItem* i) { p = p->parent_; } } - i->right_ = nil; - i->left_ = nil; - i->parent_ = nil; + i->right_ = nullptr; + i->left_ = nullptr; + i->parent_ = nullptr; check("end remove1"); } @@ -359,7 +359,7 @@ void TQueue::reverse(TQItem* b) { // switch item and parent } } else { assert(root_ == p); - b->parent_ = nil; + b->parent_ = nullptr; root_ = b; } b->w_ = p->w_; diff --git a/src/nrncvode/cvodeobj.cpp b/src/nrncvode/cvodeobj.cpp index 1cce0bc8ae..0579d71964 100644 
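The simultan.cpp hunks above change the generated _slist arrays from flat int offsets into the old per-instance _p row to neuron::container::field_index entries, i.e. (column, array-element) pairs that scopmath resolves through a row_view. A toy, self-contained sketch of that shape; field_index and row_view here are invented stand-ins, not NEURON's real types:

#include <cassert>
#include <vector>

struct field_index {
    int column;       // which state variable
    int array_index;  // element within an array state, 0 for scalars
};

struct row_view {
    const std::vector<std::vector<double>>* data;  // (*data)[column][array_index] for one instance
    double operator[](field_index f) const {
        return (*data)[f.column][f.array_index];
    }
};

int main() {
    // one mechanism instance with a scalar state m and an array state q[3]
    std::vector<std::vector<double>> instance{{0.5}, {1.0, 2.0, 3.0}};
    row_view row{&instance};
    field_index slist[] = {{0, 0}, {1, 0}, {1, 1}, {1, 2}};
    double sum = 0.0;
    for (auto f: slist) {
        sum += row[f];
    }
    assert(sum == 6.5);
}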
--- a/src/nrncvode/cvodeobj.cpp +++ b/src/nrncvode/cvodeobj.cpp @@ -20,21 +20,20 @@ extern int hoc_return_type_code; #include "nrncvode.h" #include "nrndaspk.h" #include "nrniv_mf.h" +#include "nrnpy.h" #include "tqueue.h" #include "mymath.h" #include "htlist.h" -#include #include #if NRN_ENABLE_THREADS static MUTDEC #endif - // Use of the above static mutex was broken by changeset 7ffd95c in 2014 - // when a MUTDEC was added explicitly to the NetCvode class namespace to - // handle interthread send events. - static void - static_mutex_for_at_time(bool b) { +// Use of the above static mutex was broken by changeset 7ffd95c in 2014 +// when a MUTDEC was added explicitly to the NetCvode class namespace to +// handle interthread send events. +static void static_mutex_for_at_time(bool b) { if (b) { MUTCONSTRUCT(1) } else { @@ -64,7 +63,6 @@ extern int secondorder; extern int linmod_extra_eqn_count(); extern int nrn_modeltype(); extern int nrn_use_selfqueue_; -extern int use_cachevec; extern void (*nrnthread_v_transfer_)(NrnThread*); extern void (*nrnmpi_v_transfer_)(); @@ -73,7 +71,7 @@ extern short* nrn_is_artificial_; #if USENCS extern void nrn2ncs_netcons(); #endif // USENCS -#if PARANEURON +#if NRNMPI extern "C" { extern N_Vector N_VNew_Parallel(int comm, long int local_length, long int global_length); extern N_Vector N_VNew_NrnParallelLD(int comm, long int local_length, long int global_length); @@ -350,12 +348,10 @@ static double use_mxb(void* v) { } static double cache_efficient(void* v) { - if (ifarg(1)) { - int i = (int) chkarg(1, 0, 1); - nrn_cachevec(i); - } + // Perhaps a warning on cache_efficient(True) and an error on cache_efficient(False) would be + // justified. hoc_return_type_code = 2; // boolean - return (double) use_cachevec; + return 1.0; } static double use_long_double(void* v) { @@ -424,7 +420,7 @@ static double tstop_event(void* v) { } } if (ifarg(2)) { - Object* ppobj = nil; + Object* ppobj = nullptr; int reinit = 0; if (ifarg(3)) { ppobj = *hoc_objgetarg(3); @@ -435,7 +431,7 @@ static double tstop_event(void* v) { reinit = int(chkarg(4, 0, 1)); } if (hoc_is_object_arg(2)) { - d->hoc_event(x, nil, ppobj, reinit, *hoc_objgetarg(2)); + d->hoc_event(x, nullptr, ppobj, reinit, *hoc_objgetarg(2)); } else { d->hoc_event(x, gargstr(2), ppobj, reinit); } @@ -494,7 +490,7 @@ static double ncs_netcons(void* v) { // for testing when there is actually no pc.transfer or pc.multisplit present // forces the global step to be truly global across processors. 
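The nil -> nullptr conversions in this file (and in bbtqueue.cpp above) are mechanical, but the benefit is more than cosmetic: nullptr has its own type, std::nullptr_t, so it converts only to pointer types and behaves predictably in overload resolution, which an integral nil/NULL macro does not. A minimal standalone illustration:

#include <cstdio>

static void take(int) { std::puts("int overload"); }
static void take(char*) { std::puts("pointer overload"); }

int main() {
    // take(NULL);   // ambiguous or silently picks the int overload, depending on how NULL is defined
    take(nullptr);   // always the pointer overload: nullptr_t converts only to pointer types
}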
static double use_parallel(void* v) { -#if PARANEURON +#if NRNMPI // assume single thread and global step NetCvode* d = (NetCvode*) v; assert(d->gcv_); @@ -516,9 +512,6 @@ static double nrn_diam_change_count(void* v) { return double(diam_change_cnt); } -int (*nrnpy_pysame)(Object*, Object*); -extern int (*nrnpy_hoccommand_exec)(Object*); - using ExtraScatterList = std::vector; static ExtraScatterList* extra_scatterlist[2]; // 0 scatter, 1 gather @@ -527,7 +520,7 @@ void nrn_extra_scatter_gather(int direction, int tid) { if (esl) { nrn_thread_error("extra_scatter_gather not allowed with multiple threads"); for (Object* callable: *esl) { - if (!(*nrnpy_hoccommand_exec)(callable)) { + if (!neuron::python::methods.hoccommand_exec(callable)) { hoc_execerror("extra_scatter_gather runtime error", 0); } } @@ -556,7 +549,7 @@ static double extra_scatter_gather_remove(void* v) { for (auto it = esl->begin(); it != esl->end();) { Object* o1 = *it; // if esl exists then python exists - if ((*nrnpy_pysame)(o, o1)) { + if (neuron::python::methods.pysame(o, o1)) { it = esl->erase(it); hoc_obj_unref(o1); } else { @@ -569,13 +562,13 @@ static double extra_scatter_gather_remove(void* v) { } static double use_fast_imem(void* v) { - int i = nrn_use_fast_imem; + auto i = nrn_use_fast_imem; hoc_return_type_code = 2; // boolean if (ifarg(1)) { - nrn_use_fast_imem = int(chkarg(1, 0., 1.)); + nrn_use_fast_imem = chkarg(1, 0., 1.); nrn_fast_imem_alloc(); } - return double(i); + return i; } static double poolshrink(void*) { @@ -656,7 +649,7 @@ static void* cons(Object*) { d = new NetCvode(1); net_cvode_instance = d; } - active(nil); + active(nullptr); return (void*) d; #else return (void*) net_cvode_instance; @@ -697,13 +690,13 @@ static void f_gvardt(realtype t, N_Vector y, N_Vector ydot, void* f_data); static void f_lvardt(realtype t, N_Vector y, N_Vector ydot, void* f_data); static CVRhsFn pf_; -static void* msolve_thread(NrnThread*); +static void msolve_thread(neuron::model_sorted_token const&, NrnThread&); static void* msolve_thread_part1(NrnThread*); static void* msolve_thread_part2(NrnThread*); static void* msolve_thread_part3(NrnThread*); -static void* f_thread(NrnThread*); -static void* f_thread_transfer_part1(NrnThread*); -static void* f_thread_transfer_part2(NrnThread*); +static void f_thread(neuron::model_sorted_token const&, NrnThread&); +static void f_thread_transfer_part1(neuron::model_sorted_token const&, NrnThread&); +static void f_thread_transfer_part2(neuron::model_sorted_token const&, NrnThread&); static void* f_thread_ms_part1(NrnThread*); static void* f_thread_ms_part2(NrnThread*); static void* f_thread_ms_part3(NrnThread*); @@ -718,30 +711,30 @@ Cvode::Cvode() { cvode_constructor(); } void Cvode::cvode_constructor() { - nthsizes_ = nil; - nth_ = nil; - ncv_ = nil; - ctd_ = nil; - tqitem_ = nil; - mem_ = nil; + nthsizes_ = nullptr; + nth_ = nullptr; + ncv_ = nullptr; + ctd_ = nullptr; + tqitem_ = nullptr; + mem_ = nullptr; #if NEOSIMorNCS - neosim_self_events_ = nil; + neosim_self_events_ = nullptr; #endif initialize_ = false; can_retreat_ = false; tstop_begin_ = 0.; tstop_end_ = 0.; use_daspk_ = false; - daspk_ = nil; + daspk_ = nullptr; - mem_ = nil; - y_ = nil; - atolnvec_ = nil; - maxstate_ = nil; - maxacor_ = nil; + mem_ = nullptr; + y_ = nullptr; + atolnvec_ = nullptr; + maxstate_ = nullptr; + maxacor_ = nullptr; neq_ = 0; structure_change_ = true; -#if PARANEURON +#if NRNMPI use_partrans_ = false; global_neq_ = 0; opmode_ = 0; @@ -808,7 +801,7 @@ void Cvode::set_init_flag() { } N_Vector 
Cvode::nvnew(long int n) { -#if PARANEURON +#if NRNMPI if (use_partrans_) { if (net_cvode_instance->use_long_double_) { return N_VNew_NrnParallelLD(0, n, global_neq_); @@ -893,19 +886,19 @@ void Cvode::init_prepare() { if (init_global()) { if (y_) { N_VDestroy(y_); - y_ = nil; + y_ = nullptr; } if (mem_) { CVodeFree(mem_); - mem_ = nil; + mem_ = nullptr; } if (atolnvec_) { N_VDestroy(atolnvec_); - atolnvec_ = nil; + atolnvec_ = nullptr; } if (daspk_) { delete daspk_; - daspk_ = nil; + daspk_ = nullptr; } init_eqn(); if (neq_ > 0) { @@ -927,8 +920,8 @@ void Cvode::activate_maxstate(bool on) { if (maxstate_) { N_VDestroy(maxstate_); N_VDestroy(maxacor_); - maxstate_ = nil; - maxacor_ = nil; + maxstate_ = nullptr; + maxacor_ = nullptr; } if (on && neq_ > 0) { maxstate_ = nvnew(neq_); @@ -1062,7 +1055,7 @@ void Cvode::maxstep(double x) { void Cvode::free_cvodemem() { if (mem_) { CVodeFree(mem_); - mem_ = nil; + mem_ = nullptr; } } @@ -1088,7 +1081,6 @@ int Cvode::cvode_init(double) { nrn_nonvint_block_ode_reinit(neq_, N_VGetArrayPointer(y_), 0); if (mem_) { err = CVodeReInit(mem_, pf_, t0_, y_, CV_SV, &ncv_->rtol_, atolnvec_); - CVodeSetFdata(mem_, (void*) this); // printf("CVodeReInit\n"); if (err != SUCCESS) { Printf("Cvode %p %s CVReInit error %d\n", @@ -1106,7 +1098,6 @@ int Cvode::cvode_init(double) { minstep(ncv_->minstep()); maxstep(ncv_->maxstep()); CVodeMalloc(mem_, pf_, t0_, y_, CV_SV, &ncv_->rtol_, atolnvec_); - CVodeSetFdata(mem_, (void*) this); if (err != SUCCESS) { Printf("Cvode %p %s CVodeMalloc error %d\n", this, @@ -1120,8 +1111,10 @@ int Cvode::cvode_init(double) { ((CVodeMem) mem_)->cv_gamma = 0.; ((CVodeMem) mem_)->cv_h = 0.; // fun called before cvode sets this (though fun does not need it // really) - // fun(t_, N_VGetArrayPointer(y_), nil); - (*pf_)(t_, y_, nil, (void*) this); + // fun(t_, N_VGetArrayPointer(y_), nullptr); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + std::pair opaque{this, sorted_token}; + pf_(t_, y_, nullptr, &opaque); can_retreat_ = false; return err; } @@ -1140,7 +1133,7 @@ void Cvode::alloc_daspk() { // nodes may or may not have capacitors to ground. 
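cvode_init above now hands CVode a pointer to a local std::pair that bundles the Cvode* with the sorted-model token (cvode_advance_tn and cvode_interpolate below install and clear the same kind of pair around each CVode call), so that C-style callbacks receiving only a void* can recover both pieces. The pattern reduced to a self-contained sketch; Solver, Token and c_callback are invented names:

#include <cassert>
#include <utility>

struct Token { int value; };
struct Solver { int id; };

// stand-in for a C-style callback that only gets a void* user-data slot
static void c_callback(void* user_data) {
    auto* const typed = static_cast<std::pair<Solver*, Token const&>*>(user_data);
    Solver* const solver = typed->first;
    Token const& token = typed->second;
    assert(solver->id == 42 && token.value == 7);
}

int main() {
    Solver solver{42};
    Token token{7};
    // the pair lives on the stack only for the duration of the call, as in cvode_advance_tn
    std::pair<Solver*, Token const&> opaque{&solver, token};
    c_callback(&opaque);
}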
} -int Cvode::advance_tn() { +int Cvode::advance_tn(neuron::model_sorted_token const& sorted_token) { int err = SUCCESS; if (neq_ == 0) { t_ += 1e9; @@ -1174,14 +1167,14 @@ int Cvode::advance_tn() { } else { nt_t = t_; } - do_nonode(nth_); -#if PARANEURON + do_nonode(sorted_token, nth_); +#if NRNMPI opmode_ = 1; #endif if (use_daspk_) { err = daspk_advance_tn(); } else { - err = cvode_advance_tn(); + err = cvode_advance_tn(sorted_token); } can_retreat_ = true; maxstate(true); @@ -1206,7 +1199,7 @@ int Cvode::solve() { err = init(t_); } } else { - err = advance_tn(); + err = advance_tn(nrn_ensure_model_data_are_sorted()); } // printf("Cvode::solve exit %p current_time=%g tn=%g\n", this, t_, tn()); return err; @@ -1223,7 +1216,7 @@ int Cvode::init(double tout) { next_at_time_ = t_ + 1e5; init_prepare(); if (neq_) { -#if PARANEURON +#if NRNMPI opmode_ = 3; #endif if (use_daspk_) { @@ -1233,7 +1226,7 @@ int Cvode::init(double tout) { } } tstop_ = next_at_time_ - NetCvode::eps(next_at_time_); -#if PARANEURON +#if NRNMPI if (use_partrans_) { tstop_ = nrnmpi_dbl_allmin(tstop_); } @@ -1313,7 +1306,7 @@ int Cvode::interpolate(double tout) { assert(tout >= t0() && tout <= tn()); ++interpolate_calls_; -#if PARANEURON +#if NRNMPI opmode_ = 2; #endif if (use_daspk_) { @@ -1323,7 +1316,7 @@ int Cvode::interpolate(double tout) { } } -int Cvode::cvode_advance_tn() { +int Cvode::cvode_advance_tn(neuron::model_sorted_token const& sorted_token) { #if PRINT_EVENT if (net_cvode_instance->print_event_ > 1) { Printf("Cvode::cvode_advance_tn %p %d initialize_=%d tstop=%.20g t_=%.20g to ", @@ -1334,9 +1327,12 @@ int Cvode::cvode_advance_tn() { t_); } #endif + std::pair opaque{this, sorted_token}; + CVodeSetFdata(mem_, &opaque); CVodeSetStopTime(mem_, tstop_); // printf("cvode_advance_tn begin t0_=%g t_=%g tn_=%g tstop=%g\n", t0_, t_, tn_, tstop_); int err = CVode(mem_, tstop_, y_, &t_, CV_ONE_STEP_TSTOP); + CVodeSetFdata(mem_, nullptr); #if PRINT_EVENT if (net_cvode_instance->print_event_ > 1) { Printf("t_=%.20g\n", t_); @@ -1347,17 +1343,12 @@ int Cvode::cvode_advance_tn() { this, secname(ctd_[0].v_node_[ctd_[0].rootnodecount_]->sec), err); - (*pf_)(t_, y_, nil, (void*) this); + pf_(t_, y_, nullptr, &opaque); return err; } // this is very bad, performance-wise. However cvode modifies its states // after a call to fun with the proper t. -#if 1 - (*pf_)(t_, y_, nil, (void*) this); -#else - NrnThread* _nt; - scatter_y(y_); -#endif + pf_(t_, y_, nullptr, &opaque); tn_ = ((CVodeMem) mem_)->cv_tn; t0_ = tn_ - ((CVodeMem) mem_)->cv_h; // printf("t=%.15g t_=%.15g tn()=%.15g tstop_=%.15g t0_=%.15g\n", nrn_threads->t, t_, tn(), @@ -1379,8 +1370,12 @@ int Cvode::cvode_interpolate(double tout) { #endif // avoid CVode-- tstop = 0.5 is behind current t = 0.5 // is this really necessary anymore. 
Maybe NORMAL mode ignores tstop + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + std::pair opaque{this, sorted_token}; + CVodeSetFdata(mem_, &opaque); CVodeSetStopTime(mem_, tstop_ + tstop_); int err = CVode(mem_, tout, y_, &t_, CV_NORMAL); + CVodeSetFdata(mem_, nullptr); #if PRINT_EVENT if (net_cvode_instance->print_event_ > 1) { Printf("%.20g\n", t_); @@ -1393,7 +1388,7 @@ int Cvode::cvode_interpolate(double tout) { err); return err; } - (*pf_)(t_, y_, nil, (void*) this); + pf_(t_, y_, nullptr, &opaque); // printf("t_=%g h=%g q=%d y=%g\n", t_, ((CVodeMem)mem_)->cv_h, ((CVodeMem)mem_)->cv_q, // N_VIth(y_,0)); return SUCCESS; @@ -1506,7 +1501,8 @@ static int msetup(CVodeMem m, N_Vector) { // printf("msetup\n"); *jcurPtr = true; - Cvode* cv = (Cvode*) m->cv_f_data; + auto* const cv = + static_cast*>(m->cv_f_data)->first; return cv->setup(yp, fp); } @@ -1518,7 +1514,10 @@ static int msolve(CVodeMem m, N_Vector b, N_Vector weight, N_Vector ycur, N_Vect // N_VIth(b, 0) /= (1. + m->cv_gamma); // N_VIth(b, 0) /= (1. + m->cv_gammap); // N_VIth(b,0) *= 2./(1. + m->cv_gamrat); - msolve_cv_ = (Cvode*) m->cv_f_data; + auto* const f_typed_data = static_cast*>( + m->cv_f_data); + msolve_cv_ = f_typed_data->first; + auto const& sorted_token = f_typed_data->second; Cvode& cv = *msolve_cv_; ++cv.mxb_calls_; if (cv.ncv_->stiff() == 0) { @@ -1534,12 +1533,15 @@ static int msolve(CVodeMem m, N_Vector b, N_Vector weight, N_Vector ycur, N_Vect nrn_multithread_job(msolve_thread_part2); nrn_multithread_job(msolve_thread_part3); } else { - nrn_multithread_job(msolve_thread); + nrn_multithread_job(sorted_token, msolve_thread); } return 0; } static int msolve_lvardt(CVodeMem m, N_Vector b, N_Vector weight, N_Vector ycur, N_Vector fcur) { - Cvode* cv = (Cvode*) m->cv_f_data; + auto* const f_typed_data = static_cast*>( + m->cv_f_data); + auto* const cv = f_typed_data->first; + auto const& sorted_token = f_typed_data->second; ++cv->mxb_calls_; if (cv->ncv_->stiff() == 0) { return 0; @@ -1548,17 +1550,19 @@ static int msolve_lvardt(CVodeMem m, N_Vector b, N_Vector weight, N_Vector ycur, return 0; } cv->nth_->_vcv = cv; - cv->solvex_thread(cv->n_vector_data(b, 0), cv->n_vector_data(ycur, 0), cv->nth_); + cv->solvex_thread(sorted_token, cv->n_vector_data(b, 0), cv->n_vector_data(ycur, 0), cv->nth_); cv->nth_->_vcv = 0; return 0; } -static void* msolve_thread(NrnThread* nt) { - int i = nt->id; +static void msolve_thread(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { + int i = nt.id; Cvode* cv = msolve_cv_; - nt->_vcv = cv; - cv->solvex_thread(cv->n_vector_data(msolve_b_, i), cv->n_vector_data(msolve_ycur_, i), nt); - nt->_vcv = 0; - return 0; + nt._vcv = cv; + cv->solvex_thread(sorted_token, + cv->n_vector_data(msolve_b_, i), + cv->n_vector_data(msolve_ycur_, i), + &nt); + nt._vcv = 0; } static void* msolve_thread_part1(NrnThread* nt) { int i = nt->id; @@ -1589,10 +1593,12 @@ static N_Vector f_y_; static N_Vector f_ydot_; static Cvode* f_cv_; static void f_gvardt(realtype t, N_Vector y, N_Vector ydot, void* f_data) { + auto* const f_typed_data = static_cast*>( + f_data); + f_cv_ = f_typed_data->first; // ydot[0] = -y[0]; // N_VIth(ydot, 0) = -N_VIth(y, 0); // printf("f(%g, %p, %p)\n", t, y, ydot); - f_cv_ = (Cvode*) f_data; ++f_cv_->f_calls_; f_t_ = t; f_y_ = y; @@ -1611,47 +1617,53 @@ static void f_gvardt(realtype t, N_Vector y, N_Vector ydot, void* f_data) { nrn_multithread_job(f_thread_ms_part34); } } else if (nrnthread_v_transfer_) { - nrn_multithread_job(f_thread_transfer_part1); + 
nrn_multithread_job(f_typed_data->second, f_thread_transfer_part1); if (nrnmpi_v_transfer_) { (*nrnmpi_v_transfer_)(); } - nrn_multithread_job(f_thread_transfer_part2); + nrn_multithread_job(f_typed_data->second, f_thread_transfer_part2); } else { - nrn_multithread_job(f_thread); + nrn_multithread_job(f_typed_data->second, f_thread); } } else { - nrn_multithread_job(f_thread); + nrn_multithread_job(f_typed_data->second, f_thread); } } static void f_lvardt(realtype t, N_Vector y, N_Vector ydot, void* f_data) { - Cvode* cv = (Cvode*) f_data; + auto* const f_typed_data = static_cast*>( + f_data); + auto* const cv = f_typed_data->first; + auto const& sorted_token = f_typed_data->second; ++cv->f_calls_; cv->nth_->_vcv = cv; - cv->fun_thread(t, cv->n_vector_data(y, 0), cv->n_vector_data(ydot, 0), cv->nth_); + cv->fun_thread(sorted_token, t, cv->n_vector_data(y, 0), cv->n_vector_data(ydot, 0), cv->nth_); cv->nth_->_vcv = 0; } -static void* f_thread(NrnThread* nt) { +static void f_thread(neuron::model_sorted_token const& sorted_token, NrnThread& ntr) { + auto* const nt = &ntr; int i = nt->id; Cvode* cv = f_cv_; nt->_vcv = cv; - cv->fun_thread(f_t_, cv->n_vector_data(f_y_, i), cv->n_vector_data(f_ydot_, i), nt); + cv->fun_thread( + sorted_token, f_t_, cv->n_vector_data(f_y_, i), cv->n_vector_data(f_ydot_, i), &ntr); nt->_vcv = 0; - return 0; } -static void* f_thread_transfer_part1(NrnThread* nt) { +static void f_thread_transfer_part1(neuron::model_sorted_token const& sorted_token, + NrnThread& ntr) { + auto* const nt = &ntr; int i = nt->id; Cvode* cv = f_cv_; nt->_vcv = cv; - cv->fun_thread_transfer_part1(f_t_, cv->n_vector_data(f_y_, i), nt); - return 0; + cv->fun_thread_transfer_part1(sorted_token, f_t_, cv->n_vector_data(f_y_, i), nt); } -static void* f_thread_transfer_part2(NrnThread* nt) { +static void f_thread_transfer_part2(neuron::model_sorted_token const& sorted_token, + NrnThread& ntr) { + auto* const nt = &ntr; int i = nt->id; Cvode* cv = f_cv_; - cv->fun_thread_transfer_part2(cv->n_vector_data(f_ydot_, i), nt); + cv->fun_thread_transfer_part2(sorted_token, cv->n_vector_data(f_ydot_, i), &ntr); nt->_vcv = 0; - return 0; } static void* f_thread_ms_part1(NrnThread* nt) { int i = nt->id; diff --git a/src/nrncvode/cvodeobj.h b/src/nrncvode/cvodeobj.h index 222778119e..19933ff057 100644 --- a/src/nrncvode/cvodeobj.h +++ b/src/nrncvode/cvodeobj.h @@ -15,26 +15,38 @@ class TQueue; typedef std::vector PreSynList; struct BAMech; struct NrnThread; -class PlayRecList; class PlayRecord; class STEList; class HTList; +namespace neuron { +struct model_sorted_token; +} -class CvMembList { - public: - CvMembList(); - virtual ~CvMembList(); - CvMembList* next; - Memb_list* ml; - int index; +/** + * @brief Wrapper for Memb_list in CVode related code. + * + * This gets used in two ways: + * - with ml.size() == 1 and ml[0].nodecount > 1 when the mechanism instances to be processed are + * contiguous + * - with ml.size() >= 1 and ml[i].nodecount == 1 when non-contiguous instances need to be processed + * + * generic configurations with ml.size() and ml[i].nodecount both larger than one are not supported. 
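To make the two supported shapes described in this comment concrete, here is a small standalone check of the invariant, with Memb_list reduced to a bare nodecount for illustration (FakeMembList and shape_is_supported are invented):

#include <cassert>
#include <vector>

struct FakeMembList { int nodecount = 0; };

// either one contiguous chunk, or many single-instance chunks
static bool shape_is_supported(const std::vector<FakeMembList>& ml) {
    if (ml.size() == 1) {
        return true;  // contiguous mode: ml[0].nodecount may be > 1
    }
    for (const auto& m: ml) {
        if (m.nodecount != 1) {
            return false;  // non-contiguous mode requires one instance per entry
        }
    }
    return true;
}

int main() {
    assert(shape_is_supported({{5}}));            // contiguous
    assert(shape_is_supported({{1}, {1}, {1}}));  // non-contiguous
    assert(!shape_is_supported({{2}, {3}}));      // unsupported generic configuration
}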
+ */ +struct CvMembList { + CvMembList(int type) + : index{type} { + ml.emplace_back(type); + } + CvMembList* next{}; + std::vector ml{}; + int index{}; }; -class BAMechList { - public: +struct BAMechList { BAMechList(BAMechList** first); BAMechList* next; BAMech* bam; - Memb_list* ml; + std::vector ml; static void destruct(BAMechList** first); }; @@ -62,15 +74,14 @@ class CvodeThreadData { Node** v_parent_; PreSynList* psl_th_; // with a threshold HTList* watch_list_; - double** pv_; - double** pvdot_; + std::vector> pv_, pvdot_; int nvoffset_; // beginning of this threads states int nvsize_; // total number of states for this thread int neq_v_; // for daspk, number of voltage states for this thread int nonvint_offset_; // synonym for neq_v_. Beginning of this threads nonvint variables. int nonvint_extra_offset_; // extra states (probably Python). Not scattered or gathered. - PlayRecList* record_; - PlayRecList* play_; + std::vector* play_; + std::vector* record_; }; class Cvode { @@ -79,9 +90,9 @@ class Cvode { Cvode(); virtual ~Cvode(); - virtual int handle_step(NetCvode*, double); + virtual int handle_step(neuron::model_sorted_token const&, NetCvode*, double); virtual int init(double t); - virtual int advance_tn(); + virtual int advance_tn(neuron::model_sorted_token const&); virtual int interpolate(double t); virtual double tn() { return tn_; @@ -114,7 +125,7 @@ class Cvode { void alloc_cvode(); void alloc_daspk(); int cvode_init(double); - int cvode_advance_tn(); + int cvode_advance_tn(neuron::model_sorted_token const&); int cvode_interpolate(double); int daspk_init(double); int daspk_advance_tn(); @@ -123,13 +134,20 @@ class Cvode { public: N_Vector nvnew(long); int setup(N_Vector ypred, N_Vector fpred); - int solvex_thread(double* b, double* y, NrnThread* nt); + int solvex_thread(neuron::model_sorted_token const&, double* b, double* y, NrnThread* nt); int solvex_thread_part1(double* b, NrnThread* nt); int solvex_thread_part2(NrnThread* nt); int solvex_thread_part3(double* b, NrnThread* nt); - void fun_thread(double t, double* y, double* ydot, NrnThread* nt); - void fun_thread_transfer_part1(double t, double* y, NrnThread* nt); - void fun_thread_transfer_part2(double* ydot, NrnThread* nt); + void fun_thread(neuron::model_sorted_token const&, + double t, + double* y, + double* ydot, + NrnThread* nt); + void fun_thread_transfer_part1(neuron::model_sorted_token const&, + double t, + double* y, + NrnThread* nt); + void fun_thread_transfer_part2(neuron::model_sorted_token const&, double* ydot, NrnThread* nt); void fun_thread_ms_part1(double t, double* y, NrnThread* nt); void fun_thread_ms_part2(NrnThread* nt); void fun_thread_ms_part3(NrnThread* nt); @@ -153,8 +171,8 @@ class Cvode { void play_add(PlayRecord*); void play_continuous(double t); void play_continuous_thread(double t, NrnThread*); - void do_ode(NrnThread*); - void do_nonode(NrnThread* nt = 0); + void do_ode(neuron::model_sorted_token const&, NrnThread&); + void do_nonode(neuron::model_sorted_token const&, NrnThread* nt = 0); double* n_vector_data(N_Vector, int); private: @@ -163,17 +181,17 @@ class Cvode { void init_eqn(); void daspk_init_eqn(); void matmeth(); - void nocap_v(NrnThread*); + void nocap_v(neuron::model_sorted_token const&, NrnThread*); void nocap_v_part1(NrnThread*); void nocap_v_part2(NrnThread*); void nocap_v_part3(NrnThread*); - void solvemem(NrnThread*); + void solvemem(neuron::model_sorted_token const&, NrnThread*); void atolvec_alloc(int); double h(); N_Vector ewtvec(); N_Vector acorvec(); void 
new_no_cap_memb(CvodeThreadData&, NrnThread*); - void before_after(BAMechList*, NrnThread*); + void before_after(neuron::model_sorted_token const&, BAMechList*, NrnThread*); public: // daspk @@ -185,7 +203,7 @@ class Cvode { void daspk_gather_y(N_Vector); void daspk_scatter_y(double*, int); void daspk_gather_y(double*, int); - void scatter_y(double*, int); + void scatter_y(neuron::model_sorted_token const&, double*, int); void gather_y(N_Vector); void gather_y(double*, int); void scatter_ydot(double*, int); @@ -223,17 +241,18 @@ class Cvode { double tstop_begin_, tstop_end_; private: - void rhs(NrnThread*); - void rhs_memb(CvMembList*, NrnThread*); - void lhs(NrnThread*); - void lhs_memb(CvMembList*, NrnThread*); + void rhs(neuron::model_sorted_token const&, NrnThread*); + void rhs_memb(neuron::model_sorted_token const&, CvMembList*, NrnThread*); + void lhs(neuron::model_sorted_token const&, NrnThread*); + void lhs_memb(neuron::model_sorted_token const&, CvMembList*, NrnThread*); void triang(NrnThread*); void bksub(NrnThread*); private: // segregation of old vectorized information to per cell info friend class NetCvode; - bool is_owner(double*); // for play and record in local step context. + bool is_owner(neuron::container::data_handle const&); // for play and record in + // local step context. bool local_; void daspk_setup1_tree_matrix(); // unused void daspk_setup2_tree_matrix(); // unused @@ -241,12 +260,12 @@ class Cvode { private: int prior2init_; -#if PARANEURON +#if NRNMPI public: bool use_partrans_; int global_neq_; int opmode_; // 1 advance, 2 interpolate, 3 init; for testing -#endif // PARANEURON +#endif // NRNMPI }; #endif diff --git a/src/nrncvode/cvodestb.cpp b/src/nrncvode/cvodestb.cpp index 63e5e0d111..25dd21b74f 100644 --- a/src/nrncvode/cvodestb.cpp +++ b/src/nrncvode/cvodestb.cpp @@ -29,7 +29,6 @@ void nrn_deliver_events(NrnThread*); void init_net_events(); void nrn_record_init(); void nrn_play_init(); -void fixed_record_continuous(NrnThread* nt); void fixed_play_continuous(NrnThread* nt); void nrn_solver_prepare(); static void check_thresh(NrnThread*); @@ -88,9 +87,9 @@ void fixed_play_continuous(NrnThread* nt) { } } -void fixed_record_continuous(NrnThread* nt) { +void fixed_record_continuous(neuron::model_sorted_token const& cache_token, NrnThread& nt) { if (net_cvode_instance) { - net_cvode_instance->fixed_record_continuous(nt); + net_cvode_instance->fixed_record_continuous(cache_token, nt); } } diff --git a/src/nrncvode/cvtrset.cpp b/src/nrncvode/cvtrset.cpp index 068f686a87..f7edae1ce6 100644 --- a/src/nrncvode/cvtrset.cpp +++ b/src/nrncvode/cvtrset.cpp @@ -9,9 +9,7 @@ #include "membfunc.h" #include "neuron.h" -void Cvode::rhs(NrnThread* _nt) { - int i; - +void Cvode::rhs(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) { CvodeThreadData& z = CTD(_nt->id); if (diam_changed) { recalc_diam(); @@ -19,25 +17,25 @@ void Cvode::rhs(NrnThread* _nt) { if (z.v_node_count_ == 0) { return; } - for (i = 0; i < z.v_node_count_; ++i) { + for (int i = 0; i < z.v_node_count_; ++i) { NODERHS(z.v_node_[i]) = 0.; } - if (_nt->_nrn_fast_imem) { - double* p = _nt->_nrn_fast_imem->_nrn_sav_rhs; - for (i = 0; i < z.v_node_count_; ++i) { + auto const vec_sav_rhs = _nt->node_sav_rhs_storage(); + if (vec_sav_rhs) { + for (int i = 0; i < z.v_node_count_; ++i) { Node* nd = z.v_node_[i]; - p[nd->v_node_index] = 0; + vec_sav_rhs[nd->v_node_index] = 0; } } - rhs_memb(z.cv_memb_list_, _nt); - nrn_nonvint_block_current(_nt->end, _nt->_actual_rhs, _nt->id); + rhs_memb(sorted_token, 
z.cv_memb_list_, _nt); + auto const vec_rhs = _nt->node_rhs_storage(); + nrn_nonvint_block_current(_nt->end, vec_rhs, _nt->id); - if (_nt->_nrn_fast_imem) { - double* p = _nt->_nrn_fast_imem->_nrn_sav_rhs; - for (i = 0; i < z.v_node_count_; ++i) { - Node* nd = z.v_node_[i]; - p[nd->v_node_index] -= NODERHS(nd); + if (vec_sav_rhs) { + for (int i = 0; i < z.v_node_count_; ++i) { + auto const node_index = z.v_node_[i]->v_node_index; + vec_sav_rhs[node_index] -= vec_rhs[node_index]; } } @@ -45,27 +43,30 @@ void Cvode::rhs(NrnThread* _nt) { /* now the internal axial currents. rhs += ai_j*(vi_j - vi) */ - for (i = z.rootnodecount_; i < z.v_node_count_; ++i) { - Node* nd = z.v_node_[i]; - Node* pnd = z.v_parent_[i]; - double dv = NODEV(pnd) - NODEV(nd); - /* our connection coefficients are negative so */ - NODERHS(nd) -= NODEB(nd) * dv; - NODERHS(pnd) += NODEA(nd) * dv; + auto const vec_a = _nt->node_a_storage(); + auto const vec_b = _nt->node_b_storage(); + auto const vec_v = _nt->node_voltage_storage(); + for (auto i = z.rootnodecount_; i < z.v_node_count_; ++i) { + auto const node_i = z.v_node_[i]->v_node_index; + auto const parent_i = z.v_parent_[i]->v_node_index; + auto const dv = vec_v[parent_i] - vec_v[node_i]; + // our connection coefficients are negative + vec_rhs[node_i] -= vec_b[node_i] * dv; + vec_rhs[parent_i] += vec_a[node_i] * dv; } } -void Cvode::rhs_memb(CvMembList* cmlist, NrnThread* _nt) { +void Cvode::rhs_memb(neuron::model_sorted_token const& sorted_token, + CvMembList* cmlist, + NrnThread* _nt) { errno = 0; for (CvMembList* cml = cmlist; cml; cml = cml->next) { - Memb_func* mf = memb_func + cml->index; - Pvmi s = mf->current; - if (s) { - Memb_list* ml = cml->ml; - (*s)(_nt, ml, cml->index); - if (errno) { - if (nrn_errno_check(cml->index)) { - hoc_warning("errno set during calculation of currents", (char*) 0); + const Memb_func& mf = memb_func[cml->index]; + if (auto const current = mf.current; current) { + for (auto& ml: cml->ml) { + current(sorted_token, _nt, &ml, cml->index); + if (errno && nrn_errno_check(cml->index)) { + hoc_warning("errno set during calculation of currents", nullptr); } } } @@ -75,7 +76,7 @@ void Cvode::rhs_memb(CvMembList* cmlist, NrnThread* _nt) { activclamp_rhs(); } -void Cvode::lhs(NrnThread* _nt) { +void Cvode::lhs(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) { int i; CvodeThreadData& z = CTD(_nt->id); @@ -86,11 +87,13 @@ void Cvode::lhs(NrnThread* _nt) { NODED(z.v_node_[i]) = 0.; } - lhs_memb(z.cv_memb_list_, _nt); - nrn_nonvint_block_conductance(_nt->end, _nt->_actual_rhs, _nt->id); - nrn_cap_jacob(_nt, z.cmlcap_->ml); + lhs_memb(sorted_token, z.cv_memb_list_, _nt); + nrn_nonvint_block_conductance(_nt->end, _nt->node_rhs_storage(), _nt->id); + for (auto& ml: z.cmlcap_->ml) { + nrn_cap_jacob(sorted_token, _nt, &ml); + } - // _nrn_fast_imem not needed since exact icap added in nrn_div_capacity + // fast_imem not needed since exact icap added in nrn_div_capacity /* now add the axial currents */ for (i = 0; i < z.v_node_count_; ++i) { @@ -101,18 +104,17 @@ void Cvode::lhs(NrnThread* _nt) { } } -void Cvode::lhs_memb(CvMembList* cmlist, NrnThread* _nt) { +void Cvode::lhs_memb(neuron::model_sorted_token const& sorted_token, + CvMembList* cmlist, + NrnThread* _nt) { CvMembList* cml; for (cml = cmlist; cml; cml = cml->next) { - Memb_func* mf = memb_func + cml->index; - Memb_list* ml = cml->ml; - Pvmi s = mf->jacob; - if (s) { - Pvmi s = mf->jacob; - (*s)(_nt, ml, cml->index); - if (errno) { - if (nrn_errno_check(cml->index)) { - 
hoc_warning("errno set during calculation of di/dv", (char*) 0); + const Memb_func& mf = memb_func[cml->index]; + if (auto const jacob = mf.jacob; jacob) { + for (auto& ml: cml->ml) { + jacob(sorted_token, _nt, &ml, cml->index); + if (errno && nrn_errno_check(cml->index)) { + hoc_warning("errno set during calculation of di/dv", nullptr); } } } diff --git a/src/nrncvode/hocevent.cpp b/src/nrncvode/hocevent.cpp index 49886c671e..e594f95a79 100644 --- a/src/nrncvode/hocevent.cpp +++ b/src/nrncvode/hocevent.cpp @@ -8,8 +8,8 @@ using HocEventPool = MutexPool; HocEventPool* HocEvent::hepool_; HocEvent::HocEvent() { - stmt_ = nil; - ppobj_ = nil; + stmt_ = nullptr; + ppobj_ = nullptr; reinit_ = 0; } @@ -32,7 +32,7 @@ HocEvent* HocEvent::alloc(const char* stmt, Object* ppobj, int reinit, Object* p nrn_hoc_unlock(); } HocEvent* he = hepool_->alloc(); - he->stmt_ = nil; + he->stmt_ = nullptr; he->ppobj_ = ppobj; he->reinit_ = reinit; if (pyact) { @@ -46,7 +46,7 @@ HocEvent* HocEvent::alloc(const char* stmt, Object* ppobj, int reinit, Object* p void HocEvent::hefree() { if (stmt_) { delete stmt_; - stmt_ = nil; + stmt_ = nullptr; } hepool_->hpfree(this); } @@ -54,7 +54,7 @@ void HocEvent::hefree() { void HocEvent::clear() { if (stmt_) { delete stmt_; - stmt_ = nil; + stmt_ = nullptr; } } @@ -135,7 +135,7 @@ DiscreteEvent* HocEvent::savestate_save() { void HocEvent::savestate_restore(double tt, NetCvode* nc) { // pr("HocEvent::savestate_restore", tt, nc); - HocEvent* he = alloc(nil, nil, 0); + HocEvent* he = alloc(nullptr, nullptr, 0); NrnThread* nt = nrn_threads; if (stmt_) { if (stmt_->pyobject()) { @@ -156,7 +156,7 @@ DiscreteEvent* HocEvent::savestate_read(FILE* f) { HocEvent* he = new HocEvent(); int have_stmt, have_obj, index; char stmt[256], objname[100], buf[200]; - Object* obj = nil; + Object* obj = nullptr; // nrn_assert(fscanf(f, "%d %d\n", &have_stmt, &have_obj) == 2); nrn_assert(fgets(buf, 200, f)); nrn_assert(sscanf(buf, "%d %d\n", &have_stmt, &have_obj) == 2); diff --git a/src/nrncvode/netcon.h b/src/nrncvode/netcon.h index c317b619f9..e4a29a8bd6 100644 --- a/src/nrncvode/netcon.h +++ b/src/nrncvode/netcon.h @@ -2,11 +2,9 @@ #define netcon_h #undef check -#if MAC -#define NetCon nrniv_Dinfo -#endif #include "htlist.h" +#include "neuron/container/data_handle.hpp" #include "nrnmpi.h" #include "nrnneosm.h" #include "pool.h" @@ -261,7 +259,7 @@ class STECondition: public WatchCondition { class PreSyn: public ConditionEvent { public: - PreSyn(double* src, Object* osrc, Section* ssrc = nil); + PreSyn(neuron::container::data_handle src, Object* osrc, Section* ssrc = nullptr); virtual ~PreSyn(); virtual void send(double sendtime, NetCvode*, NrnThread*); virtual void deliver(double, NetCvode*, NrnThread*); @@ -281,15 +279,15 @@ class PreSyn: public ConditionEvent { static DiscreteEvent* savestate_read(FILE*); virtual double value() { + assert(thvar_); return *thvar_ - threshold_; } void update(Observable*); void disconnect(Observable*); - void update_ptr(double*); void record_stmt(const char*); void record_stmt(Object*); - void record(IvocVect*, IvocVect* idvec = nil, int rec_id = 0); + void record(IvocVect*, IvocVect* idvec = nullptr, int rec_id = 0); void record(double t); void init(); double mindelay(); @@ -298,7 +296,7 @@ class PreSyn: public ConditionEvent { NetConPList dil_; double threshold_; double delay_; - double* thvar_; + neuron::container::data_handle thvar_{}; Object* osrc_; Section* ssrc_; IvocVect* tvec_; @@ -358,7 +356,7 @@ class HocEvent: public DiscreteEvent { HocEvent(); 
virtual ~HocEvent(); virtual void pr(const char*, double t, NetCvode*); - static HocEvent* alloc(const char* stmt, Object*, int, Object* pyact = nil); + static HocEvent* alloc(const char* stmt, Object*, int, Object* pyact = nullptr); void hefree(); void clear(); // called by hepool_->free_all virtual void deliver(double, NetCvode*, NrnThread*); diff --git a/src/nrncvode/netcvode.cpp b/src/nrncvode/netcvode.cpp index 44b324d1c6..f86afcfdce 100644 --- a/src/nrncvode/netcvode.cpp +++ b/src/nrncvode/netcvode.cpp @@ -7,7 +7,7 @@ #include #include #include -#include +#include #include "classreg.h" #include "nrnoc2iv.h" #include "parse.hpp" @@ -22,6 +22,7 @@ #include "shared/sundialsmath.h" #include "kssingle.h" #include "ocnotify.h" +#include "utils/enumerate.h" #if HAVE_IV #include "ivoc.h" #include "glinerec.h" @@ -39,10 +40,9 @@ #include "nrniv_mf.h" #include "nrnste.h" #include "profile.h" -#include "treeset.h" #include "utils/profile/profiler_interface.h" -#include +#include #include #include @@ -96,7 +96,7 @@ extern Symlist* hoc_built_in_symlist; extern Symlist* hoc_top_level_symlist; extern TQueue* net_cvode_instance_event_queue(NrnThread*); extern hoc_Item* net_cvode_instance_psl(); -extern PlayRecList* net_cvode_instance_prl(); +extern std::vector* net_cvode_instance_prl(); extern void nrn_update_ps2nt(); extern void nrn_use_busywait(int); void* nrn_interthread_enqueue(NrnThread*); @@ -194,8 +194,6 @@ static void* neosim_entity_; #endif void ncs2nrn_integrate(double tstop); -void nrn_fixed_step(); -void nrn_fixed_step_group(int); extern void (*nrn_allthread_handle)(); static void allthread_handle_callback() { net_cvode_instance->allthread_handle(); @@ -207,10 +205,10 @@ static void allthread_handle_callback() { // NetCon connections were specified by the hoc cvode method // Cvode.ncs_netcons(List nc_inlist, List nc_outlist) . The former list // is normally in one to one correspondence with all the synapses. These -// NetCon objects have those synapses as targets and nil sources. +// NetCon objects have those synapses as targets and nullptr sources. // The latter list is normally in one to one correspondence with // the cells of the subnet. Those NetCon objects have sources of the form -// cell.axon.v(1) and nil targets. +// cell.axon.v(1) and nullptr targets. // Note that the program that creates the hoc file knows // how to tell NCS that a particular integer corresponds to a particular // NetCon @@ -303,7 +301,7 @@ hoc_Item* net_cvode_instance_psl() { return net_cvode_instance->psl_; } -PlayRecList* net_cvode_instance_prl() { +std::vector* net_cvode_instance_prl() { return net_cvode_instance->playrec_list(); } @@ -351,7 +349,7 @@ PlayRecordSave* PlayRecord::savestate_save() { } PlayRecordSave* PlayRecord::savestate_read(FILE* f) { - PlayRecordSave* prs = nil; + PlayRecordSave* prs = nullptr; int type, index; char buf[100]; nrn_assert(fgets(buf, 100, f)); @@ -406,7 +404,7 @@ void NetCon::disconnect(Observable* o) { // printf("%s disconnect from ", hoc_object_name(obj_)); if (target_->ob == ob) { // printf("target %s\n", hoc_object_name(target_->ob)); - target_ = nil; + target_ = nullptr; active_ = 0; } } @@ -444,22 +442,23 @@ static PreSyn* unused_presyn; // holds the NetCons with no source static double nc_preloc(void* v) { // user must pop section stack after call NetCon* d = (NetCon*) v; - Section* s = nil; + Section* s = nullptr; if (d->src_) { s = d->src_->ssrc_; } if (s) { nrn_pushsec(s); - double* v = d->src_->thvar_; + // This is a special handle, not just a pointer. 
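nc_preloc (and nc_preseg below) stop comparing raw double* addresses and instead compare the thvar_ handle against Node::v_handle(). A handle identifies a value by container and stable index rather than by address, so it stays meaningful if the underlying storage is reallocated or permuted. A toy standalone illustration of the difference; Handle here is invented, not NEURON's data_handle:

#include <cassert>
#include <cstddef>
#include <vector>

// toy handle: identifies a value by (container, index), not by raw address
struct Handle {
    std::vector<double>* container;
    std::size_t index;
    double& operator*() const { return (*container)[index]; }
    friend bool operator==(const Handle& a, const Handle& b) {
        return a.container == b.container && a.index == b.index;
    }
};

int main() {
    std::vector<double> voltages{-65.0, -70.0};
    double* raw = &voltages[1];
    Handle h{&voltages, 1};
    voltages.reserve(1024);  // may reallocate: raw may now dangle...
    assert(*h == -70.0);     // ...but the handle still resolves correctly
    Handle other{&voltages, 1};
    assert(h == other);      // identity comparison keeps working, too
    (void) raw;
}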
+ auto const& v = d->src_->thvar_; nrn_parent_info(s); // make sure parentnode exists // there is no efficient search for the location of // an arbitrary variable. Search only for v at 0 - 1. // Otherwise return .5 . - if (v == &NODEV(s->parentnode)) { + if (v == s->parentnode->v_handle()) { return nrn_arc_position(s, s->parentnode); } for (int i = 0; i < s->nnode; ++i) { - if (v == &NODEV(s->pnode[i])) { + if (v == s->pnode[i]->v_handle()) { return nrn_arc_position(s, s->pnode[i]); } } @@ -478,16 +477,16 @@ static Object** nc_preseg(void* v) { // user must pop section stack after call s = d->src_->ssrc_; } if (s && nrnpy_seg_from_sec_x) { - double* v = d->src_->thvar_; + auto const& v = d->src_->thvar_; nrn_parent_info(s); // make sure parentnode exists // there is no efficient search for the location of // an arbitrary variable. Search only for v at 0 - 1. // Otherwise return NULL. - if (v == &NODEV(s->parentnode)) { + if (v == s->parentnode->v_handle()) { x = nrn_arc_position(s, s->parentnode); } for (int i = 0; i < s->nnode; ++i) { - if (v == &NODEV(s->pnode[i])) { + if (v == s->pnode[i]->v_handle()) { x = nrn_arc_position(s, s->pnode[i]); continue; } @@ -524,7 +523,7 @@ static Object** nc_postseg(void* v) { // user must pop section stack after call static Object** nc_syn(void* v) { NetCon* d = (NetCon*) v; - Object* ob = nil; + Object* ob = nullptr; if (d->target_) { ob = d->target_->ob; } @@ -533,7 +532,7 @@ static Object** nc_syn(void* v) { static Object** nc_pre(void* v) { NetCon* d = (NetCon*) v; - Object* ob = nil; + Object* ob = nullptr; if (d->src_) { ob = d->src_->osrc_; } @@ -591,7 +590,7 @@ static Object** nc_postcelllist(void* v) { OcList* o; Object** po = newoclist(1, o); hoc_Item* q; - Object* cell = nil; + Object* cell = nullptr; if (d->target_ && d->target_->sec) { cell = nrn_sec2cell(d->target_->sec); } @@ -612,7 +611,7 @@ static Object** nc_precelllist(void* v) { OcList* o; Object** po = newoclist(1, o); hoc_Item* q; - Object* cell = nil; + Object* cell = nullptr; if (d->src_ && d->src_->ssrc_) { cell = nrn_sec2cell(d->src_->ssrc_); } @@ -639,7 +638,7 @@ static Object** nc_precell(void* v) { static Object** nc_postcell(void* v) { NetCon* d = (NetCon*) v; - Object* ob = nil; + Object* ob = nullptr; if (d->target_ && d->target_->sec) { ob = nrn_sec2cell(d->target_->sec); } @@ -648,14 +647,14 @@ static Object** nc_postcell(void* v) { static double nc_setpost(void* v) { NetCon* d = (NetCon*) v; - Object* otar = nil; + Object* otar = nullptr; if (ifarg(1)) { otar = *hoc_objgetarg(1); } if (otar && !is_point_process(otar)) { hoc_execerror("argument must be a point process or NULLobject", 0); } - Point_process* tar = nil; + Point_process* tar = nullptr; if (otar) { tar = ob2pntproc(otar); } @@ -663,7 +662,7 @@ static double nc_setpost(void* v) { #if DISCRETE_EVENT_OBSERVER ObjObservable::Detach(d->target_->ob, d); #endif - d->target_ = nil; + d->target_ = nullptr; } int cnt = 1; if (tar) { @@ -747,7 +746,7 @@ static double nc_record(void* v) { d->src_->record_stmt(*hoc_objgetarg(1)); } } else { - d->src_->record((IvocVect*) nil); + d->src_->record(nullptr); } return 0; } @@ -763,7 +762,7 @@ static double nc_srcgid(void* v) { static Object** nc_get_recordvec(void* v) { NetCon* d = (NetCon*) v; - Object* ob = nil; + Object* ob = nullptr; if (d->src_ && d->src_->tvec_) { ob = d->src_->tvec_->obj_; } @@ -821,7 +820,7 @@ static void steer_val(void* v) { static double dummy = 0.; d->chksrc(); if (d->src_->thvar_) { - hoc_pushpx(d->src_->thvar_); + hoc_push(d->src_->thvar_); } else 
{ dummy = 0.; hoc_pushpx(&dummy); @@ -838,16 +837,16 @@ static void* cons(Object* o) { hoc_execerror("CVode instance must exist", 0); } // source, target, threshold, delay, magnitude - Object *osrc = nil, *otar; - Section* srcsec = nil; - double* psrc = nil; + Object *osrc = nullptr, *otar; + Section* srcsec = nullptr; + neuron::container::data_handle psrc{}; if (hoc_is_object_arg(1)) { osrc = *hoc_objgetarg(1); if (osrc && !is_point_process(osrc)) { hoc_execerror("if arg 1 is an object it must be a point process or NULLObject", 0); } } else { - psrc = hoc_pgetarg(1); + psrc = hoc_hgetarg(1); srcsec = chk_access(); } otar = *hoc_objgetarg(2); @@ -879,18 +878,18 @@ void NetCon_reg() { Symbol* s; s = hoc_table_lookup("delay", nc->u.ctemplate->symtable); s->type = VAR; - s->arayinfo = nil; + s->arayinfo = nullptr; s = hoc_table_lookup("x", nc->u.ctemplate->symtable); s->type = VAR; - s->arayinfo = nil; + s->arayinfo = nullptr; s = hoc_table_lookup("threshold", nc->u.ctemplate->symtable); s->type = VAR; - s->arayinfo = nil; + s->arayinfo = nullptr; s = hoc_table_lookup("weight", nc->u.ctemplate->symtable); s->type = VAR; s->arayinfo = new Arrayinfo; s->arayinfo->refcount = 1; - s->arayinfo->a_varn = nil; + s->arayinfo->a_varn = nullptr; s->arayinfo->nsub = 1; s->arayinfo->sub[0] = 1; } @@ -930,102 +929,78 @@ Object** NetCvode::netconlist() { Object** po = newoclist(4, o); - Object *opre = nil, *opost = nil, *otar = nil; - Regexp *spre = nil, *spost = nil, *star = nil; - char* s; - int n; + Object *opre = nullptr, *opost = nullptr, *otar = nullptr; + std::regex spre, spost, star; if (hoc_is_object_arg(1)) { opre = *hoc_objgetarg(1); } else { - s = gargstr(1); - if (s[0] == '\0') { - spre = new Regexp(".*"); + std::string s(gargstr(1)); + if (s.empty()) { + spre = std::regex(".*"); } else { - spre = new Regexp(escape_bracket(s)); - } - if (!spre->pattern()) { - delete std::exchange(spre, nullptr); - hoc_execerror(gargstr(1), "not a valid regular expression"); + try { + spre = std::regex(escape_bracket(s.data())); + } catch (std::regex_error&) { + hoc_execerror(gargstr(1), "not a valid regular expression"); + } } } if (hoc_is_object_arg(2)) { opost = *hoc_objgetarg(2); } else { - s = gargstr(2); - if (s[0] == '\0') { - spost = new Regexp(".*"); + std::string s(gargstr(2)); + if (s.empty()) { + spost = std::regex(".*"); } else { - spost = new Regexp(escape_bracket(s)); - } - if (!spost->pattern()) { - delete std::exchange(spost, nullptr); - delete std::exchange(spre, nullptr); - hoc_execerror(gargstr(2), "not a valid regular expression"); + try { + spost = std::regex(escape_bracket(s.data())); + } catch (std::regex_error&) { + hoc_execerror(gargstr(2), "not a valid regular expression"); + } } } if (hoc_is_object_arg(3)) { otar = *hoc_objgetarg(3); } else { - s = gargstr(3); - if (s[0] == '\0') { - star = new Regexp(".*"); + std::string s(gargstr(3)); + if (s.empty()) { + star = std::regex(".*"); } else { - star = new Regexp(escape_bracket(s)); - } - if (!star->pattern()) { - delete std::exchange(star, nullptr); - delete std::exchange(spre, nullptr); - delete std::exchange(spost, nullptr); - hoc_execerror(gargstr(3), "not a valid regular expression"); + try { + star = std::regex(escape_bracket(s.data())); + } catch (std::regex_error&) { + hoc_execerror(gargstr(3), "not a valid regular expression"); + } } } - bool b; hoc_Item* q; if (psl_) { ITERATE(q, psl_) { PreSyn* ps = (PreSyn*) VOIDITM(q); - b = false; + bool b = false; if (ps->ssrc_) { Object* precell = nrn_sec2cell(ps->ssrc_); if (opre) { - 
if (precell == opre) { - b = true; - } else { - b = false; - } + b = precell == opre; } else { - s = hoc_object_name(precell); - n = strlen(s); - if (spre->Match(s, n, 0) > 0) { - b = true; - } else { - b = false; - } + std::string s(hoc_object_name(precell)); + b = std::regex_search(s, spre); } } else if (ps->osrc_) { Object* presyn = ps->osrc_; if (opre) { - if (presyn == opre) { - b = true; - } else { - b = false; - } + b = presyn == opre; } else { - s = hoc_object_name(presyn); - n = strlen(s); - if (spre->Match(s, n, 0) > 0) { - b = true; - } else { - b = false; - } + std::string s(hoc_object_name(presyn)); + b = std::regex_search(s, spre); } } - if (b == true) { + if (b) { for (const auto& d: ps->dil_) { - Object* postcell = nil; - Object* target = nil; + Object* postcell = nullptr; + Object* target = nullptr; if (d->target_) { Point_process* p = d->target_; target = p->ob; @@ -1034,37 +1009,19 @@ Object** NetCvode::netconlist() { } } if (opost) { - if (postcell == opost) { - b = true; - } else { - b = false; - } + b = postcell == opost; } else { - s = hoc_object_name(postcell); - n = strlen(s); - if (spost->Match(s, n, 0) > 0) { - b = true; - } else { - b = false; - } + std::string s(hoc_object_name(postcell)); + b = std::regex_search(s, spost); } - if (b == true) { + if (b) { if (otar) { - if (target == otar) { - b = true; - } else { - b = false; - } + b = target == otar; } else { - s = hoc_object_name(target); - n = strlen(s); - if (star->Match(s, n, 0) > 0) { - b = true; - } else { - b = false; - } + std::string s(hoc_object_name(target)); + b = std::regex_search(s, star); } - if (b == true) { + if (b) { o->append(d->obj_); } } @@ -1072,9 +1029,6 @@ Object** NetCvode::netconlist() { } } } - delete std::exchange(spre, nullptr); - delete std::exchange(spost, nullptr); - delete std::exchange(star, nullptr); return po; } @@ -1084,10 +1038,10 @@ NetCvodeThreadData::NetCvodeThreadData() { // tqe_ accessed only by thread i so no locking tqe_ = new TQueue(tpool_, 0); sepool_ = new SelfEventPool(1000, 1); - selfqueue_ = nil; - psl_thr_ = nil; - tq_ = nil; - lcv_ = nil; + selfqueue_ = nullptr; + psl_thr_ = nullptr; + tq_ = nullptr; + lcv_ = nullptr; ite_size_ = ITE_SIZE; ite_cnt_ = 0; unreffed_event_cnt_ = 0; @@ -1188,7 +1142,7 @@ NetCvode::NetCvode(bool single) { atol_ = 1e-3; jacobian_ = 0; stiff_ = 2; - mst_ = nil; + mst_ = nullptr; condition_order_ = 1; null_event_ = new DiscreteEvent(); eps_ = 100. * UNIT_ROUNDOFF; @@ -1196,29 +1150,32 @@ NetCvode::NetCvode(bool single) { nrn_use_fifo_queue_ = false; single_ = single; nrn_use_daspk_ = false; - gcv_ = nil; + gcv_ = nullptr; allthread_hocevents_ = new HocEventList(); pcnt_ = 0; - p = nil; + p = nullptr; p_construct(1); // eventually these should not have to be thread safe - pst_ = nil; + pst_ = nullptr; pst_cnt_ = 0; - psl_ = nil; + psl_ = nullptr; // for parallel network simulations hardly any presyns have // a threshold and it can be very inefficient to check the entire // presyn list for thresholds during the fixed step method. // So keep a threshold list. 
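netconlist() above now builds std::regex objects, wrapping construction in try/catch because std::regex throws std::regex_error on an invalid pattern, and then tests object names with std::regex_search instead of the old Regexp helper. The same pattern in a minimal standalone form:

#include <iostream>
#include <regex>
#include <string>

int main() {
    std::string pattern = "soma.*";  // e.g. a name filter passed in from HOC
    std::regex re;
    try {
        re = std::regex(pattern.empty() ? ".*" : pattern);
    } catch (const std::regex_error&) {
        std::cerr << pattern << ": not a valid regular expression\n";
        return 1;
    }
    // regex_search matches anywhere in the name, which is what the filter wants
    std::string name = "Cell[0].soma";
    std::cout << std::boolalpha << std::regex_search(name, re) << '\n';
}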
- unused_presyn = nil; + unused_presyn = nullptr; structure_change_cnt_ = -1; fornetcon_change_cnt_ = -2; matrix_change_cnt_ = -1; playrec_change_cnt_ = 0; alloc_list(); - prl_ = new PlayRecList(10); - fixed_play_ = new PlayRecList(10); - fixed_record_ = new PlayRecList(10); - vec_event_store_ = nil; + prl_ = new std::vector(); + prl_->reserve(10); + fixed_play_ = new std::vector(); + fixed_play_->reserve(10); + fixed_record_ = new std::vector(); + fixed_record_->reserve(10); + vec_event_store_ = nullptr; if (!record_init_items_) { record_init_items_ = new TQList(); } @@ -1228,7 +1185,7 @@ NetCvode::NetCvode(bool single) { NetCvode::~NetCvode() { MUTDESTRUCT if (net_cvode_instance == (NetCvode*) this) { - net_cvode_instance = nil; + net_cvode_instance = nullptr; } delete_list(); p_construct(0); @@ -1250,11 +1207,11 @@ NetCvode::~NetCvode() { delete std::exchange(pst_, nullptr); delete std::exchange(fixed_play_, nullptr); delete std::exchange(fixed_record_, nullptr); - while (prl_->count()) { - delete prl_->item(prl_->count() - 1); + for (auto& item: *prl_) { + delete item; } delete std::exchange(prl_, nullptr); - unused_presyn = nil; + unused_presyn = nullptr; wl_list_.clear(); delete std::exchange(allthread_hocevents_, nullptr); } @@ -1304,7 +1261,7 @@ void NetCvode::use_daspk(bool b) { // Append new BAMechList item to arg BAMechList::BAMechList(BAMechList** first) { // preserve the list order - next = nil; + next = nullptr; BAMechList* last; if (*first) { for (last = *first; last->next; last = last->next) { @@ -1321,44 +1278,38 @@ void BAMechList::destruct(BAMechList** first) { bn = b->next; delete b; } - *first = nil; + *first = nullptr; } CvodeThreadData::CvodeThreadData() { no_cap_count_ = 0; no_cap_child_count_ = 0; - no_cap_node_ = nil; - no_cap_child_ = nil; - cv_memb_list_ = nil; - cmlcap_ = nil; - cmlext_ = nil; - no_cap_memb_ = nil; - before_breakpoint_ = nil; - after_solve_ = nil; - before_step_ = nil; + no_cap_node_ = nullptr; + no_cap_child_ = nullptr; + cv_memb_list_ = nullptr; + cmlcap_ = nullptr; + cmlext_ = nullptr; + no_cap_memb_ = nullptr; + before_breakpoint_ = nullptr; + after_solve_ = nullptr; + before_step_ = nullptr; rootnodecount_ = 0; v_node_count_ = 0; - v_node_ = nil; - v_parent_ = nil; - psl_th_ = nil; - watch_list_ = nil; - pv_ = nil; - pvdot_ = nil; + v_node_ = nullptr; + v_parent_ = nullptr; + psl_th_ = nullptr; + watch_list_ = nullptr; nvoffset_ = 0; nvsize_ = 0; neq_v_ = nonvint_offset_ = 0; nonvint_extra_offset_ = 0; - record_ = nil; - play_ = nil; + record_ = nullptr; + play_ = nullptr; } CvodeThreadData::~CvodeThreadData() { if (no_cap_memb_) { delete_memb_list(no_cap_memb_); } - if (pv_) { - delete[] pv_; - delete[] pvdot_; - } if (no_cap_node_) { delete[] no_cap_node_; delete[] no_cap_child_; @@ -1439,28 +1390,18 @@ void NetCvode::del_cv_memb_list(Cvode* cvode) { } } -CvMembList::CvMembList() { - index = -1; - ml = new Memb_list; -} -CvMembList::~CvMembList() { - delete ml; -} - void CvodeThreadData::delete_memb_list(CvMembList* cmlist) { CvMembList *cml, *cmlnext; for (cml = cmlist; cml; cml = cmlnext) { - Memb_list* ml = cml->ml; + auto const& ml = cml->ml; cmlnext = cml->next; - delete[] std::exchange(ml->nodelist, nullptr); -#if CACHEVEC - delete[] std::exchange(ml->nodeindices, nullptr); -#endif - if (memb_func[cml->index].hoc_mech) { - delete[] std::exchange(ml->prop, nullptr); - } else { - delete[] std::exchange(ml->_data, nullptr); - delete[] std::exchange(ml->pdata, nullptr); + for (auto& ml: cml->ml) { + delete[] std::exchange(ml.nodelist, 
nullptr); + delete[] std::exchange(ml.nodeindices, nullptr); + delete[] std::exchange(ml.prop, nullptr); + if (!memb_func[cml->index].hoc_mech) { + delete[] std::exchange(ml.pdata, nullptr); + } } delete cml; } @@ -1579,6 +1520,23 @@ bool NetCvode::init_global() { matrix_change_cnt_ = -1; playrec_change_cnt_ = 0; NrnThread* _nt; + // We copy Memb_list* into cml->ml below. At the moment this CVode code + // generates its own complicated set of Memb_list* that operate in + // list-of-handles mode instead of referring to contiguous sets of values. + // This is a shame, as it forces that list-of-handles mode to exist. + // Possible alternatives could include: + // - making the sorting algorithm more sophisticated so that the values that + // are going to be processed together are contiguous -- this might be a + // bit intricate, but it shouldn't be *too* hard to assert that we get the + // right answer -- and it's the only way of making sure the actual compute + // kernels are cache efficient / vectorisable. + // - changing the type used by this code to not be Memb_list but rather some + // Memb_list_with_list_of_handles type and adding extra glue to the code + // generation so that we can call into translated MOD file code using that + // type + // - Using a list of Memb_list with size 1 instead of a single Memb_list + // that holds a list of handles? + auto const cache_token = nrn_ensure_model_data_are_sorted(); if (single_) { if (!gcv_ || gcv_->nctd_ != nrn_nthread) { delete_list(); } del_cv_memb_list(); Cvode& cv = *gcv_; - distribute_dinfo(nil, 0); + distribute_dinfo(nullptr, 0); FOR_THREADS(_nt) { CvodeThreadData& z = cv.ctd_[_nt->id]; z.rootnodecount_ = _nt->ncell; @@ -1597,34 +1555,35 @@ bool NetCvode::init_global() { CvMembList* last = 0; for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { i = tml->index; - Memb_func* mf = memb_func + i; + const Memb_func& mf = memb_func[i]; Memb_list* ml = tml->ml; - if (ml->nodecount && (i == CAP || mf->current || mf->ode_count || mf->ode_matsol || - mf->ode_spec || mf->state)) { + if (ml->nodecount && (i == CAP || mf.current || mf.ode_count || mf.ode_matsol || + mf.ode_spec || mf.state)) { // maintain same order (not reversed) for // singly linked list built below - cml = new CvMembList; + cml = new CvMembList{i}; if (!z.cv_memb_list_) { z.cv_memb_list_ = cml; } else { last->next = cml; } last = cml; - cml->next = nil; - cml->index = i; - cml->ml->nodecount = ml->nodecount; + cml->next = nullptr; + auto const mech_offset = cache_token.thread_cache(_nt->id).mechanism_offset.at( + i); + assert(mech_offset != neuron::container::invalid_row); + assert(cml->ml.size() == 1); + cml->ml[0].set_storage_offset(mech_offset); + cml->ml[0].nodecount = ml->nodecount; // assumes cell info grouped contiguously - cml->ml->nodelist = ml->nodelist; -#if CACHEVEC - cml->ml->nodeindices = ml->nodeindices; -#endif - if (mf->hoc_mech) { - cml->ml->prop = ml->prop; - } else { - cml->ml->_data = ml->_data; - cml->ml->pdata = ml->pdata; + cml->ml[0].nodelist = ml->nodelist; + cml->ml[0].nodeindices = ml->nodeindices; + assert(ml->prop); + cml->ml[0].prop = ml->prop; // used for ode_map even when hoc_mech = false + if (!mf.hoc_mech) { + cml->ml[0].pdata = ml->pdata; } - cml->ml->_thread = ml->_thread; + cml->ml[0]._thread = ml->_thread; } } fill_global_ba(_nt, BEFORE_BREAKPOINT, &z.before_breakpoint_); @@ -1635,12 +1594,12 @@ bool NetCvode::init_global() { // have the global cvode as its nvi field for 
(NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { i = tml->index; - Memb_func* mf = memb_func + i; - if (mf->is_point && !nrn_is_artificial_[i]) { + const Memb_func& mf = memb_func[i]; + if (mf.is_point && !nrn_is_artificial_[i]) { Memb_list* ml = tml->ml; int j; for (j = 0; j < ml->nodecount; ++j) { - auto& datum = mf->hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1]; + auto& datum = mf.hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1]; auto* pp = datum.get(); pp->nvi_ = gcv_; } @@ -1709,8 +1668,8 @@ bool NetCvode::init_global() { // statement that needs to be handled. std::unordered_set ba_candidate; { - std::vector batypes = {BEFORE_STEP, BEFORE_BREAKPOINT, AFTER_SOLVE}; - for (const auto& bat: batypes) { + constexpr std::array batypes{BEFORE_STEP, BEFORE_BREAKPOINT, AFTER_SOLVE}; + for (auto const bat: batypes) { for (BAMech* bam = bamech_[bat]; bam; bam = bam->next) { ba_candidate.insert(bam->type); } @@ -1719,11 +1678,10 @@ bool NetCvode::init_global() { for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { i = tml->index; - Memb_func* mf = memb_func + i; + const Memb_func& mf = memb_func[i]; Memb_list* ml = tml->ml; - if (ml->nodecount && - (mf->current || mf->ode_count || mf->ode_matsol || mf->ode_spec || mf->state || - i == CAP || ba_candidate.count(i) == 1)) { + if (ml->nodecount && (mf.current || mf.ode_count || mf.ode_matsol || mf.ode_spec || + mf.state || i == CAP || ba_candidate.count(i) == 1)) { // maintain same order (not reversed) for // singly linked list built below int j; @@ -1732,22 +1690,23 @@ bool NetCvode::init_global() { Cvode& cv = d.lcv_[cellnum[inode]]; CvodeThreadData& z = cv.ctd_[0]; if (!z.cv_memb_list_) { - cml = new CvMembList; - cml->next = nil; - cml->index = i; - cml->ml->nodecount = 0; + cml = new CvMembList{i}; + cml->next = nullptr; + assert(cml->ml.size() == 1); + cml->ml[0].nodecount = 0; z.cv_memb_list_ = cml; last[cellnum[inode]] = cml; } if (last[cellnum[inode]]->index == i) { - ++last[cellnum[inode]]->ml->nodecount; + assert(last[cellnum[inode]]->ml.size() == 1); + ++last[cellnum[inode]]->ml[0].nodecount; } else { - cml = new CvMembList; + cml = new CvMembList{i}; last[cellnum[inode]]->next = cml; - cml->next = nil; + cml->next = nullptr; last[cellnum[inode]] = cml; - cml->index = i; - cml->ml->nodecount = 1; + assert(cml->ml.size() == 1); + cml->ml[0].nodecount = 1; } } } @@ -1757,49 +1716,39 @@ bool NetCvode::init_global() { for (i = 0; i < d.nlcv_; ++i) { cvml[i] = d.lcv_[i].ctd_[0].cv_memb_list_; for (cml = cvml[i]; cml; cml = cml->next) { - Memb_list* ml = cml->ml; - ml->nodelist = new Node*[ml->nodecount]; -#if CACHEVEC - ml->nodeindices = new int[ml->nodecount]; -#endif - if (memb_func[cml->index].hoc_mech) { - ml->prop = new Prop*[ml->nodecount]; - } else { - ml->_data = new double*[ml->nodecount]; - ml->pdata = new Datum*[ml->nodecount]; - } - ml->nodecount = 0; + // non-contiguous mode, so we're going to create a lot of 1-element Memb_list + // inside cml->ml + cml->ml.reserve(cml->ml[0].nodecount); + // remove the single entry from contiguous mode + cml->ml.clear(); } } // fill pointers (and nodecount) // now list order is from 0 to n_memb_func for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { i = tml->index; - Memb_func* mf = memb_func + i; + const Memb_func& mf = memb_func[i]; Memb_list* ml = tml->ml; - if (ml->nodecount && - (mf->current || mf->ode_count || mf->ode_matsol || mf->ode_spec || mf->state || - i == CAP || ba_candidate.count(i) == 1)) { - int j; - for (j = 0; j < ml->nodecount; ++j) 
{ + if (ml->nodecount && (mf.current || mf.ode_count || mf.ode_matsol || mf.ode_spec || + mf.state || i == CAP || ba_candidate.count(i) == 1)) { + for (int j = 0; j < ml->nodecount; ++j) { int icell = cellnum[ml->nodelist[j]->v_node_index]; if (cvml[icell]->index != i) { cvml[icell] = cvml[icell]->next; assert(cvml[icell] && cvml[icell]->index); } cml = cvml[icell]; - cml->ml->nodelist[cml->ml->nodecount] = ml->nodelist[j]; -#if CACHEVEC - cml->ml->nodeindices[cml->ml->nodecount] = ml->nodeindices[j]; -#endif - if (mf->hoc_mech) { - cml->ml->prop[cml->ml->nodecount] = ml->prop[j]; - } else { - cml->ml->_data[cml->ml->nodecount] = ml->_data[j]; - cml->ml->pdata[cml->ml->nodecount] = ml->pdata[j]; + auto& newml = cml->ml.emplace_back(cml->index /* mechanism type */); + newml.nodecount = 1; + newml.nodelist = new Node*[1]; + newml.nodelist[0] = ml->nodelist[j]; + newml.nodeindices = new int[1]{ml->nodeindices[j]}; + newml.prop = new Prop* [1] { ml->prop[j] }; + if (!mf.hoc_mech) { + newml.set_storage_offset(ml->get_storage_offset() + j); + newml.pdata = new Datum* [1] { ml->pdata[j] }; } - cml->ml->_thread = ml->_thread; - ++cml->ml->nodecount; + newml._thread = ml->_thread; } } } @@ -1811,18 +1760,18 @@ bool NetCvode::init_global() { // artifical cells have no integrator for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { i = tml->index; - Memb_func* mf = memb_func + i; - if (mf->is_point) { + const Memb_func& mf = memb_func[i]; + if (mf.is_point) { Memb_list* ml = tml->ml; int j; for (j = 0; j < ml->nodecount; ++j) { - auto& datum = mf->hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1]; + auto& datum = mf.hoc_mech ? ml->prop[j]->dparam[1] : ml->pdata[j][1]; auto* pp = datum.get(); if (nrn_is_artificial_[i] == 0) { int inode = ml->nodelist[j]->v_node_index; pp->nvi_ = d.lcv_ + cellnum[inode]; } else { - pp->nvi_ = nil; + pp->nvi_ = nullptr; } } } @@ -1837,7 +1786,7 @@ void NetCvode::fill_global_ba(NrnThread* nt, int bat, BAMechList** baml) { for (tbl = nt->tbl[bat]; tbl; tbl = tbl->next) { BAMechList* ba = new BAMechList(baml); ba->bam = tbl->bam; - ba->ml = tbl->ml; + ba->ml.push_back(tbl->ml); } } @@ -1855,10 +1804,11 @@ void NetCvode::fill_local_ba_cnt(int bat, int* celnum, NetCvodeThreadData& d) { assert(cv->nctd_ == 1); for (CvMembList* cml = cv->ctd_[0].cv_memb_list_; cml; cml = cml->next) { if (cml->index == bam->type) { - Memb_list* ml = cml->ml; BAMechList* bl = cvbml(bat, bam, cv); bl->bam = bam; - bl->ml = ml; + for (auto& ml: cml->ml) { + bl->ml.push_back(&ml); + } } } } @@ -2081,14 +2031,15 @@ int NetCvode::solve(double tout) { } } } else if (!gcv_) { // lvardt + auto const cache_token = nrn_ensure_model_data_are_sorted(); if (tout >= 0.) 
{ - time_t rt = time(nil); + time_t rt = time(nullptr); // int cnt = 0; TQueue* tq = p[0].tq_; TQueue* tqe = p[0].tqe_; NrnThread* nt = nrn_threads; while (tq->least_t() < tout || tqe->least_t() <= tout) { - err = local_microstep(nt); + err = local_microstep(cache_token, *nt); if (nrn_allthread_handle) { (*nrn_allthread_handle)(); } @@ -2097,13 +2048,13 @@ int NetCvode::solve(double tout) { } #if HAVE_IV IFGUI - if (rt < time(nil)) { + if (rt < time(nullptr)) { // if (++cnt > 10000) { // cnt = 0; Oc oc; oc.notify(); single_event_run(); - rt = time(nil); + rt = time(nullptr); } ENDGUI #endif @@ -2122,7 +2073,7 @@ int NetCvode::solve(double tout) { double tc = tq->least_t(); double te = p[0].tqe_->least_t(); while (tq->least_t() <= tc && p[0].tqe_->least_t() <= te) { - err = local_microstep(nrn_threads); + err = local_microstep(cache_token, *nrn_threads); if (nrn_allthread_handle) { (*nrn_allthread_handle)(); } @@ -2176,7 +2127,8 @@ bool NetCvode::deliver_event(double til, NrnThread* nt) { } } -int NetCvode::local_microstep(NrnThread* nt) { +int NetCvode::local_microstep(neuron::model_sorted_token const& sorted_token, NrnThread& ntr) { + auto* const nt = &ntr; int err = NVI_SUCCESS; int i = nt->id; if (p[i].tqe_->least_t() <= p[i].tq_->least_t()) { @@ -2184,7 +2136,7 @@ int NetCvode::local_microstep(NrnThread* nt) { } else { TQItem* q = p[i].tq_->least(); Cvode* cv = (Cvode*) q->data_; - err = cv->handle_step(this, 1e100); + err = cv->handle_step(sorted_token, this, 1e100); p[i].tq_->move_least(cv->t_); } return err; @@ -2202,7 +2154,7 @@ int NetCvode::global_microstep() { assert(tdiff == 0.0 || (gcv_->tstop_begin_ <= tt && tt <= gcv_->tstop_end_)); deliver_events(tt, nt); } else { - err = gcv_->handle_step(this, tt); + err = gcv_->handle_step(nrn_ensure_model_data_are_sorted(), this, tt); } if (p[0].tqe_->least_t() < gcv_->t_) { gcv_->interpolate(p[0].tqe_->least_t()); @@ -2210,7 +2162,7 @@ int NetCvode::global_microstep() { return err; } -int Cvode::handle_step(NetCvode* ns, double te) { +int Cvode::handle_step(neuron::model_sorted_token const& sorted_token, NetCvode* ns, double te) { int err = NVI_SUCCESS; // first order correct condition evaluation goes here if (ns->condition_order() == 1) { @@ -2254,7 +2206,7 @@ int Cvode::handle_step(NetCvode* ns, double te) { err = interpolate(tn_); } else { record_continuous(); - err = advance_tn(); + err = advance_tn(sorted_token); // second order correct condition evaluation goes here if (ns->condition_order() == 2) { evaluate_conditions(nth_); @@ -2590,7 +2542,7 @@ void NetCvode::vec_event_store() { // not destroyed when vector destroyed. // should resize to 0 or remove before storing, just keeps incrementing if (vec_event_store_) { - vec_event_store_ = nil; + vec_event_store_ = nullptr; } if (ifarg(1)) { vec_event_store_ = vector_arg(1); @@ -2699,7 +2651,7 @@ void NetCvode::hoc_event(double tt, const char* stmt, Object* ppobj, int reinit, p[i].interthread_send(tt, HocEvent::alloc(stmt, ppobj, reinit, pyact), nt + i); nrn_interthread_enqueue(nt + i); } else { - HocEvent* he = HocEvent::alloc(stmt, nil, 0, pyact); + HocEvent* he = HocEvent::alloc(stmt, nullptr, 0, pyact); // put on each queue. 
The first thread to execute the deliver // for he will set the nrn_allthread_handle // callback which will cause all threads to rejoin at the @@ -2719,7 +2671,7 @@ void NetCvode::hoc_event(double tt, const char* stmt, Object* ppobj, int reinit, } void NetCvode::allthread_handle() { - nrn_allthread_handle = nil; + nrn_allthread_handle = nullptr; t = nt_t; while (!allthread_hocevents_->empty()) { HocEvent* he = (*allthread_hocevents_)[0]; @@ -2844,7 +2796,7 @@ void NetCvode::clear_events() { // invalid item data pointers HocEvent::reclaim(); allthread_hocevents_->clear(); - nrn_allthread_handle = nil; + nrn_allthread_handle = nullptr; #if USENEOSIM if (p_nrn2neosim_send) for (i = 0; i < nlist_; ++i) { @@ -2975,7 +2927,7 @@ void NetCvode::init_events() { Object* obj = OBJ(q); auto* d = static_cast(obj->u.this_pointer); if (d->target_) { - int type = d->target_->prop->_type; + int type = d->target_->prop->_type; // somehow prop is non-deterministically-null here if (pnt_receive_init[type]) { (*pnt_receive_init[type])(d->target_, d->weight_, 0); } else { @@ -3022,7 +2974,7 @@ void NetCvode::deliver_events(double til, NrnThread* nt) { } } -static IvocVect* peqvec; // if not nil then the sorted times on the event queue. +static IvocVect* peqvec; // if not nullptr then the sorted times on the event queue. static void peq(const TQItem*, int); static void peq(const TQItem* q, int) { if (peqvec) { @@ -3041,7 +2993,7 @@ void NetCvode::print_event_queue() { peqvec->resize(0); } p[0].tqe_->forall_callback(peq); - peqvec = nil; + peqvec = nullptr; } static int event_info_type_; @@ -3052,21 +3004,17 @@ static OcList* event_info_list_; // netcon or point_process static void event_info_callback(const TQItem*, int); static void event_info_callback(const TQItem* q, int) { DiscreteEvent* d = (DiscreteEvent*) q->data_; - NetCon* nc; - PreSyn* ps; - SelfEvent* se; - int n = event_info_tvec_->size(); switch (d->type()) { case NetConType: if (event_info_type_ == NetConType) { - nc = (NetCon*) d; + auto* nc = static_cast(d); event_info_tvec_->push_back(q->t_); event_info_list_->append(nc->obj_); } break; case SelfEventType: if (event_info_type_ == SelfEventType) { - se = (SelfEvent*) d; + auto* se = static_cast(d); event_info_tvec_->push_back(q->t_); event_info_flagvec_->push_back(se->flag_); event_info_list_->append(se->target_->ob); @@ -3074,13 +3022,11 @@ static void event_info_callback(const TQItem* q, int) { break; case PreSynType: if (event_info_type_ == NetConType) { - ps = (PreSyn*) d; - for (auto it = ps->dil_.rbegin(); it != ps->dil_.rend(); ++it) { - nc = *it; + auto* ps = static_cast(d); + for (const auto& nc: reverse(ps->dil_)) { double td = nc->delay_ - ps->delay_; event_info_tvec_->push_back(q->t_ + td); event_info_list_->append(nc->obj_); - ++n; } } break; @@ -3200,9 +3146,9 @@ void NetCon::pr(const char* s, double tt, NetCvode* ns) { if (src_) { Printf(" src=%s", src_->osrc_ ? hoc_object_name(src_->osrc_) : secname(src_->ssrc_)); } else { - Printf(" src=nil"); + Printf(" src=nullptr"); } - Printf(" target=%s %.15g\n", (target_ ? hoc_object_name(target_->ob) : "nil"), tt); + Printf(" target=%s %.15g\n", (target_ ? 
hoc_object_name(target_->ob) : "nullptr"), tt); } void PreSyn::send(double tt, NetCvode* ns, NrnThread* nt) { @@ -3271,7 +3217,7 @@ void PreSyn::deliver(double tt, NetCvode* ns, NrnThread* nt) { if (qthresh_) { // the thread is the one that owns the PreSyn assert(nt == nt_); - qthresh_ = nil; + qthresh_ = nullptr; // printf("PreSyn::deliver %s condition event tt=%20.15g\n", ssrc_?secname(ssrc_):"", tt); STATISTICS(deliver_qthresh_); // If local variable time step and send is recorded, @@ -3284,7 +3230,7 @@ void PreSyn::deliver(double tt, NetCvode* ns, NrnThread* nt) { Cvode* cv = (Cvode*) q->data_; if (tt < cv->t_) { int err = NVI_SUCCESS; - err = cv->handle_step(ns, tt); + err = cv->handle_step(nrn_ensure_model_data_are_sorted(), ns, tt); ns->p[i].tq_->move_least(cv->t_); } } @@ -3330,7 +3276,7 @@ void PreSyn::pgvts_deliver(double tt, NetCvode* ns) { NrnThread* nt = 0; assert(0); if (qthresh_) { - qthresh_ = nil; + qthresh_ = nullptr; // printf("PreSyn::deliver %s condition event tt=%20.15g\n", ssrc_?secname(ssrc_):"", tt); STATISTICS(deliver_qthresh_); send(tt, ns, nt); @@ -3674,9 +3620,9 @@ int NetCvode::pgvts_event(double& tt) { } DiscreteEvent* NetCvode::pgvts_least(double& tt, int& op, int& init) { - DiscreteEvent* de = nil; -#if PARANEURON - TQItem* q = nil; + DiscreteEvent* de = nullptr; +#if NRNMPI + TQItem* q = nullptr; if (gcv_->initialize_ && p[0].tqe_->least_t() > gcv_->t_) { tt = gcv_->t_; op = 3; @@ -3724,7 +3670,7 @@ DiscreteEvent* NetCvode::pgvts_least(double& tt, int& op, int& init) { } else if (ts == tt && q && ops == op) { // safe to do this event as well p[0].tqe_->remove(q); } else { - de = nil; + de = nullptr; } #endif return de; @@ -3739,7 +3685,7 @@ int NetCvode::pgvts_cvode(double tt, int op) { gcv_->check_deliver(); } gcv_->record_continuous(); - err = gcv_->advance_tn(); + err = gcv_->advance_tn(nrn_ensure_model_data_are_sorted()); if (condition_order() == 2) { gcv_->evaluate_conditions(); } @@ -3759,7 +3705,7 @@ int NetCvode::pgvts_cvode(double tt, int op) { } bool NetCvode::use_partrans() { -#if PARANEURON +#if NRNMPI if (gcv_) { return gcv_->use_partrans_; } else { @@ -3772,8 +3718,9 @@ bool NetCvode::use_partrans() { void ncs2nrn_integrate(double tstop) { double ts; nrn_use_busywait(1); // just a possibility + auto const cache_token = nrn_ensure_model_data_are_sorted(); if (cvode_active_) { -#if PARANEURON +#if NRNMPI if (net_cvode_instance->use_partrans()) { net_cvode_instance->pgvts(tstop); t = nt_t; @@ -3789,7 +3736,7 @@ void ncs2nrn_integrate(double tstop) { #if 1 int n = (int) ((tstop - nt_t) / dt + 1e-9); if (n > 3 && !nrnthread_v_transfer_) { - nrn_fixed_step_group(n); + nrn_fixed_step_group(cache_token, n); } else #endif { @@ -3802,7 +3749,7 @@ void ncs2nrn_integrate(double tstop) { ts = tstop - .5 * dt; while (nt_t < ts) { #endif - nrn_fixed_step(); + nrn_fixed_step(cache_token); if (stoprun) { break; } @@ -4083,7 +4030,7 @@ void NetCvode::fornetcon_prepare() { int type = nrn_fornetcon_type_[i]; t2i[type] = index; if (nrn_is_artificial_[type]) { - Memb_list* m = memb_list + type; + auto* const m = &memb_list[type]; for (j = 0; j < m->nodecount; ++j) { // Save ForNetConsInfo* as void* to avoid needing to expose the // definition of ForNetConsInfo to translated MOD file code @@ -4118,7 +4065,8 @@ void NetCvode::fornetcon_prepare() { for (const auto& d1: dil) { Point_process* pnt = d1->target_; if (pnt && t2i[pnt->prop->_type] > -1) { - auto* fnc = pnt->prop->dparam[t2i[pnt->prop->_type]].get(); + auto* fnc = static_cast( + 
pnt->prop->dparam[t2i[pnt->prop->_type]].get()); assert(fnc); fnc->size += 1; } @@ -4130,7 +4078,7 @@ void NetCvode::fornetcon_prepare() { int index = nrn_fornetcon_index_[i]; int type = nrn_fornetcon_type_[i]; if (nrn_is_artificial_[type]) { - Memb_list* m = memb_list + type; + auto* const m = &memb_list[type]; for (j = 0; j < m->nodecount; ++j) { auto* fnc = static_cast(m->pdata[j][index].get()); if (fnc->size > 0) { @@ -4192,8 +4140,7 @@ void record_init_clear(const TQItem* q, int) { } void NetCvode::record_init() { - int i, cnt = prl_->count(); - if (cnt) { + if (!prl_->empty()) { // there may be some events on the queue descended from // finitialize that need to be removed record_init_items_->clear(); @@ -4203,15 +4150,14 @@ void NetCvode::record_init() { } record_init_items_->clear(); } - for (i = 0; i < cnt; ++i) { - prl_->item(i)->record_init(); + for (auto& item: *prl_) { + item->record_init(); } } void NetCvode::play_init() { - int i, cnt = prl_->count(); - for (i = 0; i < cnt; ++i) { - prl_->item(i)->play_init(); + for (auto& item: *prl_) { + item->play_init(); } } @@ -4292,11 +4238,11 @@ void NetCvode::dstates() { void nrn_cvfun(double t, double* y, double* ydot) { NetCvode* d = net_cvode_instance; - d->gcv_->fun_thread(t, y, ydot, nrn_threads); + d->gcv_->fun_thread(nrn_ensure_model_data_are_sorted(), t, y, ydot, nrn_threads); } double nrn_hoc2fixed_step(void*) { - nrn_fixed_step(); + nrn_fixed_step(nrn_ensure_model_data_are_sorted()); return 0.; } @@ -4331,7 +4277,7 @@ double nrn_hoc2scatter_y(void* v) { if (nrn_nthread > 1) { hoc_execerror("only one thread allowed", 0); } - d->gcv_->scatter_y(vector_vec(s), 0); + d->gcv_->scatter_y(nrn_ensure_model_data_are_sorted(), vector_vec(s), 0); return 0.; } @@ -4410,8 +4356,9 @@ void NetCvode::acor() { /** @brief Create a lookup table for variable names. * * This is only created on-demand because it involves building a lookup table - * of pointers, which are prone to being invalidated. In nrn#1929 this may be - * superseded by the output operator of data_handle. + * of pointers, some of which are obtained from data_handles (and are therefore + * unstable). Eventually the operator<< of data_handle might provide the + * necessary functionality and this could be dropped completely. */ HocDataPaths NetCvode::create_hdp(int style) { int n{}; @@ -4471,8 +4418,8 @@ std::string NetCvode::statename(int is, int style) { assert(sym); return sym2name(sym); } else { - auto* s = hdp.retrieve(raw_ptr); - return s ? s->string() : "unknown"; + std::string s = hdp.retrieve(raw_ptr); + return !s.empty() ? 
s.c_str() : "unknown"; } }; int j{}; @@ -4531,7 +4478,7 @@ Symbol* NetCvode::name2sym(const char* name) { } } hoc_execerror(name, "must be in form rangevar or Template.var"); - return nil; + return nullptr; } void NetCvode::rtol(double x) { @@ -4616,15 +4563,15 @@ void NetCvode::structure_change() { } } -NetCon* NetCvode::install_deliver(double* dsrc, +NetCon* NetCvode::install_deliver(neuron::container::data_handle dsrc, Section* ssrc, Object* osrc, Object* target, double threshold, double delay, double magnitude) { - PreSyn* ps = nil; - double* psrc = nil; + PreSyn* ps = nullptr; + neuron::container::data_handle psrc{}; if (ssrc) { consist_sec_pd("NetCon", ssrc, dsrc); } @@ -4643,7 +4590,7 @@ NetCon* NetCvode::install_deliver(double* dsrc, assert(pp && pp->prop); if (!pnt_receive[pp->prop->_type]) { // only if no NET_RECEIVE block Sprintf(buf, "%s.x", hoc_object_name(osrc)); - psrc = hoc_val_pointer(buf); + psrc = hoc_val_handle(buf); } } } else { @@ -4676,7 +4623,7 @@ NetCon* NetCvode::install_deliver(double* dsrc, } } else if (target) { // no source so use the special presyn if (!unused_presyn) { - unused_presyn = new PreSyn(nil, nil, nil); + unused_presyn = new PreSyn({}, nullptr, nullptr); unused_presyn->hi_ = hoc_l_insertvoid(psl_, unused_presyn); } ps = unused_presyn; @@ -4698,15 +4645,15 @@ void NetCvode::psl_append(PreSyn* ps) { void NetCvode::presyn_disconnect(PreSyn* ps) { if (ps == unused_presyn) { - unused_presyn = nil; + unused_presyn = nullptr; } if (ps->hi_) { hoc_l_delete(ps->hi_); - ps->hi_ = nil; + ps->hi_ = nullptr; } if (ps->hi_th_) { hoc_l_delete(ps->hi_th_); - ps->hi_th_ = nil; + ps->hi_th_ = nullptr; } if (ps->thvar_) { --pst_cnt_; @@ -4767,15 +4714,15 @@ void DiscreteEvent::savestate_write(FILE* f) { NetCon::NetCon(PreSyn* src, Object* target) { NetConSave::invalid(); - obj_ = nil; + obj_ = nullptr; src_ = src; delay_ = 1.0; if (src_) { src_->dil_.push_back(this); src_->use_min_delay_ = 0; } - if (target == nil) { - target_ = nil; + if (target == nullptr) { + target_ = nullptr; active_ = false; cnt_ = 1; weight_ = new double[cnt_]; @@ -4791,7 +4738,7 @@ NetCon::NetCon(PreSyn* src, Object* target) { hoc_execerror("No NET_RECEIVE in target PointProcess:", hoc_object_name(target)); } cnt_ = pnt_receive_size[target_->prop->_type]; - weight_ = nil; + weight_ = nullptr; if (cnt_) { weight_ = new double[cnt_]; for (int i = 0; i < cnt_; ++i) { @@ -4906,7 +4853,7 @@ NetCon* NetConSave::weight2netcon(double* pd) { assert(nc->weight_ == pd); return nc; } else { - return nil; + return nullptr; } } @@ -4930,7 +4877,7 @@ NetCon* NetConSave::index2netcon(long id) { assert(nc->obj_->index == id); return nc; } else { - return nil; + return nullptr; } } @@ -4942,7 +4889,7 @@ void NetCvode::ps_thread_link(PreSyn* ps) { if (!ps) { return; } - ps->nt_ = nil; + ps->nt_ = nullptr; if (!v_structure_change) { // PP2NT etc are correct if (ps->osrc_) { ps->nt_ = PP2NT(ob2pntproc(ps->osrc_)); @@ -4994,23 +4941,23 @@ void NetCvode::p_construct(int n) { } } -PreSyn::PreSyn(double* src, Object* osrc, Section* ssrc) { - // printf("Presyn %x %s\n", (long)this, osrc?hoc_object_name(osrc):"nil"); +PreSyn::PreSyn(neuron::container::data_handle src, Object* osrc, Section* ssrc) + : thvar_{std::move(src)} { + // printf("Presyn %x %s\n", (long)this, osrc?hoc_object_name(osrc):"nullptr"); PreSynSave::invalid(); hi_index_ = -1; - hi_th_ = nil; + hi_th_ = nullptr; flag_ = false; valthresh_ = 0; - thvar_ = src; osrc_ = osrc; ssrc_ = ssrc; threshold_ = 10.; use_min_delay_ = 0; - tvec_ = nil; - idvec_ = nil; 
- stmt_ = nil; + tvec_ = nullptr; + idvec_ = nullptr; + stmt_ = nullptr; gid_ = -1; - nt_ = nil; + nt_ = nullptr; if (thvar_) { if (osrc) { nt_ = PP2NT(ob2pntproc(osrc)); @@ -5032,7 +4979,7 @@ PreSyn::PreSyn(double* src, Object* osrc, Section* ssrc) { #endif #if DISCRETE_EVENT_OBSERVER if (thvar_) { - nrn_notify_when_double_freed(thvar_, this); + neuron::container::notify_when_handle_dies(thvar_, this); } else if (osrc_) { nrn_notify_when_void_freed(osrc_, this); } @@ -5047,11 +4994,11 @@ PreSyn::~PreSyn() { #if DISCRETE_EVENT_OBSERVER if (tvec_) { ObjObservable::Detach(tvec_->obj_, this); - tvec_ = nil; + tvec_ = nullptr; } if (idvec_) { ObjObservable::Detach(idvec_->obj_, this); - idvec_ = nil; + idvec_ = nullptr; } #endif if (thvar_ || osrc_) { @@ -5062,12 +5009,12 @@ PreSyn::~PreSyn() { // even if the point process section was deleted earlier Point_process* pnt = ob2pntproc_0(osrc_); if (pnt) { - pnt->presyn_ = nil; + pnt->presyn_ = nullptr; } } } for (const auto& d: dil_) { - d->src_ = nil; + d->src_ = nullptr; } net_cvode_instance->presyn_disconnect(this); } @@ -5088,7 +5035,7 @@ void PreSynSave::savestate_restore(double tt, NetCvode* nc) { } DiscreteEvent* PreSyn::savestate_read(FILE* f) { - PreSyn* ps = nil; + PreSyn* ps = nullptr; char buf[200]; int index, tid; nrn_assert(fgets(buf, 200, f)); @@ -5134,12 +5081,12 @@ PreSyn* PreSynSave::hindx2presyn(long id) { assert(ps->hi_index_ == id); return ps; } else { - return nil; + return nullptr; } } void PreSyn::init() { - qthresh_ = nil; + qthresh_ = nullptr; if (tvec_) { tvec_->resize(0); } @@ -5214,12 +5161,12 @@ void PreSyn::record(double tt) { void PreSyn::disconnect(Observable* o) { // printf("PreSyn::disconnect %s\n", hoc_object_name(((ObjObservable*)o)->object())); if (tvec_ && tvec_->obj_ == ((ObjObservable*) o)->object()) { - tvec_ = nil; + tvec_ = nullptr; } if (idvec_ && idvec_->obj_ == ((ObjObservable*) o)->object()) { - idvec_ = nil; + idvec_ = nullptr; } - if (dil_.size() == 0 && tvec_ == nil && idvec_ == nil && output_index_ == -1) { + if (dil_.size() == 0 && tvec_ == nullptr && idvec_ == nullptr && output_index_ == -1) { delete this; } } @@ -5239,28 +5186,20 @@ if (d->obj_) { #if DISCRETE_EVENT_OBSERVER ObjObservable::Detach(tvec_->obj_, this); #endif - tvec_ = nil; + tvec_ = nullptr; } if (idvec_) { #if DISCRETE_EVENT_OBSERVER ObjObservable::Detach(idvec_->obj_, this); #endif - idvec_ = nil; + idvec_ = nullptr; } net_cvode_instance->presyn_disconnect(this); - thvar_ = nil; - osrc_ = nil; + thvar_ = {}; + osrc_ = nullptr; delete this; } -void PreSyn::update_ptr(double* pd) { -#if DISCRETE_EVENT_OBSERVER - nrn_notify_pointer_disconnect(this); - nrn_notify_when_double_freed(pd, this); -#endif - thvar_ = pd; -} - void ConditionEvent::check(NrnThread* nt, double tt, double teps) { if (value() > 0.0) { if (flag_ == false) { @@ -5298,7 +5237,7 @@ void ConditionEvent::condition(Cvode* cv) { // logic for high order threshold d // abandon the event STATISTICS(abandon_); net_cvode_instance->remove_event(qthresh_, nt->id); - qthresh_ = nil; + qthresh_ = nullptr; valthresh_ = 0.; flag_ = false; } @@ -5369,7 +5308,7 @@ void ConditionEvent::abandon_statistics(Cvode* cv) { } WatchCondition::WatchCondition(Point_process* pnt, double (*c)(Point_process*)) - : HTList(nil) { + : HTList(nullptr) { pnt_ = pnt; c_ = c; watch_index_ = 0; // For transfer, will be a small positive integer. 
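
The hunks above replace raw double* members such as PreSyn::thvar_ (and, further down in this diff, PlayRecord::pd_) with neuron::container::data_handle<double>. A handle keeps resolving to the right value when the underlying model data are sorted or reallocated, which is why PreSyn::update_ptr is deleted here, why destruction is now observed via neuron::container::notify_when_handle_dies, and why NetCvode::recalc_ptrs can be dropped later in the diff. The sketch below is a deliberately simplified stand-in written for illustration only: soa_column and toy_handle are invented names, not the real neuron::container API, and the real data_handle is considerably more involved.

    // Toy illustration of why a handle survives a permutation of the storage
    // while a cached double* would not.
    #include <cassert>
    #include <cstddef>
    #include <utility>
    #include <vector>

    struct soa_column {
        std::vector<double> values;       // storage that may be permuted/reallocated
        std::vector<std::size_t> row_of;  // stable logical id -> current row
    };

    class toy_handle {
      public:
        toy_handle() = default;
        toy_handle(soa_column* col, std::size_t id)
            : col_{col}
            , id_{id} {}
        explicit operator bool() const { return col_ != nullptr; }
        double& operator*() const { return col_->values[col_->row_of[id_]]; }
        // mirrors the static_cast<double*>(handle) escape hatch used for legacy code paths
        explicit operator double*() const { return col_ ? &**this : nullptr; }

      private:
        soa_column* col_{};
        std::size_t id_{};
    };

    int main() {
        soa_column voltages{{-65.0, -70.0}, {0, 1}};
        toy_handle v1{&voltages, 1};
        assert(*v1 == -70.0);
        double* stale = static_cast<double*>(v1);           // raw pointer taken now...
        std::swap(voltages.values[0], voltages.values[1]);  // ...goes stale after a sort,
        std::swap(voltages.row_of[0], voltages.row_of[1]);
        assert(*v1 == -70.0);  // ...while the handle still resolves correctly
        (void) stale;
        return 0;
    }
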
@@ -5393,7 +5332,7 @@ STECondition::~STECondition() { void WatchCondition::activate(double flag) { Cvode* cv = NULL; int id = 0; - qthresh_ = nil; + qthresh_ = nullptr; flag_ = (value() >= -hoc_epsilon) ? true : false; valthresh_ = 0.; nrflag_ = flag; @@ -5409,7 +5348,7 @@ void WatchCondition::activate(double flag) { id = (cv->nctd_ > 1) ? thread()->id : 0; HTList*& wl = cv->ctd_[id].watch_list_; if (!wl) { - wl = new HTList(nil); + wl = new HTList(nullptr); net_cvode_instance->wl_list_[id].push_back(wl); } Remove(); @@ -5431,7 +5370,7 @@ void WatchCondition::send(double tt, NetCvode* nc, NrnThread* nt) { void WatchCondition::deliver(double tt, NetCvode* ns, NrnThread* nt) { if (qthresh_) { - qthresh_ = nil; + qthresh_ = nullptr; STATISTICS(deliver_qthresh_); } Cvode* cv = (Cvode*) pnt_->nvi_; @@ -5443,7 +5382,7 @@ void WatchCondition::deliver(double tt, NetCvode* ns, NrnThread* nt) { PP2t(pnt_) = tt; } STATISTICS(watch_deliver_); - POINT_RECEIVE(type, pnt_, nil, nrflag_); + POINT_RECEIVE(type, pnt_, nullptr, nrflag_); if (errno) { if (nrn_errno_check(type)) { hoc_warning("errno set during WatchCondition deliver to NET_RECEIVE", (char*) 0); @@ -5453,13 +5392,13 @@ void WatchCondition::deliver(double tt, NetCvode* ns, NrnThread* nt) { void StateTransitionEvent::transition(int src, int dest, - double* var1, - double* var2, + neuron::container::data_handle var1, + neuron::container::data_handle var2, std::unique_ptr hc) { STETransition& st = states_[src].add_transition(pnt_); st.dest_ = dest; - st.var1_ = var1; - st.var2_ = var2; + st.var1_ = std::move(var1); + st.var2_ = std::move(var2); st.hc_ = std::move(hc); st.ste_ = this; st.var1_is_time_ = (static_cast(st.var1_) == &t); @@ -5467,7 +5406,8 @@ void StateTransitionEvent::transition(int src, void STETransition::activate() { if (var1_is_time_) { - var1_ = &stec_->thread()->_t; + var1_ = neuron::container::data_handle{neuron::container::do_not_search, + &stec_->thread()->_t}; } if (stec_->qthresh_) { // is it on the queue net_cvode_instance->remove_event(stec_->qthresh_, stec_->thread()->id); @@ -5486,7 +5426,7 @@ void STETransition::deactivate() { void STECondition::deliver(double tt, NetCvode* ns, NrnThread* nt) { if (qthresh_) { - qthresh_ = nil; + qthresh_ = nullptr; STATISTICS(deliver_qthresh_); } if (!pnt_) { @@ -5528,12 +5468,12 @@ NrnThread* STECondition::thread() { void WatchCondition::pgvts_deliver(double tt, NetCvode* ns) { assert(0); if (qthresh_) { - qthresh_ = nil; + qthresh_ = nullptr; STATISTICS(deliver_qthresh_); } int type = pnt_->prop->_type; STATISTICS(watch_deliver_); - POINT_RECEIVE(type, pnt_, nil, nrflag_); + POINT_RECEIVE(type, pnt_, nullptr, nrflag_); if (errno) { if (nrn_errno_check(type)) { hoc_warning("errno set during WatchCondition deliver to NET_RECEIVE", (char*) 0); @@ -5544,7 +5484,7 @@ void WatchCondition::pgvts_deliver(double tt, NetCvode* ns) { void STECondition::pgvts_deliver(double tt, NetCvode* ns) { assert(0); if (qthresh_) { - qthresh_ = nil; + qthresh_ = nullptr; STATISTICS(deliver_qthresh_); } int type = pnt_->prop->_type; @@ -5569,7 +5509,7 @@ static void* eval_cond(NrnThread* nt) { } void Cvode::evaluate_conditions(NrnThread* nt) { if (!nt) { - if (nrn_nthread > 1) { + if (nrn_nthread > 1 && nctd_ > 1) { eval_cv = this; nrn_multithread_job(eval_cond); return; @@ -5621,23 +5561,18 @@ void Cvode::check_deliver(NrnThread* nt) { } } -void NetCvode::fixed_record_continuous(NrnThread* nt) { - int i, cnt; - nrn_ba(nt, BEFORE_STEP); - cnt = fixed_record_->count(); - for (i = 0; i < cnt; ++i) { // should 
be made more efficient - PlayRecord* pr = fixed_record_->item(i); - if (pr->ith_ == nt->id) { - pr->continuous(nt->_t); +void NetCvode::fixed_record_continuous(neuron::model_sorted_token const& cache_token, + NrnThread& nt) { + nrn_ba(cache_token, nt, BEFORE_STEP); + for (auto& pr: *fixed_record_) { + if (pr->ith_ == nt.id) { + pr->continuous(nt._t); } } } void NetCvode::fixed_play_continuous(NrnThread* nt) { - int i, cnt; - cnt = fixed_play_->count(); - for (i = 0; i < cnt; ++i) { - PlayRecord* pr = fixed_play_->item(i); + for (auto& pr: *fixed_play_) { if (pr->ith_ == nt->id) { pr->continuous(nt->_t); } @@ -5656,7 +5591,7 @@ void NetCvode::fixed_play_continuous(NrnThread* nt) { static int trajec_buffered(NrnThread& nt, int bsize, IvocVect* v, - double* pd, + neuron::container::data_handle pd, int i_pr, PlayRecord* pr, void** vpr, @@ -5675,10 +5610,11 @@ static int trajec_buffered(NrnThread& nt, v->resize(bsize + cur_size); varrays[i_trajec] = vector_vec(v) + cur_size; // begin filling here } else { + // Danger, think this through better pvars[i_trajec] = static_cast(pd); } vpr[i_pr] = pr; - if (pd == &nt._t) { + if (static_cast(pd) == &nt._t) { types[i_trajec] = 0; indices[i_trajec] = 0; } else { @@ -5686,7 +5622,7 @@ static int trajec_buffered(NrnThread& nt, if (err) { Fprintf(stderr, "Pointer %p of PlayRecord type %d ignored because not a Range Variable", - pd, + static_cast(pd), pr->type()); } } @@ -5731,12 +5667,9 @@ void nrnthread_get_trajectory_requests(int tid, pvars = NULL; if (tid < nrn_nthread) { NrnThread& nt = nrn_threads[tid]; - PlayRecList* fr = net_cvode_instance->fixed_record_; - int cntp; - cntp = fr->count(); + auto* fr = net_cvode_instance->fixed_record_; // allocate - for (int i = 0; i < cntp; ++i) { - PlayRecord* pr = fr->item(i); + for (auto& pr: *fr) { if (pr->ith_ == tid) { if (pr->type() == TvecRecordType || pr->type() == YvecRecordType) { n_pr++; @@ -5744,7 +5677,7 @@ void nrnthread_get_trajectory_requests(int tid, #if HAVE_IV } else if (pr->type() == GLineRecordType) { n_pr++; - if (pr->pd_ == NULL) { + if (!pr->pd_) { GLineRecord* glr = (GLineRecord*) pr; assert(glr->gl_->expr_); glr->fill_pd(); @@ -5785,18 +5718,17 @@ void nrnthread_get_trajectory_requests(int tid, // everything allocated, start over and fill n_pr = 0; n_trajec = 0; - for (int i = 0; i < cntp; ++i) { + for (auto& pr: *fr) { int err = 0; - PlayRecord* pr = fr->item(i); if (pr->ith_ == tid) { if (1) { // buffered or per time step value return - IvocVect* v = NULL; if (pr->type() == TvecRecordType) { - v = ((TvecRecord*) pr)->t_; + IvocVect* v = ((TvecRecord*) pr)->t_; err = trajec_buffered(nt, bsize, v, - &nt._t, + neuron::container::data_handle{ + neuron::container::do_not_search, &nt._t}, n_pr++, pr, vpr, @@ -5810,7 +5742,7 @@ void nrnthread_get_trajectory_requests(int tid, n_trajec--; } } else if (pr->type() == YvecRecordType) { - v = ((YvecRecord*) pr)->y_; + IvocVect* v = ((YvecRecord*) pr)->y_; err = trajec_buffered(nt, bsize, v, @@ -5834,7 +5766,7 @@ void nrnthread_get_trajectory_requests(int tid, if (bsize && !glr->v_) { glr->v_ = new IvocVect(bsize); } - v = glr->v_; + IvocVect* v = glr->v_; err = trajec_buffered(nt, bsize, v, @@ -5852,19 +5784,17 @@ void nrnthread_get_trajectory_requests(int tid, n_trajec--; } } else { // glr->gl_->name expression involves several range variables - GLineRecordEData& ed = glr->pd_and_vec_; int n = n_trajec; - for (GLineRecordEData::iterator it = ed.begin(); it != ed.end(); ++it) { - double* pd = (*it).first; + for (auto&& [pd, v]: glr->pd_and_vec_) { 
assert(pd); - v = (*it).second; - if (bsize && v == NULL) { - v = (*it).second = new IvocVect(bsize); + if (bsize && v == nullptr) { + v = new IvocVect(bsize); } + // TODO avoid the conversion? err = trajec_buffered(nt, bsize, v, - pd, + neuron::container::data_handle{pd}, n_pr, pr, vpr, @@ -6129,44 +6059,23 @@ void NetCvode::deliver_net_events(NrnThread* nt) { // for default method nt->_t = tsav; } -implementPtrList(PlayRecList, PlayRecord) - void NetCvode::playrec_add(PlayRecord* pr) { // called by PlayRecord constructor // printf("NetCvode::playrec_add %p\n", pr); playrec_change_cnt_ = 0; - prl_->append(pr); + prl_->push_back(pr); } void NetCvode::playrec_remove(PlayRecord* pr) { // called by PlayRecord destructor // printf("NetCvode::playrec_remove %p\n", pr); playrec_change_cnt_ = 0; - int i, cnt = prl_->count(); - for (i = 0; i < cnt; ++i) { - if (prl_->item(i) == pr) { - prl_->remove(i); - break; - } - } - cnt = fixed_play_->count(); - for (i = 0; i < cnt; ++i) { - if (fixed_play_->item(i) == pr) { - fixed_play_->remove(i); - break; - } - } - cnt = fixed_record_->count(); - for (i = 0; i < cnt; ++i) { - if (fixed_record_->item(i) == pr) { - fixed_record_->remove(i); - break; - } - } + erase_first(*prl_, pr); + erase_first(*fixed_play_, pr); + erase_first(*fixed_record_, pr); } int NetCvode::playrec_item(PlayRecord* pr) { - int i, cnt = prl_->count(); - for (i = 0; i < cnt; ++i) { - if (prl_->item(i) == pr) { + for (const auto&& [i, e]: enumerate(*prl_)) { + if (e == pr) { return i; } } @@ -6174,27 +6083,25 @@ int NetCvode::playrec_item(PlayRecord* pr) { } PlayRecord* NetCvode::playrec_item(int i) { - assert(i < prl_->count()); - return prl_->item(i); + return prl_->at(i); } PlayRecord* NetCvode::playrec_uses(void* v) { - int i, cnt = prl_->count(); - for (i = 0; i < cnt; ++i) { - if (prl_->item(i)->uses(v)) { - return prl_->item(i); + for (auto& item: *prl_) { + if (item->uses(v)) { + return item; } } - return nil; + return nullptr; } -PlayRecord::PlayRecord(double* pd, Object* ppobj) { +PlayRecord::PlayRecord(neuron::container::data_handle pd, Object* ppobj) + : pd_{std::move(pd)} { // printf("PlayRecord::PlayRecord %p\n", this); - pd_ = pd; - cvode_ = nil; + cvode_ = nullptr; ith_ = 0; if (pd_) { - nrn_notify_when_double_freed(pd_, this); + neuron::container::notify_when_handle_dies(pd_, this); } ppobj_ = ppobj; if (ppobj_) { @@ -6212,12 +6119,6 @@ PlayRecord::~PlayRecord() { net_cvode_instance->playrec_remove(this); } -void PlayRecord::update_ptr(double* pd) { - nrn_notify_pointer_disconnect(this); - nrn_notify_when_double_freed(pd, this); - pd_ = pd; -} - void PlayRecord::disconnect(Observable*) { // printf("PlayRecord::disconnect %ls\n", (long)this); delete this; @@ -6228,7 +6129,7 @@ void PlayRecord::record_add(Cvode* cv) { if (cv) { cv->record_add(this); } - net_cvode_instance->fixed_record_->append(this); + net_cvode_instance->fixed_record_->push_back(this); } void PlayRecord::play_add(Cvode* cv) { @@ -6236,7 +6137,7 @@ void PlayRecord::play_add(Cvode* cv) { if (cv) { cv->play_add(this); } - net_cvode_instance->fixed_play_->append(this); + net_cvode_instance->fixed_play_->push_back(this); } void PlayRecord::pr() { @@ -6244,7 +6145,7 @@ void PlayRecord::pr() { } TvecRecord::TvecRecord(Section* sec, IvocVect* t, Object* ppobj) - : PlayRecord(&NODEV(sec->pnode[0]), ppobj) { + : PlayRecord(sec->pnode[0]->v_handle(), ppobj) { // printf("TvecRecord\n"); t_ = t; ObjObservable::Attach(t_->obj_, this); @@ -6272,8 +6173,8 @@ void TvecRecord::continuous(double tt) { t_->push_back(tt); 
} -YvecRecord::YvecRecord(double* pd, IvocVect* y, Object* ppobj) - : PlayRecord(pd, ppobj) { +YvecRecord::YvecRecord(neuron::container::data_handle dh, IvocVect* y, Object* ppobj) + : PlayRecord(std::move(dh), ppobj) { // printf("YvecRecord\n"); y_ = y; ObjObservable::Attach(y_->obj_, this); @@ -6294,6 +6195,9 @@ void YvecRecord::install(Cvode* cv) { } void YvecRecord::record_init() { + if (!pd_) { + hoc_execerr_ext("%s recording from invalid data reference.", hoc_object_name(y_->obj_)); + } y_->resize(0); } @@ -6301,8 +6205,11 @@ void YvecRecord::continuous(double tt) { y_->push_back(*pd_); } -VecRecordDiscrete::VecRecordDiscrete(double* pd, IvocVect* y, IvocVect* t, Object* ppobj) - : PlayRecord(pd, ppobj) { +VecRecordDiscrete::VecRecordDiscrete(neuron::container::data_handle dh, + IvocVect* y, + IvocVect* t, + Object* ppobj) + : PlayRecord(std::move(dh), ppobj) { // printf("VecRecordDiscrete\n"); y_ = y; t_ = t; @@ -6371,8 +6278,11 @@ void VecRecordDiscrete::deliver(double tt, NetCvode* nc) { } } -VecRecordDt::VecRecordDt(double* pd, IvocVect* y, double dt, Object* ppobj) - : PlayRecord(pd, ppobj) { +VecRecordDt::VecRecordDt(neuron::container::data_handle pd, + IvocVect* y, + double dt, + Object* ppobj) + : PlayRecord(std::move(pd), ppobj) { // printf("VecRecordDt\n"); y_ = y; dt_ = dt; @@ -6417,16 +6327,17 @@ void VecRecordDt::frecord_init(TQItem* q) { } void VecRecordDt::deliver(double tt, NetCvode* nc) { - if (pd_ == &t) { + auto* const ptr = static_cast(pd_); + if (ptr == &t) { y_->push_back(tt); } else { - y_->push_back(*pd_); + y_->push_back(*ptr); } e_->send(tt + dt_, nc, nrn_threads); } void NetCvode::vecrecord_add() { - double* pd = hoc_pgetarg(1); + auto const pd = hoc_hgetarg(1); consist_sec_pd("Cvode.record", chk_access(), pd); IvocVect* y = vector_arg(2); IvocVect* t = vector_arg(3); @@ -6448,10 +6359,9 @@ void NetCvode::vec_remove() { } void NetCvode::playrec_setup() { - long i, j, iprl, prlc; - prlc = prl_->count(); - fixed_record_->remove_all(); - fixed_play_->remove_all(); + long i, j; + fixed_record_->clear(); + fixed_play_->clear(); if (gcv_) { gcv_->delete_prl(); } else { @@ -6459,8 +6369,15 @@ void NetCvode::playrec_setup() { p[i].lcv_[j].delete_prl(); } } - for (iprl = 0; iprl < prlc; ++iprl) { - PlayRecord* pr = prl_->item(iprl); + std::vector to_delete{}; + for (auto& pr: *prl_) { + if (!pr->pd_) { + // Presumably the recorded value was invalidated elsewhere, e.g. 
it + // was a voltage of a deleted node, or a range variable of a deleted + // mechanism instance + to_delete.push_back(pr); + continue; + } bool b = false; if (single_) { pr->install(gcv_); @@ -6482,7 +6399,7 @@ void NetCvode::playrec_setup() { } if (b == false) { hoc_execerror("We were unable to associate a PlayRecord item with a RANGE variable", - nil); + nullptr); } // and need to know the thread owners if (pr->ppobj_) { @@ -6491,25 +6408,31 @@ void NetCvode::playrec_setup() { i = owned_by_thread(pr->pd_); } if (i < 0) { - hoc_execerror("We were unable to associate a PlayRecord item with a thread", nil); + hoc_execerror("We were unable to associate a PlayRecord item with a thread", nullptr); } pr->ith_ = i; } + for (auto* pr: to_delete) { + // Destructor should de-register things + delete pr; + } playrec_change_cnt_ = structure_change_cnt_; } -bool Cvode::is_owner(double* pd) { // is a pointer to range variable in this cell +// is a pointer to range variable in this cell +bool Cvode::is_owner(neuron::container::data_handle const& handle) { int in, it; for (it = 0; it < nrn_nthread; ++it) { CvodeThreadData& z = CTD(it); for (in = 0; in < z.v_node_count_; ++in) { Node* nd = z.v_node_[in]; - if (&NODEV(nd) == pd) { + if (handle == nd->v_handle()) { return true; } + auto* pd = static_cast(handle); Prop* p; for (p = nd->prop; p; p = p->next) { - if (pd >= p->param && pd < (p->param + p->param_size)) { + if (p->owns(handle)) { return true; } } @@ -6528,7 +6451,7 @@ bool Cvode::is_owner(double* pd) { // is a pointer to range variable in this ce return false; } -int NetCvode::owned_by_thread(double* pd) { +int NetCvode::owned_by_thread(neuron::container::data_handle const& handle) { if (nrn_nthread == 1) { return 0; } @@ -6539,16 +6462,16 @@ int NetCvode::owned_by_thread(double* pd) { int i3 = nt.end; for (in = i1; in < i3; ++in) { Node* nd = nt._v_node[in]; - if (&NODEV(nd) == pd) { + if (handle == nd->v_handle()) { return it; } - Prop* p; - for (p = nd->prop; p; p = p->next) { - if (pd >= p->param && pd < (p->param + p->param_size)) { + for (Prop* p = nd->prop; p; p = p->next) { + if (p->owns(handle)) { return it; } } if (nd->extnode) { + auto* pd = static_cast(handle); if (pd >= nd->extnode->v && pd < (nd->extnode->v + nlayer)) { return it; } @@ -6560,7 +6483,9 @@ int NetCvode::owned_by_thread(double* pd) { return -1; } -void NetCvode::consist_sec_pd(const char* msg, Section* sec, double* pd) { +void NetCvode::consist_sec_pd(const char* msg, + Section* sec, + neuron::container::data_handle const& handle) { int in; Node* nd; for (in = -1; in < sec->nnode; ++in) { @@ -6572,12 +6497,13 @@ void NetCvode::consist_sec_pd(const char* msg, Section* sec, double* pd) { } else { nd = sec->pnode[in]; } - if (&NODEV(nd) == pd) { + if (nd->v_handle() == handle) { return; } Prop* p; + auto* const pd = static_cast(handle); for (p = nd->prop; p; p = p->next) { - if (pd >= p->param && pd < (p->param + p->param_size)) { + if (p->owns(handle)) { return; } } @@ -6739,36 +6665,10 @@ double NetCvode::maxstate_analyse(Symbol* sym, double* pamax) { return -1e9; } -void NetCvode::recalc_ptrs() { -#if CACHEVEC - // update PlayRecord pointers to v - int cnt = prl_->count(); - for (int i = 0; i < cnt; ++i) { - PlayRecord* pr = prl_->item(i); - if (pr->pd_) { - pr->update_ptr(nrn_recalc_ptr(pr->pd_)); - } - } - // update PreSyn pointers to v - hoc_Item* q; - if (psl_) - ITERATE(q, psl_) { - PreSyn* ps = (PreSyn*) VOIDITM(q); - if (ps->thvar_) { - double* pd = nrn_recalc_ptr(ps->thvar_); - if (pd != ps->thvar_) { - 
pst_->erase(ps->thvar_); - (*pst_)[pd] = ps; - ps->update_ptr(pd); - } - } - } -#endif -} - static double lvardt_tout_; -static void* lvardt_integrate(NrnThread* nt) { +static void lvardt_integrate(neuron::model_sorted_token const& token, NrnThread& ntr) { + auto* const nt = &ntr; size_t err = NVI_SUCCESS; int id = nt->id; NetCvode* nc = net_cvode_instance; @@ -6778,13 +6678,13 @@ static void* lvardt_integrate(NrnThread* nt) { double tout = lvardt_tout_; nt->_stop_stepping = 0; while (tq->least_t() < tout || tqe->least_t() <= tout) { - err = nc->local_microstep(nt); + err = nc->local_microstep(token, ntr); if (nt->_stop_stepping) { nt->_stop_stepping = 0; - return (void*) err; + return; } if (err != NVI_SUCCESS || stoprun) { - return (void*) err; + return; } } int n = p.nlcv_; @@ -6797,7 +6697,6 @@ static void* lvardt_integrate(NrnThread* nt) { else { nt->_t = tout; } - return (void*) err; } int NetCvode::solve_when_threads(double tout) { @@ -6805,6 +6704,7 @@ int NetCvode::solve_when_threads(double tout) { int tid; double til; nrn_use_busywait(1); // just a possibility + auto const cache_token = nrn_ensure_model_data_are_sorted(); if (empty_) { if (tout >= 0.) { while (nt_t < tout && !stoprun) { @@ -6861,7 +6761,7 @@ int NetCvode::solve_when_threads(double tout) { // For now just integrate by min delay intervals. lvardt_tout_ = tout; while (nt_t < tout) { - nrn_multithread_job(lvardt_integrate); + nrn_multithread_job(cache_token, lvardt_integrate); if (nrn_allthread_handle) { (*nrn_allthread_handle)(); } @@ -6928,7 +6828,7 @@ int NetCvode::global_microstep_when_threads() { assert(tdiff == 0.0 || (gcv_->tstop_begin_ <= tt && tt <= gcv_->tstop_end_)); deliver_events_when_threads(tt); } else { - err = gcv_->handle_step(this, tt); + err = gcv_->handle_step(nrn_ensure_model_data_are_sorted(), this, tt); } if ((tt = allthread_least_t(tid)) < gcv_->t_) { gcv_->interpolate(tt); diff --git a/src/nrncvode/netcvode.h b/src/nrncvode/netcvode.h index 64b5cef210..afddcf998e 100644 --- a/src/nrncvode/netcvode.h +++ b/src/nrncvode/netcvode.h @@ -6,6 +6,7 @@ #include "mymath.h" #include "cvodeobj.h" +#include "neuron/container/data_handle.hpp" #include "tqueue.h" #include @@ -15,16 +16,15 @@ struct NrnThread; class PreSyn; class HocDataPaths; -using PreSynTable = std::unordered_map; +using PreSynTable = std::unordered_map, PreSyn*>; class NetCon; class DiscreteEvent; class SelfEvent; using SelfEventPool = MutexPool; struct hoc_Item; class PlayRecord; -class PlayRecList; class IvocVect; -class BAMechList; +struct BAMechList; class HTList; // nrn_nthread vectors of HTList* for fixed step method // Thread segregated HTList* of all the CVode.CvodeThreadData.HTList* @@ -97,10 +97,10 @@ class NetCvode { void tstop_event(double); void hoc_event(double, const char* hoc_stmt, - Object* ppobj = nil, + Object* ppobj = nullptr, int reinit = 0, - Object* pyact = nil); - NetCon* install_deliver(double* psrc, + Object* pyact = nullptr); + NetCon* install_deliver(neuron::container::data_handle psrc, Section* ssrc, Object* osrc, Object* target, @@ -121,24 +121,24 @@ class NetCvode { void local_retreat(double, Cvode*); void retreat(double, Cvode*); Object** netconlist(); - int owned_by_thread(double*); + int owned_by_thread(neuron::container::data_handle const&); PlayRecord* playrec_uses(void*); void playrec_add(PlayRecord*); void playrec_remove(PlayRecord*); int playrec_item(PlayRecord*); PlayRecord* playrec_item(int); - PlayRecList* playrec_list() { + std::vector* playrec_list() { return prl_; } void simgraph_remove(); 
// fixed step continuous play and record - PlayRecList* fixed_play_; - PlayRecList* fixed_record_; + std::vector* fixed_play_; + std::vector* fixed_record_; void vecrecord_add(); // hoc interface functions void vec_remove(); void record_init(); void play_init(); - void fixed_record_continuous(NrnThread*); + void fixed_record_continuous(neuron::model_sorted_token const&, NrnThread& nt); void fixed_play_continuous(NrnThread*); static double eps(double x) { return eps_ * std::abs(x); @@ -151,7 +151,6 @@ class NetCvode { } TQueue* event_queue(NrnThread* nt); void psl_append(PreSyn*); - void recalc_ptrs(); public: void rtol(double); @@ -189,7 +188,7 @@ class NetCvode { // int nlist() { return nlist_; } // Cvode* list() { return list_; } bool initialized_; // for global step solve. - void consist_sec_pd(const char*, Section*, double*); + void consist_sec_pd(const char*, Section*, neuron::container::data_handle const&); double state_magnitudes(); Symbol* name2sym(const char*); const char* sym2name(Symbol*); @@ -203,7 +202,7 @@ class NetCvode { // private: public: static double eps_; - int local_microstep(NrnThread*); + int local_microstep(neuron::model_sorted_token const&, NrnThread&); int global_microstep(); void deliver_least_event(NrnThread*); void evaluate_conditions(); @@ -242,7 +241,7 @@ class NetCvode { PreSynTable* pst_; int pst_cnt_; int playrec_change_cnt_; - PlayRecList* prl_; + std::vector* prl_; IvocVect* vec_event_store_; HocDataPaths create_hdp(int style); @@ -259,8 +258,7 @@ class NetCvode { public: MUTDEC // only for enqueueing_ so far. - void - set_enqueueing(); + void set_enqueueing(); double allthread_least_t(int& tid); int solve_when_threads(double); void deliver_events_when_threads(double); diff --git a/src/nrncvode/nrndaspk.cpp b/src/nrncvode/nrndaspk.cpp index 661a8be746..5434cdc031 100644 --- a/src/nrncvode/nrndaspk.cpp +++ b/src/nrncvode/nrndaspk.cpp @@ -11,6 +11,7 @@ #include "cvodeobj.h" #include "nrndaspk.h" #include "netcvode.h" +#include "nrn_ansi.h" #include "ida/ida.h" #include "ida/ida_impl.h" #include "mymath.h" @@ -29,8 +30,6 @@ double Daspk::dteps_; extern void nrndae_dkres(double*, double*, double*); extern void nrndae_dkpsol(double); -extern void nrn_rhs(NrnThread*); -extern void nrn_lhs(NrnThread*); extern void nrn_solve(NrnThread*); void nrn_daspk_init_step(double, double, int); // this is private in ida.cpp but we want to check if our initialization @@ -145,8 +144,8 @@ Daspk::Daspk(Cvode* cv, int neq) { delta_ = cv->nvnew(neq); parasite_ = cv->nvnew(neq); use_parasite_ = false; - spmat_ = nil; - mem_ = nil; + spmat_ = nullptr; + mem_ = nullptr; } Daspk::~Daspk() { @@ -192,17 +191,17 @@ int Daspk::init_failure_style_; int Daspk::init_try_again_; int Daspk::first_try_init_failures_; -static void* do_ode_thread(NrnThread* nt) { +static void do_ode_thread(neuron::model_sorted_token const& sorted_token, NrnThread& ntr) { + auto* const nt = &ntr; int i; Cvode* cv = thread_cv; nt->_t = cv->t_; - cv->do_ode(nt); + cv->do_ode(sorted_token, ntr); CvodeThreadData& z = cv->ctd_[nt->id]; double* yp = cv->n_vector_data(nvec_yp, nt->id); for (i = z.neq_v_; i < z.nvsize_; ++i) { yp[i] = *(z.pvdot_[i]); } - return 0; } static double check(double t, Daspk* ida) { @@ -271,7 +270,7 @@ cv_->t_, t-cv_->t_, cv_->t0_-cv_->t_); } thread_cv = cv_; nvec_yp = yp_; - nrn_multithread_job(do_ode_thread); + nrn_multithread_job(nrn_ensure_model_data_are_sorted(), do_ode_thread); ida_init(); t = cv_->t_; #if 1 @@ -349,13 +348,12 @@ int Daspk::advance_tn(double tstop) { int 
Daspk::interpolate(double tt) { // printf("Daspk::interpolate %.15g\n", tt); assert(tt >= cv_->t0_ && tt <= cv_->tn_); - IDASetStopTime(mem_, tt); - int ier = IDASolve(mem_, tt, &cv_->t_, cv_->y_, yp_, IDA_NORMAL); + int ier = IDAGetSolution(mem_, tt, cv_->y_, yp_); if (ier < 0) { Printf("DASPK interpolate error\n"); return ier; } - assert(MyMath::eq(tt, cv_->t_, NetCvode::eps(cv_->t_))); + cv_->t_ = tt; // interpolation does not call res. So we have to. res_gvardt(cv_->t_, cv_->y_, yp_, delta_, cv_); // if(MyMath::eq(t, cv_->t_, NetCvode::eps(cv_->t_))) { @@ -401,15 +399,16 @@ void Cvode::daspk_scatter_y(double* y, int tid) { // not needed since the matrix solve is already with respect to vi,vx // in all cases. (i.e. the solution vector is in the right hand side // and refers to vi, vx. - scatter_y(y, tid); + scatter_y(nrn_ensure_model_data_are_sorted(), y, tid); // transform the vm+vext to vm CvodeThreadData& z = ctd_[tid]; if (z.cmlext_) { - Memb_list* ml = z.cmlext_->ml; + assert(z.cmlext_->ml.size() == 1); + Memb_list* ml = &z.cmlext_->ml[0]; int i, n = ml->nodecount; for (i = 0; i < n; ++i) { Node* nd = ml->nodelist[i]; - NODEV(nd) -= nd->extnode->v[0]; + nd->v() -= nd->extnode->v[0]; } } } @@ -427,7 +426,8 @@ void Cvode::daspk_gather_y(double* y, int tid) { // transform vm to vm+vext CvodeThreadData& z = ctd_[tid]; if (z.cmlext_) { - Memb_list* ml = z.cmlext_->ml; + assert(z.cmlext_->ml.size() == 1); + Memb_list* ml = &z.cmlext_->ml[0]; int i, n = ml->nodecount; for (i = 0; i < n; ++i) { Node* nd = ml->nodelist[i]; @@ -464,8 +464,9 @@ for (i=0; i < z.nvsize_; ++i) { daspk_scatter_y(y, nt->id); // vi, vext, channel states, linmod non-node y. // rhs of cy' = f(y) play_continuous_thread(tt, nt); - nrn_rhs(nt); - do_ode(nt); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + nrn_rhs(sorted_token, *nt); + do_ode(sorted_token, *nt); // accumulate into delta gather_ydot(delta, nt->id); @@ -493,71 +494,70 @@ for (i=0; i < z.nvsize_; ++i) { // assert(use_sparse13 == true && nlayer <= 1); assert(use_sparse13 == true); if (z.cmlcap_) { - Memb_list* ml = z.cmlcap_->ml; + assert(z.cmlcap_->ml.size() == 1); + Memb_list* ml = &z.cmlcap_->ml[0]; int n = ml->nodecount; - double* p = NULL; - if (nt->_nrn_fast_imem) { - p = nt->_nrn_fast_imem->_nrn_sav_rhs; - } + auto const vec_sav_rhs = nt->node_sav_rhs_storage(); for (i = 0; i < n; ++i) { - double* cd = ml->_data[i]; Node* nd = ml->nodelist[i]; int j = nd->eqn_index_ - 1; Extnode* nde = nd->extnode; double cdvm; if (nde) { - cdvm = 1e-3 * cd[0] * (yprime[j] - yprime[j + 1]); + cdvm = 1e-3 * ml->data(i, 0) * (yprime[j] - yprime[j + 1]); delta[j] -= cdvm; delta[j + 1] += cdvm; // i_cap - cd[1] = cdvm; + ml->data(i, 1) = cdvm; #if I_MEMBRANE // add i_cap to i_ion which is in sav_g // this will be copied to i_membrane below - nde->param[3 + 3 * nlayer] += cdvm; + *nde->param[neuron::extracellular::sav_rhs_index_ext()] += cdvm; #endif } else { - cdvm = 1e-3 * cd[0] * yprime[j]; + cdvm = 1e-3 * ml->data(i, 0) * yprime[j]; delta[j] -= cdvm; - cd[1] = cdvm; + ml->data(i, 1) = cdvm; } - if (p) { + if (vec_sav_rhs) { int i = nd->v_node_index; - p[i] += cdvm; - p[i] *= NODEAREA(nd) * 0.01; + vec_sav_rhs[i] += cdvm; + vec_sav_rhs[i] *= NODEAREA(nd) * 0.01; } } } // See nrnoc/excelln.cpp for location of cx. 
if (z.cmlext_) { - Memb_list* ml = z.cmlext_->ml; + assert(z.cmlext_->ml.size() == 1); + Memb_list* ml = &z.cmlext_->ml[0]; int n = ml->nodecount; for (i = 0; i < n; ++i) { - double* cd = ml->_data[i]; Node* nd = ml->nodelist[i]; int j = nd->eqn_index_; #if EXTRACELLULAR #if I_MEMBRANE // i_membrane = sav_rhs --- even for zero area nodes - cd[1 + 3 * nlayer] = cd[3 + 3 * nlayer]; + ml->data(i, neuron::extracellular::i_membrane_index) = + ml->data(i, neuron::extracellular::sav_rhs_index); #endif /*I_MEMBRANE*/ - if (nlayer == 1) { + if (nrn_nlayer_extracellular == 1) { // only works for one layer // otherwise loop over layer, // xc is (pd + 2*(nlayer))[layer] // and deal with yprime[i+layer]-yprime[i+layer+1] - delta[j] -= 1e-3 * cd[2] * yprime[j]; + delta[j] -= 1e-3 * + ml->data(i, neuron::extracellular::xc_index, 0 /* 0th/only layer */) * + yprime[j]; } else { - int k, jj; - double x; - k = nlayer - 1; - jj = j + k; - delta[jj] -= 1e-3 * cd[2 * nlayer + k] * (yprime[jj]); - for (k = nlayer - 2; k >= 0; --k) { + int k = nrn_nlayer_extracellular - 1; + int jj = j + k; + delta[jj] -= 1e-3 * ml->data(i, neuron::extracellular::xc_index, k) * (yprime[jj]); + for (k = nrn_nlayer_extracellular - 2; k >= 0; --k) { // k=0 refers to stuff between layer 0 and 1 // j is for vext[0] jj = j + k; - x = 1e-3 * cd[2 * nlayer + k] * (yprime[jj] - yprime[jj + 1]); + auto const x = 1e-3 * ml->data(i, neuron::extracellular::xc_index, k) * + (yprime[jj] - yprime[jj + 1]); delta[jj] -= x; delta[jj + 1] += x; // last one in iteration is nlayer-1 } @@ -583,7 +583,7 @@ for (i=0; i < z.nvsize_; ++i) { delta[i] -= tps[i] * fac; } } - before_after(z.after_solve_, nt); + before_after(sorted_token, z.after_solve_, nt); #if 0 printf("Cvode::res exit res_=%d tt=%20.12g\n", res_, tt); for (i=0; i < z.nvsize_; ++i) { @@ -621,7 +621,8 @@ printf("\n"); _nt->_vcv = this; daspk_scatter_y(y, _nt->id); // I'm not sure this is necessary. if (solve_state_ == INVALID) { - nrn_lhs(_nt); // designed to setup M*[dvm+dvext, dvext, dy] = ... + nrn_lhs(nrn_ensure_model_data_are_sorted(), + *_nt); // designed to setup M*[dvm+dvext, dvext, dy] = ... 
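// The call above shows a pattern that recurs throughout this diff: nrn_rhs,
// nrn_lhs, do_ode, advance_tn, handle_step and similar routines now take a
// neuron::model_sorted_token, obtained from nrn_ensure_model_data_are_sorted().
// Entry points such as ncs2nrn_integrate and NetCvode::solve acquire the token
// once (their cache_token) and pass it down, so inner routines do not each
// re-check that the model data are sorted; call sites like this one presumably
// fetch a fresh token because no token is available from the caller.
// A rough sketch of the idea, with invented names rather than the real NEURON
// signatures:
//     struct sorted_token {};                 // stands in for neuron::model_sorted_token
//     sorted_token ensure_sorted();           // stands in for nrn_ensure_model_data_are_sorted()
//     void nrn_lhs_like(const sorted_token&); // inner routine requiring the proof
//     void caller() {
//         auto const token = ensure_sorted(); // sort (if needed) exactly once...
//         nrn_lhs_like(token);                // ...then hand the proof down
//     }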
solve_state_ = SETUP; } if (solve_state_ == SETUP) { @@ -647,7 +648,7 @@ for (i=0; i < neq_v_; ++i) { } #endif solve_state_ = INVALID; // but not if using sparse13 - solvemem(_nt); + solvemem(nrn_ensure_model_data_are_sorted(), _nt); gather_ydot(b, _nt->id); // the ode's of the form m' = (minf - m)/mtau in model descriptions compute // b = b/(1 + dt*mtau) since cvode required J = 1 - gam*df/dy diff --git a/src/nrncvode/occvode.cpp b/src/nrncvode/occvode.cpp index 2c05ea1663..889f9fbc69 100644 --- a/src/nrncvode/occvode.cpp +++ b/src/nrncvode/occvode.cpp @@ -1,6 +1,6 @@ #include <../../nrnconf.h> #include -#include +#include "nrn_ansi.h" #include "nrndae_c.h" #include "nrniv_mf.h" #include "nrnoc2iv.h" @@ -11,24 +11,22 @@ #include "vrecitem.h" #include "membfunc.h" #include "nonvintblock.h" + #include extern void setup_topology(), v_setup_vectors(); -extern void nrn_mul_capacity(NrnThread*, Memb_list*); -extern void nrn_div_capacity(NrnThread*, Memb_list*); extern void recalc_diam(); extern int nrn_errno_check(int); // extern double t, dt; #define nt_dt nrn_threads->_dt #define nt_t nrn_threads->_t -extern void long_difus_solve(int, NrnThread*); extern Symlist* hoc_built_in_symlist; #include "spmatrix.h" extern double* sp13mat; -#if 1 || PARANEURON +#if 1 || NRNMPI extern void (*nrnthread_v_transfer_)(NrnThread*); extern void (*nrnmpi_v_transfer_)(); #endif @@ -42,7 +40,7 @@ extern void nrn_multisplit_nocap_v_part1(NrnThread*); extern void nrn_multisplit_nocap_v_part2(NrnThread*); extern void nrn_multisplit_nocap_v_part3(NrnThread*); extern void nrn_multisplit_adjust_rhs(NrnThread*); -#if PARANEURON +#if NRNMPI extern void (*nrn_multisplit_solve_)(); #endif @@ -81,7 +79,7 @@ The variable step method for these cases is handled by daspk. // as well as algebraic nodes (no_cap) bool Cvode::init_global() { -#if PARANEURON +#if NRNMPI if (!use_partrans_ && nrnmpi_numprocs > 1 && (nrnmpi_v_transfer_ || nrn_multisplit_solve_)) { assert(nrn_nthread == 1); // we lack an NVector class for both // threads and mpi together @@ -92,7 +90,7 @@ bool Cvode::init_global() { if (!structure_change_) { return false; } - if (ctd_[0].cv_memb_list_ == nil) { + if (ctd_[0].cv_memb_list_ == nullptr) { neq_ = 0; if (use_daspk_) { return true; @@ -110,8 +108,6 @@ void Cvode::init_eqn() { NrnThread* _nt; CvMembList* cml; - Memb_list* ml; - Memb_func* mf; int i, j, zneq, zneq_v, zneq_cap_v; // printf("Cvode::init_eqn\n"); if (nthsizes_) { @@ -121,8 +117,8 @@ void Cvode::init_eqn() { neq_ = 0; for (int id = 0; id < nctd_; ++id) { CvodeThreadData& z = ctd_[id]; - z.cmlcap_ = nil; - z.cmlext_ = nil; + z.cmlcap_ = nullptr; + z.cmlext_ = nullptr; for (cml = z.cv_memb_list_; cml; cml = cml->next) { if (cml->index == CAP) { z.cmlcap_ = cml; @@ -141,27 +137,26 @@ void Cvode::init_eqn() { CvodeThreadData& z = ctd_[_nt->id]; // how many ode's are there? First ones are non-zero capacitance // nodes with non-zero capacitance - zneq_cap_v = z.cmlcap_ ? 
z.cmlcap_->ml->nodecount : 0; + zneq_cap_v = 0; + if (z.cmlcap_) { + for (auto& ml: z.cmlcap_->ml) { + zneq_cap_v += ml.nodecount; + } + } zneq = zneq_cap_v; z.neq_v_ = z.nonvint_offset_ = zneq; // now add the membrane mechanism ode's to the count for (cml = z.cv_memb_list_; cml; cml = cml->next) { - nrn_ode_count_t s = memb_func[cml->index].ode_count; - if (s) { - zneq += cml->ml->nodecount * (*s)(cml->index); + if (auto const ode_count = memb_func[cml->index].ode_count; ode_count) { + auto const count = ode_count(cml->index); + for (auto& ml: cml->ml) { + zneq += ml.nodecount * count; + } } } z.nonvint_extra_offset_ = zneq; - if (z.pv_) { - delete[] z.pv_; - delete[] z.pvdot_; - z.pv_ = 0; - z.pvdot_ = 0; - } - if (z.nonvint_extra_offset_) { - z.pv_ = new double*[z.nonvint_extra_offset_]; - z.pvdot_ = new double*[z.nonvint_extra_offset_]; - } + z.pv_.resize(z.nonvint_extra_offset_); + z.pvdot_.resize(z.nonvint_extra_offset_); zneq += nrn_nonvint_block_ode_count(zneq, _nt->id); z.nvsize_ = zneq; z.nvoffset_ = neq_; @@ -175,7 +170,7 @@ printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize= break; } // lvardt } -#if PARANEURON +#if NRNMPI if (use_partrans_) { global_neq_ = nrnmpi_int_sum_reduce(neq_); // printf("%d global_neq_=%d neq=%d\n", nrnmpi_myid, global_neq_, neq_); @@ -185,7 +180,14 @@ printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize= for (int id = 0; id < nctd_; ++id) { CvodeThreadData& z = ctd_[id]; double* atv = n_vector_data(atolnvec_, id); - zneq_cap_v = z.cmlcap_ ? z.cmlcap_->ml->nodecount : 0; + zneq_cap_v = 0; + if (z.cmlcap_) { + for (auto& ml: z.cmlcap_->ml) { + // support `1 x n` and `n x 1` but not `n x m` + assert(z.cmlcap_->ml.size() == 1 || ml.nodecount == 1); + zneq_cap_v += ml.nodecount; + } + } zneq = z.nvsize_; zneq_v = zneq_cap_v; @@ -214,9 +216,10 @@ printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize= NODERHS(z.v_node_[i]) = 1.; } for (i = 0; i < zneq_cap_v; ++i) { - ml = z.cmlcap_->ml; - z.pv_[i] = &NODEV(ml->nodelist[i]); - z.pvdot_[i] = &(NODERHS(ml->nodelist[i])); + auto* const node = z.cmlcap_->ml.size() == 1 ? z.cmlcap_->ml[0].nodelist[i] + : z.cmlcap_->ml[i].nodelist[0]; + z.pv_[i] = node->v_handle(); + z.pvdot_[i] = node->rhs_handle(); *z.pvdot_[i] = 0.; // only ones = 1 are no_cap } @@ -245,27 +248,27 @@ printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize= // map the membrane mechanism ode state and dstate pointers int ieq = zneq_v; for (cml = z.cv_memb_list_; cml; cml = cml->next) { - int n; - ml = cml->ml; - mf = memb_func + cml->index; - nrn_ode_count_t sc = mf->ode_count; - if (sc && ((n = (*sc)(cml->index)) > 0)) { - // Note: if mf->hoc_mech then all cvode related - // callbacks are NULL (including ode_count) - // See src/nrniv/hocmech.cpp. That won't change but - // if it does, hocmech.cpp must follow all the - // nrn_ode_..._t prototypes to avoid segfault - // with Apple M1. - nrn_ode_map_t s = mf->ode_map; - for (j = 0; j < ml->nodecount; ++j) { - (*s)(ieq, - z.pv_ + ieq, - z.pvdot_ + ieq, - ml->_data[j], - ml->pdata[j], - atv + ieq, - cml->index); - ieq += n; + Memb_func& mf = memb_func[cml->index]; + if (!mf.ode_count) { + continue; + } + for (auto& ml: cml->ml) { + if (int n; (n = mf.ode_count(cml->index)) > 0) { + // Note: if mf.hoc_mech then all cvode related + // callbacks are NULL (including ode_count) + // See src/nrniv/hocmech.cpp. 
That won't change but + // if it does, hocmech.cpp must follow all the + // nrn_ode_..._t prototypes to avoid segfault + // with Apple M1. + for (j = 0; j < ml.nodecount; ++j) { + mf.ode_map(ml.prop[j], + ieq, + z.pv_.data() + ieq, + z.pvdot_.data() + ieq, + atv + ieq, + cml->index); + ieq += n; + } } } } @@ -275,67 +278,60 @@ printf("%d Cvode::init_eqn id=%d neq_v_=%d #nonvint=%d #nonvint_extra=%d nvsize= } void Cvode::new_no_cap_memb(CvodeThreadData& z, NrnThread* _nt) { - int i, n; - CvMembList *cml, *ncm; - Memb_list* ml; z.delete_memb_list(z.no_cap_memb_); - z.no_cap_memb_ = nil; - for (cml = z.cv_memb_list_; cml; cml = cml->next) { - Memb_list* ml = cml->ml; - Memb_func* mf = memb_func + cml->index; + z.no_cap_memb_ = nullptr; + CvMembList* ncm{}; + for (auto* cml = z.cv_memb_list_; cml; cml = cml->next) { + const Memb_func& mf = memb_func[cml->index]; // only point processes with currents are possibilities - if (!mf->is_point || !mf->current) { + if (!mf.is_point || !mf.current) { continue; } // count how many at no cap nodes - n = 0; - for (i = 0; i < ml->nodecount; ++i) { - if (NODERHS(ml->nodelist[i]) > .5) { - ++n; + int n{}; + for (auto& ml: cml->ml) { + for (auto i = 0; i < ml.nodecount; ++i) { + if (NODERHS(ml.nodelist[i]) > .5) { + ++n; + } } } - if (n == 0) + if (n == 0) { continue; + } + // keep same order - if (z.no_cap_memb_ == nil) { - z.no_cap_memb_ = new CvMembList(); + if (!z.no_cap_memb_) { + z.no_cap_memb_ = new CvMembList{cml->index}; ncm = z.no_cap_memb_; } else { - ncm->next = new CvMembList(); + ncm->next = new CvMembList{cml->index}; ncm = ncm->next; } - ncm->next = nil; + ncm->next = nullptr; ncm->index = cml->index; - ncm->ml->nodecount = n; - // allocate - ncm->ml->nodelist = new Node*[n]; -#if CACHEVEC - ncm->ml->nodeindices = new int[n]; -#endif - if (mf->hoc_mech) { - ncm->ml->prop = new Prop*[n]; - } else { - ncm->ml->_data = new double*[n]; - ncm->ml->pdata = new Datum*[n]; - } - ncm->ml->_thread = ml->_thread; // can share this - // fill - n = 0; - for (i = 0; i < ml->nodecount; ++i) { - if (NODERHS(ml->nodelist[i]) > .5) { - ncm->ml->nodelist[n] = ml->nodelist[i]; -#if CACHEVEC - ncm->ml->nodeindices[n] = ml->nodeindices[i]; -#endif - if (mf->hoc_mech) { - ncm->ml->prop[n] = ml->prop[i]; - } else { - ncm->ml->_data[n] = ml->_data[i]; - ncm->ml->pdata[n] = ml->pdata[i]; + // ncm is in non-contiguous mode + ncm->ml.reserve(n); + ncm->ml.clear(); + for (auto& ml: cml->ml) { + for (auto i = 0; i < ml.nodecount; ++i) { + if (NODERHS(ml.nodelist[i]) > .5) { + auto& newml = ncm->ml.emplace_back(cml->index /* mechanism type */); + newml.nodecount = 1; + newml.nodelist = new Node* [1] { ml.nodelist[i] }; + assert(newml.nodelist[0] == ml.nodelist[i]); + newml.nodeindices = new int[1]{ml.nodeindices[i]}; + newml.prop = new Prop* [1] { ml.prop[i] }; + if (!mf.hoc_mech) { + // Danger: this is not stable w.r.t. permutation + newml.set_storage_offset(ml.get_storage_offset() + i); + newml.pdata = new Datum* [1] { ml.pdata[i] }; + } + newml._thread = ml._thread; } - ++n; } } + assert(ncm->ml.size() == n); } } @@ -355,7 +351,6 @@ void Cvode::daspk_init_eqn() { // how many equations are there? neq_ = 0; - Memb_func* mf; CvMembList* cml; // start with all the equations for the fixed step method. 
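The rewritten new_no_cap_memb above no longer packs the qualifying nodes into one freshly allocated Memb_list of size n; it counts matches, reserves a vector, and emplaces one single-node Memb_list per qualifying node (`ncm->ml.reserve(n)`, `ncm->ml.emplace_back(...)`, `nodecount = 1`). A rough stand-alone illustration of that count/reserve/split pattern, with a hypothetical Entry type standing in for the real Node/Memb_list machinery:

```cpp
#include <cassert>
#include <cstddef>
#include <vector>

struct Entry {
    int node;     // stand-in for Node*
    bool no_cap;  // stand-in for the NODERHS(...) > .5 test
};

int main() {
    std::vector<Entry> contiguous{{0, false}, {1, true}, {2, true}, {3, false}};

    // Count first, reserve, then emplace one single-entry list per match,
    // as the diff does with ncm->ml.reserve(n) / ncm->ml.emplace_back(...).
    std::size_t n = 0;
    for (auto const& e: contiguous)
        n += e.no_cap;

    std::vector<std::vector<Entry>> split;
    split.reserve(n);
    for (auto const& e: contiguous)
        if (e.no_cap)
            split.push_back({e});  // each inner list has "nodecount == 1"

    assert(split.size() == n && split[0].size() == 1 && split[0][0].node == 1);
}
```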
if (use_sparse13 == 0 || diam_changed != 0) { @@ -365,9 +360,12 @@ void Cvode::daspk_init_eqn() { z.neq_v_ = z.nonvint_offset_ = zneq; // now add the membrane mechanism ode's to the count for (cml = z.cv_memb_list_; cml; cml = cml->next) { - nrn_ode_count_t s = memb_func[cml->index].ode_count; - if (s) { - zneq += cml->ml->nodecount * (*s)(cml->index); + if (auto ode_count = memb_func[cml->index].ode_count; ode_count) { + zneq += std::accumulate(cml->ml.begin(), + cml->ml.end(), + 0, + [](int total, auto& ml) { return total + ml.nodecount; }) * + ode_count(cml->index); } } z.nonvint_extra_offset_ = zneq; @@ -376,12 +374,8 @@ void Cvode::daspk_init_eqn() { z.nvoffset_ = neq_; neq_ = z.nvsize_; // printf("Cvode::daspk_init_eqn: neq_v_=%d neq_=%d\n", neq_v_, neq_); - if (z.pv_) { - delete[] z.pv_; - delete[] z.pvdot_; - } - z.pv_ = new double*[z.nonvint_extra_offset_]; - z.pvdot_ = new double*[z.nonvint_extra_offset_]; + z.pv_.resize(z.nonvint_extra_offset_); + z.pvdot_.resize(z.nonvint_extra_offset_); atolvec_alloc(neq_); double* atv = n_vector_data(atolnvec_, 0); for (i = 0; i < neq_; ++i) { @@ -408,13 +402,15 @@ void Cvode::daspk_init_eqn() { nd = _nt->_v_node[in]; nde = nd->extnode; i = nd->eqn_index_ - 1; // the sparse matrix index starts at 1 - z.pv_[i] = &NODEV(nd); - z.pvdot_[i] = nd->_rhs; + z.pv_[i] = nd->v_handle(); + z.pvdot_[i] = nd->rhs_handle(); if (nde) { for (ie = 0; ie < nlayer; ++ie) { k = i + ie + 1; - z.pv_[k] = nde->v + ie; - z.pvdot_[k] = nde->_rhs[ie]; + z.pv_[k] = neuron::container::data_handle{nde->v + ie}; + z.pvdot_[k] = + neuron::container::data_handle{neuron::container::do_not_search, + nde->_rhs[ie]}; } } } @@ -427,20 +423,25 @@ void Cvode::daspk_init_eqn() { // map the membrane mechanism ode state and dstate pointers int ieq = z.neq_v_; for (cml = z.cv_memb_list_; cml; cml = cml->next) { - int n; - mf = memb_func + cml->index; - nrn_ode_count_t sc = mf->ode_count; - if (sc && ((n = (*sc)(cml->index)) > 0)) { - Memb_list* ml = cml->ml; - nrn_ode_map_t s = mf->ode_map; - for (j = 0; j < ml->nodecount; ++j) { - (*s)(ieq, - z.pv_ + ieq, - z.pvdot_ + ieq, - ml->_data[j], - ml->pdata[j], - atv + ieq, - cml->index); + auto const& mf = memb_func[cml->index]; + auto const ode_count = mf.ode_count; + if (!ode_count) { + continue; + } + auto const n = ode_count(cml->index); + if (n <= 0) { + continue; + } + auto const ode_map = mf.ode_map; + for (auto& ml: cml->ml) { + for (j = 0; j < ml.nodecount; ++j) { + assert(ode_map); + ode_map(ml.prop[j], + ieq, + z.pv_.data() + ieq, + z.pvdot_.data() + ieq, + atv + ieq, + cml->index); ieq += n; } } @@ -461,20 +462,22 @@ double* Cvode::n_vector_data(N_Vector v, int tid) { extern void nrn_extra_scatter_gather(int, int); -void Cvode::scatter_y(double* y, int tid) { - int i; +void Cvode::scatter_y(neuron::model_sorted_token const& sorted_token, double* y, int tid) { CvodeThreadData& z = CTD(tid); - for (i = 0; i < z.nonvint_extra_offset_; ++i) { - *(z.pv_[i]) = y[i]; + assert(z.nonvint_extra_offset_ == z.pv_.size()); + for (int i = 0; i < z.nonvint_extra_offset_; ++i) { + // TODO: understand why this wasn't needed before + if (z.pv_[i]) { + *(z.pv_[i]) = y[i]; + } // printf("%d scatter_y %d %d %g\n", nrnmpi_myid, tid, i, y[i]); } - CvMembList* cml; - for (cml = z.cv_memb_list_; cml; cml = cml->next) { - Memb_func* mf = memb_func + cml->index; - if (mf->ode_synonym) { - nrn_ode_synonym_t s = mf->ode_synonym; - Memb_list* ml = cml->ml; - (*s)(ml->nodecount, ml->_data, ml->pdata); + for (CvMembList* cml = z.cv_memb_list_; cml; cml = 
cml->next) { + const Memb_func& mf = memb_func[cml->index]; + if (mf.ode_synonym) { + for (auto& ml: cml->ml) { + mf.ode_synonym(sorted_token, nrn_threads[tid], ml, cml->index); + } } } nrn_extra_scatter_gather(0, tid); @@ -497,11 +500,14 @@ void Cvode::gather_y(N_Vector y) { nrn_multithread_job(gather_y_thread); } void Cvode::gather_y(double* y, int tid) { - int i; CvodeThreadData& z = CTD(tid); nrn_extra_scatter_gather(1, tid); - for (i = 0; i < z.nonvint_extra_offset_; ++i) { - y[i] = *(z.pv_[i]); + assert(z.nonvint_extra_offset_ == z.pv_.size()); + for (int i = 0; i < z.nonvint_extra_offset_; ++i) { + // TODO: understand why this wasn't needed before + if (z.pv_[i]) { + y[i] = *(z.pv_[i]); + } // printf("gather_y %d %d %g\n", tid, i, y[i]); } } @@ -552,7 +558,10 @@ int Cvode::setup(N_Vector ypred, N_Vector fpred) { return 0; } -int Cvode::solvex_thread(double* b, double* y, NrnThread* nt) { +int Cvode::solvex_thread(neuron::model_sorted_token const& sorted_token, + double* b, + double* y, + NrnThread* nt) { // printf("Cvode::solvex_thread %d t=%g t_=%g\n", nt->id, nt->t, t_); // printf("Cvode::solvex_thread %d %g\n", nt->id, gam()); // printf("\tenter b\n"); @@ -564,15 +573,18 @@ int Cvode::solvex_thread(double* b, double* y, NrnThread* nt) { if (z.nvsize_ == 0) { return 0; } - lhs(nt); // special version for cvode. + lhs(sorted_token, nt); // special version for cvode. scatter_ydot(b, nt->id); - if (z.cmlcap_) - nrn_mul_capacity(nt, z.cmlcap_->ml); + if (z.cmlcap_) { + for (auto& ml: z.cmlcap_->ml) { + nrn_mul_capacity(sorted_token, nt, &ml); + } + } for (i = 0; i < z.no_cap_count_; ++i) { NODERHS(z.no_cap_node_[i]) = 0.; } // solve it -#if PARANEURON +#if NRNMPI if (nrn_multisplit_solve_) { (*nrn_multisplit_solve_)(); } else @@ -585,7 +597,7 @@ int Cvode::solvex_thread(double* b, double* y, NrnThread* nt) { // printf("%d rhs %d %g t=%g\n", nrnmpi_myid, i, VEC_RHS(i), t); //} if (ncv_->stiff() == 2) { - solvemem(nt); + solvemem(sorted_token, nt); } else { // bug here should multiply by gam } @@ -608,10 +620,13 @@ int Cvode::solvex_thread_part1(double* b, NrnThread* nt) { if (z.nvsize_ == 0) { return 0; } - lhs(nt); // special version for cvode. + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + lhs(sorted_token, nt); // special version for cvode. 
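Several routines above (solvex_thread, lhs, solvemem, nrn_mul_capacity) now take a `neuron::model_sorted_token` as an extra argument, obtained from `nrn_ensure_model_data_are_sorted()`. As I read it, the token acts as a type-level precondition: holding one proves the model data were put in order before the solver code runs. A small sketch of that pattern under simplified, assumed types (the real token carries more than this):

```cpp
#include <algorithm>
#include <iostream>
#include <vector>

struct Model {
    std::vector<double> values;
    bool sorted = false;
};

// Token whose only constructor is private: holding one proves ensure_sorted() ran.
// Hypothetical names, not NEURON's actual API.
class SortedToken {
    SortedToken() = default;
    friend SortedToken ensure_sorted(Model&);
};

SortedToken ensure_sorted(Model& m) {
    if (!m.sorted) {
        std::sort(m.values.begin(), m.values.end());
        m.sorted = true;
    }
    return SortedToken{};
}

// Routines that rely on sortedness demand the token as a parameter.
void solve(SortedToken const&, Model const& m) {
    std::cout << "min = " << m.values.front() << '\n';
}

int main() {
    Model m{{3.0, 1.0, 2.0}};
    auto const token = ensure_sorted(m);  // cf. nrn_ensure_model_data_are_sorted()
    solve(token, m);                      // cf. nrn_rhs(sorted_token, *nt)
}
```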
scatter_ydot(b, nt->id); - if (z.cmlcap_) - nrn_mul_capacity(nt, z.cmlcap_->ml); + if (z.cmlcap_) { + assert(z.cmlcap_->ml.size() == 1); + nrn_mul_capacity(sorted_token, nt, &z.cmlcap_->ml[0]); + } for (i = 0; i < z.no_cap_count_; ++i) { NODERHS(z.no_cap_node_[i]) = 0.; } @@ -629,7 +644,7 @@ int Cvode::solvex_thread_part3(double* b, NrnThread* nt) { // printf("%d rhs %d %g t=%g\n", nrnmpi_myid, i, VEC_RHS(i), t); //} if (ncv_->stiff() == 2) { - solvemem(nt); + solvemem(nrn_ensure_model_data_are_sorted(), nt); } else { // bug here should multiply by gam } @@ -639,34 +654,39 @@ int Cvode::solvex_thread_part3(double* b, NrnThread* nt) { return 0; } -void Cvode::solvemem(NrnThread* nt) { +void Cvode::solvemem(neuron::model_sorted_token const& sorted_token, NrnThread* nt) { // all the membrane mechanism matrices CvodeThreadData& z = CTD(nt->id); CvMembList* cml; for (cml = z.cv_memb_list_; cml; cml = cml->next) { // probably can start at 6 or hh - Memb_func* mf = memb_func + cml->index; - if (mf->ode_matsol) { - Memb_list* ml = cml->ml; - Pvmi s = mf->ode_matsol; - (*s)(nt, ml, cml->index); - if (errno) { - if (nrn_errno_check(cml->index)) { - hoc_warning("errno set during ode jacobian solve", (char*) 0); + const Memb_func& mf = memb_func[cml->index]; + if (auto const ode_matsol = mf.ode_matsol; ode_matsol) { + for (auto& ml: cml->ml) { + ode_matsol(sorted_token, nt, &ml, cml->index); + if (errno && nrn_errno_check(cml->index)) { + hoc_warning("errno set during ode jacobian solve", nullptr); } } } } - long_difus_solve(2, nt); + long_difus_solve(sorted_token, 2, *nt); } -void Cvode::fun_thread(double tt, double* y, double* ydot, NrnThread* nt) { +void Cvode::fun_thread(neuron::model_sorted_token const& sorted_token, + double tt, + double* y, + double* ydot, + NrnThread* nt) { CvodeThreadData& z = CTD(nt->id); - fun_thread_transfer_part1(tt, y, nt); + fun_thread_transfer_part1(sorted_token, tt, y, nt); nrn_nonvint_block_ode_fun(z.nvsize_, y, ydot, nt->id); - fun_thread_transfer_part2(ydot, nt); + fun_thread_transfer_part2(sorted_token, ydot, nt); } -void Cvode::fun_thread_transfer_part1(double tt, double* y, NrnThread* nt) { +void Cvode::fun_thread_transfer_part1(neuron::model_sorted_token const& sorted_token, + double tt, + double* y, + NrnThread* nt) { CvodeThreadData& z = CTD(nt->id); nt->_t = tt; @@ -681,45 +701,49 @@ void Cvode::fun_thread_transfer_part1(double tt, double* y, NrnThread* nt) { if (z.nvsize_ == 0) { return; } - scatter_y(y, nt->id); -#if PARANEURON + scatter_y(sorted_token, y, nt->id); +#if NRNMPI if (use_partrans_) { nrnmpi_assert_opstep(opmode_, nt->_t); } #endif - nocap_v(nt); // vm at nocap nodes consistent with adjacent vm + nocap_v(sorted_token, nt); // vm at nocap nodes consistent with adjacent vm } -void Cvode::fun_thread_transfer_part2(double* ydot, NrnThread* nt) { +void Cvode::fun_thread_transfer_part2(neuron::model_sorted_token const& sorted_token, + double* ydot, + NrnThread* nt) { CvodeThreadData& z = CTD(nt->id); if (z.nvsize_ == 0) { return; } -#if 1 || PARANEURON +#if 1 || NRNMPI if (nrnthread_v_transfer_) { (*nrnthread_v_transfer_)(nt); } #endif - before_after(z.before_breakpoint_, nt); - rhs(nt); // similar to nrn_rhs in treeset.cpp -#if PARANEURON + before_after(sorted_token, z.before_breakpoint_, nt); + rhs(sorted_token, nt); // similar to nrn_rhs in treeset.cpp +#if NRNMPI if (nrn_multisplit_solve_) { // non-zero area nodes need an adjustment nrn_multisplit_adjust_rhs(nt); } #endif - do_ode(nt); + do_ode(sorted_token, *nt); // divide by cm and compute 
capacity current - if (z.cmlcap_) - nrn_div_capacity(nt, z.cmlcap_->ml); - if (nt->_nrn_fast_imem) { - double* p = nt->_nrn_fast_imem->_nrn_sav_rhs; + if (z.cmlcap_) { + for (auto& ml: z.cmlcap_->ml) { + nrn_div_capacity(sorted_token, nt, &ml); + } + } + if (auto const vec_sav_rhs = nt->node_sav_rhs_storage(); vec_sav_rhs) { for (int i = 0; i < z.v_node_count_; ++i) { Node* nd = z.v_node_[i]; - p[nd->v_node_index] *= NODEAREA(nd) * 0.01; + vec_sav_rhs[nd->v_node_index] *= NODEAREA(nd) * 0.01; } } gather_ydot(ydot, nt->id); - before_after(z.after_solve_, nt); + before_after(sorted_token, z.after_solve_, nt); // for (int i=0; i < z.neq_; ++i) { printf("\t%d %g %g\n", i, y[i], ydot?ydot[i]:-1e99);} } @@ -735,8 +759,8 @@ void Cvode::fun_thread_ms_part1(double tt, double* y, NrnThread* nt) { // printf("%p fun %d %.15g %g\n", this, neq_, _t, _dt); play_continuous_thread(tt, nt); - scatter_y(y, nt->id); -#if PARANEURON + scatter_y(nrn_ensure_model_data_are_sorted(), y, nt->id); +#if NRNMPI if (use_partrans_) { nrnmpi_assert_opstep(opmode_, nt->_t); } @@ -755,7 +779,7 @@ void Cvode::fun_thread_ms_part3(NrnThread* nt) { // following is true and a gap is in 0 area node } void Cvode::fun_thread_ms_part4(double* ydot, NrnThread* nt) { -#if 1 || PARANEURON +#if 1 || NRNMPI if (nrnthread_v_transfer_) { (*nrnthread_v_transfer_)(nt); } @@ -764,25 +788,28 @@ void Cvode::fun_thread_ms_part4(double* ydot, NrnThread* nt) { if (z.nvsize_ == 0) { return; } - before_after(z.before_breakpoint_, nt); - rhs(nt); // similar to nrn_rhs in treeset.cpp + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + before_after(sorted_token, z.before_breakpoint_, nt); + rhs(sorted_token, nt); // similar to nrn_rhs in treeset.cpp nrn_multisplit_adjust_rhs(nt); - do_ode(nt); + do_ode(sorted_token, *nt); // divide by cm and compute capacity current - nrn_div_capacity(nt, z.cmlcap_->ml); + assert(z.cmlcap_->ml.size() == 1); + nrn_div_capacity(sorted_token, nt, &z.cmlcap_->ml[0]); gather_ydot(ydot, nt->id); - before_after(z.after_solve_, nt); + before_after(sorted_token, z.after_solve_, nt); // for (int i=0; i < z.neq_; ++i) { printf("\t%d %g %g\n", i, y[i], ydot?ydot[i]:-1e99);} } -void Cvode::before_after(BAMechList* baml, NrnThread* nt) { - BAMechList* ba; - int i, j; - for (ba = baml; ba; ba = ba->next) { +void Cvode::before_after(neuron::model_sorted_token const& sorted_token, + BAMechList* baml, + NrnThread* nt) { + for (auto* ba = baml; ba; ba = ba->next) { nrn_bamech_t f = ba->bam->f; - Memb_list* ml = ba->ml; - for (i = 0; i < ml->nodecount; ++i) { - (*f)(ml->nodelist[i], ml->_data[i], ml->pdata[i], ml->_thread, nt); + for (auto* const ml: ba->ml) { + for (int i = 0; i < ml->nodecount; ++i) { + f(ml->nodelist[i], ml->pdata[i], ml->_thread, nt, ml, i, sorted_token); + } } } } @@ -804,7 +831,7 @@ This was done by constructing a list of membrane mechanisms that contribute to the membrane current at the nocap nodes. 
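The vec_sav_rhs scaling above (`vec_sav_rhs[...] *= NODEAREA(nd) * 0.01`) turns an accumulated membrane current density into an absolute current. Assuming NEURON's usual units (density in mA/cm^2, segment area in um^2, currents in nA), the 0.01 is exactly the unit-conversion factor; a short numeric check:

```cpp
#include <cassert>
#include <cmath>

int main() {
    // Assumed units: current density in mA/cm^2, area in um^2, current in nA.
    double density_mA_per_cm2 = 2.5;
    double area_um2 = 100.0;

    // 1 mA/cm^2 = 1e-3 A / 1e-4 m^2 = 10 A/m^2, and 1 um^2 = 1e-12 m^2,
    // so each unit of (density * area) is 1e-11 A = 0.01 nA.
    double current_nA = density_mA_per_cm2 * area_um2 * 0.01;  // the "* NODEAREA(nd) * 0.01" factor

    // Cross-check in SI units.
    double current_A = (density_mA_per_cm2 * 1e-3 / 1e-4) * (area_um2 * 1e-12);
    assert(std::abs(current_nA - current_A * 1e9) < 1e-12);
}
```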
*/ -void Cvode::nocap_v(NrnThread* _nt) { +void Cvode::nocap_v(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) { int i; CvodeThreadData& z = CTD(_nt->id); @@ -814,8 +841,8 @@ void Cvode::nocap_v(NrnThread* _nt) { NODERHS(nd) = 0; } // compute the i(vmold) and di/dv - rhs_memb(z.no_cap_memb_, _nt); - lhs_memb(z.no_cap_memb_, _nt); + rhs_memb(sorted_token, z.no_cap_memb_, _nt); + lhs_memb(sorted_token, z.no_cap_memb_, _nt); for (i = 0; i < z.no_cap_count_; ++i) { // parent axial current Node* nd = z.no_cap_node_[i]; @@ -836,7 +863,7 @@ void Cvode::nocap_v(NrnThread* _nt) { NODED(pnd) -= NODEA(nd); } -#if PARANEURON +#if NRNMPI if (nrn_multisplit_solve_) { // add up the multisplit equations nrn_multisplit_nocap_v(); } @@ -844,7 +871,7 @@ void Cvode::nocap_v(NrnThread* _nt) { for (i = 0; i < z.no_cap_count_; ++i) { Node* nd = z.no_cap_node_[i]; - NODEV(nd) = NODERHS(nd) / NODED(nd); + nd->v() = NODERHS(nd) / NODED(nd); // printf("%d %d %g v=%g\n", nrnmpi_myid, i, _nt->_t, NODEV(nd)); } // no_cap v's are now consistent with adjacent v's @@ -860,8 +887,9 @@ void Cvode::nocap_v_part1(NrnThread* _nt) { NODERHS(nd) = 0; } // compute the i(vmold) and di/dv - rhs_memb(z.no_cap_memb_, _nt); - lhs_memb(z.no_cap_memb_, _nt); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + rhs_memb(sorted_token, z.no_cap_memb_, _nt); + lhs_memb(sorted_token, z.no_cap_memb_, _nt); for (i = 0; i < z.no_cap_count_; ++i) { // parent axial current Node* nd = z.no_cap_node_[i]; @@ -892,44 +920,39 @@ void Cvode::nocap_v_part3(NrnThread* _nt) { CvodeThreadData& z = ctd_[_nt->id]; for (i = 0; i < z.no_cap_count_; ++i) { Node* nd = z.no_cap_node_[i]; - NODEV(nd) = NODERHS(nd) / NODED(nd); + nd->v() = NODERHS(nd) / NODED(nd); // printf("%d %d %g v=%g\n", nrnmpi_myid, i, t, NODEV(nd)); } // no_cap v's are now consistent with adjacent v's } -void Cvode::do_ode(NrnThread* _nt) { +void Cvode::do_ode(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { // all the membrane mechanism ode's - CvodeThreadData& z = CTD(_nt->id); - CvMembList* cml; - Memb_func* mf; - for (cml = z.cv_memb_list_; cml; cml = cml->next) { // probably can start at 6 or hh - mf = memb_func + cml->index; - if (mf->ode_spec) { - Pvmi s = mf->ode_spec; - Memb_list* ml = cml->ml; - (*s)(_nt, ml, cml->index); - if (errno) { - if (nrn_errno_check(cml->index)) { - hoc_warning("errno set during ode evaluation", (char*) 0); + CvodeThreadData& z = CTD(nt.id); + for (auto* cml = z.cv_memb_list_; cml; cml = cml->next) { // probably can start at 6 or hh + if (auto* const ode_spec = memb_func[cml->index].ode_spec; ode_spec) { + for (auto& ml: cml->ml) { + ode_spec(sorted_token, &nt, &ml, cml->index); + if (errno && nrn_errno_check(cml->index)) { + hoc_warning("errno set during ode evaluation", nullptr); } } } } - long_difus_solve(1, _nt); + long_difus_solve(sorted_token, 1, nt); } static Cvode* nonode_cv; -static void* nonode_thread(NrnThread* nt) { - nonode_cv->do_nonode(nt); - return 0; +static void nonode_thread(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { + nonode_cv->do_nonode(sorted_token, &nt); } -void Cvode::do_nonode(NrnThread* _nt) { // all the hacked integrators, etc, in SOLVE procedure - // almost a verbatim copy of nonvint in fadvance.cpp +void Cvode::do_nonode(neuron::model_sorted_token const& sorted_token, NrnThread* _nt) { + // all the hacked integrators, etc, in SOLVE procedure almost a verbatim copy of nonvint in + // fadvance.cpp if (!_nt) { if (nrn_nthread > 1) { nonode_cv = this; - 
nrn_multithread_job(nonode_thread); + nrn_multithread_job(sorted_token, nonode_thread); return; } _nt = nrn_threads; @@ -937,22 +960,15 @@ void Cvode::do_nonode(NrnThread* _nt) { // all the hacked integrators, etc, in CvodeThreadData& z = CTD(_nt->id); CvMembList* cml; for (cml = z.cv_memb_list_; cml; cml = cml->next) { - Memb_func* mf = memb_func + cml->index; - if (mf->state) { - Memb_list* ml = cml->ml; - if (!mf->ode_spec) { - Pvmi s = mf->state; - (*s)(_nt, ml, cml->index); -#if 0 - if (errno) { - if (nrn_errno_check(cml->index)) { -hoc_warning("errno set during calculation of states", (char*)0); - } - } -#endif - } else if (mf->singchan_) { - Pvmi s = mf->singchan_; - (*s)(_nt, ml, cml->index); + const Memb_func& mf = memb_func[cml->index]; + if (!mf.state) { + continue; + } + for (auto& ml: cml->ml) { + if (!mf.ode_spec) { + mf.state(sorted_token, _nt, &ml, cml->index); + } else if (mf.singchan_) { + mf.singchan_(_nt, &ml, cml->index); } } } @@ -1010,35 +1026,37 @@ void Cvode::delete_prl() { if (z.play_) { delete z.play_; } - z.play_ = nil; + z.play_ = nullptr; if (z.record_) { delete z.record_; } - z.record_ = nil; + z.record_ = nullptr; } } void Cvode::record_add(PlayRecord* pr) { CvodeThreadData& z = CTD(pr->ith_); if (!z.record_) { - z.record_ = new PlayRecList(1); + z.record_ = new std::vector(); + z.record_->reserve(1); } - z.record_->append(pr); + z.record_->push_back(pr); } void Cvode::record_continuous() { if (nth_) { // lvardt record_continuous_thread(nth_); } else { + auto const sorted_token = nrn_ensure_model_data_are_sorted(); for (int i = 0; i < nrn_nthread; ++i) { NrnThread* nt = nrn_threads + i; CvodeThreadData& z = ctd_[i]; if (z.before_step_) { - before_after(z.before_step_, nt); + before_after(sorted_token, z.before_step_, nt); } if (z.record_) { - for (long i = 0; i < z.record_->count(); ++i) { - z.record_->item(i)->continuous(t_); + for (auto& item: *(z.record_)) { + item->continuous(t_); } } } @@ -1048,11 +1066,11 @@ void Cvode::record_continuous() { void Cvode::record_continuous_thread(NrnThread* nt) { CvodeThreadData& z = CTD(nt->id); if (z.before_step_) { - before_after(z.before_step_, nt); + before_after(nrn_ensure_model_data_are_sorted(), z.before_step_, nt); } if (z.record_) { - for (long i = 0; i < z.record_->count(); ++i) { - z.record_->item(i)->continuous(t_); + for (auto& item: *(z.record_)) { + item->continuous(t_); } } } @@ -1060,9 +1078,9 @@ void Cvode::record_continuous_thread(NrnThread* nt) { void Cvode::play_add(PlayRecord* pr) { CvodeThreadData& z = CTD(pr->ith_); if (!z.play_) { - z.play_ = new PlayRecList(1); + z.play_ = new std::vector(); } - z.play_->append(pr); + z.play_->push_back(pr); } void Cvode::play_continuous(double tt) { @@ -1072,8 +1090,8 @@ void Cvode::play_continuous(double tt) { for (int i = 0; i < nrn_nthread; ++i) { CvodeThreadData& z = ctd_[i]; if (z.play_) { - for (long i = 0; i < z.play_->count(); ++i) { - z.play_->item(i)->continuous(tt); + for (auto& item: *(z.play_)) { + item->continuous(tt); } } } @@ -1082,8 +1100,8 @@ void Cvode::play_continuous(double tt) { void Cvode::play_continuous_thread(double tt, NrnThread* nt) { CvodeThreadData& z = CTD(nt->id); if (z.play_) { - for (long i = 0; i < z.play_->count(); ++i) { - z.play_->item(i)->continuous(tt); + for (auto& item: *(z.play_)) { + item->continuous(tt); } } } diff --git a/src/nrncvode/pool.h b/src/nrncvode/pool.h index 5b2f7f5cae..c062e93ad3 100644 --- a/src/nrncvode/pool.h +++ b/src/nrncvode/pool.h @@ -47,9 +47,8 @@ MutexPool::MutexPool(long count, int mkmut) { 
pool_ = new T[count_]; pool_size_ = count; items_ = new T*[count_]; - { - for (long i = 0; i < count_; ++i) - items_[i] = pool_ + i; + for (long i = 0; i < count_; ++i) { + items_[i] = pool_ + i; } MUTCONSTRUCT(mkmut) } @@ -62,22 +61,15 @@ void MutexPool::grow() { chain_ = p; long newcnt = 2 * count_; T** itms = new T*[newcnt]; - long i, j; put_ += count_; - { - for (i = 0; i < get_; ++i) { - itms[i] = items_[i]; - } + for (long i = 0; i < get_; ++i) { + itms[i] = items_[i]; } - { - for (i = get_, j = 0; j < count_; ++i, ++j) { - itms[i] = p->items_[j]; - } + for (long i = get_, j = 0; j < count_; ++i, ++j) { + itms[i] = p->items_[j]; } - { - for (i = put_, j = get_; j < count_; ++i, ++j) { - itms[i] = items_[j]; - } + for (long i = put_, j = get_; j < count_; ++i, ++j) { + itms[i] = items_[j]; } delete[] items_; delete[] p->items_; @@ -88,35 +80,24 @@ void MutexPool::grow() { template MutexPool::~MutexPool() { - { - if (chain_) { - delete chain_; - } - } + delete chain_; delete[] pool_; - { - if (items_) { - delete[] items_; - } - } + delete[] items_; MUTDESTRUCT } template T* MutexPool::alloc() { - MUTLOCK { - if (nget_ >= count_) { - grow(); - } + MUTLOCK + if (nget_ >= count_) { + grow(); } T* item = items_[get_]; get_ = (get_ + 1) % count_; ++nget_; - { - if (nget_ > maxget_) { - maxget_ = nget_; - } - } + + maxget_ = std::max(nget_, maxget_); + MUTUNLOCK return item; } @@ -139,12 +120,10 @@ void MutexPool::free_all() { nget_ = 0; get_ = 0; put_ = 0; - { - for (pp = this; pp; pp = pp->chain_) { - for (i = 0; i < pp->pool_size_; ++i) { - items_[put_++] = pp->pool_ + i; - pp->pool_[i].clear(); - } + for (pp = this; pp; pp = pp->chain_) { + for (i = 0; i < pp->pool_size_; ++i) { + items_[put_++] = pp->pool_ + i; + pp->pool_[i].clear(); } } assert(put_ == count_); diff --git a/src/nrncvode/spt2queue.cpp b/src/nrncvode/spt2queue.cpp index 2eb1c89c90..07dbcab1d1 100644 --- a/src/nrncvode/spt2queue.cpp +++ b/src/nrncvode/spt2queue.cpp @@ -101,11 +101,11 @@ TQueue::~TQueue() { if (least_) { deleteitem(least_); } - while ((q = spdeq(&sptree_->root)) != nil) { + while ((q = spdeq(&sptree_->root)) != nullptr) { deleteitem(q); } delete sptree_; - while ((q = spdeq(&sptree2_->root)) != nil) { + while ((q = spdeq(&sptree2_->root)) != nullptr) { deleteitem(q); } delete sptree2_; @@ -115,16 +115,16 @@ void TQueue::print() { if (least_) { prnt(least_, 0); } - spscan(prnt, nil, sptree_); - spscan(prnt, nil, sptree2_); + spscan(prnt, static_cast(nullptr), sptree_); + spscan(prnt, static_cast(nullptr), sptree2_); } void TQueue::forall_callback(void (*f)(const TQItem*, int)) { if (least_) { f(least_, 0); } - spscan(f, nil, sptree_); - spscan(f, nil, sptree2_); + spscan(f, nullptr, sptree_); + spscan(f, nullptr, sptree2_); } void TQueue::check(const char* mes) {} @@ -240,7 +240,7 @@ void TQueue::remove(TQItem* q) { } else if (sptree2_->root) { least_ = spdeq(&sptree2_->root); } else { - least_ = nil; + least_ = nullptr; } } else { if (q->cnt_ == -1) { diff --git a/src/nrncvode/sptbinq.cpp b/src/nrncvode/sptbinq.cpp index 5dfdcdf427..49acfc4a0b 100644 --- a/src/nrncvode/sptbinq.cpp +++ b/src/nrncvode/sptbinq.cpp @@ -72,7 +72,7 @@ TQueue::TQueue(TQItemPool* tp, int mkmut) { TQueue::~TQueue() { TQItem *q, *q2; - while ((q = spdeq(&sptree_->root)) != nil) { + while ((q = spdeq(&sptree_->root)) != nullptr) { deleteitem(q); } delete sptree_; @@ -93,7 +93,7 @@ void TQueue::print() { if (least_) { prnt(least_, 0); } - spscan(prnt, static_cast(nil), sptree_); + spscan(prnt, static_cast(nullptr), sptree_); for 
(TQItem* q = binq_->first(); q; q = binq_->next(q)) { prnt(q, 0); } @@ -105,7 +105,7 @@ void TQueue::forall_callback(void (*f)(const TQItem*, int)) { if (least_) { f(least_, 0); } - spscan(f, static_cast(nil), sptree_); + spscan(f, static_cast(nullptr), sptree_); for (TQItem* q = binq_->first(); q; q = binq_->next(q)) { f(q, 0); } @@ -230,7 +230,7 @@ void TQueue::remove(TQItem* q) { if (sptree_->root) { least_ = spdeq(&sptree_->root); } else { - least_ = nil; + least_ = nullptr; } } else if (q->cnt_ >= 0) { binq_->remove(q); @@ -251,7 +251,7 @@ TQItem* TQueue::atomic_dq(double tt) { if (sptree_->root) { least_ = spdeq(&sptree_->root); } else { - least_ = nil; + least_ = nullptr; } } MUTUNLOCK diff --git a/src/nrncvode/sptqueue.cpp b/src/nrncvode/sptqueue.cpp index 9521487e25..b42f0121e8 100644 --- a/src/nrncvode/sptqueue.cpp +++ b/src/nrncvode/sptqueue.cpp @@ -67,7 +67,7 @@ TQueue::TQueue() { TQueue::~TQueue() { TQItem* q; - while ((q = spdeq(&sptree_->root)) != nil) { + while ((q = spdeq(&sptree_->root)) != nullptr) { deleteitem(q); } delete sptree_; @@ -77,14 +77,14 @@ void TQueue::print() { if (least_) { prnt(least_, 0); } - spscan(prnt, nil, sptree_); + spscan(prnt, nullptr, sptree_); } void TQueue::forall_callback(void (*f)(const TQItem*, int)) { if (least_) { f(least_, 0); } - spscan(f, nil, sptree_); + spscan(f, nullptr, sptree_); } void TQueue::check(const char* mes) {} @@ -174,7 +174,7 @@ void TQueue::remove(TQItem* q) { if (sptree_->root) { least_ = spdeq(&sptree_->root); } else { - least_ = nil; + least_ = nullptr; } } else { spdelete(q, sptree_); diff --git a/src/nrncvode/tqueue.cpp b/src/nrncvode/tqueue.cpp index 383563e57f..92c7990947 100644 --- a/src/nrncvode/tqueue.cpp +++ b/src/nrncvode/tqueue.cpp @@ -117,7 +117,7 @@ void TQueue_reg() { SelfQueue::SelfQueue(TQItemPool* tp, int mkmut) { MUTCONSTRUCT(mkmut) tpool_ = tp; - head_ = nil; + head_ = nullptr; } SelfQueue::~SelfQueue() { remove_all(); @@ -126,7 +126,7 @@ SelfQueue::~SelfQueue() { TQItem* SelfQueue::insert(void* d) { MUTLOCK TQItem* q = tpool_->alloc(); - q->left_ = nil; + q->left_ = nullptr; q->right_ = head_; if (head_) { head_->left_ = q; @@ -156,6 +156,6 @@ void SelfQueue::remove_all() { for (TQItem* q = first(); q; q = next(q)) { tpool_->hpfree(q); } - head_ = nil; + head_ = nullptr; MUTUNLOCK } diff --git a/src/nrncvode/vrecitem.h b/src/nrncvode/vrecitem.h index b48efba9d6..17b8b208a0 100644 --- a/src/nrncvode/vrecitem.h +++ b/src/nrncvode/vrecitem.h @@ -1,7 +1,6 @@ #ifndef vrecitem_h #define vrecitem_h -#include #include #include #include @@ -50,7 +49,7 @@ class PlayRecordEvent: public DiscreteEvent { // common interface for Play and Record for all integration methods. class PlayRecord: public Observer { public: - PlayRecord(double* pd, Object* ppobj = nil); + PlayRecord(neuron::container::data_handle pd, Object* ppobj = nullptr); virtual ~PlayRecord(); virtual void install(Cvode* cv) { cvode_ = cv; @@ -61,19 +60,18 @@ class PlayRecord: public Observer { } // play - every f(y, t) or res(y', y, t); record - advance_tn and initialize flag virtual void deliver(double t, NetCvode*) {} // at associated DiscreteEvent virtual PlayRecordEvent* event() { - return nil; + return nullptr; } virtual void pr(); // print identifying info virtual int type() { return 0; } - // install normally calls one of these. Cvode may be nil. + // install normally calls one of these. Cvode may be nullptr. 
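PlayRecord and its subclasses now take `neuron::container::data_handle` arguments instead of raw `double*` (the rationale appears just below: the recorded quantity can be a voltage, which now lives in a reallocatable container, so a plain pointer could dangle). A toy model of such a handle, which resolves a stable identifier on every dereference; the real data_handle is considerably more involved:

```cpp
#include <cassert>
#include <cstddef>
#include <utility>
#include <vector>

// Toy container whose rows can be reallocated or permuted.
struct Storage {
    std::vector<double> values;
    std::vector<std::size_t> row_of_id;  // stable id -> current row
    std::size_t add(double v) {
        values.push_back(v);
        row_of_id.push_back(values.size() - 1);
        return row_of_id.size() - 1;  // hand out a stable id, not an address
    }
    void swap_rows(std::size_t a, std::size_t b) {
        std::swap(values[row_of_id[a]], values[row_of_id[b]]);
        std::swap(row_of_id[a], row_of_id[b]);
    }
};

// Toy "data handle": remembers (container, id) and resolves on each use,
// so it survives reallocation and permutation, unlike a raw double*.
struct DataHandle {
    Storage* store{};
    std::size_t id{};
    double& operator*() const { return store->values[store->row_of_id[id]]; }
};

int main() {
    Storage s;
    DataHandle v{&s, s.add(-65.0)};
    s.add(10.0);
    s.swap_rows(0, 1);  // rows move underneath the handle
    *v = -70.0;         // still writes to the same logical entry
    assert(*v == -70.0 && s.values[s.row_of_id[0]] == -70.0);
}
```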
void play_add(Cvode*); void record_add(Cvode*); // administration - virtual void update_ptr(double*); virtual void disconnect(Observable*); virtual void update(Observable* o) { disconnect(o); @@ -88,14 +86,14 @@ class PlayRecord: public Observer { virtual PlayRecordSave* savestate_save(); static PlayRecordSave* savestate_read(FILE*); - double* pd_; + // pd_ can refer to a voltage, and those are stored in a modern container, + // so we need to use data_handle + neuron::container::data_handle pd_; Object* ppobj_; Cvode* cvode_; int ith_; // The thread index }; -declarePtrList(PlayRecList, PlayRecord) - class PlayRecordSave { public: PlayRecordSave(PlayRecord*); @@ -111,7 +109,7 @@ class PlayRecordSave { class TvecRecord: public PlayRecord { public: - TvecRecord(Section*, IvocVect* tvec, Object* ppobj = nil); + TvecRecord(Section*, IvocVect* tvec, Object* ppobj = nullptr); virtual ~TvecRecord(); virtual void install(Cvode*); virtual void record_init(); @@ -131,7 +129,7 @@ class TvecRecord: public PlayRecord { class YvecRecord: public PlayRecord { public: - YvecRecord(double*, IvocVect* y, Object* ppobj = nil); + YvecRecord(neuron::container::data_handle, IvocVect* y, Object* ppobj = nullptr); virtual ~YvecRecord(); virtual void install(Cvode*); virtual void record_init(); @@ -150,7 +148,10 @@ class YvecRecord: public PlayRecord { class VecRecordDiscrete: public PlayRecord { public: - VecRecordDiscrete(double*, IvocVect* y, IvocVect* t, Object* ppobj = nil); + VecRecordDiscrete(neuron::container::data_handle, + IvocVect* y, + IvocVect* t, + Object* ppobj = nullptr); virtual ~VecRecordDiscrete(); virtual void install(Cvode*); virtual void record_init(); @@ -188,7 +189,10 @@ class VecRecordDiscreteSave: public PlayRecordSave { class VecRecordDt: public PlayRecord { public: - VecRecordDt(double*, IvocVect* y, double dt, Object* ppobj = nil); + VecRecordDt(neuron::container::data_handle, + IvocVect* y, + double dt, + Object* ppobj = nullptr); virtual ~VecRecordDt(); virtual void install(Cvode*); virtual void record_init(); @@ -222,8 +226,12 @@ class VecRecordDtSave: public PlayRecordSave { class VecPlayStep: public PlayRecord { public: - VecPlayStep(double*, IvocVect* y, IvocVect* t, double dt, Object* ppobj = nil); - VecPlayStep(const char* s, IvocVect* y, IvocVect* t, double dt, Object* ppobj = nil); + VecPlayStep(neuron::container::data_handle, + IvocVect* y, + IvocVect* t, + double dt, + Object* ppobj = nullptr); + VecPlayStep(const char* s, IvocVect* y, IvocVect* t, double dt, Object* ppobj = nullptr); void init(IvocVect* y, IvocVect* t, double dt); virtual ~VecPlayStep(); virtual void install(Cvode*); @@ -263,12 +271,16 @@ class VecPlayStepSave: public PlayRecordSave { class VecPlayContinuous: public PlayRecord { public: - VecPlayContinuous(double*, IvocVect* y, IvocVect* t, IvocVect* discon, Object* ppobj = nil); + VecPlayContinuous(neuron::container::data_handle pd, + IvocVect* y, + IvocVect* t, + IvocVect* discon, + Object* ppobj = nullptr); VecPlayContinuous(const char* s, IvocVect* y, IvocVect* t, IvocVect* discon, - Object* ppobj = nil); + Object* ppobj = nullptr); virtual ~VecPlayContinuous(); void init(IvocVect* y, IvocVect* t, IvocVect* tdiscon); virtual void install(Cvode*); diff --git a/src/nrniv/CMakeLists.txt b/src/nrniv/CMakeLists.txt index f03c95a470..48f1c16531 100644 --- a/src/nrniv/CMakeLists.txt +++ b/src/nrniv/CMakeLists.txt @@ -1,6 +1,7 @@ # ============================================================================= # Build nrniv binary and corresponding library # 
============================================================================= +include(${CODING_CONV_CMAKE}/build-time-copy.cmake) # Add directory-level default compiler flags -- these should be added to all NEURON targets, but not # targets from included projects like CoreNEURON and NMODL @@ -15,22 +16,27 @@ endif() # ============================================================================= # Build modlunit : Mod file units checker # ============================================================================= +set(BISON_FLEX_WORKING_DIR "${CMAKE_BINARY_DIR}") set(NRN_MODLUNIT_GEN "${CMAKE_CURRENT_BINARY_DIR}/modlunit_generated") file(MAKE_DIRECTORY "${NRN_MODLUNIT_GEN}") -file(RELATIVE_PATH NRN_MODLUNIT_SRC_REL "${NRN_MODLUNIT_GEN}" "${NRN_MODLUNIT_SRC_DIR}") +file(RELATIVE_PATH NRN_MODLUNIT_GEN_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_MODLUNIT_GEN}") +file(RELATIVE_PATH NRN_MODLUNIT_SRC_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_MODLUNIT_SRC_DIR}") # Run flex and bison with relative paths, so absolute paths are not present in the generated source -# file because of include, __FILE__ and so on. This improves ccache performance. +# file because of include, __FILE__ and so on. This improves ccache performance. To generate code +# coverage correctly, the *relative* paths that are present need to be with respect to the build +# directory. add_custom_command( OUTPUT "${NRN_MODLUNIT_GEN}/lex.cpp" - WORKING_DIRECTORY "${NRN_MODLUNIT_GEN}" - COMMAND "${FLEX_EXECUTABLE}" ARGS -o lex.cpp "${NRN_MODLUNIT_SRC_REL}/lex.lpp" + WORKING_DIRECTORY "${BISON_FLEX_WORKING_DIR}" + COMMAND "${FLEX_EXECUTABLE}" ARGS -o "${NRN_MODLUNIT_GEN_REL}/lex.cpp" + "${NRN_MODLUNIT_SRC_REL}/lex.lpp" DEPENDS "${NRN_MODLUNIT_SRC_DIR}/lex.lpp" COMMENT "[FLEX][modlunitlexer] Building scanner with flex ${FLEX_VERSION}") add_custom_command( OUTPUT "${NRN_MODLUNIT_GEN}/parse1.hpp" "${NRN_MODLUNIT_GEN}/parse1.cpp" - WORKING_DIRECTORY "${NRN_MODLUNIT_GEN}" - COMMAND "${BISON_EXECUTABLE}" ARGS --defines=parse1.hpp -o parse1.cpp - "${NRN_MODLUNIT_SRC_REL}/parse1.ypp" + WORKING_DIRECTORY "${BISON_FLEX_WORKING_DIR}" + COMMAND "${BISON_EXECUTABLE}" ARGS "--defines=${NRN_MODLUNIT_GEN_REL}/parse1.hpp" -o + "${NRN_MODLUNIT_GEN_REL}/parse1.cpp" "${NRN_MODLUNIT_SRC_REL}/parse1.ypp" DEPENDS "${NRN_MODLUNIT_SRC_DIR}/parse1.ypp" COMMENT "[BISON][modlunitparser] Building parser with bison ${BISON_VERSION}") @@ -54,25 +60,26 @@ endif() # ============================================================================= set(NRN_NMODL_GEN "${CMAKE_CURRENT_BINARY_DIR}/nocmodl_generated") file(MAKE_DIRECTORY "${NRN_NMODL_GEN}") -file(RELATIVE_PATH NRN_NMODL_SRC_REL "${NRN_NMODL_GEN}" "${NRN_NMODL_SRC_DIR}") +file(RELATIVE_PATH NRN_NMODL_GEN_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_NMODL_GEN}") +file(RELATIVE_PATH NRN_NMODL_SRC_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_NMODL_SRC_DIR}") add_custom_command( OUTPUT "${NRN_NMODL_GEN}/lex.cpp" - WORKING_DIRECTORY "${NRN_NMODL_GEN}" - COMMAND "${FLEX_EXECUTABLE}" ARGS -o lex.cpp "${NRN_NMODL_SRC_REL}/lex.lpp" + WORKING_DIRECTORY "${BISON_FLEX_WORKING_DIR}" + COMMAND "${FLEX_EXECUTABLE}" ARGS -o "${NRN_NMODL_GEN_REL}/lex.cpp" "${NRN_NMODL_SRC_REL}/lex.lpp" DEPENDS "${NRN_NMODL_SRC_DIR}/lex.lpp" COMMENT "[FLEX][nocmodllexer] Building scanner with flex ${FLEX_VERSION}") add_custom_command( OUTPUT "${NRN_NMODL_GEN}/parse1.hpp" "${NRN_NMODL_GEN}/parse1.cpp" - WORKING_DIRECTORY "${NRN_NMODL_GEN}" - COMMAND "${BISON_EXECUTABLE}" ARGS --defines=parse1.hpp -o parse1.cpp - "${NRN_NMODL_SRC_REL}/parse1.ypp" + WORKING_DIRECTORY 
"${BISON_FLEX_WORKING_DIR}" + COMMAND "${BISON_EXECUTABLE}" ARGS "--defines=${NRN_NMODL_GEN_REL}/parse1.hpp" -o + "${NRN_NMODL_GEN_REL}/parse1.cpp" "${NRN_NMODL_SRC_REL}/parse1.ypp" DEPENDS "${NRN_NMODL_SRC_DIR}/parse1.ypp" COMMENT "[BISON][nocmodlparser] Building parser with bison ${BISON_VERSION}") add_custom_command( OUTPUT "${NRN_NMODL_GEN}/diffeq.hpp" "${NRN_NMODL_GEN}/diffeq.cpp" - WORKING_DIRECTORY "${NRN_NMODL_GEN}" - COMMAND "${BISON_EXECUTABLE}" ARGS --defines=diffeq.hpp -o diffeq.cpp - "${NRN_NMODL_SRC_REL}/diffeq.ypp" + WORKING_DIRECTORY "${BISON_FLEX_WORKING_DIR}" + COMMAND "${BISON_EXECUTABLE}" ARGS "--defines=${NRN_NMODL_GEN_REL}/diffeq.hpp" -o + "${NRN_NMODL_GEN_REL}/diffeq.cpp" "${NRN_NMODL_SRC_REL}/diffeq.ypp" DEPENDS "${NRN_NMODL_SRC_DIR}/diffeq.ypp" COMMENT "[BISON][nocmodlparser] Building parser with bison ${BISON_VERSION}") @@ -84,11 +91,15 @@ add_dependencies(generated_source_files nocmodl_generated_files) add_executable(nocmodl ${NRN_NMODL_SRC_FILES} "${NRN_NMODL_GEN}/lex.cpp" "${NRN_NMODL_GEN}/parse1.cpp" "${NRN_NMODL_GEN}/diffeq.cpp") cpp_cc_configure_sanitizers(TARGET nocmodl) -target_compile_definitions(nocmodl PRIVATE COMPILE_DEFINITIONS NMODL=1 CVODE=1 NRN_DYNAMIC_UNITS=1) +target_compile_definitions(nocmodl PRIVATE COMPILE_DEFINITIONS NMODL=1 CVODE=1) # Otherwise the generated code in the binary directory does not find headers in the modlunit source # directory and the source files in the source directory do not find generated headers in the binary -# directory. -target_include_directories(nocmodl PRIVATE "${NRN_NMODL_GEN}" "${NRN_NMODL_SRC_DIR}") +# directory. TODO: (see also coreneuron) fix adding a dependency on CLI11::CLI11 when CLI11 is a +# submodule. Right now this doesn't work because the CLI11 targets are not exported/installed but +# coreneuron-core is. +get_target_property(CLI11_HEADER_DIRECTORY CLI11::CLI11 INTERFACE_INCLUDE_DIRECTORIES) +target_include_directories(nocmodl PRIVATE "${NRN_NMODL_GEN}" "${NRN_NMODL_SRC_DIR}" + "${CLI11_HEADER_DIRECTORY}") if(NRN_NMODL_CXX_FLAGS) target_compile_options(nocmodl PRIVATE ${NRN_NMODL_CXX_FLAGS}) endif() @@ -98,13 +109,18 @@ endif() # ============================================================================= foreach(modfile ${NRN_MODFILE_BASE_NAMES}) nocmodl_mod_to_cpp(${modfile}) - set_property( - SOURCE ${modfile}.cpp - APPEND - PROPERTY COMPILE_OPTIONS ${NRN_EXTRA_MECH_CXX_FLAGS}) list(APPEND NRN_MODFILE_CPP ${PROJECT_BINARY_DIR}/${modfile}.cpp) endforeach() - +set_property( + SOURCE ${NRN_MODFILE_CPP} + APPEND + PROPERTY COMPILE_OPTIONS ${NRN_EXTRA_MECH_CXX_FLAGS}) +# we are basically emulating nrnivmodl with CMake here. nrnivmodl uses headers from the include +# directory in the build or install directory. 
+set_property( + SOURCE ${NRN_MODFILE_CPP} + APPEND + PROPERTY INCLUDE_DIRECTORIES ${PROJECT_BINARY_DIR}/include) set_source_files_properties(${NRN_MODFILE_CPP} PROPERTIES GENERATED TRUE) # ============================================================================= @@ -112,11 +128,13 @@ set_source_files_properties(${NRN_MODFILE_CPP} PROPERTIES GENERATED TRUE) # ============================================================================= set(NRN_OC_GEN "${CMAKE_CURRENT_BINARY_DIR}/oc_generated") file(MAKE_DIRECTORY "${NRN_OC_GEN}") -file(RELATIVE_PATH NRN_OC_SRC_REL "${NRN_OC_GEN}" "${NRN_OC_SRC_DIR}") +file(RELATIVE_PATH NRN_OC_GEN_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_OC_GEN}") +file(RELATIVE_PATH NRN_OC_SRC_REL "${BISON_FLEX_WORKING_DIR}" "${NRN_OC_SRC_DIR}") add_custom_command( OUTPUT "${NRN_OC_GEN}/parse.hpp" "${NRN_OC_GEN}/parse.cpp" - WORKING_DIRECTORY "${NRN_OC_GEN}" - COMMAND "${BISON_EXECUTABLE}" ARGS --defines=parse.hpp -o parse.cpp "${NRN_OC_SRC_REL}/parse.ypp" + WORKING_DIRECTORY "${BISON_FLEX_WORKING_DIR}" + COMMAND "${BISON_EXECUTABLE}" ARGS "--defines=${NRN_OC_GEN_REL}/parse.hpp" -o + "${NRN_OC_GEN_REL}/parse.cpp" "${NRN_OC_SRC_REL}/parse.ypp" DEPENDS "${NRN_OC_SRC_DIR}/parse.ypp" COMMENT "[BISON][ocparser] Building parser with bison ${BISON_VERSION}") add_custom_target(oc_generated_files DEPENDS "${NRN_OC_GEN}/parse.hpp" "${NRN_OC_GEN}/parse.cpp") @@ -135,12 +153,9 @@ set(NRN_NRNIV_LIB_SRC_FILES ${NRN_NRNIV_SRC_FILES} ${NRN_NRNOC_SRC_FILES} ${NRN_OC_SRC_FILES} - ${NRN_MESCH_SRC_FILES} ${NRN_MODFILE_CPP} - ${NRN_NRNGNU_SRC_FILES} ${NRN_SCOPMATH_SRC_FILES} ${NRN_SPARSE_SRC_FILES} - ${NRN_SPARSE13_SRC_FILES} ${NRN_SUNDIALS_SRC_FILES}) if(NRN_ENABLE_MPI) @@ -196,10 +211,8 @@ set(NRN_INCLUDE_DIRS ${PROJECT_BINARY_DIR}/src/parallel ${PROJECT_BINARY_DIR}/src/sundials ${PROJECT_BINARY_DIR}/src/sundials/shared - ${PROJECT_SOURCE_DIR}/external/Random123/include ${PROJECT_SOURCE_DIR}/src ${PROJECT_SOURCE_DIR}/src/gnu - ${PROJECT_SOURCE_DIR}/src/mesch ${PROJECT_SOURCE_DIR}/src/nrncvode ${PROJECT_SOURCE_DIR}/src/nrnmpi ${PROJECT_SOURCE_DIR}/src/nrnpython @@ -249,7 +262,7 @@ add_custom_command( COMMAND ${CMAKE_C_COMPILER} -E -I${NRN_NRNOC_SRC_DIR} -I${NRN_OC_SRC_DIR} ${NRN_NRNOC_SRC_DIR}/neuron.h > neuron.tmp1 COMMAND sed "/^#/d" neuron.tmp1 > neuron.tmp2 - COMMAND ${PYTHON_EXECUTABLE} ${NRN_OC_SRC_DIR}/mk_hocusr_h.py < neuron.tmp2 > + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${NRN_OC_SRC_DIR}/mk_hocusr_h.py < neuron.tmp2 > ${PROJECT_BINARY_DIR}/src/oc/hocusr.h DEPENDS ${NRN_NRNOC_SRC_DIR}/neuron.h ${NRN_OC_SRC_DIR}/mk_hocusr_h.py) add_custom_target(generate_hocusr_header DEPENDS "${PROJECT_BINARY_DIR}/src/oc/hocusr.h") @@ -273,11 +286,6 @@ set_property( APPEND PROPERTY COMPILE_DEFINITIONS OOP=1 HOC=1 INTERVIEWS=1) -set_property( - SOURCE ${PROJECT_SOURCE_DIR}/src/nrniv/nrnpy.cpp - APPEND - PROPERTY COMPILE_DEFINITIONS USE_LIBNRNPYTHON_MAJORMINOR=${USE_LIBNRNPYTHON_MAJORMINOR}) - set_property( SOURCE ${NRN_IVOC_SRC_FILES} APPEND @@ -317,12 +325,6 @@ if(NRN_USE_BACKWARD) endif() if(NRN_HAVE_NVHPC_COMPILER) - # NVHPC/21.7 cannot compile znorm.c with -O2 or above. See also: - # https://forums.developer.nvidia.com/t/nvc-21-7-regression-internal-compiler-error-can-only-coerce-indirect-args/184847 - if(${CMAKE_CXX_COMPILER_VERSION} VERSION_EQUAL 21.7) - set_source_files_properties(${PROJECT_SOURCE_DIR}/src/mesch/znorm.c PROPERTIES COMPILE_OPTIONS - -Mnovect) - endif() # For NVHPC we will rely on FE exceptions as opposed to errno in order to make use of faster # builtins. 
One caveat is that if we use an optimization level greater than -O1, the FE exception # is not raised. See https://github.com/neuronsimulator/nrn/pull/1930 @@ -332,12 +334,32 @@ if(NRN_HAVE_NVHPC_COMPILER) endif() set_source_files_properties( ${PROJECT_SOURCE_DIR}/src/oc/math.cpp - PROPERTIES COMPILE_DEFINITIONS NVHPC_CHECK_FE_EXCEPTIONS=1 COMPILE_OPTIONS - "${NVHPC_MATH_COMPILE_OPTIONS}") + PROPERTIES COMPILE_DEFINITIONS NRN_CHECK_FE_EXCEPTIONS=1 COMPILE_OPTIONS + "${NVHPC_MATH_COMPILE_OPTIONS}") # Versions of nvc++ around ~22.5 up to at least 23.1 have problems with this file see # https://github.com/BlueBrain/CoreNeuron/issues/888 set_source_files_properties(${PROJECT_SOURCE_DIR}/src/ivoc/ivocvect.cpp PROPERTIES COMPILE_OPTIONS "-O1") +elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "IntelLLVM") + # Tested with icpx 2022.2.1: + # + # * With -fp-model=fast (the default...), icpx does not set the C++11 macro math_errhandling. + # * With -fp-model=fast and anything but -O0, icpx does not set floating point exception bits. + # * With -fp-model={precise,strict}, floating point exceptions are set at all optimisation levels. + # * With -fp-model=precise and anything but -O0, -fno-math-errno also disables exceptions. + # * When defined, math_errhandling always contains both MATH_ERRNO and MATH_ERREXCEPT. + # + # Based on all of this, forcing strict mode and forcing floating point exceptions to be used (with + # NRN_CHECK_FE_EXCEPTIONS) seems that it should work at all optimisation levels. We might as well + # also pass -fno-math-errno because we will not be checking errno. + set_property( + SOURCE ${PROJECT_SOURCE_DIR}/src/oc/math.cpp + APPEND + PROPERTY COMPILE_DEFINITIONS NRN_CHECK_FE_EXCEPTIONS) + set_property( + SOURCE ${PROJECT_SOURCE_DIR}/src/oc/math.cpp + APPEND + PROPERTY COMPILE_OPTIONS "-fp-model=strict" "-fno-math-errno") elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel") # When optimisation is enabled then icpc apparently does not set errno set_property( @@ -356,6 +378,8 @@ if(NRN_ENABLE_MPI_DYNAMIC) PROPERTIES OBJECT_DEPENDS ${NRNMPI_DYNAMIC_INCLUDE_FILE}) set_source_files_properties(${PROJECT_SOURCE_DIR}/src/nrnmpi/nrnmpi.cpp PROPERTIES OBJECT_DEPENDS ${NRNMPI_DYNAMIC_INCLUDE_FILE}) + set_source_files_properties(${PROJECT_SOURCE_DIR}/src/nrnmpi/memory_usage.cpp + PROPERTIES OBJECT_DEPENDS ${NRNMPI_DYNAMIC_INCLUDE_FILE}) set_source_files_properties(${PROJECT_SOURCE_DIR}/src/nrnmpi/bbsmpipack.cpp PROPERTIES OBJECT_DEPENDS ${NRNMPI_DYNAMIC_INCLUDE_FILE}) endif() @@ -378,20 +402,19 @@ endif() # All source directories to include # ============================================================================= include_directories(${NRN_INCLUDE_DIRS}) -if(NRN_ENABLE_PYTHON) - include_directories(${PYTHON_INCLUDE_DIRS}) -endif() # ============================================================================= # All source directories to include # ============================================================================= add_library(nrniv_lib ${NRN_LIBRARY_TYPE} ${NRN_NRNIV_LIB_SRC_FILES}) +target_link_libraries(nrniv_lib nrngnu) +target_link_libraries(nrniv_lib sparse13) +target_include_directories(nrniv_lib SYSTEM PUBLIC ${PROJECT_SOURCE_DIR}/${NRN_3RDPARTY_DIR}/eigen) cpp_cc_configure_sanitizers(TARGET nrniv_lib) # Source-directory .cpp needs to find generated .hpp. 
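Returning to the floating-point error-handling change above: oc/math.cpp is now built with NRN_CHECK_FE_EXCEPTIONS (and, for IntelLLVM, `-fp-model=strict` plus `-fno-math-errno`), because errno is unreliable under fast-math while the FE exception flags can be made dependable. A plain ISO C++ sketch of testing those flags via <cfenv> instead of errno (illustrative only, not NEURON's actual math.cpp code):

```cpp
#include <cfenv>
#include <cmath>
#include <iostream>

int main() {
    // Relies on the translation unit being compiled with strict enough
    // floating-point options (cf. -fp-model=strict above) so the flags are raised.
    std::feclearexcept(FE_ALL_EXCEPT);
    double x = std::log(0.0);  // pole error: returns -inf and raises FE_DIVBYZERO
    if (std::fetestexcept(FE_DIVBYZERO | FE_INVALID | FE_OVERFLOW)) {
        std::cout << "math error detected via fetestexcept, x = " << x << '\n';
    } else {
        std::cout << "no FE flags set; only errno-based reporting available\n";
    }
}
```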
target_include_directories(nrniv_lib PUBLIC "${NRN_OC_GEN}") -# See: https://en.cppreference.com/w/cpp/filesystem#Notes -if(CMAKE_CXX_COMPILER_ID STREQUAL "GNU" AND CMAKE_CXX_COMPILER_VERSION VERSION_LESS 9.1) - target_link_libraries(nrniv_lib stdc++fs) +if(NRN_ENABLE_PYTHON AND NOT NRN_ENABLE_PYTHON_DYNAMIC) + target_include_directories(nrniv_lib PUBLIC "${NRN_DEFAULT_PYTHON_INCLUDES}") endif() if(NRN_ENABLE_THREADS) target_link_libraries(nrniv_lib Threads::Threads) @@ -417,7 +440,7 @@ if(NRN_ENABLE_MUSIC AND NOT NRN_ENABLE_MPI_DYNAMIC) endif() if(NRN_ENABLE_PROFILING) - target_link_libraries(nrniv_lib ${likwid_LIBRARIES} ${CALIPER_LIB}) + target_link_libraries(nrniv_lib ${likwid_LIBRARIES} ${CALIPER_LIB} ${LIKWID_LIB}) endif() set_property(TARGET nrniv_lib PROPERTY OUTPUT_NAME nrniv) @@ -433,7 +456,7 @@ endif() # Link with all libraries # ============================================================================= if(NRN_ENABLE_PYTHON AND NOT NRN_ENABLE_PYTHON_DYNAMIC) - target_link_libraries(nrniv_lib ${PYTHON_LIBRARIES}) + target_link_libraries(nrniv_lib ${NRN_DEFAULT_PYTHON_LIBRARIES}) endif() if(NRN_ENABLE_MPI) @@ -478,7 +501,7 @@ if(NRN_ENABLE_INTERVIEWS) include_directories(${IV_INCLUDE_DIR}) target_link_libraries(nrniv_lib interviews) else() - include_directories(nrniv_lib ${NRN_IVOS_SRC_DIR} ${PROJECT_BINARY_DIR}/src/ivos) + target_include_directories(nrniv_lib PUBLIC ${NRN_IVOS_SRC_DIR} ${PROJECT_BINARY_DIR}/src/ivos) endif() if(IV_ENABLE_X11_DYNAMIC) @@ -558,14 +581,14 @@ endif() # ============================================================================= # For testneuron CTest -file(COPY ${PROJECT_SOURCE_DIR}/src/ivoc/nrnmain.cpp DESTINATION ${CMAKE_BINARY_DIR}/share/nrn) -file(COPY ${PROJECT_BINARY_DIR}/src/oc/nrnmpiuse.h DESTINATION ${CMAKE_BINARY_DIR}/include) -file(COPY ${PROJECT_BINARY_DIR}/src/nrncvode/nrnneosm.h - DESTINATION ${CMAKE_BINARY_DIR}/include/nrncvode) -file(COPY ${PROJECT_BINARY_DIR}/nrnconf.h DESTINATION ${CMAKE_BINARY_DIR}/include) +cpp_cc_build_time_copy(INPUT ${PROJECT_SOURCE_DIR}/src/ivoc/nrnmain.cpp + OUTPUT ${CMAKE_BINARY_DIR}/share/nrn/nrnmain.cpp) +cpp_cc_build_time_copy(INPUT ${PROJECT_BINARY_DIR}/src/oc/nrnmpiuse.h + OUTPUT ${CMAKE_BINARY_DIR}/include/nrnmpiuse.h) +cpp_cc_build_time_copy(INPUT ${PROJECT_BINARY_DIR}/src/nrncvode/nrnneosm.h + OUTPUT ${CMAKE_BINARY_DIR}/include/nrncvode/nrnneosm.h) +cpp_cc_build_time_copy(INPUT ${PROJECT_BINARY_DIR}/nrnconf.h + OUTPUT ${CMAKE_BINARY_DIR}/include/nrnconf.h) # For the installation install(FILES ${PROJECT_SOURCE_DIR}/src/ivoc/nrnmain.cpp DESTINATION share/nrn) -install(FILES ${PROJECT_BINARY_DIR}/src/oc/nrnmpiuse.h DESTINATION include) -install(FILES ${PROJECT_BINARY_DIR}/src/nrncvode/nrnneosm.h DESTINATION include/nrncvode) -install(FILES ${PROJECT_BINARY_DIR}/nrnconf.h DESTINATION include) diff --git a/src/nrniv/arraypool.h b/src/nrniv/arraypool.h index 3ad2e33586..7b0051e29c 100644 --- a/src/nrniv/arraypool.h +++ b/src/nrniv/arraypool.h @@ -1,5 +1,5 @@ -#ifndef arraypool_h -#define arraypool_h +#pragma once +#include "oc_ansi.h" // nrn_cacheline_calloc // create and manage a vector of arrays as a memory pool of those arrays // the idea is to allow the possibility of some extra cache efficiency @@ -183,5 +183,3 @@ void ArrayPool::free_all() { assert(put_ == count_); put_ = 0; } - -#endif diff --git a/src/nrniv/bbsavestate.cpp b/src/nrniv/bbsavestate.cpp index b97ab3720e..7633c4a1b5 100644 --- a/src/nrniv/bbsavestate.cpp +++ b/src/nrniv/bbsavestate.cpp @@ -172,6 +172,7 @@ callback to bbss_early 
when needed. #include "ndatclas.h" #include "nrncvode.h" #include "nrnoc2iv.h" +#include "nrnran123.h" #include "ocfile.h" #include #include @@ -208,7 +209,6 @@ extern NetCvode* net_cvode_instance; extern TQueue* net_cvode_instance_event_queue(NrnThread*); extern cTemplate** nrn_pnt_template_; extern hoc_Item* net_cvode_instance_psl(); -extern PlayRecList* net_cvode_instance_prl(); extern void nrn_netcon_event(NetCon*, double); extern double t; typedef void (*PFIO)(int, Object*); @@ -313,6 +313,8 @@ class BBSS_Cnt: public BBSS_IO { virtual void i(int& j, int chk = 0) override; virtual void d(int n, double& p) override; virtual void d(int n, double* p) override; + virtual void d(int n, double** p) override; + virtual void d(int n, neuron::container::data_handle h) override; virtual void s(char* cp, int chk = 0) override; virtual Type type() override; int bytecnt(); @@ -338,6 +340,14 @@ void BBSS_Cnt::d(int n, double* p) { nd += n; ++nl; } +void BBSS_Cnt::d(int n, double**) { + nd += n; + ++nl; +} +void BBSS_Cnt::d(int n, neuron::container::data_handle) { + nd += n; + ++nl; +} void BBSS_Cnt::s(char* cp, int chk) { ns += strlen(cp) + 1; } @@ -361,6 +371,8 @@ class BBSS_TxtFileOut: public BBSS_IO { virtual void i(int& j, int chk = 0) override; virtual void d(int n, double& p) override; virtual void d(int n, double* p) override; + virtual void d(int n, double** p) override; + virtual void d(int n, neuron::container::data_handle h) override; virtual void s(char* cp, int chk = 0) override; virtual Type type() override; FILE* f; @@ -384,6 +396,17 @@ void BBSS_TxtFileOut::d(int n, double* p) { } fprintf(f, "\n"); } +void BBSS_TxtFileOut::d(int n, double** p) { + for (int i = 0; i < n; ++i) { + fprintf(f, " %22.15g", *p[i]); + } + fprintf(f, "\n"); +} +void BBSS_TxtFileOut::d(int n, neuron::container::data_handle h) { + assert(n == 1); // Cannot read n values "starting at" a data handle + assert(h); + fprintf(f, " %22.15g\n", *h); +} void BBSS_TxtFileOut::s(char* cp, int chk) { fprintf(f, "%s\n", cp); } @@ -400,6 +423,8 @@ class BBSS_TxtFileIn: public BBSS_IO { d(n, &p); } virtual void d(int n, double* p) override; + virtual void d(int n, double** p) override; + virtual void d(int n, neuron::container::data_handle h) override; virtual void s(char* cp, int chk = 0) override; virtual Type type() override { return BBSS_IO::IN; @@ -431,6 +456,19 @@ void BBSS_TxtFileIn::d(int n, double* p) { } nrn_assert(fscanf(f, "\n") == 0); } +void BBSS_TxtFileIn::d(int n, double** p) { + for (int i = 0; i < n; ++i) { + nrn_assert(fscanf(f, " %lf", p[i]) == 1); + } + nrn_assert(fscanf(f, "\n") == 0); +} +void BBSS_TxtFileIn::d(int n, neuron::container::data_handle h) { + assert(n == 1); + assert(h); + double v{}; + nrn_assert(fscanf(f, " %lf\n", &v) == 1); + *h = v; +} void BBSS_TxtFileIn::s(char* cp, int chk) { char buf[100]; nrn_assert(fscanf(f, "%[^\n]\n", buf) == 1); @@ -452,6 +490,8 @@ class BBSS_BufferOut: public BBSS_IO { virtual void i(int& j, int chk = 0) override; virtual void d(int n, double& p) override; virtual void d(int n, double* p) override; + virtual void d(int n, double** p) override; + virtual void d(int n, neuron::container::data_handle h) override; virtual void s(char* cp, int chk = 0) override; virtual Type type() override; virtual void a(int); @@ -475,6 +515,15 @@ void BBSS_BufferOut::d(int n, double& d) { void BBSS_BufferOut::d(int n, double* d) { cpy(n * sizeof(double), (char*) d); } +void BBSS_BufferOut::d(int n, double** d) { + for (auto i = 0; i < n; ++i) { + cpy(sizeof(double), 
reinterpret_cast(d[i])); + } +} +void BBSS_BufferOut::d(int n, neuron::container::data_handle h) { + assert(n == 1); + cpy(sizeof(double), reinterpret_cast(static_cast(h))); +} void BBSS_BufferOut::s(char* cp, int chk) { cpy(strlen(cp) + 1, cp); } @@ -966,28 +1015,26 @@ void BBSaveState_reg() { // from savstate.cpp struct StateStructInfo { - int offset; - int size; - Symbol* callback; + int offset{-1}; + int size{}; + Symbol* callback{nullptr}; }; static StateStructInfo* ssi; static cTemplate* nct; static void ssi_def() { + assert(!ssi); if (nct) { return; } Symbol* s = hoc_lookup("NetCon"); nct = s->u.ctemplate; - ssi = new StateStructInfo[n_memb_func]; + ssi = new StateStructInfo[n_memb_func]{}; int sav = v_structure_change; for (int im = 0; im < n_memb_func; ++im) { - ssi[im].offset = -1; - ssi[im].size = 0; - ssi[im].callback = 0; if (!memb_func[im].sym) { continue; } - NrnProperty* np = new NrnProperty(memb_func[im].sym->name); + NrnProperty np{memb_func[im].sym->name}; // generally we only save STATE variables. However for // models containing a NET_RECEIVE block, we also need to // save everything except the parameters @@ -998,14 +1045,16 @@ static void ssi_def() { // param array including PARAMETERs. if (pnt_receive[im]) { ssi[im].offset = 0; - ssi[im].size = np->prop()->param_size; + ssi[im].size = np.prop()->param_size(); // sum over array dims } else { - int type = STATE; - for (Symbol* sym = np->first_var(); np->more_var(); sym = np->next_var()) { - if (np->var_type(sym) == type || np->var_type(sym) == STATE || - sym->subtype == _AMBIGUOUS) { + for (Symbol* sym = np.first_var(); np.more_var(); sym = np.next_var()) { + if (np.var_type(sym) == STATE || sym->subtype == _AMBIGUOUS) { if (ssi[im].offset < 0) { - ssi[im].offset = np->prop_index(sym); + ssi[im].offset = np.prop_index(sym); + } else { + // assert what we assume: that after this code the variables we want are + // `size` contiguous legacy indices starting at `offset` + assert(ssi[im].offset + ssi[im].size == np.prop_index(sym)); } ssi[im].size += hoc_total_array_data(sym, 0); } @@ -1028,7 +1077,6 @@ static void ssi_def() { // printf("callback %s\n", ssi[im].callback->name); //} } - delete np; } // Following set to 1 when NrnProperty constructor calls prop_alloc. // so set back to original value. @@ -1928,7 +1976,7 @@ void BBSaveState::node(Node* nd) { } int i; Prop* p; - f->d(1, NODEV(nd)); + f->d(1, nd->v_handle()); // count // On restore, new point processes may have been inserted in // the section and marked IGNORE. So we need to count only the @@ -1967,7 +2015,7 @@ void BBSaveState::node01(Section* sec, Node* nd) { // It is not clear why the zero area node voltages need to be saved. // Without them, we get correct simulations after a restore for // whole cells but not for split cells. 
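The bbsavestate.cpp hunks above extend the BBSS_IO interface with d(int, double**) and d(int, neuron::container::data_handle) overloads, so the same model traversal can count, write or read state values that are no longer guaranteed to be contiguous in memory. A minimal sketch of that visitor pattern, illustrative only and not part of the patch (names invented, only two concrete sinks shown):

#include <cstddef>
#include <cstdio>

struct ToyIO {                                   // stand-in for BBSS_IO
    virtual ~ToyIO() = default;
    virtual void d(int n, double* p) = 0;        // n contiguous values
    virtual void d(int n, double** p) = 0;       // n scattered values
};

struct ToyCount: ToyIO {                         // counts, like BBSS_Cnt
    std::size_t nd{};
    void d(int n, double*) override { nd += n; }
    void d(int n, double**) override { nd += n; }
};

struct ToyTextOut: ToyIO {                       // writes, like BBSS_TxtFileOut
    FILE* f{};
    void d(int n, double* p) override {
        for (int i = 0; i < n; ++i) std::fprintf(f, " %22.15g", p[i]);
        std::fprintf(f, "\n");
    }
    void d(int n, double** p) override {
        for (int i = 0; i < n; ++i) std::fprintf(f, " %22.15g", *p[i]);
        std::fprintf(f, "\n");
    }
};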
- f->d(1, NODEV(nd)); + f->d(1, nd->v_handle()); // count for (i = 0, p = nd->prop; p; p = p->next) { if (memb_func[p->_type].is_point) { @@ -2007,7 +2055,43 @@ void BBSaveState::mech(Prop* p) { char buf[100]; Sprintf(buf, "//%s", memb_func[type].sym->name); f->s(buf, 1); - f->d(ssi[p->_type].size, p->param + ssi[p->_type].offset); + { + auto const size = ssi[p->_type].size; // sum over array dimensions for range variables + auto& random_indices = nrn_mech_random_indices(p->_type); + auto size_random = random_indices.size(); + std::vector tmp{}; + tmp.reserve(size + size_random); + for (auto i = 0; i < size; ++i) { + tmp.push_back(static_cast(p->param_handle_legacy(ssi[p->_type].offset + i))); + } + + // read or write the RANDOM 34 sequence values by pointing last + // size_random tmp elements to seq34 double slots. + std::vector seq34(size_random, 0); + for (auto i = 0; i < size_random; ++i) { + tmp.push_back(static_cast(&seq34[i])); + } + // if writing, nrnran123_getseq into seq34 + if (f->type() == BBSS_IO::OUT) { // save + for (auto i = 0; i < size_random; ++i) { + uint32_t seq{}; + char which{}; + auto& datum = p->dparam[random_indices[i]]; + nrnran123_State* n123s = (nrnran123_State*) datum.get(); + nrnran123_getseq(n123s, &seq, &which); + seq34[i] = 4.0 * double(seq) + double(which); + } + } + f->d(size + size_random, tmp.data()); + // if reading, seq34 into nrnran123_setseq + if (f->type() == BBSS_IO::IN) { // restore + for (auto i = 0; i < size_random; ++i) { + auto& datum = p->dparam[random_indices[i]]; + nrnran123_State* n123s = (nrnran123_State*) datum.get(); + nrnran123_setseq(n123s, seq34[i]); + } + } + } Point_process* pp{}; if (memb_func[p->_type].is_point) { pp = p->dparam[1].get(); diff --git a/src/nrniv/bbsavestate.h b/src/nrniv/bbsavestate.h index ad16ead53e..1b537f534a 100644 --- a/src/nrniv/bbsavestate.h +++ b/src/nrniv/bbsavestate.h @@ -1,5 +1,6 @@ #ifndef bbsavestate_h #define bbsavestate_h +#include "neuron/container/data_handle.hpp" struct Object; struct Section; @@ -17,6 +18,8 @@ class BBSS_IO { virtual void i(int& j, int chk = 0) = 0; virtual void d(int n, double& p) = 0; virtual void d(int n, double* p) = 0; + virtual void d(int n, double** p) = 0; + virtual void d(int n, neuron::container::data_handle h) = 0; virtual void s(char* cp, int chk = 0) = 0; virtual Type type() = 0; virtual void skip(int) {} // only when reading diff --git a/src/nrniv/cachevec.cpp b/src/nrniv/cachevec.cpp deleted file mode 100644 index ca512482a1..0000000000 --- a/src/nrniv/cachevec.cpp +++ /dev/null @@ -1,118 +0,0 @@ -#include <../../nrnconf.h> - -// some functions needed by CACHEVEC. mostly to steer to the various update_ptrs -// methods. 
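In BBSaveState::mech() above, each RANDOM stream position is appended to the saved values as a single double equal to 4*seq + which, and is split apart again on restore before calling nrnran123_setseq. A small arithmetic sketch of that packing (helper names invented; the patch itself calls nrnran123_getseq/nrnran123_setseq directly):

#include <cstdint>

// Pack a Random123 stream position (counter `seq` plus sub-index `which`,
// which selects one of the four values produced per counter) into one double.
double pack_seq34(std::uint32_t seq, char which) {
    return 4.0 * static_cast<double>(seq) + static_cast<double>(which);
}

// Inverse used on restore; exact because 4 * 2^32 is far below 2^53.
void unpack_seq34(double seq34, std::uint32_t& seq, char& which) {
    auto const v = static_cast<std::uint64_t>(seq34);
    seq = static_cast<std::uint32_t>(v / 4);
    which = static_cast<char>(v % 4);
}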
- -#if HAVE_IV -#include -#include -#include -#include -#endif - -#include -#include -#include -#include -#include -#include -#include -#include -#include -#include "treeset.h" - -void nrniv_recalc_ptrs(); -extern NetCvode* net_cvode_instance; - -extern void nrn_linmod_update_ptrs(void*); - -static Symbol* grsym_; -static Symbol* ptrsym_; -static Symbol* lmsym_; -static Symbol* pshpsym_; -static Symbol* ptrvecsym_; - -void nrniv_recalc_ptrs() { - // PlayRecord and PreSyn pointers - net_cvode_instance->recalc_ptrs(); - hoc_List* hl; - hoc_Item* q; -#if HAVE_IV - // update pointers used by Graph - if (!grsym_) { - grsym_ = hoc_lookup("Graph"); - assert(grsym_->type == TEMPLATE); - } - hl = grsym_->u.ctemplate->olist; - ITERATE(q, hl) { - Object* obj = OBJ(q); - Graph* g = (Graph*) obj->u.this_pointer; - if (g) { - g->update_ptrs(); - } - } - // update pointers used by PlotShape - if (!pshpsym_) { - pshpsym_ = hoc_lookup("PlotShape"); - assert(pshpsym_->type == TEMPLATE); - } - hl = pshpsym_->u.ctemplate->olist; - ITERATE(q, hl) { - Object* obj = OBJ(q); - ShapePlot* ps = (ShapePlot*) obj->u.this_pointer; - if (ps) { - ps->update_ptrs(); - } - } - // update pointers used by xpanel - HocPanel::update_ptrs(); -#endif - // update pointers used by Pointer - if (!ptrsym_) { - ptrsym_ = hoc_lookup("Pointer"); - assert(ptrsym_->type == TEMPLATE); - } - hl = ptrsym_->u.ctemplate->olist; - ITERATE(q, hl) { - Object* obj = OBJ(q); - OcPointer* op = (OcPointer*) obj->u.this_pointer; - if (op && op->p_) { - double* pd = nrn_recalc_ptr(op->p_); - if (op->p_ != pd) { - nrn_notify_pointer_disconnect(op); - op->p_ = pd; - op->valid_ = true; - nrn_notify_when_double_freed(op->p_, op); - } - } - } - // update what LinearMechanisms are observing - if (!lmsym_) { - lmsym_ = hoc_lookup("LinearMechanism"); - assert(lmsym_->type == TEMPLATE); - } - hl = lmsym_->u.ctemplate->olist; - ITERATE(q, hl) { - Object* obj = OBJ(q); - void* pt = (void*) obj->u.this_pointer; - if (pt) { - nrn_linmod_update_ptrs(pt); - } - } -} - -void nrn_recalc_ptrvector() { - // update pointers used by PtrVector - hoc_List* hl; - hoc_Item* q; - if (!ptrvecsym_) { - ptrvecsym_ = hoc_lookup("PtrVector"); - assert(ptrvecsym_->type == TEMPLATE); - } - hl = ptrvecsym_->u.ctemplate->olist; - ITERATE(q, hl) { - Object* obj = OBJ(q); - OcPtrVector* op = (OcPtrVector*) obj->u.this_pointer; - op->ptr_update(); - } -} diff --git a/src/nrniv/cxprop.cpp b/src/nrniv/cxprop.cpp index fdc7356b18..08921d655b 100644 --- a/src/nrniv/cxprop.cpp +++ b/src/nrniv/cxprop.cpp @@ -1,123 +1,100 @@ -#include <../../nrnconf.h> +#include "arraypool.h" // ArrayPool +#include "hocdec.h" // Datum +#include "section.h" // Section +#include "structpool.h" // Pool +#include "../neuron/model_data.hpp" -/* -allocate and free property data and Datum arrays for nrniv -this allows for the possibility of -greater cache efficiency -*/ +#include +#include -#include -#include -#include -#include "nrniv_mf.h" -#include -#include -#include -#include -#include -#include +// allocate and free Datum arrays for nrniv this allows for the possibility of +// greater cache efficiency -extern int nrn_is_ion(int); -#if EXTRACELLULAR -extern void nrn_extcell_update_param(); -#endif -extern void nrn_recalc_ptrs(double* (*) (double*) ); - -static constexpr auto APSIZE = 1000; using CharArrayPool = ArrayPool; -using DoubleArrayPool = ArrayPool; using DatumArrayPool = ArrayPool; using SectionPool = Pool
; -static int npools_; -static DoubleArrayPool** dblpools_; -static DatumArrayPool** datumpools_; static SectionPool* secpool_; - -void nrn_delete_prop_pool(int type) { - assert(type < npools_); - if (dblpools_[type]) { - if (dblpools_[type]->nget() > 0) { - hoc_execerror(memb_func[type].sym->name, "prop pool in use"); - } - delete dblpools_[type]; - dblpools_[type] = NULL; - } +// the advantage of this w.r.t `static std::vector<...> datumpools_;` is that the vector destructor +// is not called at application shutdown, see e.g. discussion in +// https://google.github.io/styleguide/cppguide.html#Static_and_Global_Variables around global +// variables that are not trivially destructible +static auto& datumpools() { + static auto& x = *new std::vector>{}; + return x; } void nrn_mk_prop_pools(int n) { - if (n <= npools_) { - return; - } - DoubleArrayPool** p1 = new DoubleArrayPool*[n]; - DatumArrayPool** p2 = new DatumArrayPool*[n]; - for (int i = 0; i < n; ++i) { - p1[i] = 0; - p2[i] = 0; - } - if (dblpools_) { - for (int i = 0; i < npools_; ++i) { - p1[i] = dblpools_[i]; - p2[i] = datumpools_[i]; - } - delete[] dblpools_; - delete[] datumpools_; + if (n > datumpools().size()) { + datumpools().resize(n); } - dblpools_ = p1; - datumpools_ = p2; - npools_ = n; -} - -double* nrn_prop_data_alloc(int type, int count, Prop* p) { - if (!dblpools_[type]) { - dblpools_[type] = new DoubleArrayPool(APSIZE, count); - } - assert(dblpools_[type]->d2() == count); - p->_alloc_seq = dblpools_[type]->ntget(); - double* pd = dblpools_[type]->alloc(); - // if (type > 1) printf("nrn_prop_data_alloc %d %s %d %p\n", type, memb_func[type].sym->name, - // count, pd); - return pd; } Datum* nrn_prop_datum_alloc(int type, int count, Prop* p) { - if (!datumpools_[type]) { - datumpools_[type] = new DatumArrayPool(APSIZE, count); + if (!datumpools()[type]) { + datumpools()[type] = std::make_unique(1000, count); } - assert(datumpools_[type]->d2() == count); - p->_alloc_seq = datumpools_[type]->ntget(); - auto* ppd = datumpools_[type]->alloc(); // allocates storage for the datums - // if (type > 1) printf("nrn_prop_datum_alloc %d %s %d %p\n", type, memb_func[type].sym->name, - // count, ppd); + assert(datumpools()[type]->d2() == count); + p->_alloc_seq = datumpools()[type]->ntget(); + auto* const ppd = datumpools()[type]->alloc(); // allocates storage for the datums for (int i = 0; i < count; ++i) { - ppd[i] = nullptr; + // Call the Datum constructor + new (ppd + i) Datum(); } return ppd; } -void nrn_prop_data_free(int type, double* pd) { - // if (type > 1) printf("nrn_prop_data_free %d %s %p\n", type, memb_func[type].sym->name, pd); - if (pd) { - dblpools_[type]->hpfree(pd); +int nrn_mechanism_prop_datum_count(int type) { + if (type >= datumpools().size() || datumpools()[type] == nullptr) { + return 0; } + return datumpools()[type]->d2(); } void nrn_prop_datum_free(int type, Datum* ppd) { - // if (type > 1) printf("nrn_prop_datum_free %d %s %p\n", type, memb_func[type].sym->name, ppd); + // if (type > 1) printf("nrn_prop_datum_free %d %s %p\n", type, + // memb_func[type].sym->name, ppd); if (ppd) { - datumpools_[type]->hpfree(ppd); + auto* const datumpool = datumpools()[type].get(); + assert(datumpool); + // Destruct the Datums + auto const count = datumpool->d2(); + for (auto i = 0; i < count; ++i) { + ppd[i].~Datum(); + } + // Deallocate them + datumpool->hpfree(ppd); } } +void nrn_delete_mechanism_prop_datum(int type) { + if (type >= datumpools().size() || datumpools()[type] == nullptr) { + return; + } + if (auto const 
size = datumpools()[type]->nget(); size != 0) { + hoc_execerr_ext( + "nrn_delete_mechanism_prop_datum(%d):" + " refusing to delete storage that still hosts" + " %ld instances", + type, + size); + } + datumpools()[type] = nullptr; +} + Section* nrn_section_alloc() { if (!secpool_) { secpool_ = new SectionPool(1000); } - Section* s = secpool_->alloc(); - return s; + auto* const sec = secpool_->alloc(); + // Call the Section constructor + new (sec) Section(); + return sec; } void nrn_section_free(Section* s) { + // Call the Section destructor + s->~Section(); secpool_->hpfree(s); } @@ -128,290 +105,22 @@ int nrn_is_valid_section_ptr(void* v) { return secpool_->is_valid_ptr(v); } -int nrn_prop_is_cache_efficient() { - DoubleArrayPool** p = new DoubleArrayPool*[npools_]; - int r = 1; - for (int i = 0; i < npools_; ++i) { - p[i] = dblpools_[i]; - } - for (int it = 0; it < nrn_nthread; ++it) { - NrnThread* nt = nrn_threads + it; - for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) { - Memb_list* ml = tml->ml; - int i = tml->index; - if (ml->nodecount > 0) { - if (!p[i]) { - // printf("thread %d mechanism %s pool chain does not exist\n", it, - // memb_func[i].sym->name); - r = 0; - continue; - } - if (p[i]->chain_size() != ml->nodecount) { - // printf("thread %d mechanism %s chain_size %d nodecount=%d\n", it, - // memb_func[i].sym->name, p[i]->chain_size(), ml->nodecount); - r = 0; - p[i] = p[i]->chain(); - continue; - } - for (int j = 0; j < ml->nodecount; ++j) { - if (p[i]->element(j) != ml->_data[j]) { - // printf("thread %d mechanism %s instance %d element %p data %p\n", - // it, memb_func[i].sym->name, j, p[i]->element(j), (ml->_data + j)); - r = 0; - } - } - p[i] = p[i]->chain(); - } - } - } - delete[] p; - return r; -} - -// in-place data reallocation only intended to work when only ion data has -// pointers to it and no one uses pointers to other prop data. If this does -// not hold, then segmentation violations are likely to occur. -// Note that the tml list is already ordered properly so ions are first. -// We can therefore call the mechanism pdata_update function (which normally -// contains pointers to ion variables) and the ion variables will exist -// in their new proper locations. - -// we do one type (for all threads) at a time. Sadly, we have to keep the -// original pool in existence til the new pool is complete. And we have to -// keep old ion pools til the end. - -static DoubleArrayPool** oldpools_; // only here to allow assertion checking - -// extending to gui pointers and other well-known things that hold pointers -// the idea is that there are not *that* many. -static int recalc_index_; // the one we are working on -static double* recalc_ptr(double* old) { - // is old in the op pool? 
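The rewritten cxprop.cpp above keeps the pool allocators but separates storage from object lifetime: nrn_prop_datum_alloc placement-news each Datum into pool storage, nrn_prop_datum_free runs the destructors before returning the memory with hpfree, and Sections get the same treatment. A generic sketch of that split, illustrative only (template names invented; assumes a pool exposing the alloc()/hpfree() interface used above):

#include <new>

template <typename T, typename Pool>
T* pool_construct(Pool& pool) {
    T* raw = pool.alloc();   // correctly sized storage, no object yet
    return new (raw) T();    // placement new starts the object's lifetime
}

template <typename T, typename Pool>
void pool_destroy(Pool& pool, T* obj) {
    obj->~T();               // explicit destructor call ends the lifetime
    pool.hpfree(obj);        // hand the storage back to the pool
}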
- for (DoubleArrayPool* op = oldpools_[recalc_index_]; op; op = op->chain()) { - long ds = op->chain_size() * op->d2(); - if (old >= op->pool() && old < (op->pool() + ds)) { - // if so then the value gives us the pointer in the new pool - long offset = old - op->pool(); - offset %= op->d2(); - DoubleArrayPool* np = dblpools_[recalc_index_]; - long i = (long) (*old); - // if (i < 0 || i >= np->size()) abort(); - assert(i >= 0 && i < np->size()); - double* n = np->items()[i] + offset; - // printf("recalc_ptr old=%p new=%p value=%g\n", old, n, *n); - return n; - } - } - return old; -} - -static hoc_List* mechstanlist_; - -static int in_place_data_realloc() { - int status = 0; - NrnThread* nt; - // what types are in use - int* types = new int[n_memb_func]; - oldpools_ = new DoubleArrayPool*[n_memb_func]; - for (int i = 0; i < n_memb_func; ++i) { - types[i] = 0; - oldpools_[i] = dblpools_[i]; - } - FOR_THREADS(nt) { - for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) { - ++types[tml->index]; - } - } - // iterate over those types - for (int i = 0; i < n_memb_func; ++i) - if (types[i]) { - int ision = nrn_is_ion(i); - DoubleArrayPool* oldpool = dblpools_[i]; - DoubleArrayPool* newpool = 0; - // create the pool with proper chain sizes - int ml_total_count = 0; // so we can know if we get them all - FOR_THREADS(nt) { - for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) - if (i == tml->index) { - ml_total_count += tml->ml->nodecount; - if (!newpool) { - newpool = new DoubleArrayPool(tml->ml->nodecount, oldpool->d2()); - } else { - newpool->grow(tml->ml->nodecount); - } - break; - } - } - // Any extras? Put them in a new last chain. - // actually ml_total_count cannot be 0. - int extra = oldpool->nget() - ml_total_count; - assert(extra >= 0 && newpool); - if (extra) { - newpool->grow(extra); - } - newpool->free_all(); // items in pool data order - // reset ml->_data pointers to the new pool and copy the values - FOR_THREADS(nt) { - for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) - if (i == tml->index) { - Memb_list* ml = tml->ml; - for (int j = 0; j < ml->nodecount; ++j) { - double* data = ml->_data[j]; - int ntget = newpool->ntget(); - ml->_data[j] = newpool->alloc(); - for (int k = 0; k < newpool->d2(); ++k) { - ml->_data[j][k] = data[k]; - } - // store in old location enough info - // to construct a pointer to the new location - for (int k = 0; k < newpool->d2(); ++k) { - data[k] = double(ntget); - } - } - // update any Datum pointers to ion variable locations - void (*s)(Datum*) = memb_func[i]._update_ion_pointers; - if (s) - for (int j = 0; j < ml->nodecount; ++j) { - (*s)(ml->pdata[j]); - } - } - } - // the newpool items are in the correct order for thread cache - // efficiency. But there may be other old items, - // e.g MechanismStandard that are not part of the memb lists. - // We made the chain item for them above. Now, sadly, we - // need to iterate over the MechanismStandards to find the - // old pool data. - if (extra) { - // should assert that newpool last chain is empty. 
- if (!mechstanlist_) { - Symbol* s = hoc_lookup("MechanismStandard"); - mechstanlist_ = s->u.ctemplate->olist; - } - hoc_Item* q; - int found = 0; - int looked_at = 0; - ITERATE(q, mechstanlist_) { - ++looked_at; - MechanismStandard* ms = (MechanismStandard*) (OBJ(q)->u.this_pointer); - NrnProperty* np = ms->np(); - if (np->type() == i && np->deleteable()) { - Prop* p = np->prop(); - double* data = p->param; - int ntget = newpool->ntget(); - p->param = newpool->alloc(); - for (int k = 0; k < newpool->d2(); ++k) { - p->param[k] = data[k]; - } - // store in old location enough info - // to construct a pointer to the new location - for (int k = 0; k < newpool->d2(); ++k) { - data[k] = double(ntget); - } - ++found; - } - } - // unless we missed something that holds a prop pointer. - // printf("%d extra %s, looked at %d, found %d\n", extra, - // memb_func[i].sym->name, looked_at, found); - assert(extra == found); - } - - dblpools_[i] = newpool; - // let the gui and other things update - recalc_index_ = i; - nrn_recalc_ptrs(recalc_ptr); - - if (0 && !ision) { - delete oldpool; - assert(oldpool == oldpools_[i]); - oldpools_[i] = 0; - } - } - // update p->param for the node properties - Memb_list** mlmap = new Memb_list*[n_memb_func]; - FOR_THREADS(nt) { - // make a map - for (int i = 0; i < n_memb_func; ++i) { - mlmap[i] = 0; - } - for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) { - mlmap[tml->index] = tml->ml; - tml->ml->nodecount = 0; - } - // fill - for (int i = 0; i < nt->end; ++i) { - Node* nd = nt->_v_node[i]; - for (Prop* p = nd->prop; p; p = p->next) { - if (memb_func[p->_type].current || memb_func[p->_type].state || - memb_func[p->_type].has_initialize()) { - Memb_list* ml = mlmap[p->_type]; - assert(ml->nodelist[ml->nodecount] == nd); - if (!memb_func[p->_type].hoc_mech) { - p->param = ml->_data[ml->nodecount]; - } - ++ml->nodecount; - } - } - } - } - // one more thing to do for extracellular -#if EXTRACELLULAR - nrn_extcell_update_param(); -#endif - // finally get rid of the old ion pools - for (int i = 0; i < n_memb_func; ++i) - if (types[i] && oldpools_[i]) { - delete oldpools_[i]; - } - delete[] oldpools_; - delete[] types; - delete[] mlmap; - // if useful, we could now realloc the Datum pools and just make - // bit copies of the Datum values. 
- return status; -} - -void nrn_update_ion_pointer(Symbol* sion, Datum* dp, int id, int ip) { - int iontype = sion->subtype; - DoubleArrayPool* np = dblpools_[iontype]; - DoubleArrayPool* op = oldpools_[iontype]; - assert(np); - assert(op); - assert(ip < op->d2()); - assert(1); // should point into pool() for one of the op pool chains - // and the index should be a pointer to the double in np - long i = *dp[id].get(); - assert(i >= 0 && i < np->size()); - double* pvar = np->items()[i]; - dp[id] = pvar + ip; -} - - void nrn_poolshrink(int shrink) { if (shrink) { - for (int i = 0; i < npools_; ++i) { - auto& pdbl = dblpools_[i]; - auto& pdatum = datumpools_[i]; - if ((pdbl && pdbl->nget() == 0)) { - nrn_delete_prop_pool(i); - } + for (auto& pdatum: datumpools()) { if (pdatum && pdatum->nget() == 0) { - delete datumpools_[i]; - datumpools_[i] = NULL; + pdatum.reset(); } } + neuron::model().shrink_to_fit(); } else { Printf("poolshrink --- type name (dbluse, size) (datumuse, size)\n"); - for (int i = 0; i < npools_; ++i) { - auto& pdbl = dblpools_[i]; - auto& pdatum = datumpools_[i]; - if (pdbl || pdatum) { - Printf("%d %s (%ld, %d) (%ld, %d)\n", + for (auto i = 0; i < datumpools().size(); ++i) { + auto const& pdatum = datumpools()[i]; + if (pdatum) { + Printf("%d %s (%ld, %d)\n", i, (memb_func[i].sym ? memb_func[i].sym->name : "noname"), - (pdbl ? pdbl->nget() : 0), - (pdbl ? pdbl->size() : 0), (pdatum ? pdatum->nget() : 0), (pdatum ? pdatum->size() : 0)); } @@ -419,12 +128,6 @@ void nrn_poolshrink(int shrink) { } } -void nrn_cache_prop_realloc() { - if (!nrn_prop_is_cache_efficient()) { - in_place_data_realloc(); - } -} - // for avoiding interthread cache line sharing // each thread needs its own pool instance void* nrn_pool_create(long count, int itemsize) { diff --git a/src/nrniv/finithnd.cpp b/src/nrniv/finithnd.cpp index 7ba8acd5c9..0bf58a2e32 100644 --- a/src/nrniv/finithnd.cpp +++ b/src/nrniv/finithnd.cpp @@ -15,10 +15,10 @@ Type 3 are at the very beginning of finitialize. ie structure changes #include #include -#include #include #include #include +#include "utils/enumerate.h" class FInitialHandler { public: @@ -106,10 +106,5 @@ FInitialHandler::FInitialHandler(int i, const char* s, Object* obj, Object* pyac FInitialHandler::~FInitialHandler() { delete stmt_; - for (auto it = fihlist_[type_].begin(); it != fihlist_[type_].end(); ++it) { - if ((*it) == this) { - fihlist_[type_].erase(it); - return; - } - } + erase_first(fihlist_[type_], this); } diff --git a/src/nrniv/geometry3d.cpp b/src/nrniv/geometry3d.cpp deleted file mode 100644 index 4889131070..0000000000 --- a/src/nrniv/geometry3d.cpp +++ /dev/null @@ -1,826 +0,0 @@ -/* - This file contains code adapted from p3d.py in - http://code.google.com/p/pythonisosurfaces/source/checkout - which was released under the new BSD license. 
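In finithnd.cpp above, the hand-written find-and-erase loop in the destructor is replaced by erase_first() from utils/enumerate.h. The helper's implementation is not part of this patch; something along these lines would be a plausible equivalent, shown here only for orientation:

#include <algorithm>

// Remove the first element equal to `value`, if any (sketch only; the real
// utils/enumerate.h helper may differ in details such as error handling).
template <typename Container, typename Value>
void erase_first_sketch(Container& c, const Value& value) {
    if (auto it = std::find(c.begin(), c.end(), value); it != c.end()) {
        c.erase(it);
    }
}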
- accessed 31 July 2012 -*/ - -#include -#include -#include -#include -#include -//#include - -int geometry3d_find_triangles(double value0, - double value1, - double value2, - double value3, - double value4, - double value5, - double value6, - double value7, - double x0, - double x1, - double y0, - double y1, - double z0, - double z1, - double* out, - int offset); - -double geometry3d_llgramarea(double* p0, double* p1, double* p2); -double geometry3d_sum_area_of_triangles(double* tri_vec, int len); - -const int edgeTable[] = { - 0x0, 0x109, 0x203, 0x30a, 0x406, 0x50f, 0x605, 0x70c, 0x80c, 0x905, 0xa0f, 0xb06, 0xc0a, - 0xd03, 0xe09, 0xf00, 0x190, 0x99, 0x393, 0x29a, 0x596, 0x49f, 0x795, 0x69c, 0x99c, 0x895, - 0xb9f, 0xa96, 0xd9a, 0xc93, 0xf99, 0xe90, 0x230, 0x339, 0x33, 0x13a, 0x636, 0x73f, 0x435, - 0x53c, 0xa3c, 0xb35, 0x83f, 0x936, 0xe3a, 0xf33, 0xc39, 0xd30, 0x3a0, 0x2a9, 0x1a3, 0xaa, - 0x7a6, 0x6af, 0x5a5, 0x4ac, 0xbac, 0xaa5, 0x9af, 0x8a6, 0xfaa, 0xea3, 0xda9, 0xca0, 0x460, - 0x569, 0x663, 0x76a, 0x66, 0x16f, 0x265, 0x36c, 0xc6c, 0xd65, 0xe6f, 0xf66, 0x86a, 0x963, - 0xa69, 0xb60, 0x5f0, 0x4f9, 0x7f3, 0x6fa, 0x1f6, 0xff, 0x3f5, 0x2fc, 0xdfc, 0xcf5, 0xfff, - 0xef6, 0x9fa, 0x8f3, 0xbf9, 0xaf0, 0x650, 0x759, 0x453, 0x55a, 0x256, 0x35f, 0x55, 0x15c, - 0xe5c, 0xf55, 0xc5f, 0xd56, 0xa5a, 0xb53, 0x859, 0x950, 0x7c0, 0x6c9, 0x5c3, 0x4ca, 0x3c6, - 0x2cf, 0x1c5, 0xcc, 0xfcc, 0xec5, 0xdcf, 0xcc6, 0xbca, 0xac3, 0x9c9, 0x8c0, 0x8c0, 0x9c9, - 0xac3, 0xbca, 0xcc6, 0xdcf, 0xec5, 0xfcc, 0xcc, 0x1c5, 0x2cf, 0x3c6, 0x4ca, 0x5c3, 0x6c9, - 0x7c0, 0x950, 0x859, 0xb53, 0xa5a, 0xd56, 0xc5f, 0xf55, 0xe5c, 0x15c, 0x55, 0x35f, 0x256, - 0x55a, 0x453, 0x759, 0x650, 0xaf0, 0xbf9, 0x8f3, 0x9fa, 0xef6, 0xfff, 0xcf5, 0xdfc, 0x2fc, - 0x3f5, 0xff, 0x1f6, 0x6fa, 0x7f3, 0x4f9, 0x5f0, 0xb60, 0xa69, 0x963, 0x86a, 0xf66, 0xe6f, - 0xd65, 0xc6c, 0x36c, 0x265, 0x16f, 0x66, 0x76a, 0x663, 0x569, 0x460, 0xca0, 0xda9, 0xea3, - 0xfaa, 0x8a6, 0x9af, 0xaa5, 0xbac, 0x4ac, 0x5a5, 0x6af, 0x7a6, 0xaa, 0x1a3, 0x2a9, 0x3a0, - 0xd30, 0xc39, 0xf33, 0xe3a, 0x936, 0x83f, 0xb35, 0xa3c, 0x53c, 0x435, 0x73f, 0x636, 0x13a, - 0x33, 0x339, 0x230, 0xe90, 0xf99, 0xc93, 0xd9a, 0xa96, 0xb9f, 0x895, 0x99c, 0x69c, 0x795, - 0x49f, 0x596, 0x29a, 0x393, 0x99, 0x190, 0xf00, 0xe09, 0xd03, 0xc0a, 0xb06, 0xa0f, 0x905, - 0x80c, 0x70c, 0x605, 0x50f, 0x406, 0x30a, 0x203, 0x109, 0x0}; - -/* CTNG:tritable */ -const int triTable[256][16] = {{-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 1, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 8, 3, 9, 8, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 3, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 2, 10, 0, 2, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {2, 8, 3, 2, 10, 8, 10, 9, 8, -1, -1, -1, -1, -1, -1, -1}, - {3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 11, 2, 8, 11, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 9, 0, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 11, 2, 1, 9, 11, 9, 8, 11, -1, -1, -1, -1, -1, -1, -1}, - {3, 10, 1, 11, 10, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 10, 1, 0, 8, 10, 8, 11, 10, -1, -1, -1, -1, -1, -1, -1}, - {3, 9, 0, 3, 11, 9, 11, 10, 9, -1, -1, -1, -1, -1, -1, -1}, - {9, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 3, 0, 7, 3, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - 
{0, 1, 9, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 1, 9, 4, 7, 1, 7, 3, 1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 4, 7, 3, 0, 4, 1, 2, 10, -1, -1, -1, -1, -1, -1, -1}, - {9, 2, 10, 9, 0, 2, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1}, - {2, 10, 9, 2, 9, 7, 2, 7, 3, 7, 9, 4, -1, -1, -1, -1}, - {8, 4, 7, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {11, 4, 7, 11, 2, 4, 2, 0, 4, -1, -1, -1, -1, -1, -1, -1}, - {9, 0, 1, 8, 4, 7, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1}, - {4, 7, 11, 9, 4, 11, 9, 11, 2, 9, 2, 1, -1, -1, -1, -1}, - {3, 10, 1, 3, 11, 10, 7, 8, 4, -1, -1, -1, -1, -1, -1, -1}, - {1, 11, 10, 1, 4, 11, 1, 0, 4, 7, 11, 4, -1, -1, -1, -1}, - {4, 7, 8, 9, 0, 11, 9, 11, 10, 11, 0, 3, -1, -1, -1, -1}, - {4, 7, 11, 4, 11, 9, 9, 11, 10, -1, -1, -1, -1, -1, -1, -1}, - {9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 5, 4, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 5, 4, 1, 5, 0, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {8, 5, 4, 8, 3, 5, 3, 1, 5, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 0, 8, 1, 2, 10, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1}, - {5, 2, 10, 5, 4, 2, 4, 0, 2, -1, -1, -1, -1, -1, -1, -1}, - {2, 10, 5, 3, 2, 5, 3, 5, 4, 3, 4, 8, -1, -1, -1, -1}, - {9, 5, 4, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 11, 2, 0, 8, 11, 4, 9, 5, -1, -1, -1, -1, -1, -1, -1}, - {0, 5, 4, 0, 1, 5, 2, 3, 11, -1, -1, -1, -1, -1, -1, -1}, - {2, 1, 5, 2, 5, 8, 2, 8, 11, 4, 8, 5, -1, -1, -1, -1}, - {10, 3, 11, 10, 1, 3, 9, 5, 4, -1, -1, -1, -1, -1, -1, -1}, - {4, 9, 5, 0, 8, 1, 8, 10, 1, 8, 11, 10, -1, -1, -1, -1}, - {5, 4, 0, 5, 0, 11, 5, 11, 10, 11, 0, 3, -1, -1, -1, -1}, - {5, 4, 8, 5, 8, 10, 10, 8, 11, -1, -1, -1, -1, -1, -1, -1}, - {9, 7, 8, 5, 7, 9, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 3, 0, 9, 5, 3, 5, 7, 3, -1, -1, -1, -1, -1, -1, -1}, - {0, 7, 8, 0, 1, 7, 1, 5, 7, -1, -1, -1, -1, -1, -1, -1}, - {1, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 7, 8, 9, 5, 7, 10, 1, 2, -1, -1, -1, -1, -1, -1, -1}, - {10, 1, 2, 9, 5, 0, 5, 3, 0, 5, 7, 3, -1, -1, -1, -1}, - {8, 0, 2, 8, 2, 5, 8, 5, 7, 10, 5, 2, -1, -1, -1, -1}, - {2, 10, 5, 2, 5, 3, 3, 5, 7, -1, -1, -1, -1, -1, -1, -1}, - {7, 9, 5, 7, 8, 9, 3, 11, 2, -1, -1, -1, -1, -1, -1, -1}, - {9, 5, 7, 9, 7, 2, 9, 2, 0, 2, 7, 11, -1, -1, -1, -1}, - {2, 3, 11, 0, 1, 8, 1, 7, 8, 1, 5, 7, -1, -1, -1, -1}, - {11, 2, 1, 11, 1, 7, 7, 1, 5, -1, -1, -1, -1, -1, -1, -1}, - {9, 5, 8, 8, 5, 7, 10, 1, 3, 10, 3, 11, -1, -1, -1, -1}, - {5, 7, 0, 5, 0, 9, 7, 11, 0, 1, 0, 10, 11, 10, 0, -1}, - {11, 10, 0, 11, 0, 3, 10, 5, 0, 8, 0, 7, 5, 7, 0, -1}, - {11, 10, 5, 7, 11, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 3, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 0, 1, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 8, 3, 1, 9, 8, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1}, - {1, 6, 5, 2, 6, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 6, 5, 1, 2, 6, 3, 0, 8, -1, -1, -1, -1, -1, -1, -1}, - {9, 6, 5, 9, 0, 6, 0, 2, 6, -1, -1, -1, -1, -1, -1, -1}, - {5, 9, 8, 5, 8, 2, 5, 2, 6, 3, 2, 8, -1, -1, -1, -1}, - {2, 3, 11, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {11, 0, 8, 11, 2, 0, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1}, - {0, 1, 9, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1, -1, -1, -1}, - {5, 10, 6, 1, 9, 2, 9, 11, 2, 9, 8, 11, -1, -1, -1, -1}, - {6, 3, 11, 6, 5, 3, 5, 1, 3, -1, -1, -1, -1, -1, 
-1, -1}, - {0, 8, 11, 0, 11, 5, 0, 5, 1, 5, 11, 6, -1, -1, -1, -1}, - {3, 11, 6, 0, 3, 6, 0, 6, 5, 0, 5, 9, -1, -1, -1, -1}, - {6, 5, 9, 6, 9, 11, 11, 9, 8, -1, -1, -1, -1, -1, -1, -1}, - {5, 10, 6, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 3, 0, 4, 7, 3, 6, 5, 10, -1, -1, -1, -1, -1, -1, -1}, - {1, 9, 0, 5, 10, 6, 8, 4, 7, -1, -1, -1, -1, -1, -1, -1}, - {10, 6, 5, 1, 9, 7, 1, 7, 3, 7, 9, 4, -1, -1, -1, -1}, - {6, 1, 2, 6, 5, 1, 4, 7, 8, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 5, 5, 2, 6, 3, 0, 4, 3, 4, 7, -1, -1, -1, -1}, - {8, 4, 7, 9, 0, 5, 0, 6, 5, 0, 2, 6, -1, -1, -1, -1}, - {7, 3, 9, 7, 9, 4, 3, 2, 9, 5, 9, 6, 2, 6, 9, -1}, - {3, 11, 2, 7, 8, 4, 10, 6, 5, -1, -1, -1, -1, -1, -1, -1}, - {5, 10, 6, 4, 7, 2, 4, 2, 0, 2, 7, 11, -1, -1, -1, -1}, - {0, 1, 9, 4, 7, 8, 2, 3, 11, 5, 10, 6, -1, -1, -1, -1}, - {9, 2, 1, 9, 11, 2, 9, 4, 11, 7, 11, 4, 5, 10, 6, -1}, - {8, 4, 7, 3, 11, 5, 3, 5, 1, 5, 11, 6, -1, -1, -1, -1}, - {5, 1, 11, 5, 11, 6, 1, 0, 11, 7, 11, 4, 0, 4, 11, -1}, - {0, 5, 9, 0, 6, 5, 0, 3, 6, 11, 6, 3, 8, 4, 7, -1}, - {6, 5, 9, 6, 9, 11, 4, 7, 9, 7, 11, 9, -1, -1, -1, -1}, - {10, 4, 9, 6, 4, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 10, 6, 4, 9, 10, 0, 8, 3, -1, -1, -1, -1, -1, -1, -1}, - {10, 0, 1, 10, 6, 0, 6, 4, 0, -1, -1, -1, -1, -1, -1, -1}, - {8, 3, 1, 8, 1, 6, 8, 6, 4, 6, 1, 10, -1, -1, -1, -1}, - {1, 4, 9, 1, 2, 4, 2, 6, 4, -1, -1, -1, -1, -1, -1, -1}, - {3, 0, 8, 1, 2, 9, 2, 4, 9, 2, 6, 4, -1, -1, -1, -1}, - {0, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {8, 3, 2, 8, 2, 4, 4, 2, 6, -1, -1, -1, -1, -1, -1, -1}, - {10, 4, 9, 10, 6, 4, 11, 2, 3, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 2, 2, 8, 11, 4, 9, 10, 4, 10, 6, -1, -1, -1, -1}, - {3, 11, 2, 0, 1, 6, 0, 6, 4, 6, 1, 10, -1, -1, -1, -1}, - {6, 4, 1, 6, 1, 10, 4, 8, 1, 2, 1, 11, 8, 11, 1, -1}, - {9, 6, 4, 9, 3, 6, 9, 1, 3, 11, 6, 3, -1, -1, -1, -1}, - {8, 11, 1, 8, 1, 0, 11, 6, 1, 9, 1, 4, 6, 4, 1, -1}, - {3, 11, 6, 3, 6, 0, 0, 6, 4, -1, -1, -1, -1, -1, -1, -1}, - {6, 4, 8, 11, 6, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {7, 10, 6, 7, 8, 10, 8, 9, 10, -1, -1, -1, -1, -1, -1, -1}, - {0, 7, 3, 0, 10, 7, 0, 9, 10, 6, 7, 10, -1, -1, -1, -1}, - {10, 6, 7, 1, 10, 7, 1, 7, 8, 1, 8, 0, -1, -1, -1, -1}, - {10, 6, 7, 10, 7, 1, 1, 7, 3, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 6, 1, 6, 8, 1, 8, 9, 8, 6, 7, -1, -1, -1, -1}, - {2, 6, 9, 2, 9, 1, 6, 7, 9, 0, 9, 3, 7, 3, 9, -1}, - {7, 8, 0, 7, 0, 6, 6, 0, 2, -1, -1, -1, -1, -1, -1, -1}, - {7, 3, 2, 6, 7, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {2, 3, 11, 10, 6, 8, 10, 8, 9, 8, 6, 7, -1, -1, -1, -1}, - {2, 0, 7, 2, 7, 11, 0, 9, 7, 6, 7, 10, 9, 10, 7, -1}, - {1, 8, 0, 1, 7, 8, 1, 10, 7, 6, 7, 10, 2, 3, 11, -1}, - {11, 2, 1, 11, 1, 7, 10, 6, 1, 6, 7, 1, -1, -1, -1, -1}, - {8, 9, 6, 8, 6, 7, 9, 1, 6, 11, 6, 3, 1, 3, 6, -1}, - {0, 9, 1, 11, 6, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {7, 8, 0, 7, 0, 6, 3, 11, 0, 11, 6, 0, -1, -1, -1, -1}, - {7, 11, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 0, 8, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 1, 9, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {8, 1, 9, 8, 3, 1, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1}, - {10, 1, 2, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, 3, 0, 8, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1}, - {2, 9, 0, 2, 10, 9, 6, 11, 7, -1, -1, -1, -1, -1, -1, -1}, - {6, 11, 7, 2, 10, 3, 10, 8, 3, 10, 9, 8, -1, -1, -1, -1}, - {7, 2, 3, 6, 2, 7, -1, -1, -1, -1, -1, -1, -1, -1, 
-1, -1}, - {7, 0, 8, 7, 6, 0, 6, 2, 0, -1, -1, -1, -1, -1, -1, -1}, - {2, 7, 6, 2, 3, 7, 0, 1, 9, -1, -1, -1, -1, -1, -1, -1}, - {1, 6, 2, 1, 8, 6, 1, 9, 8, 8, 7, 6, -1, -1, -1, -1}, - {10, 7, 6, 10, 1, 7, 1, 3, 7, -1, -1, -1, -1, -1, -1, -1}, - {10, 7, 6, 1, 7, 10, 1, 8, 7, 1, 0, 8, -1, -1, -1, -1}, - {0, 3, 7, 0, 7, 10, 0, 10, 9, 6, 10, 7, -1, -1, -1, -1}, - {7, 6, 10, 7, 10, 8, 8, 10, 9, -1, -1, -1, -1, -1, -1, -1}, - {6, 8, 4, 11, 8, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 6, 11, 3, 0, 6, 0, 4, 6, -1, -1, -1, -1, -1, -1, -1}, - {8, 6, 11, 8, 4, 6, 9, 0, 1, -1, -1, -1, -1, -1, -1, -1}, - {9, 4, 6, 9, 6, 3, 9, 3, 1, 11, 3, 6, -1, -1, -1, -1}, - {6, 8, 4, 6, 11, 8, 2, 10, 1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, 3, 0, 11, 0, 6, 11, 0, 4, 6, -1, -1, -1, -1}, - {4, 11, 8, 4, 6, 11, 0, 2, 9, 2, 10, 9, -1, -1, -1, -1}, - {10, 9, 3, 10, 3, 2, 9, 4, 3, 11, 3, 6, 4, 6, 3, -1}, - {8, 2, 3, 8, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1}, - {0, 4, 2, 4, 6, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 9, 0, 2, 3, 4, 2, 4, 6, 4, 3, 8, -1, -1, -1, -1}, - {1, 9, 4, 1, 4, 2, 2, 4, 6, -1, -1, -1, -1, -1, -1, -1}, - {8, 1, 3, 8, 6, 1, 8, 4, 6, 6, 10, 1, -1, -1, -1, -1}, - {10, 1, 0, 10, 0, 6, 6, 0, 4, -1, -1, -1, -1, -1, -1, -1}, - {4, 6, 3, 4, 3, 8, 6, 10, 3, 0, 3, 9, 10, 9, 3, -1}, - {10, 9, 4, 6, 10, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 9, 5, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 3, 4, 9, 5, 11, 7, 6, -1, -1, -1, -1, -1, -1, -1}, - {5, 0, 1, 5, 4, 0, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1}, - {11, 7, 6, 8, 3, 4, 3, 5, 4, 3, 1, 5, -1, -1, -1, -1}, - {9, 5, 4, 10, 1, 2, 7, 6, 11, -1, -1, -1, -1, -1, -1, -1}, - {6, 11, 7, 1, 2, 10, 0, 8, 3, 4, 9, 5, -1, -1, -1, -1}, - {7, 6, 11, 5, 4, 10, 4, 2, 10, 4, 0, 2, -1, -1, -1, -1}, - {3, 4, 8, 3, 5, 4, 3, 2, 5, 10, 5, 2, 11, 7, 6, -1}, - {7, 2, 3, 7, 6, 2, 5, 4, 9, -1, -1, -1, -1, -1, -1, -1}, - {9, 5, 4, 0, 8, 6, 0, 6, 2, 6, 8, 7, -1, -1, -1, -1}, - {3, 6, 2, 3, 7, 6, 1, 5, 0, 5, 4, 0, -1, -1, -1, -1}, - {6, 2, 8, 6, 8, 7, 2, 1, 8, 4, 8, 5, 1, 5, 8, -1}, - {9, 5, 4, 10, 1, 6, 1, 7, 6, 1, 3, 7, -1, -1, -1, -1}, - {1, 6, 10, 1, 7, 6, 1, 0, 7, 8, 7, 0, 9, 5, 4, -1}, - {4, 0, 10, 4, 10, 5, 0, 3, 10, 6, 10, 7, 3, 7, 10, -1}, - {7, 6, 10, 7, 10, 8, 5, 4, 10, 4, 8, 10, -1, -1, -1, -1}, - {6, 9, 5, 6, 11, 9, 11, 8, 9, -1, -1, -1, -1, -1, -1, -1}, - {3, 6, 11, 0, 6, 3, 0, 5, 6, 0, 9, 5, -1, -1, -1, -1}, - {0, 11, 8, 0, 5, 11, 0, 1, 5, 5, 6, 11, -1, -1, -1, -1}, - {6, 11, 3, 6, 3, 5, 5, 3, 1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 10, 9, 5, 11, 9, 11, 8, 11, 5, 6, -1, -1, -1, -1}, - {0, 11, 3, 0, 6, 11, 0, 9, 6, 5, 6, 9, 1, 2, 10, -1}, - {11, 8, 5, 11, 5, 6, 8, 0, 5, 10, 5, 2, 0, 2, 5, -1}, - {6, 11, 3, 6, 3, 5, 2, 10, 3, 10, 5, 3, -1, -1, -1, -1}, - {5, 8, 9, 5, 2, 8, 5, 6, 2, 3, 8, 2, -1, -1, -1, -1}, - {9, 5, 6, 9, 6, 0, 0, 6, 2, -1, -1, -1, -1, -1, -1, -1}, - {1, 5, 8, 1, 8, 0, 5, 6, 8, 3, 8, 2, 6, 2, 8, -1}, - {1, 5, 6, 2, 1, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 3, 6, 1, 6, 10, 3, 8, 6, 5, 6, 9, 8, 9, 6, -1}, - {10, 1, 0, 10, 0, 6, 9, 5, 0, 5, 6, 0, -1, -1, -1, -1}, - {0, 3, 8, 5, 6, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {10, 5, 6, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {11, 5, 10, 7, 5, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {11, 5, 10, 11, 7, 5, 8, 3, 0, -1, -1, -1, -1, -1, -1, -1}, - {5, 11, 7, 5, 10, 11, 1, 9, 0, -1, -1, -1, -1, -1, -1, -1}, - {10, 7, 5, 10, 11, 7, 9, 8, 1, 8, 3, 1, -1, -1, -1, -1}, - {11, 1, 2, 11, 7, 1, 7, 5, 1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 
3, 1, 2, 7, 1, 7, 5, 7, 2, 11, -1, -1, -1, -1}, - {9, 7, 5, 9, 2, 7, 9, 0, 2, 2, 11, 7, -1, -1, -1, -1}, - {7, 5, 2, 7, 2, 11, 5, 9, 2, 3, 2, 8, 9, 8, 2, -1}, - {2, 5, 10, 2, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1}, - {8, 2, 0, 8, 5, 2, 8, 7, 5, 10, 2, 5, -1, -1, -1, -1}, - {9, 0, 1, 5, 10, 3, 5, 3, 7, 3, 10, 2, -1, -1, -1, -1}, - {9, 8, 2, 9, 2, 1, 8, 7, 2, 10, 2, 5, 7, 5, 2, -1}, - {1, 3, 5, 3, 7, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 7, 0, 7, 1, 1, 7, 5, -1, -1, -1, -1, -1, -1, -1}, - {9, 0, 3, 9, 3, 5, 5, 3, 7, -1, -1, -1, -1, -1, -1, -1}, - {9, 8, 7, 5, 9, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {5, 8, 4, 5, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1}, - {5, 0, 4, 5, 11, 0, 5, 10, 11, 11, 3, 0, -1, -1, -1, -1}, - {0, 1, 9, 8, 4, 10, 8, 10, 11, 10, 4, 5, -1, -1, -1, -1}, - {10, 11, 4, 10, 4, 5, 11, 3, 4, 9, 4, 1, 3, 1, 4, -1}, - {2, 5, 1, 2, 8, 5, 2, 11, 8, 4, 5, 8, -1, -1, -1, -1}, - {0, 4, 11, 0, 11, 3, 4, 5, 11, 2, 11, 1, 5, 1, 11, -1}, - {0, 2, 5, 0, 5, 9, 2, 11, 5, 4, 5, 8, 11, 8, 5, -1}, - {9, 4, 5, 2, 11, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {2, 5, 10, 3, 5, 2, 3, 4, 5, 3, 8, 4, -1, -1, -1, -1}, - {5, 10, 2, 5, 2, 4, 4, 2, 0, -1, -1, -1, -1, -1, -1, -1}, - {3, 10, 2, 3, 5, 10, 3, 8, 5, 4, 5, 8, 0, 1, 9, -1}, - {5, 10, 2, 5, 2, 4, 1, 9, 2, 9, 4, 2, -1, -1, -1, -1}, - {8, 4, 5, 8, 5, 3, 3, 5, 1, -1, -1, -1, -1, -1, -1, -1}, - {0, 4, 5, 1, 0, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {8, 4, 5, 8, 5, 3, 9, 0, 5, 0, 3, 5, -1, -1, -1, -1}, - {9, 4, 5, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 11, 7, 4, 9, 11, 9, 10, 11, -1, -1, -1, -1, -1, -1, -1}, - {0, 8, 3, 4, 9, 7, 9, 11, 7, 9, 10, 11, -1, -1, -1, -1}, - {1, 10, 11, 1, 11, 4, 1, 4, 0, 7, 4, 11, -1, -1, -1, -1}, - {3, 1, 4, 3, 4, 8, 1, 10, 4, 7, 4, 11, 10, 11, 4, -1}, - {4, 11, 7, 9, 11, 4, 9, 2, 11, 9, 1, 2, -1, -1, -1, -1}, - {9, 7, 4, 9, 11, 7, 9, 1, 11, 2, 11, 1, 0, 8, 3, -1}, - {11, 7, 4, 11, 4, 2, 2, 4, 0, -1, -1, -1, -1, -1, -1, -1}, - {11, 7, 4, 11, 4, 2, 8, 3, 4, 3, 2, 4, -1, -1, -1, -1}, - {2, 9, 10, 2, 7, 9, 2, 3, 7, 7, 4, 9, -1, -1, -1, -1}, - {9, 10, 7, 9, 7, 4, 10, 2, 7, 8, 7, 0, 2, 0, 7, -1}, - {3, 7, 10, 3, 10, 2, 7, 4, 10, 1, 10, 0, 4, 0, 10, -1}, - {1, 10, 2, 8, 7, 4, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 9, 1, 4, 1, 7, 7, 1, 3, -1, -1, -1, -1, -1, -1, -1}, - {4, 9, 1, 4, 1, 7, 0, 8, 1, 8, 7, 1, -1, -1, -1, -1}, - {4, 0, 3, 7, 4, 3, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {4, 8, 7, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {9, 10, 8, 10, 11, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 0, 9, 3, 9, 11, 11, 9, 10, -1, -1, -1, -1, -1, -1, -1}, - {0, 1, 10, 0, 10, 8, 8, 10, 11, -1, -1, -1, -1, -1, -1, -1}, - {3, 1, 10, 11, 3, 10, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 2, 11, 1, 11, 9, 9, 11, 8, -1, -1, -1, -1, -1, -1, -1}, - {3, 0, 9, 3, 9, 11, 1, 2, 9, 2, 11, 9, -1, -1, -1, -1}, - {0, 2, 11, 8, 0, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {3, 2, 11, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {2, 3, 8, 2, 8, 10, 10, 8, 9, -1, -1, -1, -1, -1, -1, -1}, - {9, 10, 2, 0, 9, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {2, 3, 8, 2, 8, 10, 0, 1, 8, 1, 10, 8, -1, -1, -1, -1}, - {1, 10, 2, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {1, 3, 8, 9, 1, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 9, 1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {0, 3, 8, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}, - {-1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1, -1}}; - - -/* 
CTNG:interpedge */ -void geometry3d_vi(double* p1, double* p2, double v1, double v2, double* out) { - if (fabs(v1) < 0.000000000001) { - out[0] = p1[0]; - out[1] = p1[1]; - out[2] = p1[2]; - return; - } - if (fabs(v2) < 0.000000000001) { - out[0] = p2[0]; - out[1] = p2[1]; - out[2] = p2[2]; - return; - } - double delta_v = v1 - v2; - if (fabs(delta_v) < 0.0000000001) { - out[0] = p1[0]; - out[1] = p1[1]; - out[2] = p1[2]; - return; - } - double mu = v1 / delta_v; - if (std::isnan(mu)) { - printf("geometry3d_vi error. delta_v = %g, v1 = %g, v2 = %g\n", delta_v, v1, v2); - } - out[0] = p1[0] + mu * (p2[0] - p1[0]); - out[1] = p1[1] + mu * (p2[1] - p1[1]); - out[2] = p1[2] + mu * (p2[2] - p1[2]); -} - -int geometry3d_find_triangles(double value0, - double value1, - double value2, - double value3, - double value4, - double value5, - double value6, - double value7, - double x0, - double x1, - double y0, - double y1, - double z0, - double z1, - double* out, - int offset) { - out = out + offset; - double position[8][3] = {{x0, y0, z0}, - {x1, y0, z0}, - {x1, y1, z0}, - {x0, y1, z0}, - {x0, y0, z1}, - {x1, y0, z1}, - {x1, y1, z1}, - {x0, y1, z1}}; - - /* CTNG:domarch */ - int cubeIndex = 0; - if (value0 < 0) - cubeIndex |= 1; - if (value1 < 0) - cubeIndex |= 2; - if (value2 < 0) - cubeIndex |= 4; - if (value3 < 0) - cubeIndex |= 8; - if (value4 < 0) - cubeIndex |= 16; - if (value5 < 0) - cubeIndex |= 32; - if (value6 < 0) - cubeIndex |= 64; - if (value7 < 0) - cubeIndex |= 128; - - int et = edgeTable[cubeIndex]; - - if (et == 0) - return 0; - - double vertexList[12][3]; - if (et & 1) - geometry3d_vi(position[0], position[1], value0, value1, vertexList[0]); - if (et & 2) - geometry3d_vi(position[1], position[2], value1, value2, vertexList[1]); - if (et & 4) - geometry3d_vi(position[2], position[3], value2, value3, vertexList[2]); - if (et & 8) - geometry3d_vi(position[3], position[0], value3, value0, vertexList[3]); - if (et & 16) - geometry3d_vi(position[4], position[5], value4, value5, vertexList[4]); - if (et & 32) - geometry3d_vi(position[5], position[6], value5, value6, vertexList[5]); - if (et & 64) - geometry3d_vi(position[6], position[7], value6, value7, vertexList[6]); - if (et & 128) - geometry3d_vi(position[7], position[4], value7, value4, vertexList[7]); - if (et & 256) - geometry3d_vi(position[0], position[4], value0, value4, vertexList[8]); - if (et & 512) - geometry3d_vi(position[1], position[5], value1, value5, vertexList[9]); - if (et & 1024) - geometry3d_vi(position[2], position[6], value2, value6, vertexList[10]); - if (et & 2048) - geometry3d_vi(position[3], position[7], value3, value7, vertexList[11]); - - int const* const tt = triTable[cubeIndex]; - int i, j, k, count; - for (i = 0, count = 0; i < 16; i += 3, count++) { - if (tt[i] == -1) - break; - for (k = 0; k < 3; k++) { - for (j = 0; j < 3; j++) - out[j] = vertexList[tt[i + k]][j]; - out += 3; - } - } - return count; -} - - -double geometry3d_llgramarea(double* p0, double* p1, double* p2) { - /* setup the vectors */ - double a[] = {p0[0] - p1[0], p0[1] - p1[1], p0[2] - p1[2]}; - double b[] = {p0[0] - p2[0], p0[1] - p2[1], p0[2] - p2[2]}; - - /* take the cross-product */ - double cpx = a[1] * b[2] - a[2] * b[1]; - double cpy = a[2] * b[0] - a[0] * b[2]; - double cpz = a[0] * b[1] - a[1] * b[0]; - return std::sqrt(cpx * cpx + cpy * cpy + cpz * cpz); -} - - -double geometry3d_sum_area_of_triangles(double* tri_vec, int len) { - double sum = 0; - for (int i = 0; i < len; i += 9) { - sum += geometry3d_llgramarea(tri_vec + i, 
tri_vec + i + 3, tri_vec + i + 6); - } - return sum / 2.; -} - - -/***************************************************************************** - * this contains cone, and cylinder code translated and modified - * from Barbier and Galin 2004's example code - * see /u/ramcd/spatial/experiments/one_time_tests/2012-06-28/cone.cpp - * on 7 june 2012, the original code was available online at - * http://jgt.akpeters.com/papers/BarbierGalin04/Cone-Sphere.zip - ****************************************************************************/ - - -class geometry3d_Cylinder { - public: - geometry3d_Cylinder(double x0, double y0, double z0, double x1, double y1, double z1, double r); - //~geometry3d_Cylinder(); - double signed_distance(double px, double py, double pz); - - private: - // double x0, y0, z0, x1, y1, z1, r; - double r, rr, axisx, axisy, axisz, cx, cy, cz, h; -}; - -geometry3d_Cylinder::geometry3d_Cylinder(double x0, - double y0, - double z0, - double x1, - double y1, - double z1, - double r) - : r(r) - , cx((x0 + x1) / 2.) - , cy((y0 + y1) / 2.) - , cz((z0 + z1) / 2.) - , rr(r * r) { - axisx = x1 - x0; - axisy = y1 - y0; - axisz = z1 - z0; - double axislength = std::sqrt(axisx * axisx + axisy * axisy + axisz * axisz); - axisx /= axislength; - axisy /= axislength; - axisz /= axislength; - h = axislength / 2.; -} - -double geometry3d_Cylinder::signed_distance(double px, double py, double pz) { - double const nx{px - cx}; - double const ny{py - cy}; - double const nz{pz - cz}; - double y{std::abs(axisx * nx + axisy * ny + axisz * nz)}; - double yy{y * y}; - double const xx{nx * nx + ny * ny + nz * nz - yy}; - if (y < h) { - return std::max(-std::abs(h - y), std::sqrt(xx) - r); - } else { - y -= h; - yy = y * y; - if (xx < rr) { - return std::abs(y); - } else { - double const x{std::sqrt(xx) - r}; - return std::sqrt(yy + x * x); - } - } -} - -void* geometry3d_new_Cylinder(double x0, - double y0, - double z0, - double x1, - double y1, - double z1, - double r) { - return new geometry3d_Cylinder(x0, y0, z0, x1, y1, z1, r); -} -void geometry3d_delete_Cylinder(void* ptr) { - delete (geometry3d_Cylinder*) ptr; -} -// TODO: add validation for ptr -double geometry3d_Cylinder_signed_distance(void* ptr, double px, double py, double pz) { - return ((geometry3d_Cylinder*) ptr)->signed_distance(px, py, pz); -} - - -class geometry3d_Cone { - public: - geometry3d_Cone(double x0, - double y0, - double z0, - double r0, - double x1, - double y1, - double z1, - double r1); - double signed_distance(double px, double py, double pz); - - private: - double axisx, axisy, axisz, h, rra, rrb, conelength; - double side1, side2, x0, y0, z0, r0, axislength; -}; - -geometry3d_Cone::geometry3d_Cone(double x0, - double y0, - double z0, - double r0, - double x1, - double y1, - double z1, - double r1) - : rra(r0 * r0) - , rrb(r1 * r1) - , x0(x0) - , y0(y0) - , z0(z0) - , r0(r0) { - // TODO: these are preconditions; the python assures them, but could/should - // take care of that here - assert(r1 <= r0); - assert(r1 >= 0); - axisx = x1 - x0; - axisy = y1 - y0; - axisz = z1 - z0; - axislength = std::sqrt(axisx * axisx + axisy * axisy + axisz * axisz); - axisx /= axislength; - axisy /= axislength; - axisz /= axislength; - h = axislength / 2.; - rra = r0 * r0; - rrb = r1 * r1; - conelength = std::sqrt((r1 - r0) * (r1 - r0) + axislength * axislength); - side1 = (r1 - r0) / conelength; - side2 = axislength / conelength; -} - -double geometry3d_Cone::signed_distance(double px, double py, double pz) { - double nx, ny, nz, y, yy, xx, 
x, rx, ry; - nx = px - x0; - ny = py - y0; - nz = pz - z0; - y = axisx * nx + axisy * ny + axisz * nz; - yy = y * y; - xx = nx * nx + ny * ny + nz * nz - yy; - // in principle, xx >= 0, but roundoff errors may cause trouble - if (xx < 0) - xx = 0; - if (y < 0) { - if (xx < rra) - return -y; - x = std::sqrt(xx) - r0; - return std::sqrt(x * x + yy); - } else if (xx < rrb) { - return y - axislength; - } else { - x = std::sqrt(xx) - r0; - if (y < 0) { - if (x < 0) - return y; - return std::sqrt(x * x + yy); - } else { - ry = x * side1 + y * side2; - if (ry < 0) - return std::sqrt(x * x + yy); - rx = x * side2 - y * side1; - if (ry > conelength) { - ry -= conelength; - return std::sqrt(rx * rx + ry * ry); - } else { - return rx; - } - } - } -} - - -void* geometry3d_new_Cone(double x0, - double y0, - double z0, - double r0, - double x1, - double y1, - double z1, - double r1) { - return new geometry3d_Cone(x0, y0, z0, r0, x1, y1, z1, r1); -} -void geometry3d_delete_Cone(void* ptr) { - delete (geometry3d_Cone*) ptr; -} -// TODO: add validation for ptr -double geometry3d_Cone_signed_distance(void* ptr, double px, double py, double pz) { - return ((geometry3d_Cone*) ptr)->signed_distance(px, py, pz); -} - - -class geometry3d_Sphere { - public: - geometry3d_Sphere(double x, double y, double z, double r); - double signed_distance(double px, double py, double pz); - - private: - double x, y, z, r; -}; - -geometry3d_Sphere::geometry3d_Sphere(double x, double y, double z, double r) - : x(x) - , y(y) - , z(z) - , r(r) {} - -double geometry3d_Sphere::signed_distance(double px, double py, double pz) { - return std::sqrt(std::pow(x - px, 2) + std::pow(y - py, 2) + std::pow(z - pz, 2)) - r; -} - -void* geometry3d_new_Sphere(double x, double y, double z, double r) { - return new geometry3d_Sphere(x, y, z, r); -} -void geometry3d_delete_Sphere(void* ptr) { - delete (geometry3d_Sphere*) ptr; -} -// TODO: add validation for ptr -double geometry3d_Sphere_signed_distance(void* ptr, double px, double py, double pz) { - return ((geometry3d_Sphere*) ptr)->signed_distance(px, py, pz); -} - -class geometry3d_Plane { - public: - geometry3d_Plane(double x, double y, double z, double nx, double ny, double nz); - double signed_distance(double px, double py, double pz); - - private: - double nx, ny, nz; - double d, mul; -}; - -geometry3d_Plane::geometry3d_Plane(double x, double y, double z, double nx, double ny, double nz) - : nx(nx) - , ny(ny) - , nz(nz) - , d(-(nx * x + ny * y + nz * z)) - , mul(1. 
/ std::sqrt(nx * nx + ny * ny + nz * nz)) {} - -double geometry3d_Plane::signed_distance(double px, double py, double pz) { - return (nx * px + ny * py + nz * pz + d) * mul; -} - -void* geometry3d_new_Plane(double x, double y, double z, double nx, double ny, double nz) { - return new geometry3d_Plane(x, y, z, nx, ny, nz); -} -void geometry3d_delete_Plane(void* ptr) { - delete (geometry3d_Plane*) ptr; -} -// TODO: add validation for ptr -double geometry3d_Plane_signed_distance(void* ptr, double px, double py, double pz) { - return ((geometry3d_Plane*) ptr)->signed_distance(px, py, pz); -} - -/* - PyObject* nrnpy_pyCallObject(PyObject*, PyObject*); - - void print_numbers(PyObject *p) { - for (Py_ssize_t i = 0; i< PyList_Size(p); i++) { - PyObject* obj = PyList_GetItem(p, i); - printf("%g ", PyFloat_AsDouble(obj)); - } - printf("\n"); - } - - // TODO: it would be nice to remove the python dependence, because that - // limits us to mostly single threaded due to the global interpreter - // lock - int geometry3d_process_cell(int i, int j, int k, PyObject* objects, double* xs, double* ys, -double* zs, double* tridata, int start) { double x, y, z, x1, y1, z1, xx, yy, zz; x = xs[i]; y = -ys[j]; z = zs[k]; x1 = xs[i + 1]; y1 = ys[j + 1]; z1 = zs[k + 1]; double value[8], current_value; - PyObject* result; - PyObject* obj; - printf("inside process_cell\n"); - - // march around the cube - for (int m = 0; m < 8; m++) { - // NOTE: only describing changes from previous case - switch(m) { - case 0: xx = x; yy = y; zz = z; break; - case 1: xx = x1; break; - case 2: yy = y1; break; - case 3: xx = x; break; - case 4: yy = y; zz = z1; break; - case 5: xx = x1; break; - case 6: yy = y1; break; - case 7: xx = x; break; - } - printf("phase 0, len(objects) = %ld\n", PyList_Size(objects)); - obj = PyList_GetItem(objects, 0); - printf("phase 0a, obj = %x\n", (void*) obj); - result = PyEval_CallMethod(obj, "distance", "ddd", xx, yy, zz); - //Py_DECREF(obj); - printf("phase 1\n"); - current_value = PyFloat_AsDouble(result); - //Py_DECREF(result); - for (Py_ssize_t n = 1; n < PyList_Size(objects); n++) { - printf("phase 2, n = %ld\n", n); - obj = PyList_GetItem(objects, n); - result = PyEval_CallMethod(obj, "distance", "ddd", xx, yy, zz); - Py_DECREF(obj); - current_value = min(current_value, PyFloat_AsDouble(result)); - //Py_DECREF(result); - } - value[m] = current_value; - } - printf("finishing up; start = %d\n", start); - return start + 9 * geometry3d_find_triangles(value[0], value[1], value[2], value[3], -value[4], value[5], value[6], value[7], x, x1, y, y1, z, z1, tridata, start); - } - - - int geometry3d_test_call_function(PyObject* obj) { - printf("inside\n"); - if (obj == NULL) printf("obj is NULL\n"); - Py_INCREF(obj); - PyEval_CallObject(obj, obj); - return 0; - } - - int geometry3d_test_call_function3(int (*func) (void)) { - return func(); - } - - int geometry3d_test_call_method(PyObject* list, PyObject* obj) { - PyEval_CallMethod(list, "insert", "O", obj); - return 0; - } - - // this works, but is slower than the python version - int geometry3d_contains_surface(int i, int j, int k, double (*objdist)(double, double, double), -double* xs, double* ys, double* zs, double dx, double r_inner, double r_outer) { bool has_neg = -false, has_pos = false; double xbar = xs[i] + dx / 2; double ybar = ys[j] + dx / 2; double zbar = -zs[k] + dx / 2; - - double d = fabs(objdist(xbar, ybar, zbar)); - //if (i == 586 && j == 2169 && k == 83) {printf("at magic point, d=%g\n", d);} - //printf("sphere test: d = %g, r_inner = %g, 
r_outer = %g\n", d, r_inner, r_outer); - if (d <= r_inner) return 1; - if (d >= r_outer) return 0; -// // spheres alone are indeterminant; check corners -// for (int di = 0; di < 2; di++) { -// for (int dj = 0; dj < 2; dj++) { -// for (int dk = 0; dk < 2; dk++) { -// d = objdist(xs[i + di], ys[j + dj], zs[k + dk]); -// //printf("d = %g\n", d); -// if (d <= 0) has_neg = true; -// if (d >= 0) has_pos = true; -// if (has_neg && has_pos) return 1; -// } -// } -// } - - // spheres alone are indeterminant; check corners - for (double* x = xs + i; x < xs + i + 2; x++) { - for (double* y = ys + j; y < ys + j + 2; y++) { - for (double* z = zs + k; z < zs + k + 2; z++) { - d = objdist(*x, *y, *z); - if (d <= 0) has_neg = true; - if (d >= 0) has_pos = true; - if (has_neg && has_pos) return 1; - } - } - } - - return 0; - } - -} -*/ diff --git a/src/nrniv/glinerec.cpp b/src/nrniv/glinerec.cpp index f1cf38f309..f224b658b2 100644 --- a/src/nrniv/glinerec.cpp +++ b/src/nrniv/glinerec.cpp @@ -13,11 +13,11 @@ #undef begin #undef add -#include #include "nrnoc2iv.h" #include "vrecitem.h" #include "netcvode.h" #include "cvodeobj.h" +#include "utils/enumerate.h" #if HAVE_IV // to end of file #include "graph.h" @@ -26,11 +26,7 @@ extern NetCvode* net_cvode_instance; -class GLineRecordList; - -declarePtrList(GLineRecordList, GLineRecord) -implementPtrList(GLineRecordList, GLineRecord) -static GLineRecordList* grl; +static std::vector* grl; // Since GraphLine is not an observable, its destructor calls this. // So ivoc will work, a stub is placed in ivoc/datapath.cpp @@ -38,11 +34,9 @@ void graphLineRecDeleted(GraphLine* gl) { if (!grl) { return; } - int i, cnt = grl->count(); - for (i = 0; i < cnt; ++i) { - GLineRecord* r = grl->item(i); - if (r->uses(gl)) { - delete r; + for (auto& item: *grl) { + if (item->uses(gl)) { + delete item; return; } } @@ -52,25 +46,22 @@ void NetCvode::simgraph_remove() { if (!grl) { return; } - while (grl->count()) { - delete grl->item(grl->count() - 1); + for (auto& item: *grl) { + delete item; } } void Graph::simgraph() { - int i, cnt; if (!grl) { - grl = new GLineRecordList(); + grl = new std::vector(); } - cnt = line_list_.count(); - for (i = 0; i < cnt; ++i) { - GraphLine* gl = line_list_.item(i); + for (auto& gl: line_list_) { PlayRecord* pr = net_cvode_instance->playrec_uses(gl); if (pr) { delete pr; } GLineRecord* r = new GLineRecord(gl); - grl->append(r); + grl->push_back(r); } } @@ -116,13 +107,10 @@ void GLineRecord::fill_pd1() { } void GLineRecord::fill_pd() { - // Call only if cache_efficient will not change pointers before useing - // the results of his computation. - // Get rid of old pd_and_vec_ info. 
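The glinerec.cpp hunks starting above replace the declarePtrList/implementPtrList macros with a plain std::vector of GLineRecord pointers and turn index loops into range-for; in ~GLineRecord, a bit further on, the record removes itself by searching from the back with a reverse iterator and converting it for erase(). That iterator conversion is the only non-obvious step; a standalone sketch, illustrative only and not part of the patch:

#include <algorithm>
#include <iterator>
#include <vector>

// Erase the last element equal to `x`. For a reverse_iterator rit pointing
// at the match, rit.base() is the position just *after* it, so the match
// itself is std::next(rit).base().
void erase_last_match(std::vector<int>& v, int x) {
    if (auto rit = std::find(v.rbegin(), v.rend(), x); rit != v.rend()) {
        v.erase(std::next(rit).base());
    }
}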
- for (GLineRecordEData::iterator it = pd_and_vec_.begin(); it != pd_and_vec_.end(); ++it) { - if ((*it).second) { - delete (*it).second; + for (auto& [_, elem]: pd_and_vec_) { + if (elem) { + delete elem; } } pd_and_vec_.resize(0); @@ -143,7 +131,7 @@ void GLineRecord::fill_pd() { } GLineRecord::GLineRecord(GraphLine* gl) - : PlayRecord(NULL) { + : PlayRecord({}) { // shouldnt be necessary but just in case // printf("GLineRecord %p name=%s\n", this, gl->name()); gl_ = gl; @@ -153,53 +141,34 @@ GLineRecord::GLineRecord(GraphLine* gl) } GVectorRecord::GVectorRecord(GraphVector* gv) - : PlayRecord(NULL) { - // printf("GVectorRecord %p\n", this); - gv_ = gv; -} + : PlayRecord({}) + , gv_{gv} {} void GraphVector::record_install() { - // printf("GraphVector::record_install()\n"); - GVectorRecord* gvr = new GVectorRecord(this); + new GVectorRecord(this); } -void GraphVector::record_uninstall() { - // printf("GraphVector::record_uninstall()\n"); -} +void GraphVector::record_uninstall() {} GLineRecord::~GLineRecord() { - // printf("~GLineRecord %p\n", this); - int i; if (v_) { delete v_; - v_ = NULL; + v_ = nullptr; } - for (GLineRecordEData::iterator it = pd_and_vec_.begin(); it != pd_and_vec_.end(); ++it) { - if ((*it).second) { - delete (*it).second; + for (auto& [_, vec]: pd_and_vec_) { + if (vec) { + delete vec; } } - for (i = grl->count() - 1; i >= 0; --i) { - if (grl->item(i) == this) { - gl_->simgraph_activate(false); - grl->remove(i); - return; - } + if (auto it = std::find(grl->rbegin(), grl->rend(), this); it != grl->rend()) { + gl_->simgraph_activate(false); + grl->erase(std::next(it).base()); // Reverse iterator need that } } -GVectorRecord::~GVectorRecord() { - printf("~GVectorRecord %p\n", this); -#if 0 // for now not allowing vector buffering - for (GLineRecordEData::iterator it = pd_and_vec_.begin(); it != pd_and_vec_.end(); ++it) { - if ((*it).second) { - delete (*it).second; - } - } -#endif -} +GVectorRecord::~GVectorRecord() {} void GLineRecord::record_init() { gl_->simgraph_init(); @@ -216,7 +185,7 @@ void GVectorRecord::continuous(double t) {} int GVectorRecord::count() { return gv_->py_data()->count(); } -double* GVectorRecord::pdata(int i) { +neuron::container::data_handle GVectorRecord::pdata(int i) { return gv_->py_data()->p(i); } @@ -236,10 +205,8 @@ void GLineRecord::plot(int vecsz, double tstop) { ObjectContext obc(NULL); for (int i = 0; i < vecsz; ++i) { x->add(dt * i); - for (GLineRecordEData::iterator it = pd_and_vec_.begin(); it != pd_and_vec_.end(); - ++it) { - double* pd = (*it).first; - *pd = (*it).second->elem(i); + for (auto& [pd, elem]: pd_and_vec_) { + *pd = elem->elem(i); } gl_->plot(); } diff --git a/src/nrniv/glinerec.h b/src/nrniv/glinerec.h index 6c81f1718c..287358c620 100644 --- a/src/nrniv/glinerec.h +++ b/src/nrniv/glinerec.h @@ -53,7 +53,7 @@ class GVectorRecord: public PlayRecord { } int count(); - double* pdata(int); + neuron::container::data_handle pdata(int); GraphVector* gv_; }; diff --git a/src/nrniv/have2want.cpp b/src/nrniv/have2want.cpp deleted file mode 100644 index 5ac55709aa..0000000000 --- a/src/nrniv/have2want.cpp +++ /dev/null @@ -1,259 +0,0 @@ -/* -To be included by a file that desires rendezvous rank exchange functionality. -Need to define HAVEWANT_t, HAVEWANT_alltoallv, and HAVEWANT2Int -The latter is a map or unordered_map. -E.g. 
std::unordered_map -*/ - -#ifdef have2want_cpp -#error "This implementation can only be included once" -// The static function names used to involve a macro name (NrnHash) but now, -// with the use of std::..., it may be the case this could be included -// multiple times or even transformed into a template. -#endif - -#define have2want_cpp - -/* - -A rank owns a set of HAVEWANT_t keys and wants information associated with -a set of HAVEWANT_t keys owned by unknown ranks. Owners do not know which -ranks want their information. Ranks that want info do not know which ranks -own that info. - -The have_to_want function returns two new vectors of keys along with -associated count and displacement vectors of length nhost and nhost+1 -respectively. Note that a send_to_want_displ[i+1] = - send_to_want_cnt[i] + send_to_want_displ[i] . - -send_to_want[send_to_want_displ[i] to send_to_want_displ[i+1]] contains -the keys from this rank for which rank i wants information. - -recv_from_have[recv_from_have_displ[i] to recv_from_have_displ[i+1] contains -the keys from which rank i is sending information to this rank. - -Note that on rank i, the order of keys in the rank j area of send_to_want -is the same order of keys on rank j in the ith area in recv_from_have. - -The rendezvous_rank function is used to parallelize this computation -and minimize memory usage so that no single rank ever needs to know all keys. -*/ - -#ifndef HAVEWANT_t -#define HAVEWANT_t int -#endif - -// round robin default rendezvous rank function -static int default_rendezvous(HAVEWANT_t key) { - return key % nrnmpi_numprocs; -} - -static int* cnt2displ(int* cnt) { - int* displ = new int[nrnmpi_numprocs + 1]; - displ[0] = 0; - for (int i = 0; i < nrnmpi_numprocs; ++i) { - displ[i + 1] = displ[i] + cnt[i]; - } - return displ; -} - -static int* srccnt2destcnt(int* srccnt) { - int* destcnt = new int[nrnmpi_numprocs]; - nrnmpi_int_alltoall(srccnt, destcnt, 1); - return destcnt; -} - -static void rendezvous_rank_get(HAVEWANT_t* data, - int size, - HAVEWANT_t*& sdata, - int*& scnt, - int*& sdispl, - HAVEWANT_t*& rdata, - int*& rcnt, - int*& rdispl, - int (*rendezvous_rank)(HAVEWANT_t)) { - int nhost = nrnmpi_numprocs; - - // count what gets sent - scnt = new int[nhost]; - for (int i = 0; i < nhost; ++i) { - scnt[i] = 0; - } - for (int i = 0; i < size; ++i) { - int r = (*rendezvous_rank)(data[i]); - ++scnt[r]; - } - - sdispl = cnt2displ(scnt); - rcnt = srccnt2destcnt(scnt); - rdispl = cnt2displ(rcnt); - sdata = new HAVEWANT_t[sdispl[nhost] + 1]; // ensure not 0 size - rdata = new HAVEWANT_t[rdispl[nhost] + 1]; // ensure not 0 size - // scatter data into sdata by recalculating scnt. - for (int i = 0; i < nhost; ++i) { - scnt[i] = 0; - } - for (int i = 0; i < size; ++i) { - int r = (*rendezvous_rank)(data[i]); - sdata[sdispl[r] + scnt[r]] = data[i]; - ++scnt[r]; - } - HAVEWANT_alltoallv(sdata, scnt, sdispl, rdata, rcnt, rdispl); -} - -static void have_to_want(HAVEWANT_t* have, - int have_size, - HAVEWANT_t* want, - int want_size, - HAVEWANT_t*& send_to_want, - int*& send_to_want_cnt, - int*& send_to_want_displ, - HAVEWANT_t*& recv_from_have, - int*& recv_from_have_cnt, - int*& recv_from_have_displ, - int (*rendezvous_rank)(HAVEWANT_t)) { - // 1) Send have and want to the rendezvous ranks. - // 2) Rendezvous rank matches have and want. - // 3) Rendezvous ranks tell the want ranks which ranks own the keys - // 4) Ranks that want tell owner ranks where to send. - - int nhost = nrnmpi_numprocs; - - // 1) Send have and want to the rendezvous ranks. 
- HAVEWANT_t *have_s_data, *have_r_data; - int *have_s_cnt, *have_s_displ, *have_r_cnt, *have_r_displ; - rendezvous_rank_get(have, - have_size, - have_s_data, - have_s_cnt, - have_s_displ, - have_r_data, - have_r_cnt, - have_r_displ, - rendezvous_rank); - delete[] have_s_cnt; - delete[] have_s_displ; - delete[] have_s_data; - // assume it is an error if two ranks have the same key so create - // hash table of key2rank. Will also need it for matching have and want - HAVEWANT2Int havekey2rank = HAVEWANT2Int(have_r_displ[nhost] + 1); // ensure not empty. - for (int r = 0; r < nhost; ++r) { - for (int i = 0; i < have_r_cnt[r]; ++i) { - HAVEWANT_t key = have_r_data[have_r_displ[r] + i]; - if (havekey2rank.find(key) != havekey2rank.end()) { - hoc_execerr_ext( - "internal error in have_to_want: key %lld owned by multiple ranks\n", - (long long) key); - } - havekey2rank[key] = r; - } - } - delete[] have_r_data; - delete[] have_r_cnt; - delete[] have_r_displ; - - HAVEWANT_t *want_s_data, *want_r_data; - int *want_s_cnt, *want_s_displ, *want_r_cnt, *want_r_displ; - rendezvous_rank_get(want, - want_size, - want_s_data, - want_s_cnt, - want_s_displ, - want_r_data, - want_r_cnt, - want_r_displ, - rendezvous_rank); - - // 2) Rendezvous rank matches have and want. - // we already have made the havekey2rank map. - // Create an array parallel to want_r_data which contains the ranks that - // have that data. - int n = want_r_displ[nhost]; - int* want_r_ownerranks = new int[n]; - for (int r = 0; r < nhost; ++r) { - for (int i = 0; i < want_r_cnt[r]; ++i) { - int ix = want_r_displ[r] + i; - HAVEWANT_t key = want_r_data[ix]; - auto search = havekey2rank.find(key); - if (search == havekey2rank.end()) { - hoc_execerr_ext( - "internal error in have_to_want: key = %lld is wanted but does not exist\n", - (long long) key); - } - want_r_ownerranks[ix] = search->second; - } - } - delete[] want_r_data; - - // 3) Rendezvous ranks tell the want ranks which ranks own the keys - // The ranks that want keys need to know the ranks that own those keys. - // The want_s_ownerranks will be parallel to the want_s_data. - // That is, each item defines the rank from which information associated - // with that key is coming from - int* want_s_ownerranks = new int[want_s_displ[nhost]]; - if (nrn_sparse_partrans > 0) { - nrnmpi_int_alltoallv_sparse(want_r_ownerranks, - want_r_cnt, - want_r_displ, - want_s_ownerranks, - want_s_cnt, - want_s_displ); - } else { - nrnmpi_int_alltoallv(want_r_ownerranks, - want_r_cnt, - want_r_displ, - want_s_ownerranks, - want_s_cnt, - want_s_displ); - } - - delete[] want_r_ownerranks; - delete[] want_r_cnt; - delete[] want_r_displ; - - // 4) Ranks that want tell owner ranks where to send. - // Finished with the rendezvous ranks. The ranks that want keys know the - // owner ranks for those keys. The next step is for the want ranks to - // tell the owner ranks where to send. - // The parallel want_s_ownerranks and want_s_data are now uselessly ordered - // by rendezvous rank. Reorganize so that want ranks can tell owner ranks - // what they want. 
- n = want_s_displ[nhost]; - delete[] want_s_displ; - for (int i = 0; i < nhost; ++i) { - want_s_cnt[i] = 0; - } - HAVEWANT_t* old_want_s_data = want_s_data; - want_s_data = new HAVEWANT_t[n]; - // compute the counts - for (int i = 0; i < n; ++i) { - int r = want_s_ownerranks[i]; - ++want_s_cnt[r]; - } - want_s_displ = cnt2displ(want_s_cnt); - for (int i = 0; i < nhost; ++i) { - want_s_cnt[i] = 0; - } // recount while filling - for (int i = 0; i < n; ++i) { - int r = want_s_ownerranks[i]; - HAVEWANT_t key = old_want_s_data[i]; - want_s_data[want_s_displ[r] + want_s_cnt[r]] = key; - ++want_s_cnt[r]; - } - delete[] want_s_ownerranks; - delete[] old_want_s_data; - want_r_cnt = srccnt2destcnt(want_s_cnt); - want_r_displ = cnt2displ(want_r_cnt); - want_r_data = new HAVEWANT_t[want_r_displ[nhost]]; - HAVEWANT_alltoallv( - want_s_data, want_s_cnt, want_s_displ, want_r_data, want_r_cnt, want_r_displ); - // now the want_r_data on the have_ranks are grouped according to the ranks - // that want those keys. - - send_to_want = want_r_data; - send_to_want_cnt = want_r_cnt; - send_to_want_displ = want_r_displ; - recv_from_have = want_s_data; - recv_from_have_cnt = want_s_cnt; - recv_from_have_displ = want_s_displ; -} diff --git a/src/nrniv/have2want.hpp b/src/nrniv/have2want.hpp new file mode 100644 index 0000000000..11b22d28ed --- /dev/null +++ b/src/nrniv/have2want.hpp @@ -0,0 +1,202 @@ +#pragma once + +#include +#include +#include + +/* +A rank owns a set of T keys and wants information associated with +a set of T keys owned by unknown ranks. Owners do not know which +ranks want their information. Ranks that want info do not know which ranks +own that info. + +The have_to_want function returns two new vectors of keys along with +associated count and displacement vectors of length nhost and nhost+1 +respectively. Note that a send_to_want_displ[i+1] = + send_to_want_cnt[i] + send_to_want_displ[i] . + +send_to_want[send_to_want_displ[i] to send_to_want_displ[i+1]] contains +the keys from this rank for which rank i wants information. + +recv_from_have[recv_from_have_displ[i] to recv_from_have_displ[i+1] contains +the keys from which rank i is sending information to this rank. + +Note that on rank i, the order of keys in the rank j area of send_to_want +is the same order of keys on rank j in the ith area in recv_from_have. + +The rendezvous_rank function is used to parallelize this computation +and minimize memory usage so that no single rank ever needs to know all keys. 
+*/ + +// round robin rendezvous rank function +template +int rendezvous_rank(const T& key) { + return key % nrnmpi_numprocs; +} + +template +struct Data { + std::vector data{}; + std::vector cnt{}; + std::vector displ{}; +}; + +static std::vector cnt2displ(const std::vector& cnt) { + std::vector displ(nrnmpi_numprocs + 1); + std::partial_sum(cnt.cbegin(), cnt.cend(), displ.begin() + 1); + return displ; +} + +static std::vector srccnt2destcnt(std::vector srccnt) { + std::vector destcnt(nrnmpi_numprocs); + nrnmpi_int_alltoall(srccnt.data(), destcnt.data(), 1); + return destcnt; +} + +template +static std::tuple, Data> rendezvous_rank_get(const std::vector& data, + F alltoall_function) { + int nhost = nrnmpi_numprocs; + + Data s; + // count what gets sent + s.cnt = std::vector(nhost); + + for (const auto& e: data) { + int r = rendezvous_rank(e); + s.cnt[r] += 1; + } + + s.displ = cnt2displ(s.cnt); + s.data.resize(s.displ[nhost] + 1); + + Data r; + r.cnt = srccnt2destcnt(s.cnt); + r.displ = cnt2displ(r.cnt); + r.data.resize(r.displ[nhost]); + // scatter data into sdata by recalculating s.cnt. + std::fill(s.cnt.begin(), s.cnt.end(), 0); + for (const auto& e: data) { + int rank = rendezvous_rank(e); + s.data[s.displ[rank] + s.cnt[rank]] = e; + s.cnt[rank] += 1; + } + alltoall_function(s, r); + return {s, r}; +} + +template +std::pair, Data> have_to_want(const std::vector& have, + const std::vector& want, + F alltoall_function) { + // 1) Send have and want to the rendezvous ranks. + // 2) Rendezvous rank matches have and want. + // 3) Rendezvous ranks tell the want ranks which ranks own the keys + // 4) Ranks that want tell owner ranks where to send. + + int nhost = nrnmpi_numprocs; + + // 1) Send have and want to the rendezvous ranks. + + // hash table of key2rank. Will also need it for matching have and want + std::unordered_map havekey2rank{}; + { + auto [_, have_r] = rendezvous_rank_get(have, alltoall_function); + // assume it is an error if two ranks have the same key so create + havekey2rank.reserve(have_r.displ[nhost] + 1); + for (int r = 0; r < nhost; ++r) { + for (int i = 0; i < have_r.cnt[r]; ++i) { + T key = have_r.data[have_r.displ[r] + i]; + if (havekey2rank.find(key) != havekey2rank.end()) { + hoc_execerr_ext( + "internal error in have_to_want: key %lld owned by multiple ranks\n", + (long long) key); + } + havekey2rank[key] = r; + } + } + } + + auto [want_s, want_r] = rendezvous_rank_get(want, alltoall_function); + + // 2) Rendezvous rank matches have and want. + // we already have made the havekey2rank map. + // Create an array parallel to want_r_data which contains the ranks that + // have that data. + int n = want_r.displ[nhost]; + std::vector want_r_ownerranks(n); + for (int r = 0; r < nhost; ++r) { + for (int i = 0; i < want_r.cnt[r]; ++i) { + int ix = want_r.displ[r] + i; + T key = want_r.data[ix]; + auto search = havekey2rank.find(key); + if (search == havekey2rank.end()) { + hoc_execerr_ext( + "internal error in have_to_want: key = %lld is wanted but does not exist\n", + (long long) key); + } + want_r_ownerranks[ix] = search->second; + } + } + + // 3) Rendezvous ranks tell the want ranks which ranks own the keys + // The ranks that want keys need to know the ranks that own those keys. + // The want_s_ownerranks will be parallel to the want_s_data. 
+ // That is, each item defines the rank from which information associated + // with that key is coming from + std::vector want_s_ownerranks(want_s.displ[nhost]); + if (nrn_sparse_partrans > 0) { + nrnmpi_int_alltoallv_sparse(want_r_ownerranks.data(), + want_r.cnt.data(), + want_r.displ.data(), + want_s_ownerranks.data(), + want_s.cnt.data(), + want_s.displ.data()); + } else { + nrnmpi_int_alltoallv(want_r_ownerranks.data(), + want_r.cnt.data(), + want_r.displ.data(), + want_s_ownerranks.data(), + want_s.cnt.data(), + want_s.displ.data()); + } + + // 4) Ranks that want tell owner ranks where to send. + // Finished with the rendezvous ranks. The ranks that want keys know the + // owner ranks for those keys. The next step is for the want ranks to + // tell the owner ranks where to send. + // The parallel want_s_ownerranks and want_s_data are now uselessly ordered + // by rendezvous rank. Reorganize so that want ranks can tell owner ranks + // what they want. + n = want_s.displ[nhost]; + for (int i = 0; i < nhost; ++i) { + want_s.cnt[i] = 0; + } + std::vector old_want_s_data(n); + std::swap(old_want_s_data, want_s.data); + // compute the counts + for (int i = 0; i < n; ++i) { + int r = want_s_ownerranks[i]; + ++want_s.cnt[r]; + } + want_s.displ = cnt2displ(want_s.cnt); + for (int i = 0; i < nhost; ++i) { + want_s.cnt[i] = 0; + } // recount while filling + for (int i = 0; i < n; ++i) { + int r = want_s_ownerranks[i]; + T key = old_want_s_data[i]; + want_s.data[want_s.displ[r] + want_s.cnt[r]] = key; + ++want_s.cnt[r]; + } + + Data new_want_r{}; + new_want_r.cnt = srccnt2destcnt(want_s.cnt); + new_want_r.displ = cnt2displ(new_want_r.cnt); + new_want_r.data.resize(new_want_r.displ[nhost]); + alltoall_function(want_s, new_want_r); + // now the want_r_data on the have_ranks are grouped according to the ranks + // that want those keys. 
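    // Aside (illustration only, not used by the algorithm): the cnt/displ bookkeeping in
    // steps 1-4 above is an exclusive prefix sum, exactly what cnt2displ computes. For
    // example, with nhost = 3 and per-rank counts cnt = {2, 0, 3},
    //     std::vector<int> displ(nhost + 1);
    //     std::partial_sum(cnt.begin(), cnt.end(), displ.begin() + 1);
    // yields displ = {0, 2, 2, 5}: displ[i] .. displ[i+1] brackets rank i's slice of the
    // flat key array and displ[nhost] is the total number of keys exchanged.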
+ + return {new_want_r, want_s}; +} diff --git a/src/nrniv/hocmech.cpp b/src/nrniv/hocmech.cpp index 221921ca0b..e72027c07e 100644 --- a/src/nrniv/hocmech.cpp +++ b/src/nrniv/hocmech.cpp @@ -2,6 +2,7 @@ #undef check #include #include +#include "membfunc.h" #include "nrnoc2iv.h" #include "nrniv_mf.h" @@ -133,7 +134,7 @@ static void alloc_pnt(Prop* p) { p->ob = nrn_point_prop_->ob; // printf("p->ob comes from nrn_point_prop_ %s\n", hoc_object_name(p->ob)); } else { - p->dparam = (Datum*) hoc_Ecalloc(2, sizeof(Datum)); + nrn_prop_datum_alloc(p->_type, 2, p); if (last_created_pp_ob_) { p->ob = last_created_pp_ob_; // printf("p->ob comes from last_created %s\n", hoc_object_name(p->ob)); @@ -164,7 +165,7 @@ static void call(Symbol* s, Node* nd, Prop* p) { nrn_popsec(); } -static void initial(void* nt, Memb_list* ml, int type) { +static void initial(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { HocMech* hm = (HocMech*) memb_func[type].hoc_mech; int i, cnt = ml->nodecount; for (i = 0; i < cnt; ++i) { @@ -172,7 +173,7 @@ static void initial(void* nt, Memb_list* ml, int type) { } } -static void after_step(void* nt, Memb_list* ml, int type) { +static void after_step(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { HocMech* hm = (HocMech*) memb_func[type].hoc_mech; int i, cnt = ml->nodecount; for (i = 0; i < cnt; ++i) { @@ -187,23 +188,24 @@ static HocMech* common_register(const char** m, Symlist* slist, void(hm_alloc)(Prop*), int& type) { - Pvmi cur, jacob, stat, initialize; - cur = NULL; - jacob = NULL; - stat = NULL; - initialize = NULL; + nrn_cur_t cur{}; + nrn_init_t initialize{}; + nrn_jacob_t jacob{}; + nrn_state_t stat{}; HocMech* hm = new HocMech(); hm->slist = NULL; hm->mech = classsym; hm->initial = hoc_table_lookup("initial", slist); hm->after_step = hoc_table_lookup("after_step", slist); - if (hm->initial) - initialize = (Pvmi) initial; - if (hm->after_step) - stat = (Pvmi) after_step; + if (hm->initial) { + initialize = initial; + } + if (hm->after_step) { + stat = after_step; + } register_mech(m, hm_alloc, cur, jacob, stat, initialize, -1, 0); type = nrn_get_mechtype(m[1]); - hoc_register_cvode(type, NULL, NULL, NULL, NULL); + hoc_register_cvode(type, nullptr, nullptr, nullptr, nullptr); memb_func[type].hoc_mech = hm; return hm; } diff --git a/src/nrniv/impedanc.cpp b/src/nrniv/impedanc.cpp index 0206030191..9b2d7a12d9 100644 --- a/src/nrniv/impedanc.cpp +++ b/src/nrniv/impedanc.cpp @@ -2,6 +2,7 @@ #undef check #include "nrniv_mf.h" #include "nrnmpi.h" +#include "nrn_ansi.h" #include "nonlinz.h" #include #include @@ -9,8 +10,6 @@ #include "classreg.h" #include #include "membfunc.h" -extern void nrn_rhs(NrnThread*); -extern void nrn_lhs(NrnThread*); extern void setup_topology(); extern void recalc_diam(); @@ -300,22 +299,23 @@ void Imp::setmat1() { The calculated g is good til someone else changes something having to do with the matrix. 
*/ + auto const sorted_token = nrn_ensure_model_data_are_sorted(); const NrnThread* _nt = nrn_threads; const Memb_list* mlc = _nt->tml->ml; assert(_nt->tml->index == CAP); for (int i = 0; i < nrn_nthread; ++i) { double cj = nrn_threads[i].cj; nrn_threads[i].cj = 0; - nrn_rhs(nrn_threads + i); // not useful except that many model description set g while - // computing i - nrn_lhs(nrn_threads + i); + // not useful except that many model description set g while computing i + nrn_rhs(sorted_token, nrn_threads[i]); + nrn_lhs(sorted_token, nrn_threads[i]); nrn_threads[i].cj = cj; } for (int i = 0; i < n; ++i) { NODERHS(_nt->_v_node[i]) = 0; } for (int i = 0; i < mlc->nodecount; ++i) { - NODERHS(mlc->nodelist[i]) = mlc->_data[i][0]; + NODERHS(mlc->nodelist[i]) = mlc->data(i, 0); } } diff --git a/src/nrniv/kschan.cpp b/src/nrniv/kschan.cpp index 4e3c13f127..b3ab59ddc1 100644 --- a/src/nrniv/kschan.cpp +++ b/src/nrniv/kschan.cpp @@ -1,7 +1,6 @@ #include <../../nrnconf.h> #include #include -#include #include #include "nrnoc2iv.h" #include "classreg.h" @@ -12,10 +11,6 @@ #include "nrniv_mf.h" #define NSingleIndex 0 -#if defined(__MWERKS__) && !defined(_MSC_VER) -#include -#define strdup _strdup -#endif using KSChanList = std::vector; static KSChanList* channels; @@ -54,9 +49,15 @@ static void chkobj(void* v) { } } -static void check_table_thread_(double* p, Datum* ppvar, Datum* thread, NrnThread* vnt, int type) { +static void check_table_thread_(Memb_list*, + std::size_t, + Datum*, + Datum*, + NrnThread* vnt, + int type, + neuron::model_sorted_token const&) { KSChan* c = (*channels)[type]; - c->check_table_thread((NrnThread*) vnt); + c->check_table_thread(vnt); } static void nrn_alloc(Prop* prop) { @@ -64,49 +65,28 @@ static void nrn_alloc(Prop* prop) { c->alloc(prop); } -static void nrn_init(NrnThread* nt, Memb_list* ml, int type) { +static void nrn_init(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { // printf("nrn_init\n"); KSChan* c = (*channels)[type]; - c->init(ml->nodecount, ml->nodelist, ml->_data, ml->pdata, nt); + c->init(nt, ml); } -static void nrn_cur(NrnThread* nt, Memb_list* ml, int type) { +static void nrn_cur(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { // printf("nrn_cur\n"); KSChan* c = (*channels)[type]; -#if CACHEVEC - if (use_cachevec) { - c->cur(ml->nodecount, ml->nodeindices, ml->_data, ml->pdata, nt); - } else -#endif /* CACHEVEC */ - { - c->cur(ml->nodecount, ml->nodelist, ml->_data, ml->pdata); - } + c->cur(nt, ml); } -static void nrn_jacob(NrnThread* nt, Memb_list* ml, int type) { +static void nrn_jacob(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { // printf("nrn_jacob\n"); KSChan* c = (*channels)[type]; -#if CACHEVEC - if (use_cachevec) { - c->jacob(ml->nodecount, ml->nodeindices, ml->_data, ml->pdata, nt); - } else -#endif /* CACHEVEC */ - { - c->jacob(ml->nodecount, ml->nodelist, ml->_data, ml->pdata); - } + c->jacob(nt, ml); } -static void nrn_state(NrnThread* nt, Memb_list* ml, int type) { +static void nrn_state(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { // printf("nrn_state\n"); KSChan* c = (*channels)[type]; -#if CACHEVEC - if (use_cachevec) { - c->state(ml->nodecount, ml->nodeindices, ml->nodelist, ml->_data, ml->pdata, nt); - } else -#endif /* CACHEVEC */ - { - c->state(ml->nodecount, ml->nodelist, ml->_data, ml->pdata, nt); - } + c->state(nt, ml); } static int ode_count(int type) { @@ -114,26 +94,33 @@ static int ode_count(int type) { 
KSChan* c = (*channels)[type]; return c->count(); } -static void -ode_map(int ieq, double** pv, double** pvdot, double* p, Datum* pd, double* atol, int type) { +static void ode_map(Prop* prop, + int ieq, + neuron::container::data_handle* pv, + neuron::container::data_handle* pvdot, + double* atol, + int type) { // printf("ode_map\n"); KSChan* c = (*channels)[type]; - c->map(ieq, pv, pvdot, p, pd, atol); + c->map(prop, ieq, pv, pvdot, atol); } -static void ode_spec(NrnThread*, Memb_list* ml, int type) { +static void ode_spec(neuron::model_sorted_token const& token, NrnThread*, Memb_list* ml, int type) { // printf("ode_spec\n"); KSChan* c = (*channels)[type]; - c->spec(ml->nodecount, ml->nodelist, ml->_data, ml->pdata); + c->spec(ml); } -static void ode_matsol(NrnThread* nt, Memb_list* ml, int type) { +static void ode_matsol(neuron::model_sorted_token const& token, + NrnThread* nt, + Memb_list* ml, + int type) { // printf("ode_matsol\n"); KSChan* c = (*channels)[type]; - c->matsol(ml->nodecount, ml->nodelist, ml->_data, ml->pdata, nt); + c->matsol(nt, ml); } static void singchan(NrnThread* nt, Memb_list* ml, int type) { // printf("singchan_\n"); KSChan* c = (*channels)[type]; - c->cv_sc_update(ml->nodecount, ml->nodelist, ml->_data, ml->pdata, nt); + c->cv_sc_update(nt, ml); } static void* hoc_create_pnt(Object* ho) { return create_point_process(ho->ctemplate->is_point_, ho); @@ -350,10 +337,7 @@ static Object** ks_add_ksstate(void* v) { static Object** ks_add_transition(void* v) { KSChan* ks = (KSChan*) v; - const char* lig = NULL; - if (ifarg(3)) { - lig = gargstr(3); - } + // Does not deal here with ligands. int src, target; if (hoc_is_double_arg(1)) { src = (int) chkarg(1, ks->nhhstate_, ks->nstate_ - 1); @@ -366,7 +350,7 @@ static Object** ks_add_transition(void* v) { check_objtype(obj, ksstate_sym); target = ((KSState*) obj->u.this_pointer)->index_; } - KSTransition* kst = ks->add_transition(src, target, lig); + KSTransition* kst = ks->add_transition(src, target); return temp_objvar("KSTrans", kst, &kst->obj_); } @@ -383,7 +367,11 @@ static Object** ks_trans(void* v) { obj = *hoc_objgetarg(2); check_objtype(obj, ksstate_sym); target = ((KSState*) obj->u.this_pointer)->index_; - kst = ks->trans_ + ks->trans_index(src, target); + int index = ks->trans_index(src, target); + if (index < 0) { + hoc_execerr_ext("no transition between state index %d and %d", src, target); + } + kst = ks->trans_ + index; } return temp_objvar("KSTrans", kst, &kst->obj_); } @@ -406,7 +394,7 @@ static const char** ks_name(void* v) { ks->setname(gargstr(1)); } char** ps = hoc_temp_charptr(); - *ps = (char*) ks->name_.string(); + *ps = (char*) ks->name_.c_str(); return (const char**) ps; } @@ -416,7 +404,7 @@ static const char** ks_ion(void* v) { ks->setion(gargstr(1)); } char** ps = hoc_temp_charptr(); - *ps = (char*) ks->ion_.string(); + *ps = (char*) ks->ion_.c_str(); return (const char**) ps; } @@ -646,9 +634,9 @@ static double ks_pr(void* v) { Printf("%s type properties\n", hoc_object_name(ks->obj_)); Printf("name=%s is_point_=%s ion_=%s cond_model_=%d\n", - ks->name_.string(), + ks->name_.c_str(), (ks->is_point() ? 
"true" : "false"), - ks->ion_.string(), + ks->ion_.c_str(), ks->cond_model_); Printf(" ngate=%d nstate=%d nhhstate=%d nligand=%d ntrans=%d ivkstrans=%d iligtrans=%d\n", ks->ngate_, @@ -826,10 +814,8 @@ static const char* m_kschan[9]; // gmax=0 g=1 i=1 state names will be modltype 2, there are no pointer variables void KSChan::add_channel(const char** m) { - KSChan* c = (KSChan*) this; Symlist* sav = hoc_symlist; - hoc_symlist = hoc_built_in_symlist; - hoc_built_in_symlist = 0; + hoc_symlist = std::exchange(hoc_built_in_symlist, nullptr); if (is_point()) { pointtype_ = point_register_mech(m, nrn_alloc, @@ -856,7 +842,8 @@ void KSChan::add_channel(const char** m) { while (channels->size() < mechtype_) { channels->push_back(nullptr); } - channels->push_back(c); + channels->push_back(this); + neuron::model().add_mechanism(mechtype_, m[1]); // no floating point fields } KSChan::KSChan(Object* obj, bool is_p) { @@ -884,7 +871,7 @@ KSChan::KSChan(Object* obj, bool is_p) { ligands_ = NULL; mechsym_ = NULL; rlsym_ = NULL; - char buf[50]; + char buf[100]; Sprintf(buf, "Chan%d", obj_->index); name_ = buf; ion_ = "NonSpecific"; @@ -894,31 +881,12 @@ KSChan::KSChan(Object* obj, bool is_p) { gmax_deflt_ = 0.; erev_deflt_ = 0.; soffset_ = 4; // gmax, e, g, i before the first state in p array - build(); -} - -KSChan::~KSChan() {} - -void KSChan::build() { - if (mechsym_) { - return; - } - int i; - char buf[100]; - if (strcmp(ion_.string(), "NonSpecific") != 0) { - ion_reg(ion_.string(), -10000.); - Sprintf(buf, "%s_ion", ion_.string()); - ion_sym_ = looksym(buf); - if (!ion_sym_) { - hoc_execerror(buf, " is not an ion mechanism"); - } - } - const char* suffix = name_.string(); + const char* suffix = name_.c_str(); char unsuffix[100]; if (is_point()) { unsuffix[0] = '\0'; } else { - Sprintf(unsuffix, "_%s", name_.string()); + Sprintf(unsuffix, "_%s", name_.c_str()); } if (looksym(suffix)) { hoc_execerror(suffix, "already exists"); @@ -942,7 +910,9 @@ void KSChan::build() { m_kschan[7 + aoff] = 0; soffset_ = 3 + aoff; // first state points here in p array add_channel(m_kschan); - for (i = 0; i < 9; ++i) + must_allow_size_update(is_single_, ion_sym_ != nullptr, nligand_, nstate_); + update_size(); + for (int i = 0; i < 9; ++i) if (m_kschan[i]) { free((void*) m_kschan[i]); } @@ -954,21 +924,21 @@ void KSChan::build() { } setcond(); sname_install(); - // printf("%s allowed in insert statement\n", name_.string()); + // printf("%s allowed in insert statement\n", name_.c_str()); } void KSChan::setname(const char* s) { // printf("KSChan::setname\n"); int i; - if (strcmp(s, name_.string()) == 0) { + if (strcmp(s, name_.c_str()) == 0) { return; } name_ = s; if (mechsym_) { char old_suffix[100]; i = 0; - while (strcmp(mechsym_->name, name_.string()) != 0 && looksym(name_.string())) { - Printf("KSChan::setname %s already in use\n", name_.string()); + while (strcmp(mechsym_->name, name_.c_str()) != 0 && looksym(name_.c_str())) { + Printf("KSChan::setname %s already in use\n", name_.c_str()); Sprintf(old_suffix, "%s%d", s, i); name_ = old_suffix; ++i; @@ -977,7 +947,7 @@ void KSChan::setname(const char* s) { // return; } Sprintf(old_suffix, "_%s", mechsym_->name); - const char* suffix = name_.string(); + const char* suffix = name_.c_str(); free(mechsym_->name); mechsym_->name = strdup(suffix); if (is_point()) { @@ -1002,20 +972,10 @@ void KSChan::setname(const char* s) { sp->name = s1; } } - // printf("%s renamed to %s\n", old_suffix+1, name_.string()); + // printf("%s renamed to %s\n", old_suffix+1, name_.c_str()); } } 
-int KSChan::state(const char* s) { - int i; - for (i = 0; i < nstate_; ++i) { - if (strcmp(state_[i].string(), s) == 0) { - return i; - } - } - return -1; -} - void KSChan::power(KSGateComplex* gc, int p) { if (is_single() && p != 1) { set_single(false); @@ -1035,6 +995,9 @@ void KSChan::set_single(bool b, bool update) { "power", 0); } + // check before changing structure + must_allow_size_update(b, ion_sym_ != nullptr, nligand_, nstate_); + if (is_single()) { memb_func[mechtype_].singchan_ = NULL; delete_schan_node_data(); @@ -1056,17 +1019,6 @@ const char* KSChan::state(int i) { return state_[i].string(); } -int KSChan::trans_index(const char* s, const char* t) { - int i; - for (i = 0; i < ntrans_; ++i) { - if (strcmp(state_[trans_[i].src_].string(), s) == 0 && - strcmp(state_[trans_[i].target_].string(), t) == 0) { - return i; - } - } - return -1; -} - int KSChan::trans_index(int s, int t) { int i; for (i = 0; i < ntrans_; ++i) { @@ -1089,8 +1041,36 @@ int KSChan::gate_index(int is) { void KSChan::update_prop() { // prop.param is [Nsingle], gmax, [e], g, i, states - // prop.dparam for density is [4ion], [4ligands] - // prop.dparam for point is area, pnt, [singledata], [4ion], [4ligands] + // prop.dparam for density is [5ion], [2ligands] + // prop.dparam for point is area, pnt, [singledata], [5ion], [2ligands] + + // before doing anything destructive, see if update_size will succeed. + auto new_psize = 3; // prop->param: gmax, g, i + auto new_dsize = 0; // prop->dparam: empty + auto new_ppoff = 0; + auto new_soffset = 3; + auto new_gmaxoffset = 0; + if (is_single()) { + new_psize += 1; // Nsingle exists + new_dsize += 1; // KSSingleNodeData* exists + new_gmaxoffset = 1; + new_ppoff += 1; + new_soffset += 1; + } + if (is_point()) { + new_dsize += 2; // area, Point_process* exists + new_ppoff += 2; + } + if (ion_sym_ == NULL) { + new_psize += 1; // e exists + new_soffset += 1; + } else { + new_dsize += 5; // ion current + } + new_dsize += 2 * nligand_; + new_psize += nstate_; + must_allow_size_update(is_single_, ion_sym_ != nullptr, nligand_, nstate_); + int i; Symbol* searchsym = (is_point() ? mechsym_ : NULL); @@ -1103,31 +1083,14 @@ void KSChan::update_prop() { int old_soffset = soffset_; int old_svarn = rlsym_->s_varn; - // sizes and offsets - psize_ = 3; // prop->param: gmax, g, i - dsize_ = 0; // prop->dparam: empty - ppoff_ = 0; - soffset_ = 3; - gmaxoffset_ = 0; - if (is_single()) { - psize_ += 1; // Nsingle exists - dsize_ += 1; // KSSingleNodeData* exists - gmaxoffset_ = 1; - ppoff_ += 1; - soffset_ += 1; - } - if (is_point()) { - dsize_ += 2; // area, Point_process* exists - ppoff_ += 2; - } - if (ion_sym_ == NULL) { - psize_ += 1; // e exists - soffset_ += 1; - } else { - dsize_ += 4; // ion current - } - dsize_ += 4 * nligand_; - psize_ += nstate_; + // update sizes and offsets + psize_ = new_psize; + dsize_ = new_dsize; + ppoff_ = new_ppoff; + soffset_ = new_soffset; + gmaxoffset_ = new_gmaxoffset; + + update_size(); // range variable names associated with prop->param rlsym_->s_varn = psize_; // problem here @@ -1156,35 +1119,49 @@ void KSChan::update_prop() { ppsym[gmaxoffset_ + 1] = esym; esym->u.rng.index = gmaxoffset_ + 1; } - int j; - for (j = soffset_, i = old_soffset; i < old_svarn; ++i, ++j) { - ppsym[j] = rlsym_->u.ppsym[i]; + // The state list may have changed (e.g. removal), so remove entirely + // and reconstruct from kschan information. 
+ for (int i = old_soffset; i < old_svarn; ++i) { + freesym(rlsym_->u.ppsym[i], searchsym); + } + for (int i = 0; i < nstate_; ++i) { + std::string name{state(i)}; + if (!is_point()) { // only called from set_single so never reached. + name += "_"; + name += name_.c_str(); + } + int j = i + soffset_; + + ppsym[j] = installsym(name.c_str(), RANGEVAR, searchsym); + ppsym[j]->subtype = STATE; + ppsym[j]->u.rng.type = rlsym_->subtype; ppsym[j]->u.rng.index = j; } free(rlsym_->u.ppsym); rlsym_->u.ppsym = ppsym; setcond(); - state_consist(gmaxoffset_ - old_gmaxoffset); ion_consist(); } void KSChan::setion(const char* s) { // printf("KSChan::setion\n"); int i; - if (strcmp(ion_.string(), s) == 0) { + if (strcmp(ion_.c_str(), s) == 0) { return; } + // before doing anything destructive, check if update_size is going to succeed below + std::string new_ion{strlen(s) == 0 ? "NonSpecific" : s}; + must_allow_size_update(is_single_, new_ion != "NonSpecific", nligand_, nstate_); + + // now we know update_size will succeed, we can start modifying member data Symbol* searchsym = (is_point() ? mechsym_ : NULL); - if (strlen(s) == 0) { - ion_ = "NonSpecific"; - } else { - ion_ = s; - } + ion_ = new_ion; char buf[100]; int pdoff = ppoff_; int io = gmaxoffset_; - if (strcmp(ion_.string(), "NonSpecific") == 0) { // non-specific - if (ion_sym_) { // switch from useion to non-specific + if (new_ion == "NonSpecific") { + if (ion_sym_) { + // switch from useion to non-specific printf("switch from useion to non-specific\n"); rlsym_->s_varn += 1; Symbol** ppsym = newppsym(rlsym_->s_varn); @@ -1211,9 +1188,8 @@ void KSChan::setion(const char* s) { } free(rlsym_->u.ppsym); rlsym_->u.ppsym = ppsym; - soffset_ += 1; + ++soffset_; setcond(); - state_consist(); ion_consist(); } } else { // want useion @@ -1228,9 +1204,8 @@ void KSChan::setion(const char* s) { if (ion_sym_) { // there already is an ion if (strcmp(ion_sym_->name, buf) != 0) { // is it different // printf(" mechanism %s now uses %s instead of %s\n", - // name_.string(), sym->name, ion_sym_->name); + // name_.c_str(), sym->name, ion_sym_->name); ion_sym_ = sym; - state_consist(); ion_consist(); } // if same do nothing @@ -1251,10 +1226,10 @@ void KSChan::setion(const char* s) { rlsym_->u.ppsym = ppsym; --soffset_; setcond(); - state_consist(); ion_consist(); } } + update_size(); for (i = iligtrans_; i < ntrans_; ++i) { trans_[i].lig2pd(pdoff); } @@ -1359,26 +1334,6 @@ void KSChan::setcond() { } } -void KSChan::setligand(int i, const char* lig) { - char buf[100]; - // printf("KSChan::setligand %d %s\n", i, lig); - Sprintf(buf, "%s_ion", lig); - Symbol* s = looksym(buf); - if (!s) { - ion_reg(lig, 0); - s = looksym(buf); - } - if (s->type != MECHANISM || - memb_func[s->subtype].alloc != memb_func[looksym("na_ion")->subtype].alloc) { - hoc_execerror(buf, "is already in use and is not an ion."); - } - ligands_[i] = s; - if (mechsym_) { - state_consist(); - ion_consist(); - } -} - void KSChan::settype(KSTransition* t, int type, const char* lig) { int i, j; // if no ligands involved then it is just a type change. 
@@ -1442,7 +1397,6 @@ void KSChan::settype(KSTransition* t, int type, const char* lig) { tt.f0 = NULL; tt.f1 = NULL; check_struct(); - state_consist(); ion_consist(); setupmat(); return; @@ -1551,13 +1505,13 @@ hoc_object_name(trans_[i].obj_)); #endif } check_struct(); - state_consist(); ion_consist(); setupmat(); } KSState* KSChan::add_hhstate(const char* name) { int i; + must_allow_size_update(false, ion_sym_ != nullptr, nligand_, nstate_ + 1); usetable(false); // new state, transition, gate, and f int is = nhhstate_; @@ -1578,12 +1532,12 @@ KSState* KSChan::add_hhstate(const char* name) { set_single(false); check_struct(); sname_install(); - state_consist(); setupmat(); return state_ + is; } KSState* KSChan::add_ksstate(int ig, const char* name) { + must_allow_size_update(false, ion_sym_ != nullptr, nligand_, nstate_ + 1); // states must be added so that the gate states are in sequence int i, is; usetable(false); @@ -1615,13 +1569,13 @@ KSState* KSChan::add_ksstate(int ig, const char* name) { check_struct(); sname_install(); set_single(false); - state_consist(); setupmat(); return state_ + is; } void KSChan::remove_state(int is) { int i; + must_allow_size_update(false, ion_sym_ != nullptr, nligand_, nstate_ - 1); usetable(false); if (is < nhhstate_) { state_remove(is); @@ -1679,14 +1633,14 @@ void KSChan::remove_state(int is) { set_single(false); check_struct(); sname_install(); - state_consist(); setupmat(); } -KSTransition* KSChan::add_transition(int src, int target, const char* ligand) { +KSTransition* KSChan::add_transition(int src, int target) { + // does not deal here with ligands + must_allow_size_update(false, ion_sym_ != nullptr, nligand_, nstate_); usetable(false); - assert(ligand == NULL); - int it = (ligand ? ntrans_ : iligtrans_); + int it = iligtrans_; trans_insert(it, src, target); trans_[it].ligand_index_ = -1; trans_[it].type_ = 0; @@ -1697,6 +1651,8 @@ KSTransition* KSChan::add_transition(int src, int target, const char* ligand) { } void KSChan::remove_transition(int it) { + // might reduce nstate, might reduce nligand. 
+ must_allow_size_update(false, ion_sym_ != nullptr, nligand_, nstate_ - 1); usetable(false); assert(it >= ivkstrans_); set_single(false); @@ -1758,6 +1714,9 @@ void KSChan::check_struct() { } KSState* KSChan::state_insert(int i, const char* n, double d) { + // before mutating any state, check if the call to update_size below will succeed + auto const new_nstate = nstate_ + 1; + must_allow_size_update(is_single_, ion_sym_ != nullptr, nligand_, new_nstate); int j; usetable(false); if (nstate_ >= state_size_) { @@ -1783,6 +1742,8 @@ KSState* KSChan::state_insert(int i, const char* n, double d) { ++nksstate_; } ++nstate_; + assert(new_nstate == nstate_); + update_size(); for (j = 0; j < nstate_; ++j) { state_[j].index_ = j; if (state_[j].obj_) { @@ -1793,6 +1754,9 @@ KSState* KSChan::state_insert(int i, const char* n, double d) { } void KSChan::state_remove(int i) { + // before mutating any state, check if the call to update_size below will succeed + auto const new_nstate = nstate_ - 1; + must_allow_size_update(is_single_, ion_sym_ != nullptr, nligand_, new_nstate); int j; usetable(false); unref(state_[i].obj_); @@ -1808,6 +1772,8 @@ void KSChan::state_remove(int i) { --nksstate_; } --nstate_; + assert(new_nstate == nstate_); + update_size(); state_[nstate_].obj_ = NULL; for (j = 0; j < nstate_; ++j) { state_[j].index_ = j; @@ -1931,13 +1897,11 @@ void KSChan::trans_remove(int i) { } void KSChan::setstructure(Vect* vec) { + // before mutating any state, check if the call to update_size below will succeed + int const new_nstate = vec->elem(2); + int const new_nligand = vec->elem(5); + must_allow_size_update(is_single_, ion_sym_ != nullptr, new_nligand, new_nstate); int i, j, ii, idx, ns; -// printf("setstructure called for KSChan %p %s\n", this, name_.string()); -#if 0 -for (i=0; i < vec->size(); ++i) { - printf("%d %g\n", i, vec->elem(i)); -} -#endif usetable(false); int nstate_old = nstate_; KSState* state_old = state_; @@ -1949,6 +1913,8 @@ for (i=0; i < vec->size(); ++i) { setcond(); ngate_ = (int) vec->elem(j++); nstate_ = (int) vec->elem(j++); + assert(new_nstate == nstate_); + update_size(); nhhstate_ = (int) vec->elem(j++); nksstate_ = nstate_ - nhhstate_; ivkstrans_ = nhhstate_; @@ -2021,7 +1987,6 @@ for (i=0; i < vec->size(); ++i) { if (mechsym_) { set_single(false, false); sname_install(); - state_consist(); setupmat(); } } @@ -2197,20 +2162,26 @@ void KSChan::fillmat(double v, Datum* pd) { // spPrint(mat_, 0, 1, 0); } -void KSChan::mat_dt(double dt, double* p) { +void KSChan::mat_dt(double dt, Memb_list* ml, std::size_t instance, std::size_t offset) { // y' = m*y this part add the dt for the form ynew/dt - yold/dt =m*ynew // the matrix ends up as (m-1/dt)ynew = -1/dt*yold int i; double dt1 = -1. 
/ dt; - for (i = 0; i < nksstate_; ++i) { + for (int i = 0; i < nksstate_; ++i) { *(diag_[i]) += dt1; - p[i] *= dt1; + ml->data(instance, offset + i) *= dt1; } } -void KSChan::solvemat(double* s) { - int e; - e = spFactor(mat_); +void KSChan::solvemat(Memb_list* ml, std::size_t instance, std::size_t offset) { + // spSolve seems to require that the parameters are contiguous, which + // they're not anymore in the real NEURON data structure + std::vector s(nksstate_ + 1); // +1 so the pointer arithmetic to account for 1-based + // indexing is valid + for (auto j = 0; j < nksstate_; ++j) { + s[j + 1] = ml->data(instance, offset + j); + } + auto const e = spFactor(mat_); if (e != spOKAY) { switch (e) { case spZERO_DIAG: @@ -2221,30 +2192,118 @@ hoc_execerror("spFactor error:", "Singular"); } } - spSolve(mat_, s - 1, s - 1); -} - -void KSChan::mulmat(double* s, double* ds) { - spMultiply(mat_, ds - 1, s - 1); + spSolve(mat_, s.data(), s.data()); + // Propagate the solution back to the mechanism data + for (auto j = 0; j < nksstate_; ++j) { + ml->data(instance, offset + j) = s[j + 1]; + } +} + +void KSChan::mulmat(Memb_list* ml, + std::size_t instance, + std::size_t offset_s, + std::size_t offset_ds) { + std::vector s, ds; + s.resize(nksstate_ + 1); // +1 so the pointer arithmetic to account for 1-based indexing is + // valid + ds.resize(nksstate_ + 1); + for (auto j = 0; j < nksstate_; ++j) { + s[j + 1] = ml->data(instance, offset_s + j); + ds[j + 1] = ml->data(instance, offset_ds + j); + } + spMultiply(mat_, ds.data(), s.data()); + // Propagate the results + for (auto j = 0; j < nksstate_; ++j) { + ml->data(instance, offset_s + j) = s[j + 1]; + ml->data(instance, offset_ds + j) = ds[j + 1]; + } +} + +/** + * @brief Error if instances exist and number of variables will change. + * + * This is intended to allow methods that change the number of variables + * to check if success is possible before they make structure changes prior + * to calling update_size. + * Size changes are disallowed if any of the following changes while + * instances exist: is_single, ion_sym_ NULL or not, nligand_, nstate_. + */ +void KSChan::must_allow_size_update(bool single, bool ion, int nligand, int nstate) const { + auto& mech_data = neuron::model().mechanism_data(mechtype_); + if (mech_data.empty()) { // size changes allowed since no existing instances + return; + } + std::string s{""}; + if (single != is_single_) { + s = s + "single channel will change: "; + } + if (ion != (ion_sym_ != nullptr)) { + s = s + "switched between ion and nonspecific: "; + } + if (nligand != nligand_) { + s = s + "number of ligands will change: "; + } + if (nstate != nstate_) { + s = s + "number of states will change: "; + } + if (s == "") { + return; + } + throw std::runtime_error( + "KSChan:: " + s + "Cannot change the number of mechanism variables while " + + std::to_string(neuron::model().mechanism_data(mechtype_).size()) + " instances are active"); +} + +/** @brief Propagate changes to the number of param and dparam. + */ +void KSChan::update_size() { + auto& mech_data = neuron::model().mechanism_data(mechtype_); + std::size_t const new_param_size = soffset_ + 2 * nstate_; + std::size_t const new_dparam_size = (is_point_ ? 2 : 0) + (is_single_ ? 1 : 0) + + (ion_sym_ != nullptr ?
5 : 0) + 2 * nligand_; + auto const old_param_size = + mech_data.get_tag().num_variables(); + auto const old_dparam_size = nrn_mechanism_prop_datum_count(mechtype_); + if (new_param_size == old_param_size && new_dparam_size == old_dparam_size) { + // no change + return; + } + if (mech_data.size() > 0) { + // Should not happen since must_allow_size_update should have been + // called earlier. + throw std::runtime_error( + "KSChan::update_size() internal error: cannot change the number " + "of floating point fields from " + + std::to_string(old_param_size) + " to " + std::to_string(new_param_size) + + " or the number of Datum items from " + std::to_string(old_dparam_size) + " to " + + std::to_string(new_dparam_size) + " while " + std::to_string(mech_data.size()) + + " instances are active"); + } + std::vector new_param_info; + new_param_info.resize(new_param_size, {"kschan_field", 1}); + std::string mech_name{mech_data.name()}; + neuron::model().delete_mechanism(mechtype_); + nrn_delete_mechanism_prop_datum(mechtype_); + neuron::model().add_mechanism(mechtype_, std::move(mech_name), std::move(new_param_info)); } void KSChan::alloc(Prop* prop) { // printf("KSChan::alloc nstate_=%d nligand_=%d\n", nstate_, nligand_); - // printf("KSChan::alloc %s param=%p\n", name_.string(), prop->param); + // printf("KSChan::alloc %s param=%p\n", name_.c_str(), prop->param); int j; - prop->param_size = soffset_ + 2 * nstate_; + assert(prop->param_size() == prop->param_num_vars()); // no array vars + assert(prop->param_num_vars() == soffset_ + 2 * nstate_); if (is_point() && nrn_point_prop_) { - assert(nrn_point_prop_->param_size == prop->param_size); - prop->param = nrn_point_prop_->param; + assert(nrn_point_prop_->param_size() == prop->param_size()); + // prop->param = nrn_point_prop_->param; prop->dparam = nrn_point_prop_->dparam; } else { - prop->param = nrn_prop_data_alloc(prop->_type, prop->param_size, prop); - prop->param[gmaxoffset_] = gmax_deflt_; + prop->param(gmaxoffset_) = gmax_deflt_; if (is_point()) { - prop->param[NSingleIndex] = 1.; + prop->param(NSingleIndex) = 1.; } if (!ion_sym_) { - prop->param[1 + gmaxoffset_] = erev_deflt_; + prop->param(1 + gmaxoffset_) = erev_deflt_; } } int ppsize = ppoff_; @@ -2274,18 +2333,18 @@ void KSChan::alloc(Prop* prop) { } else { // ghk nrn_promote(prop_ion, 1, 0); } - pp[ppoff_ + 0] = prop_ion->param + 0; // ena - pp[ppoff_ + 1] = prop_ion->param + 3; // ina - pp[ppoff_ + 2] = prop_ion->param + 4; // dinadv - pp[ppoff_ + 3] = prop_ion->param + 1; // nai - pp[ppoff_ + 4] = prop_ion->param + 2; // nao + pp[ppoff_ + 0] = prop_ion->param_handle(0); // ena + pp[ppoff_ + 1] = prop_ion->param_handle(3); // ina + pp[ppoff_ + 2] = prop_ion->param_handle(4); // dinadv + pp[ppoff_ + 3] = prop_ion->param_handle(1); // nai + pp[ppoff_ + 4] = prop_ion->param_handle(2); // nao poff += 5; } for (j = 0; j < nligand_; ++j) { Prop* pion = need_memb(ligands_[j]); nrn_promote(pion, 1, 0); - pp[poff + 2 * j] = pion->param + 2; // nao - pp[poff + 2 * j + 1] = pion->param + 1; // nai + pp[poff + 2 * j] = pion->param_handle(2); // nao + pp[poff + 2 * j + 1] = pion->param_handle(1); // nai } if (single_ && !prop->dparam[2].get()) { single_->alloc(prop, soffset_); @@ -2321,6 +2380,18 @@ Prop* KSChan::needion(Symbol* s, Node* nd, Prop* pm) { return pion; } +/** Almost obsolete: No longer allow KSChan structure changes when instances + * exist. 
However only a change to is_single_, ion_sym != NULL, or + * nligand affects the dparam size and that circumstance raises an error if + * instances exist prior to a call to ion_consist. So if ion_consist is + * called, either there are no instances, or there were no changes + * to the above three indicators and dparam size has not changed. However, + * even though ion_sym != NULL is the same, that does not mean that the + * specific ion used has not changed. And similarly for nligand. + * Thus, unless the must_allow_size_update is exended to include a change + * in ions used, ion_consist must continue to update the ion usage. But it no + * longer needs to realloc p->dparam. + */ void KSChan::ion_consist() { // printf("KSChan::ion_consist\n"); int i, j; @@ -2349,7 +2420,7 @@ void KSChan::ion_consist() { if (!p) { continue; } - p->dparam = (Datum*) erealloc(p->dparam, ppsize * sizeof(Datum)); + if (is_point() && is_single() && !single_) { // Leave nullptr in KSSingleNodeData slot. p->dparam[2] = nullptr; @@ -2364,11 +2435,11 @@ void KSChan::ion_consist() { nrn_promote(pion, 1, 0); } Datum* pp = p->dparam; - pp[ppoff_ + 0] = pion->param + 0; // ena - pp[ppoff_ + 1] = pion->param + 3; // ina - pp[ppoff_ + 2] = pion->param + 4; // dinadv - pp[ppoff_ + 3] = pion->param + 1; // nai - pp[ppoff_ + 4] = pion->param + 2; // nao + pp[ppoff_ + 0] = pion->param_handle(0); // ena + pp[ppoff_ + 1] = pion->param_handle(3); // ina + pp[ppoff_ + 2] = pion->param_handle(4); // dinadv + pp[ppoff_ + 3] = pion->param_handle(1); // nai + pp[ppoff_ + 4] = pion->param_handle(2); // nao } for (j = 0; j < nligand_; ++j) { ligand_consist(j, poff, p, nd); @@ -2381,50 +2452,8 @@ void KSChan::ligand_consist(int j, int poff, Prop* p, Node* nd) { Prop* pion; pion = needion(ligands_[j], nd, p); nrn_promote(pion, 1, 0); - p->dparam[poff + 2 * j] = pion->param + 2; // nao - p->dparam[poff + 2 * j + 1] = pion->param + 1; // nai -} - -void KSChan::state_consist(int shift) { // shift when Nsingle winks in and out of existence - // printf("KSChan::state_consist\n"); - int i, j, ns; - Node* nd; - hoc_Item* qsec; - int mtype = rlsym_->subtype; - ns = soffset_ + 2 * nstate_; - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); - for (i = 0; i < sec->nnode; ++i) { - nd = sec->pnode[i]; - Prop* p; - for (p = nd->prop; p; p = p->next) { - if (p->_type == mtype) { - if (p->param_size != ns) { - v_structure_change = 1; - double* oldp = p->param; - p->param = (double*) erealloc(oldp, ns * sizeof(double)); - if (oldp != p->param || shift != 0) { - // printf("KSChan::state_consist realloc changed location\n"); - notify_freed_val_array(oldp, p->param_size); - } - p->param_size = ns; - if (shift == 1) { - for (j = ns - 1; j > 0; --j) { - p->param[j] = p->param[j - 1]; - } - p->param[0] = 1; - } else if (shift == -1) { - for (j = 1; j < ns; ++j) { - p->param[j - 1] = p->param[j]; - } - } - } - break; - } - } - } - } + p->dparam[poff + 2 * j] = pion->param_handle(2); // nao + p->dparam[poff + 2 * j + 1] = pion->param_handle(1); // nai } void KSChan::delete_schan_node_data() { @@ -2452,301 +2481,277 @@ void KSChan::alloc_schan_node_data() { } } -void KSChan::init(int n, Node** nd, double** pp, Datum** ppd, NrnThread* nt) { - int i, j; - if (nstate_) - for (i = 0; i < n; ++i) { +void KSChan::init(NrnThread* nt, Memb_list* ml) { + int n = ml->nodecount; + Node** nd = ml->nodelist; + Datum** ppd = ml->pdata; + if (nstate_) { + for (int i = 0; i < n; ++i) { double v = NODEV(nd[i]); - double* s = pp[i] + soffset_; - 
for (j = 0; j < nstate_; ++j) { - s[j] = 0; + auto offset = soffset_; + for (int j = 0; j < nstate_; ++j) { + ml->data(i, offset + j) = 0; } - for (j = 0; j < ngate_; ++j) { - s[gc_[j].sindex_] = 1; + for (int j = 0; j < ngate_; ++j) { + ml->data(i, offset + gc_[j].sindex_) = 1; } - for (j = 0; j < nhhstate_; ++j) { - s[j] = trans_[j].inf(v); + for (int j = 0; j < nhhstate_; ++j) { + ml->data(i, offset + j) = trans_[j].inf(v); } if (nksstate_) { - s += nhhstate_; + offset += nhhstate_; fillmat(v, ppd[i]); - mat_dt(1e9, s); - solvemat(s); + mat_dt(1e9, ml, i, offset); + solvemat(ml, i, offset); } if (is_single()) { auto* snd = ppd[i][2].get(); - snd->nsingle_ = int(pp[i][NSingleIndex] + .5); - pp[i][NSingleIndex] = double(snd->nsingle_); + snd->nsingle_ = int(ml->data(i, NSingleIndex) + .5); + ml->data(i, NSingleIndex) = double(snd->nsingle_); if (snd->nsingle_ > 0) { // replace population fraction with integers. - single_->init(v, s, snd, nt); - } - } - // printf("KSChan::init\n"); - // s = pp[i] + soffset_; - // for (j=0; j < nstate_; ++j) { - // printf("%d %g\n", j, s[j]); - //} - } -} - -void KSChan::state(int n, Node** nd, double** pp, Datum** ppd, NrnThread* nt) { - int i, j; - double* s; - if (nstate_) { - for (i = 0; i < n; ++i) { - if (is_single() && pp[i][NSingleIndex] > .999) { - single_->state(nd[i], pp[i], ppd[i], nt); - continue; - } - double v = NODEV(nd[i]); - s = pp[i] + soffset_; - if (usetable_) { - double inf, tau; - int k; - double x, y; - x = (v - vmin_) * dvinv_; - y = floor(x); - k = int(y); - x -= y; - if (k < 0) { - for (j = 0; j < nhhstate_; ++j) { - trans_[j].inftau_hh_table(0, inf, tau); - s[j] += (inf - s[j]) * tau; - } - } else if (k >= hh_tab_size_) { - for (j = 0; j < nhhstate_; ++j) { - trans_[j].inftau_hh_table(hh_tab_size_ - 1, inf, tau); - s[j] += (inf - s[j]) * tau; - } - } else { - for (j = 0; j < nhhstate_; ++j) { - trans_[j].inftau_hh_table(k, x, inf, tau); - s[j] += (inf - s[j]) * tau; - } + single_->init(v, snd, nt, ml, i, offset); } - } else { - for (j = 0; j < nhhstate_; ++j) { - double inf, tau; - trans_[j].inftau(v, inf, tau); - tau = 1. 
- KSChanFunction::Exp(-nt->_dt / tau); - s[j] += (inf - s[j]) * tau; - } - } - if (nksstate_) { - s += nhhstate_; - fillmat(v, ppd[i]); - mat_dt(nt->_dt, s); - solvemat(s); } } } } -#if CACHEVEC -void KSChan::state(int n, int* ni, Node** nd, double** pp, Datum** ppd, NrnThread* _nt) { - int i, j; - double* s; +void KSChan::state(NrnThread* _nt, Memb_list* ml) { + int n = ml->nodecount; + int* ni = ml->nodeindices; + Node** nd = ml->nodelist; + Datum** ppd = ml->pdata; + auto* const vec_v = _nt->node_voltage_storage(); if (nstate_) { - for (i = 0; i < n; ++i) { - if (is_single() && pp[i][NSingleIndex] > .999) { - single_->state(nd[i], pp[i], ppd[i], _nt); + for (int i = 0; i < n; ++i) { + if (is_single() && ml->data(i, NSingleIndex) > .999) { + single_->state(nd[i], ppd[i], _nt); continue; } - double v = VEC_V(ni[i]); - s = pp[i] + soffset_; + double v = vec_v[ni[i]]; + auto offset = soffset_; if (usetable_) { double inf, tau; - int k; - double x, y; - x = (v - vmin_) * dvinv_; - y = floor(x); - k = int(y); + auto x = (v - vmin_) * dvinv_; + auto const y = floor(x); + auto const k = static_cast(y); x -= y; if (k < 0) { - for (j = 0; j < nhhstate_; ++j) { + for (int j = 0; j < nhhstate_; ++j) { trans_[j].inftau_hh_table(0, inf, tau); - s[j] += (inf - s[j]) * tau; + ml->data(i, offset + j) += (inf - ml->data(i, offset + j)) * tau; } } else if (k >= hh_tab_size_) { - for (j = 0; j < nhhstate_; ++j) { + for (int j = 0; j < nhhstate_; ++j) { trans_[j].inftau_hh_table(hh_tab_size_ - 1, inf, tau); - s[j] += (inf - s[j]) * tau; + ml->data(i, offset + j) += (inf - ml->data(i, offset + j)) * tau; } } else { - for (j = 0; j < nhhstate_; ++j) { + for (int j = 0; j < nhhstate_; ++j) { trans_[j].inftau_hh_table(k, x, inf, tau); - s[j] += (inf - s[j]) * tau; + ml->data(i, offset + j) += (inf - ml->data(i, offset + j)) * tau; } } } else { - for (j = 0; j < nhhstate_; ++j) { + for (int j = 0; j < nhhstate_; ++j) { double inf, tau; trans_[j].inftau(v, inf, tau); tau = 1. 
- KSChanFunction::Exp(-_nt->_dt / tau); - s[j] += (inf - s[j]) * tau; + ml->data(i, offset + j) += (inf - ml->data(i, offset + j)) * tau; } } if (nksstate_) { - s += nhhstate_; + offset += nhhstate_; fillmat(v, ppd[i]); - mat_dt(_nt->_dt, s); - solvemat(s); + mat_dt(_nt->_dt, ml, i, offset); + solvemat(ml, i, offset); } } } } -#endif /* CACHEVEC */ - -void KSChan::cur(int n, Node** nd, double** pp, Datum** ppd) { - int i; - for (i = 0; i < n; ++i) { - double g, ic; - g = conductance(pp[i][gmaxoffset_], pp[i] + soffset_); - ic = iv_relation_->cur(g, pp[i] + gmaxoffset_, ppd[i], NODEV(nd[i])); - NODERHS(nd[i]) -= ic; - } -} - -#if CACHEVEC -void KSChan::cur(int n, int* nodeindices, double** pp, Datum** ppd, NrnThread* _nt) { - int i; - for (i = 0; i < n; ++i) { - double g, ic; - int ni = nodeindices[i]; - g = conductance(pp[i][gmaxoffset_], pp[i] + soffset_); - ic = iv_relation_->cur(g, pp[i] + gmaxoffset_, ppd[i], VEC_V(ni)); - VEC_RHS(ni) -= ic; - } -} -#endif /* CACHEVEC */ -void KSChan::jacob(int n, Node** nd, double** pp, Datum** ppd) { - int i; - for (i = 0; i < n; ++i) { - NODED(nd[i]) += iv_relation_->jacob(pp[i] + gmaxoffset_, ppd[i], NODEV(nd[i])); +void KSChan::cur(NrnThread* _nt, Memb_list* ml) { + auto* const vec_rhs = _nt->node_rhs_storage(); + auto* const vec_v = _nt->node_voltage_storage(); + int n = ml->nodecount; + int* nodeindices = ml->nodeindices; + Datum** ppd = ml->pdata; + for (int i = 0; i < n; ++i) { + auto const ni = nodeindices[i]; + auto const g = conductance(ml->data(i, gmaxoffset_), ml, i, soffset_); + auto const ic = iv_relation_->cur(g, ppd[i], vec_v[ni], ml, i, gmaxoffset_); + vec_rhs[ni] -= ic; } } -#if CACHEVEC -void KSChan::jacob(int n, int* nodeindices, double** pp, Datum** ppd, NrnThread* _nt) { - int i; - for (i = 0; i < n; ++i) { - int ni = nodeindices[i]; - VEC_D(ni) += iv_relation_->jacob(pp[i] + gmaxoffset_, ppd[i], VEC_V(ni)); +void KSChan::jacob(NrnThread* _nt, Memb_list* ml) { + int n = ml->nodecount; + Datum** ppd = ml->pdata; + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_v = _nt->node_voltage_storage(); + for (int i = 0; i < n; ++i) { + int ni = ml->nodeindices[i]; + vec_d[ni] += iv_relation_->jacob(ppd[i], vec_v[ni], ml, i, gmaxoffset_); } } -#endif /* CACHEVEC */ -double KSIv::cur(double g, double* p, Datum* pd, double v) { +double KSIv::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { auto ena = *pd[0].get(); - p[1] = g; + ml->data(instance, offset + 1) = g; double i = g * (v - ena); - p[2] = i; + ml->data(instance, offset + 2) = i; *pd[1].get() += i; // iion return i; } -double KSIv::jacob(double* p, Datum* pd, double) { - *pd[2].get() += p[1]; // diion/dv - return p[1]; +double KSIv::jacob(Datum* pd, double, Memb_list* ml, std::size_t instance, std::size_t offset) { + auto v = ml->data(instance, offset + 1); // diion/dv + *pd[2].get() += v; + return v; } -double KSIvghk::cur(double g, double* p, Datum* pd, double v) { +double KSIvghk::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double ci = *pd[3].get(); double co = *pd[4].get(); - p[1] = g; + ml->data(instance, offset + 1) = g; double i = g * nrn_ghk(v, ci, co, z); - p[2] = i; + ml->data(instance, offset + 2) = i; *pd[1].get() += i; return i; } -double KSIvghk::jacob(double* p, Datum* pd, double v) { +double KSIvghk::jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { auto ci = *pd[3].get(); auto co = *pd[4].get(); - 
double i1 = p[1] * nrn_ghk(v + .001, ci, co, z); // g is p[1] - double didv = (i1 - p[2]) * 1000.; + double i1 = ml->data(instance, offset + 1) * nrn_ghk(v + .001, ci, co, z); // g is p[1] + double didv = (i1 - ml->data(instance, offset + 2)) * 1000.; *pd[2].get() += didv; return didv; } -double KSIvNonSpec::cur(double g, double* p, Datum* pd, double v) { +double KSIvNonSpec::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double i; - p[2] = g; // gmax, e, g - i = g * (v - p[1]); - p[3] = i; + ml->data(instance, offset + 2) = g; // gmax, e, g + i = g * (v - ml->data(instance, offset + 1)); + ml->data(instance, offset + 3) = i; return i; } -double KSIvNonSpec::jacob(double* p, Datum* pd, double) { - return p[2]; +double KSIvNonSpec::jacob(Datum* pd, + double, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { + return ml->data(instance, offset + 2); } -double KSPPIv::cur(double g, double* p, Datum* pd, double v) { +double KSPPIv::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); pd += ppoff_; double ena = *pd[0].get(); - p[1] = g; + ml->data(instance, offset + 1) = g; double i = g * (v - ena); - p[2] = i; + ml->data(instance, offset + 2) = i; i *= afac; *pd[1].get() += i; // iion return i; } -double KSPPIv::jacob(double* p, Datum* pd, double) { +double KSPPIv::jacob(Datum* pd, double, Memb_list* ml, std::size_t instance, std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); pd += ppoff_; - double g = p[1] * afac; + double g = ml->data(instance, offset + 1) * afac; *pd[2].get() += g; // diion/dv return g; } -double KSPPIvghk::cur(double g, double* p, Datum* pd, double v) { +double KSPPIvghk::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); pd += ppoff_; auto ci = *pd[3].get(); auto co = *pd[4].get(); - p[1] = g; + ml->data(instance, offset + 1) = g; double i = g * nrn_ghk(v, ci, co, z) * 1e6; - p[2] = i; + ml->data(instance, offset + 2) = i; i *= afac; *pd[1].get() += i; return i; } -double KSPPIvghk::jacob(double* p, Datum* pd, double v) { +double KSPPIvghk::jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); pd += ppoff_; - auto ci = pd[3].get(); - auto co = pd[4].get(); - double i1 = p[1] * nrn_ghk(v + .001, ci, co, z) * 1e6; // g is p[1] - double didv = (i1 - p[2]) * 1000.; + auto ci = *pd[3].get(); + auto co = *pd[4].get(); + double i1 = ml->data(instance, offset + 1) * nrn_ghk(v + .001, ci, co, z) * 1e6; // g is p[1] + double didv = (i1 - ml->data(instance, offset + 2)) * 1000.; didv *= afac; *pd[2].get() += didv; return didv; } -double KSPPIvNonSpec::cur(double g, double* p, Datum* pd, double v) { +double KSPPIvNonSpec::cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); double i; - p[2] = g; // gmax, e, g - i = g * (v - p[1]); - p[3] = i; + ml->data(instance, offset + 2) = g; // gmax, e, g + i = g * (v - ml->data(instance, offset + 1)); + ml->data(instance, offset + 3) = i; return i * afac; } -double KSPPIvNonSpec::jacob(double* p, Datum* pd, double) { +double KSPPIvNonSpec::jacob(Datum* pd, + double, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { double afac = 1.e2 / (*pd[0].get()); - return p[2] * afac; + return ml->data(instance, offset + 2) * 
afac; } -double KSChan::conductance(double gmax, double* s) { +double KSChan::conductance(double gmax, Memb_list* ml, std::size_t instance, std::size_t offset) { double g = 1.; int i; for (i = 0; i < ngate_; ++i) { - g *= gc_[i].conductance(s, state_); + g *= gc_[i].conductance(ml, instance, offset, state_); } return gmax * g; } @@ -2903,13 +2908,16 @@ KSGateComplex::KSGateComplex() { } KSGateComplex::~KSGateComplex() {} -double KSGateComplex::conductance(double* s, KSState* st) { +double KSGateComplex::conductance(Memb_list* ml, + std::size_t instance, + std::size_t offset, + KSState* st) { double g = 0.; int i; - s += sindex_; + offset += sindex_; st += sindex_; for (i = 0; i < nstate_; ++i) { - g += s[i] * st[i].f_; + g += ml->data(instance, offset + i) * st[i].f_; } #if 1 switch (power_) { // 14.42 @@ -2938,80 +2946,80 @@ int KSChan::count() { return nstate_; } -void KSChan::map(int ieq, double** pv, double** pvdot, double* p, Datum* pd, double* atol) { - int i; - double* p1 = p + soffset_; - double* p2 = p1 + nstate_; - for (i = 0; i < nstate_; ++i) { - pv[i] = p1 + i; - pvdot[i] = p2 + i; +void KSChan::map(Prop* prop, + int ieq, + neuron::container::data_handle* pv, + neuron::container::data_handle* pvdot, + double* atol) { + for (int i = 0; i < nstate_; ++i) { + pv[i] = prop->param_handle(soffset_ + i); + pvdot[i] = prop->param_handle(soffset_ + nstate_ + i); } } -void KSChan::spec(int n, Node** nd, double** p, Datum** ppd) { +void KSChan::spec(Memb_list* ml) { + int n = ml->nodecount; + Node** nd = ml->nodelist; + Datum** ppd = ml->pdata; int i, j; if (nstate_) for (i = 0; i < n; ++i) { - // for (j=0; j < nstate_; ++j) { - // printf("KSChan spec before j=%d s=%g ds=%g\n", j, p[i][soffset_+j], - // p[i][soffset_+nstate_+j]); - //} double v = NODEV(nd[i]); - double* p1 = p[i] + soffset_; - double* p2 = p1 + nstate_; - if (is_single() && p[i][NSingleIndex] > .999) { + auto offset1 = soffset_; + auto offset2 = offset1 + nstate_; + if (is_single() && ml->data(i, NSingleIndex) > .999) { for (j = 0; j < nstate_; ++j) { - p2[j] = 0.; + ml->data(i, offset2 + j) = 0.; } continue; } for (j = 0; j < nhhstate_; ++j) { double inf, tau; trans_[j].inftau(v, inf, tau); - p2[j] = (inf - p1[j]) / tau; + ml->data(i, offset2 + j) = (inf - ml->data(i, offset1 + j)) / tau; } if (nksstate_) { fillmat(v, ppd[i]); - mulmat(p1 + nhhstate_, p2 + nhhstate_); + mulmat(ml, i, offset1 + nhhstate_, offset2 + nhhstate_); } - // for (j=0; j < nstate_; ++j) { - // printf("KSChan spec after j=%d s=%g ds=%g\n", j, p[i][soffset_+j], - // p[i][soffset_+nstate_+j]); - //} } } -void KSChan::matsol(int n, Node** nd, double** p, Datum** ppd, NrnThread* nt) { +void KSChan::matsol(NrnThread* nt, Memb_list* ml) { + int n = ml->nodecount; + Node** nd = ml->nodelist; + Datum** ppd = ml->pdata; int i, j; - double* p2; if (nstate_) for (i = 0; i < n; ++i) { - if (is_single() && p[i][NSingleIndex] > .999) { + if (is_single() && ml->data(i, NSingleIndex) > .999) { continue; } double v = NODEV(nd[i]); - p2 = p[i] + soffset_ + nstate_; + auto offset = soffset_ + nstate_; for (j = 0; j < nhhstate_; ++j) { double tau; tau = trans_[j].tau(v); - p2[j] /= (1 + nt->_dt / tau); + ml->data(i, offset + j) /= (1 + nt->_dt / tau); } if (nksstate_) { - p2 += nhhstate_; + offset += nhhstate_; fillmat(v, ppd[i]); - mat_dt(nt->_dt, p2); - solvemat(p2); + mat_dt(nt->_dt, ml, i, offset); + solvemat(ml, i, offset); } } } // from Cvode::do_nonode -void KSChan::cv_sc_update(int n, Node** nd, double** pp, Datum** ppd, NrnThread* nt) { - int i; +void 
KSChan::cv_sc_update(NrnThread* nt, Memb_list* ml) { + int n = ml->nodecount; + Node** nd = ml->nodelist; + Datum** ppd = ml->pdata; if (nstate_) { - for (i = 0; i < n; ++i) { - if (pp[i][NSingleIndex] > .999) { - single_->cv_update(nd[i], pp[i], ppd[i], nt); + for (int i = 0; i < n; ++i) { + if (ml->data(i, NSingleIndex) > .999) { + single_->cv_update(nd[i], ppd[i], nt); } } } diff --git a/src/nrniv/kschan.h b/src/nrniv/kschan.h index 52611d265c..de49789be5 100644 --- a/src/nrniv/kschan.h +++ b/src/nrniv/kschan.h @@ -2,10 +2,9 @@ #define kschan_h #include -#include #include "nrnoc2iv.h" #include "ivocvect.h" -#include "nrnunits_modern.h" +#include "nrnunits.h" #include "spmatrix.h" @@ -95,10 +94,7 @@ class KSChanSigmoid: public KSChanFunction { }; -// e/(kT) e/k=11.604589 from hoc's FARADAY and R values (legacy units) -#define _e_over_k _e_over_k_[_nrnunit_use_legacy_] -static double _e_over_k_[2] = {_e_over_k_codata2018, 11.604589}; /* K/mV */ -#define ebykt (_e_over_k / (273.15 + celsius)) +#define ebykt (_e_over_k_codata2018 / (273.15 + celsius)) // from MODELING NEURONAL BIOPHYSICS Lyle J Graham // A Chapter in the Handbook of Brain Theory and Neural Networks, Volume 2 @@ -223,7 +219,7 @@ class KSGateComplex { public: KSGateComplex(); virtual ~KSGateComplex(); - double conductance(double* state, KSState* st); + double conductance(Memb_list* ml, std::size_t instance, std::size_t offset, KSState* st); public: Object* obj_; @@ -234,44 +230,94 @@ class KSGateComplex { int power_; // eg. n^4, or m^3 }; -class KSIv { - public: +struct KSIv { virtual ~KSIv() = default; // this one for ionic ohmic and nernst. - virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + virtual double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset); + virtual double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset); }; -class KSIvghk: public KSIv { - public: +struct KSIvghk: KSIv { // this one for ionic Goldman-Hodgkin-Katz - virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; + double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; double z; }; -class KSIvNonSpec: public KSIv { +struct KSIvNonSpec: KSIv { // this one for non-specific ohmic. There will be a PARAMETER e_suffix at p[1] - virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; + double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; }; -class KSPPIv: public KSIv { - public: +struct KSPPIv: KSIv { // this one for POINT_PROCESS ionic ohmic and nernst. 
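Every KSIv variant here, including the point-process ones that follow, changes signature the same way: the raw double* p into the instance's parameter row is dropped, and per-instance state is read and written through ml->data(instance, offset + k). A minimal sketch of the new calling convention, using a purely hypothetical OhmicIvSketch subclass (the constant erev stands in for the ion data the real classes reach through pd):

    // Hypothetical current relation, for illustration of the new KSIv interface only.
    struct OhmicIvSketch: KSIv {
        double cur(double g, Datum* /*pd*/, double v,
                   Memb_list* ml, std::size_t instance, std::size_t offset) override {
            double erev = -80.;                      // placeholder for the value read via pd[0]
            ml->data(instance, offset + 1) = g;      // was p[1] = g
            double i = g * (v - erev);
            ml->data(instance, offset + 2) = i;      // was p[2] = i
            return i;
        }
        double jacob(Datum*, double, Memb_list* ml,
                     std::size_t instance, std::size_t offset) override {
            return ml->data(instance, offset + 1);   // di/dv is the stored conductance
        }
    };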
- virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; + double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; int ppoff_; }; -class KSPPIvghk: public KSPPIv { - public: +struct KSPPIvghk: KSPPIv { // this one for POINT_PROCESS ionic Goldman-Hodgkin-Katz - virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; + double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; double z; }; -class KSPPIvNonSpec: public KSPPIv { +struct KSPPIvNonSpec: KSPPIv { // this one for POINT_PROCESS non-specific ohmic. There will be a PARAMETER e_suffix at p[1] - virtual double cur(double g, double* p, Datum* pd, double v); - virtual double jacob(double* p, Datum* pd, double v); + double cur(double g, + Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; + double jacob(Datum* pd, + double v, + Memb_list* ml, + std::size_t instance, + std::size_t offset) override; }; class KSState { @@ -279,10 +325,10 @@ class KSState { KSState(); virtual ~KSState(); const char* string() { - return name_.string(); + return name_.c_str(); } double f_; // normalized conductance - CopyString name_; + std::string name_; int index_; // into state_ array KSChan* ks_; Object* obj_; @@ -291,33 +337,30 @@ class KSState { class KSChan { public: KSChan(Object*, bool is_point = false); - virtual ~KSChan(); + virtual ~KSChan() {} virtual void alloc(Prop*); - virtual void init(int, Node**, double**, Datum**, NrnThread*); - virtual void cur(int, Node**, double**, Datum**); - virtual void jacob(int, Node**, double**, Datum**); - virtual void state(int, Node**, double**, Datum**, NrnThread*); -#if CACHEVEC != 0 - virtual void cur(int, int*, double**, Datum**, NrnThread*); - virtual void jacob(int, int*, double**, Datum**, NrnThread*); - virtual void state(int, int*, Node**, double**, Datum**, NrnThread*); -#endif /* CACHEVEC */ + virtual void init(NrnThread*, Memb_list*); + virtual void state(NrnThread*, Memb_list*); + virtual void cur(NrnThread*, Memb_list*); + virtual void jacob(NrnThread*, Memb_list*); void add_channel(const char**); // for cvode virtual int count(); - virtual void map(int, double**, double**, double*, Datum*, double*); - virtual void spec(int, Node**, double**, Datum**); - virtual void matsol(int, Node**, double**, Datum**, NrnThread*); - virtual void cv_sc_update(int, Node**, double**, Datum**, NrnThread*); - double conductance(double gmax, double* state); + virtual void map(Prop*, + int, + neuron::container::data_handle*, + neuron::container::data_handle*, + double*); + virtual void spec(Memb_list*); + virtual void matsol(NrnThread*, Memb_list*); + virtual void cv_sc_update(NrnThread*, Memb_list*); + double conductance(double gmax, Memb_list* ml, std::size_t instance, std::size_t offset); public: // hoc accessibilty - int state(const char* name); const char* state(int index); - int trans_index(const char* src, const char* target); // index of the transition - int trans_index(int src, int target); // index of the transition - int gate_index(int state_index); // index of the gate + int trans_index(int src, int 
target); // index of the transition + int gate_index(int state_index); // index of the gate bool is_point() { return is_point_; } @@ -338,7 +381,6 @@ class KSChan { void setname(const char*); void setsname(int, const char*); void setion(const char*); - void setligand(int i, const char*); void settype(KSTransition*, int type, const char*); void setivrelation(int); // hoc incremental management @@ -347,7 +389,7 @@ class KSChan { void remove_state(int); // these are only for kinetic scheme transitions since an hh // always has one and only one transition. - KSTransition* add_transition(int src, int target, const char* ligand); + KSTransition* add_transition(int src, int target); void remove_transition(int); void setcond(); void power(KSGateComplex*, int); @@ -361,16 +403,14 @@ class KSChan { private: void free1(); - void build(); void setupmat(); void fillmat(double v, Datum* pd); - void mat_dt(double dt, double* p); - void solvemat(double*); - void mulmat(double*, double*); + void mat_dt(double dt, Memb_list* ml, std::size_t instance, std::size_t offset); + void solvemat(Memb_list*, std::size_t instance, std::size_t offset); + void mulmat(Memb_list* ml, std::size_t instance, std::size_t offset_s, std::size_t offset_ds); void ion_consist(); void ligand_consist(int, int, Prop*, Node*); Prop* needion(Symbol*, Node*, Prop*); - void state_consist(int shift = 0); void sname_install(); Symbol* looksym(const char*, Symbol* tmplt = NULL); Symbol* installsym(const char*, int, Symbol* tmplt = NULL); @@ -379,6 +419,8 @@ class KSChan { void delete_schan_node_data(); void alloc_schan_node_data(); void update_prop(); // can add and remove Nsingle and SingleNodeData + void update_size(); + void must_allow_size_update(bool single, bool ion, int ligand, int nstate) const; KSState* state_insert(int i, const char* name, double frac); void state_remove(int i); @@ -397,8 +439,8 @@ class KSChan { int mechtype_; public: - CopyString name_; // name of channel - CopyString ion_; // name of ion , "" means non-specific + std::string name_; // name of channel + std::string ion_; // name of ion , "" means non-specific double gmax_deflt_; double erev_deflt_; int cond_model_; diff --git a/src/nrniv/kssingle.cpp b/src/nrniv/kssingle.cpp index 51421a832d..f23267ee33 100644 --- a/src/nrniv/kssingle.cpp +++ b/src/nrniv/kssingle.cpp @@ -1,7 +1,6 @@ #include <../../nrnconf.h> #include #include -#include #include "nrnoc2iv.h" #include "kschan.h" #include "kssingle.h" @@ -211,7 +210,6 @@ double KSSingleTrans::rate(Point_process* pnt) { } KSSingleNodeData::KSSingleNodeData() { - statepop_ = NULL; nsingle_ = 1; } @@ -238,7 +236,7 @@ void KSSingleNodeData::pr(const char* s, double tt, NetCvode* nc) { Printf("%s %s %.15g\n", s, hoc_object_name((*ppnt_)->ob), tt); } -void KSSingle::state(Node* nd, double* p, Datum* pd, NrnThread* nt) { +void KSSingle::state(Node* nd, Datum* pd, NrnThread* nt) { // integrate from t-dt to t int i; double v = NODEV(nd); @@ -253,7 +251,7 @@ void KSSingle::state(Node* nd, double* p, Datum* pd, NrnThread* nt) { } } -void KSSingle::cv_update(Node* nd, double* p, Datum* pd, NrnThread* nt) { +void KSSingle::cv_update(Node* nd, Datum* pd, NrnThread* nt) { // if v changed then need to move the outstanding // single channel event time to a recalculated time int i; @@ -300,9 +298,9 @@ void KSSingle::one(double v, KSSingleNodeData* snd, NrnThread* nt) { void KSSingle::do1trans(KSSingleNodeData* snd) { snd->t0_ = snd->t1_; // printf("KSSingle::do1trans t1=%g old=%d ", snd->t1_, snd->filledstate_); - 
snd->statepop_[snd->filledstate_] = 0.; + snd->statepop(snd->filledstate_) = 0.; snd->filledstate_ = transitions_[snd->next_trans_].target_; - snd->statepop_[snd->filledstate_] = 1.; + snd->statepop(snd->filledstate_) = 1.; // printf("new=%d \n", snd->filledstate_); next1trans(snd); } @@ -342,11 +340,11 @@ void KSSingle::multi(double v, KSSingleNodeData* snd, NrnThread* nt) { void KSSingle::doNtrans(KSSingleNodeData* snd) { snd->t0_ = snd->t1_; KSSingleTrans* st = transitions_ + snd->next_trans_; - assert(snd->statepop_[st->src_] >= 1.); - --snd->statepop_[st->src_]; - ++snd->statepop_[st->target_]; + assert(snd->statepop(st->src_) >= 1.); + --snd->statepop(st->src_); + ++snd->statepop(st->target_); // printf("KSSingle::doNtrans t1=%g %d with %g -> %d with %g\n", snd->t1_, - // st->src_, snd->statepop_[st->src_], st->target_, snd->statepop_[st->target_]); + // st->src_, snd->statepop(st->src_), st->target_, snd->statepop(st->target_)); nextNtrans(snd); } @@ -355,7 +353,7 @@ void KSSingle::nextNtrans(KSSingleNodeData* snd) { double x = 0; for (i = 0; i < ntrans_; ++i) { KSSingleTrans* st = transitions_ + i; - x += snd->statepop_[st->src_] * st->rate(*snd->ppnt_); + x += snd->statepop(st->src_) * st->rate(*snd->ppnt_); rval_[i] = x; } if (x > 1e-9) { @@ -376,10 +374,16 @@ void KSSingle::alloc(Prop* p, int sindex) { // and discard old if not NULL snd->kss_ = this; snd->ppnt_ = &(p->dparam[1].literal_value()); p->dparam[2] = snd; - snd->statepop_ = p->param + sindex; + snd->prop_ = p; + snd->statepop_offset_ = sindex; } -void KSSingle::init(double v, double* s, KSSingleNodeData* snd, NrnThread* nt) { +void KSSingle::init(double v, + KSSingleNodeData* snd, + NrnThread* nt, + Memb_list* ml, + std::size_t instance, + std::size_t offset) { // assuming 1-1 correspondence between KSChan and KSSingle states // place steady state population intervals end to end int i; @@ -388,25 +392,25 @@ void KSSingle::init(double v, double* s, KSSingleNodeData* snd, NrnThread* nt) { snd->t0_ = nt->_t; snd->vlast_ = v; for (i = 0; i < nstate_; ++i) { - x += s[i]; + x += ml->data(instance, offset + i); rval_[i] = x; } // initialization of complex kinetic schemes often not accurate to 9 decimal places // assert(Math::equal(rval_[nstate_ - 1], 1., 1e-9)); for (i = 0; i < nstate_; ++i) { - snd->statepop_[i] = 0; + snd->statepop(i) = 0; } if (snd->nsingle_ == 1) { snd->filledstate_ = rvalrand(nstate_); - ++snd->statepop_[snd->filledstate_]; + ++snd->statepop(snd->filledstate_); next1trans(snd); } else { for (i = 0; i < snd->nsingle_; ++i) { - ++snd->statepop_[rvalrand(nstate_)]; + ++snd->statepop(rvalrand(nstate_)); } nextNtrans(snd); // for (i=0; i < nstate_; ++i) { - // printf(" state %d pop %g\n", i, snd->statepop_[i]); + // printf(" state %d pop %g\n", i, snd->statepop(i)); //} } if (cvode_active_) { diff --git a/src/nrniv/kssingle.h b/src/nrniv/kssingle.h index e4728a8dd4..22034ee696 100644 --- a/src/nrniv/kssingle.h +++ b/src/nrniv/kssingle.h @@ -24,12 +24,22 @@ class KSSingleNodeData: public DiscreteEvent { // specific to KSSingleNodeData int nsingle_; - double* statepop_; // points to prop->param state array - int filledstate_; // used when nsingle_ == 1, index of populated state - double vlast_; // voltage at which the transition rates were calculated - double t0_; // last transition time. <= t on entry to step calculations. - double t1_; // next. > t + dt on exit from step calculations. - int next_trans_; // if t1_ takes effect, this is the trans. 
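The kssingle.cpp call sites above (do1trans, doNtrans, nextNtrans, init) stop indexing the cached statepop_ pointer and instead go through the statepop(i) accessor declared in the header just below, which re-resolves the value from the owning Prop on every access. A minimal sketch of the new update pattern, with a hypothetical helper standing in for doNtrans():

    // Hypothetical helper mirroring doNtrans(): population counts now go through
    // the accessor instead of a cached double* into the Prop's parameter array.
    void move_one_channel(KSSingleNodeData* snd, int src, int target) {
        --snd->statepop(src);      // was --snd->statepop_[src]
        ++snd->statepop(target);   // was ++snd->statepop_[target]
    }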
+ Prop* prop_{}; + int statepop_offset_{std::numeric_limits::max()}; // what used to be statepop_[i] is now + // _prop->param(statepop_offset_ + i) + /** + * @brief Replacement for old double* statepop_ member. + */ + double& statepop(int i) { + assert(prop_); + assert(statepop_offset_ != std::numeric_limits::max()); + return prop_->param(statepop_offset_ + i); + } + int filledstate_; // used when nsingle_ == 1, index of populated state + double vlast_; // voltage at which the transition rates were calculated + double t0_; // last transition time. <= t on entry to step calculations. + double t1_; // next. > t + dt on exit from step calculations. + int next_trans_; // if t1_ takes effect, this is the trans. Point_process** ppnt_; KSSingle* kss_; TQItem* qi_; @@ -41,10 +51,15 @@ class KSSingle { virtual ~KSSingle(); void alloc(Prop*, int sindex); - void init(double v, double* s, KSSingleNodeData* snd, NrnThread*); + void init(double v, + KSSingleNodeData* snd, + NrnThread*, + Memb_list*, + std::size_t instance, + std::size_t offset); - void state(Node*, double*, Datum*, NrnThread*); - void cv_update(Node*, double*, Datum*, NrnThread*); + void state(Node*, Datum*, NrnThread*); + void cv_update(Node*, Datum*, NrnThread*); void one(double, KSSingleNodeData*, NrnThread*); void do1trans(KSSingleNodeData*); void next1trans(KSSingleNodeData*); diff --git a/src/nrniv/linmod.cpp b/src/nrniv/linmod.cpp index 5872c49bc4..03e7e0bd0f 100644 --- a/src/nrniv/linmod.cpp +++ b/src/nrniv/linmod.cpp @@ -24,9 +24,7 @@ #include #include "linmod.h" - -extern int (*nrnpy_hoccommand_exec)(Object*); - +#include "nrnpy.h" LinearModelAddition::LinearModelAddition(Matrix* cmat, Matrix* gmat, @@ -65,7 +63,7 @@ void LinearModelAddition::f_(Vect& y, Vect& yprime, int size) { // has been adjusted by daspk // size is the number of equations if (f_callable_) { - if (!(*nrnpy_hoccommand_exec)(f_callable_)) { + if (!neuron::python::methods.hoccommand_exec(f_callable_)) { hoc_execerror("LinearModelAddition runtime error", 0); } } diff --git a/src/nrniv/linmod.h b/src/nrniv/linmod.h index fada6c69a5..090d054456 100644 --- a/src/nrniv/linmod.h +++ b/src/nrniv/linmod.h @@ -1,7 +1,6 @@ #ifndef linmod_h #define linmod_h -#include #include "ocmatrix.h" #include "ivocvect.h" #include "nrnoc2iv.h" diff --git a/src/nrniv/linmod1.cpp b/src/nrniv/linmod1.cpp index 0898a4da4e..813299a8e8 100644 --- a/src/nrniv/linmod1.cpp +++ b/src/nrniv/linmod1.cpp @@ -8,7 +8,6 @@ #include "classreg.h" #include "linmod.h" #include "nrnoc2iv.h" -#include "treeset.h" // hoc interface to a LinearModelAddition // remember that the policy for equation additions to the tree matrix is @@ -31,7 +30,6 @@ class LinearMechanism: public Observer { bool valid() { return model_ != NULL; } - void update_ptrs(); LinearModelAddition* model_; Matrix* c_; @@ -45,12 +43,6 @@ class LinearMechanism: public Observer { Vect* elayer_; }; -extern void nrn_linmod_update_ptrs(void*); -void nrn_linmod_update_ptrs(void* p) { - LinearMechanism* lm = (LinearMechanism*) p; - lm->update_ptrs(); -} - static double valid(void* v) { return double(((LinearMechanism*) v)->valid()); } @@ -108,18 +100,6 @@ void LinearMechanism::lmfree() { } } -void LinearMechanism::update_ptrs() { - if (nodes_) { - nrn_notify_pointer_disconnect(this); - for (int i = 0; i < nnode_; ++i) { - double* pd = nrn_recalc_ptr(&(NODEV(nodes_[i]))); - if (pd != &(NODEV(nodes_[i]))) { - nrn_notify_when_double_freed(pd, this); - } - } - } -} - void LinearMechanism::disconnect(Observable*) {} void 
LinearMechanism::update(Observable*) { lmfree(); @@ -156,7 +136,7 @@ void LinearMechanism::create() { double x = chkarg(i, 0., 1.); Section* sec = chk_access(); nodes_[0] = node_exact(sec, x); - nrn_notify_when_double_freed(&NODEV(nodes_[0]), this); + neuron::container::notify_when_handle_dies(nodes_[0]->v_handle(), this); } else { Object* o = *hoc_objgetarg(i); check_obj_type(o, "SectionList"); @@ -168,7 +148,7 @@ void LinearMechanism::create() { nodes_ = new Node*[x->size()]; for (sec = sl->begin(); sec; sec = sl->next()) { nodes_[nnode_] = node_exact(sec, x->elem(nnode_)); - nrn_notify_when_double_freed(&NODEV(nodes_[nnode_]), this); + neuron::container::notify_when_handle_dies(nodes_[nnode_]->v_handle(), this); ++nnode_; } if (ifarg(i + 2)) { diff --git a/src/nrniv/matrixmap.h b/src/nrniv/matrixmap.h index 202ba1081d..4838d24af4 100644 --- a/src/nrniv/matrixmap.h +++ b/src/nrniv/matrixmap.h @@ -1,9 +1,6 @@ #ifndef matrixmap_h #define matrixmap_h -// this defines things needed by ocmatrix -#include - #include "ocmatrix.h" #include "nrnoc2iv.h" diff --git a/src/nrniv/memory_usage.cpp b/src/nrniv/memory_usage.cpp new file mode 100644 index 0000000000..a7b6766dc5 --- /dev/null +++ b/src/nrniv/memory_usage.cpp @@ -0,0 +1,150 @@ +#include +#include +#include +#include "oc_ansi.h" + +namespace neuron::container { +ModelMemoryUsage memory_usage(const Model& model) { + auto nodes = memory_usage(model.node_data()); + + auto mechanisms = StorageMemoryUsage(); + model.apply_to_mechanisms([&mechanisms](const auto& md) { mechanisms += memory_usage(md); }); + + return {nodes, mechanisms}; +} + +cache::ModelMemoryUsage memory_usage(const std::optional& model) { + return model ? memory_usage(*model) : cache::ModelMemoryUsage{}; +} +cache::ModelMemoryUsage memory_usage(const neuron::cache::Model& model) { + VectorMemoryUsage thread(model.thread); + for (const auto& t: model.thread) { + thread += VectorMemoryUsage(t.mechanism_offset); + } + + VectorMemoryUsage mechanism(model.mechanism); + for (const auto& m: model.mechanism) { + mechanism += VectorMemoryUsage(m.pdata_ptr_cache); + + mechanism += VectorMemoryUsage(m.pdata); + for (const auto& pd: m.pdata) { + mechanism += VectorMemoryUsage(pd); + } + + mechanism += VectorMemoryUsage(m.pdata_hack); + for (const auto& pdd: m.pdata_hack) { + mechanism += VectorMemoryUsage(pdd); + } + } + + return {thread, mechanism}; +} + +MemoryUsage local_memory_usage() { + return MemoryUsage{memory_usage(model()), + memory_usage(neuron::cache::model), + detail::compute_defer_delete_storage_size()}; +} + +namespace detail { +template +VectorMemoryUsage compute_defer_delete_storage_size(std::vector const* const v, + size_t per_element_size) { + if (v) { + size_t size = v->size() * (sizeof(T) + per_element_size); + size_t capacity = size + (v->capacity() - v->size()) * sizeof(T); + + return {size, capacity}; + } + + return {0ul, 0ul}; +} + + +VectorMemoryUsage compute_defer_delete_storage_size() { + return compute_defer_delete_storage_size(defer_delete_storage, sizeof(void*)); +} +} // namespace detail + + +/** @brief Format the memory sizes as human readable strings. + * + * Currently, the intended use is in aligned tables in the memory usage + * report. Hence, the string has two digits and is 9 characters long + * (without the null char). 
+ */ +std::string format_memory(size_t bytes) { + static std::vector suffixes{" ", " kB", " MB", " GB", " TB", " PB"}; + + size_t suffix_id = std::min(size_t(std::floor(std::log10(std::max(1.0, double(bytes))))) / 3, + suffixes.size() - 1); + auto suffix = suffixes[suffix_id]; + + double value = double(bytes) / std::pow(10.0, suffix_id * 3.0); + + char formatted[64]; + if (suffix_id == 0) { + snprintf(formatted, sizeof(formatted), "% 6ld", bytes); + } else { + snprintf(formatted, sizeof(formatted), "%6.2f", value); + } + + return formatted + suffix; +} + +std::string format_memory_usage(const VectorMemoryUsage& memory_usage) { + return format_memory(memory_usage.size) + " " + format_memory(memory_usage.capacity); +} + +std::string format_memory_usage(const MemoryUsage& usage) { + const auto& model = usage.model; + const auto& cache_model = usage.cache_model; + const auto& stable_pointers = usage.stable_pointers; + const auto& total = usage.compute_total(); + const auto& summary = MemoryUsageSummary(usage); + + std::stringstream os; + + os << " size capacity \n"; + os << "Model \n"; + os << " nodes \n"; + os << " data " << format_memory_usage(model.nodes.heavy_data) << "\n"; + os << " stable_identifiers " << format_memory_usage(model.nodes.stable_identifiers) << "\n"; + os << " mechanisms \n"; + os << " data " << format_memory_usage(model.mechanisms.heavy_data) << "\n"; + os << " stable_identifiers " << format_memory_usage(model.mechanisms.stable_identifiers) + << "\n"; + os << "cache::Model \n"; + os << " threads " << format_memory_usage(cache_model.threads) << "\n"; + os << " mechanisms " << format_memory_usage(cache_model.mechanisms) << "\n"; + os << "deferred deletion \n"; + os << " stable_pointers " << format_memory_usage(stable_pointers) << "\n"; + os << "\n"; + os << "total " << format_memory_usage(total) << "\n"; + os << "\n"; + os << "Summary\n"; + os << " required " << format_memory(summary.required) << "\n"; + os << " convenient " << format_memory(summary.convenient) << "\n"; + os << " oversized " << format_memory(summary.oversized) << "\n"; + os << " leaked " << format_memory(summary.leaked) << "\n"; + + + return os.str(); +} + + +void print_memory_usage(MemoryUsage const& memory_usage) { + std::cout << format_memory_usage(memory_usage) << "\n"; +} + +} // namespace neuron::container + +void print_local_memory_usage() { + if (!ifarg(0)) { + hoc_execerror("print_local_memory_usage doesn't support any arguments.", nullptr); + } + + neuron::container::print_memory_usage(neuron::container::local_memory_usage()); + + hoc_retpushx(1.); +} diff --git a/src/nrniv/multisplit.cpp b/src/nrniv/multisplit.cpp index 66c54d7b65..3ca23e4dcc 100644 --- a/src/nrniv/multisplit.cpp +++ b/src/nrniv/multisplit.cpp @@ -36,7 +36,7 @@ static void multisplit_v_setup(); static void multisplit_solve(); extern double nrnmpi_rtcomp_time_; -#if PARANEURON +#if NRNMPI extern double nrnmpi_splitcell_wait_; #else static double nrnmpi_splitcell_wait_; @@ -59,10 +59,8 @@ static double nrnmpi_wtime() { class MultiSplit; class MultiSplitControl; -#define A(i) VEC_A(i) -#define B(i) VEC_B(i) -#define D(i) VEC_D(i) -#define RHS(i) VEC_RHS(i) +#define D(i) vec_d[i] +#define RHS(i) vec_rhs[i] #define S1A(i) sid1A[i] #define S1B(i) sid1B[i] @@ -365,7 +363,6 @@ void MultiSplitControl::multisplit(Section* sec, double x, int sid, int backbone if (sid >= 1000) { pmat(sid>1000); return; } #endif if (sid < 0) { - nrn_cachevec(1); if (classical_root_to_multisplit_) { nrn_multisplit_setup_ = multisplit_v_setup; 
nrn_multisplit_solve_ = multisplit_solve; @@ -1003,6 +1000,8 @@ bb_relation[j], rthost[j]); for (j = 0; j < 2; ++j) { NrnThread* _nt = nrn_threads + threadid[i]; Node* nd = _nt->_v_node[inode[i + j]]; + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); if (nd->_classical_parent && nd->sec_node_index_ < nd->sec->nnode - 1) { if (rthost[i] == nrnmpi_myid) { Area2RT& art = area2rt_[narea2rt_]; @@ -1474,6 +1473,8 @@ secname(v_node[j]->sec), v_node[j]->sec_node_index_); for (MultiSplit* ms: *multisplit_list_) { NrnThread* _nt = nrn_threads + ms->ithread; MultiSplitThread& t = mth_[ms->ithread]; + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); if (ms->rthost == nrnmpi_myid) { // printf("%d nrtree_=%d i=%d rt=%p\n", nrnmpi_myid, nrtree_, i, rt[i]); int j = ms->nd[0]->v_node_index; @@ -1657,8 +1658,8 @@ void MultiSplitControl::rt_map_update() { Area2RT& art = area2rt_[i]; MultiSplit& ms = *art.ms; NrnThread* _nt = nrn_threads + ms.ithread; - art.pd[0] = &D(art.inode); - art.pd[1] = &RHS(art.inode); + art.pd[0] = _nt->node_d_storage() + art.inode; + art.pd[1] = _nt->node_rhs_storage() + art.inode; if (art.n == 3) { MultiSplitThread& t = mth_[ms.ithread]; if (art.inode == ms.nd[0]->v_node_index) { @@ -2000,18 +2001,21 @@ void MultiSplitControl::multisplit_nocap_v_part1(NrnThread* _nt) { // non-zero area nodes (because current from zero area not added) // so encode v into D and sum of zero-area rhs will end up in // rhs. + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); + auto* const vec_v = _nt->node_voltage_storage(); if (_nt->id == 0) for (i = 0; i < narea2buf_; ++i) { Area2Buf& ab = area2buf_[i]; - VEC_D(ab.inode) = 1e50; // sentinal - VEC_RHS(ab.inode) = VEC_V(ab.inode) * 1e50; + vec_d[ab.inode] = 1e50; // sentinal + vec_rhs[ab.inode] = vec_v[ab.inode] * 1e50; } // also scale the non-zero area elements on this host for (i = 0; i < narea2rt_; ++i) { Area2RT& ar = area2rt_[i]; if (_nt->id == ar.ms->ithread) { - VEC_D(ar.inode) = 1e50; - VEC_RHS(ar.inode) = VEC_V(ar.inode) * 1e50; + vec_d[ar.inode] = 1e50; + vec_rhs[ar.inode] = vec_v[ar.inode] * 1e50; } } } @@ -2027,24 +2031,27 @@ void MultiSplitControl::multisplit_nocap_v_part3(NrnThread* _nt) { // But for non-zero area nodes, D is the sum of all zero-area // node d, and RHS is the sum of all zero-area node rhs. int i; - + auto* const vec_area = _nt->node_area_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); + auto* const vec_v = _nt->node_voltage_storage(); if (_nt->id == 0) for (i = 0; i < narea2buf_; ++i) { Area2Buf& ab = area2buf_[i]; int j = ab.inode; - double afac = 100. / VEC_AREA(j); - ab.adjust_rhs_ = (VEC_RHS(j) - VEC_D(j) * VEC_V(j)) * afac; + double afac = 100. / vec_area[j]; + ab.adjust_rhs_ = (vec_rhs[j] - vec_d[j] * vec_v[j]) * afac; // printf("%d nz1 %d D=%g RHS=%g V=%g afac=%g adjust=%g\n", - // nrnmpi_myid, i, D(i), RHS(i), VEC_V(j), afac, ab.adjust_rhs_); + // nrnmpi_myid, i, D(i), RHS(i), vec_v[j], afac, ab.adjust_rhs_); } for (i = 0; i < narea2rt_; ++i) { Area2RT& ar = area2rt_[i]; if (_nt->id == ar.ms->ithread) { int j = ar.inode; - double afac = 100. / VEC_AREA(j); - ar.adjust_rhs_ = (VEC_RHS(j) - VEC_D(j) * VEC_V(j)) * afac; + double afac = 100. 
/ vec_area[j]; + ar.adjust_rhs_ = (vec_rhs[j] - vec_d[j] * vec_v[j]) * afac; // printf("%d nz2 %d D=%g RHS=%g V=%g afac=%g adjust=%g\n", - // nrnmpi_myid, i, D(i), RHS(i), VEC_V(j), afac, ar.adjust_rhs_); + // nrnmpi_myid, i, D(i), RHS(i), vec_v[j], afac, ar.adjust_rhs_); } } } @@ -2055,10 +2062,11 @@ void nrn_multisplit_adjust_rhs(NrnThread* nt) { void MultiSplitControl::multisplit_adjust_rhs(NrnThread* _nt) { int i; + auto* const vec_rhs = _nt->node_rhs_storage(); if (_nt->id == 0) for (i = 0; i < narea2buf_; ++i) { Area2Buf& ab = area2buf_[i]; - VEC_RHS(ab.inode) += ab.adjust_rhs_; + vec_rhs[ab.inode] += ab.adjust_rhs_; } // also scale the non-zero area elements on this host for (i = 0; i < narea2rt_; ++i) { @@ -2066,7 +2074,7 @@ void MultiSplitControl::multisplit_adjust_rhs(NrnThread* _nt) { if (_nt->id == ar.ms->ithread) { // printf("%d adjust %d %g %g\n", // nrnmpi_myid, ar.inode, ar.adjust_rhs_, VEC_RHS(ar.inode)); - VEC_RHS(ar.inode) += ar.adjust_rhs_; + vec_rhs[ar.inode] += ar.adjust_rhs_; } } } @@ -2126,8 +2134,8 @@ nrnmpi_myid, i, mt.displ_, mt.size_, mt.host_, tag); for (jj = 0; jj < mt.nnode_; ++jj) { k = mt.nodeindex_[jj]; _nt = nrn_threads + mt.nodeindex_th_[jj]; - tbuf[j++] = D(k); - tbuf[j++] = RHS(k); + tbuf[j++] = _nt->actual_d(k); + tbuf[j++] = _nt->actual_rhs(k); } // each sent backbone will have added 2 to mt.nnode_rt_ for (jj = 0; jj < mt.nnode_rt_; ++jj) { @@ -2158,7 +2166,7 @@ nrnmpi_myid, mt.host_, jj, tbuf[jj]); for (i = 0; i < narea2buf_; ++i) { Area2Buf& ab = area2buf_[i]; _nt = nrn_threads + ab.ms->ithread; - double afac = 0.01 * VEC_AREA(ab.inode); + double afac = 0.01 * _nt->node_area_storage()[ab.inode]; tbuf = tsendbuf_; for (j = 0; j < ab.n; ++j) { tbuf[ab.ibuf[j]] *= afac; @@ -2215,7 +2223,7 @@ for (i=0; i < tbsize_; ++i) { printf("%d trecvbuf[%d] = %g\n", nrnmpi_myid, i, t for (i = 0; i < narea2rt_; ++i) { Area2RT& ar = area2rt_[i]; NrnThread* _nt = nrn_threads + ar.ms->ithread; - double afac = 0.01 * VEC_AREA(ar.inode); + double afac = 0.01 * _nt->node_area_storage()[ar.inode]; for (j = 0; j < ar.n; ++j) { *ar.pd[j] *= afac; } @@ -2279,8 +2287,8 @@ nrnmpi_myid, mt.host_, mt.nnode_, mt.nnode_rt_, mt.size_, mt.tag_); for (jj = 0; jj < mt.nnode_; ++jj) { k = mt.nodeindex_[jj]; _nt = nrn_threads + mt.nodeindex_th_[jj]; - D(k) += tbuf[j++]; - RHS(k) += tbuf[j++]; + _nt->actual_d(k) += tbuf[j++]; + _nt->actual_rhs(k) += tbuf[j++]; } #if 0 if (nrnmpi_myid == 4) { @@ -2293,7 +2301,7 @@ nrnmpi_myid, mt.host_, 2*j, tbuf[2*j], 2*j+1, tbuf[2*j+1], mt.nodeindex_[j]); } #endif // EXCHANGE_ON -#if PARANEURON +#if NRNMPI nrnmpi_splitcell_wait_ += nrnmpi_wtime() - wt; #endif errno = 0; @@ -2335,8 +2343,8 @@ nrnmpi_myid, i, mt.displ_, mt.size_, mt.host_, tag); for (jj = 0; jj < mt.nnode_; ++jj) { k = mt.nodeindex_[jj]; _nt = nrn_threads + mt.nodeindex_th_[jj]; - tbuf[j++] = D(k); - tbuf[j++] = RHS(k); + tbuf[j++] = _nt->actual_d(k); + tbuf[j++] = _nt->actual_rhs(k); } // each sent backbone will have added 2 to mt.nnode_rt_ for (jj = 0; jj < mt.nnode_rt_; ++jj) { @@ -2421,8 +2429,8 @@ for (i=0; i < tbsize_; ++i) { printf("%d trecvbuf[%d] = %g\n", nrnmpi_myid, i, t for (jj = 0; jj < mt.nnode_; ++jj) { k = mt.nodeindex_[jj]; _nt = nrn_threads + mt.nodeindex_th_[jj]; - D(k) = tbuf[j++]; - RHS(k) = tbuf[j++]; + _nt->actual_d(k) = tbuf[j++]; + _nt->actual_rhs(k) = tbuf[j++]; } #if 0 for (j = 0; j < mt.nnode_; ++j) { @@ -2478,8 +2486,8 @@ nrnmpi_myid, mt.host_, mt.nnode_, mt.nnode_rt_, mt.size_, mt.tag_); for (jj = 0; jj < mt.nnode_; ++jj) { k = mt.nodeindex_[jj]; _nt = 
nrn_threads + mt.nodeindex_th_[jj]; - D(k) = tbuf[j++]; - RHS(k) = tbuf[j++]; + _nt->actual_d(k) = tbuf[j++]; + _nt->actual_rhs(k) = tbuf[j++]; } #if 0 if (nrnmpi_myid == 4) { @@ -2492,7 +2500,7 @@ nrnmpi_myid, mt.host_, 2*j, tbuf[2*j], 2*j+1, tbuf[2*j+1], mt.nodeindex_[j]); } #endif // EXCHANGE_ON -#if PARANEURON +#if NRNMPI nrnmpi_splitcell_wait_ += nrnmpi_wtime() - wt; #endif errno = 0; @@ -2670,22 +2678,22 @@ void ReducedTree::pr_map(int tsize, double* trbuf) { if (rmap[i] >= trbuf && rmap[i] < (trbuf + tsize)) { Printf(" %2d rhs[%2d] += tbuf[%ld]\n", i, irmap[i], rmap[i] - trbuf); } - if (rmap[i] >= nt->_actual_rhs && rmap[i] < (nt->_actual_rhs + nt->end)) { - Node* nd = nt->_v_node[rmap[i] - nt->_actual_rhs]; + if (rmap[i] >= nt->node_rhs_storage() && rmap[i] < (nt->node_rhs_storage() + nt->end)) { + Node* nd = nt->_v_node[rmap[i] - nt->node_rhs_storage()]; Printf(" %2d rhs[%2d] rhs[%d] += rhs[%ld] \t%s{%d}\n", i, irmap[i], irmap[i], - rmap[i] - nt->_actual_rhs, + rmap[i] - nt->node_rhs_storage(), secname(nd->sec), nd->sec_node_index_); } - if (rmap[i] >= nt->_actual_d && rmap[i] < (nt->_actual_d + nt->end)) { + if (rmap[i] >= nt->node_d_storage() && rmap[i] < (nt->node_d_storage() + nt->end)) { Printf(" %2d rhs[%2d] d[%d] += d[%ld]\n", i, irmap[i], irmap[i] - n, - rmap[i] - nt->_actual_d); + rmap[i] - nt->node_d_storage()); } if (rmap[i] >= t.sid1A && rmap[i] < (t.sid1A + nb)) { Printf(" %2d rhs[%2d] a[%d] += sid1A[%ld]", @@ -2855,11 +2863,15 @@ void ReducedTree::fillsmap(int sid, double* prhs, double* pd) { void MultiSplitThread::triang_subtree2backbone(NrnThread* _nt) { int i, ip; double p; + auto* const vec_a = _nt->node_a_storage(); + auto* const vec_b = _nt->node_b_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); // eliminate a of the subtrees for (i = i3 - 1; i >= backbone_end; --i) { ip = _nt->_v_parent_index[i]; - p = A(i) / D(i); - D(ip) -= p * B(i); + p = vec_a[i] / vec_d[i]; + vec_d[ip] -= p * vec_b[i]; RHS(ip) -= p * RHS(i); } #if 0 @@ -2876,17 +2888,21 @@ void MultiSplitThread::triang_backbone(NrnThread* _nt) { double p; // begin the backbone triangularization. This eliminates a and fills in // sid1A column. Begin with pivot equation adjacent to sid1. 
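The elimination loops in triang_subtree2backbone and triang_backbone (and the back-substitution routines further down) all follow the same conversion: fetch the per-thread SoA node arrays once from NrnThread's storage accessors, then index them where the removed VEC_A/VEC_B/VEC_D/VEC_RHS macros used to be. A minimal sketch of one such loop, written as a hypothetical free function:

    // Hypothetical stand-in for the elimination loops in this file.
    void eliminate_range(NrnThread* _nt, int istart, int istop) {
        auto* const vec_a = _nt->node_a_storage();     // fetched once per call
        auto* const vec_b = _nt->node_b_storage();
        auto* const vec_d = _nt->node_d_storage();
        auto* const vec_rhs = _nt->node_rhs_storage();
        for (int i = istop - 1; i >= istart; --i) {
            int ip = _nt->_v_parent_index[i];
            double p = vec_a[i] / vec_d[i];
            vec_d[ip] -= p * vec_b[i];
            vec_rhs[ip] -= p * vec_rhs[i];
        }
    }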
+ auto* const vec_a = _nt->node_a_storage(); for (i = backbone_sid1_begin; i < backbone_end; ++i) { // what is the equation index for A(i) j = _nt->_v_parent_index[i] - backbone_begin; - S1A(j) = A(i); + S1A(j) = vec_a[i]; } + auto* const vec_b = _nt->node_b_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); for (i = backbone_sid1_begin - 1; i >= backbone_interior_begin; --i) { ip = _nt->_v_parent_index[i]; j = i - backbone_begin; jp = ip - backbone_begin; - p = A(i) / D(i); - D(ip) -= p * B(i); + p = vec_a[i] / D(i); + D(ip) -= p * vec_b[i]; RHS(ip) -= p * RHS(i); S1A(jp) = -p * S1A(j); // printf("iter i=%d ip=%d j=%d jp=%d D(ip)=%g RHS(ip)=%g S1A(ip)=%g\n", @@ -2900,11 +2916,11 @@ void MultiSplitThread::triang_backbone(NrnThread* _nt) { ip = _nt->_v_parent_index[i]; j = i - backbone_begin; if (ip < backbone_interior_begin) { - S1B(j) = B(i); + S1B(j) = vec_b[i]; continue; } jp = ip - backbone_begin; - p = B(i) / D(ip); + p = vec_b[i] / D(ip); RHS(i) -= p * RHS(ip); S1A(j) -= p * S1A(jp); S1B(j) = -p * S1B(jp); @@ -2916,11 +2932,11 @@ void MultiSplitThread::triang_backbone(NrnThread* _nt) { ip = _nt->_v_parent_index[i]; j = i - backbone_begin; if (ip < backbone_interior_begin) { - S1B(j) = B(i); + S1B(j) = vec_b[i]; continue; } jp = ip - backbone_begin; - p = B(i) / D(ip); + p = vec_b[i] / D(ip); RHS(i) -= p * RHS(ip); D(i) -= p * S1A(jp); S1B(j) = -p * S1B(jp); @@ -2938,6 +2954,8 @@ for (i=i1; i < backbone_end; ++i) { // exchange of d and rhs of sids has taken place and we can solve for the // backbone nodes void MultiSplitThread::bksub_backbone(NrnThread* _nt) { + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); int i, j; double a, b, p, vsid1; // need to solve the 2x2 consisting of sid0 and sid1 points @@ -2983,6 +3001,8 @@ for (i=i1; i < backbone_end; ++i) { } void MultiSplitThread::bksub_short_backbone_part1(NrnThread* _nt) { + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); int i, j; double a, b, p; // solve the 2x2 consisting of sid0 and sid1 points. @@ -3022,6 +3042,9 @@ nrnmpi_myid, RHS(i), RHS(j)); // solve the subtrees, rhs on the backbone is already solved void MultiSplitThread::bksub_subtrees(NrnThread* _nt) { + auto* const vec_b = _nt->node_b_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); int i, ip; // solve all rootnodes not part of a backbone for (i = i1; i < backbone_begin; ++i) { @@ -3030,7 +3053,7 @@ void MultiSplitThread::bksub_subtrees(NrnThread* _nt) { // solve the subtrees for (i = backbone_end; i < i3; ++i) { ip = _nt->_v_parent_index[i]; - RHS(i) -= B(i) * RHS(ip); + RHS(i) -= vec_b[i] * RHS(ip); RHS(i) /= D(i); } #if 0 @@ -3074,8 +3097,6 @@ void MultiSplitControl::v_setup() { // thread nt->_v_node and ms->ithread. Hence anything that // changes the overall structure // requires a complete start over from the point prior to splitting. 
- - assert(use_cachevec); assert(!use_sparse13); int i; // first time through, nth_ = 0 @@ -3510,6 +3531,8 @@ void MultiSplitControl::pmat1(const char* s) { double a, b, d, rhs; for (it = 0; it < nrn_nthread; ++it) { NrnThread* _nt = nrn_threads + it; + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); MultiSplitThread& t = mth_[it]; int i1 = 0; int i3 = _nt->end; diff --git a/src/nrniv/ndatclas.cpp b/src/nrniv/ndatclas.cpp index c523325b55..d336e459ab 100644 --- a/src/nrniv/ndatclas.cpp +++ b/src/nrniv/ndatclas.cpp @@ -155,41 +155,44 @@ int NrnProperty::var_type(Symbol* sym) const { } bool NrnProperty::assign(Prop* src, Prop* dest, int vartype) { - int n; assert(vartype != NRNPOINTER); if (src && dest && src != dest && src->_type == dest->_type) { if (src->ob) { Symbol* msym = memb_func[src->_type].sym; - int i, j, jmax, cnt = msym->s_varn; - for (i = 0; i < cnt; ++i) { + auto const cnt = msym->s_varn; + for (int i = 0; i < cnt; ++i) { Symbol* sym = msym->u.ppsym[i]; if (vartype == 0 || nrn_vartype(sym) == vartype) { - jmax = hoc_total_array_data(sym, 0); - n = sym->u.rng.index; - double *x, *y; - y = dest->ob->u.dataspace[n].pval; - x = src->ob->u.dataspace[n].pval; - for (j = 0; j < jmax; ++j) { + auto const jmax = hoc_total_array_data(sym, 0); + auto const n = sym->u.rng.index; + auto* const y = dest->ob->u.dataspace[n].pval; + auto* const x = src->ob->u.dataspace[n].pval; + for (int j = 0; j < jmax; ++j) { y[j] = x[j]; } } } } else { if (vartype == 0) { - n = src->param_size; + assert(dest->param_num_vars() == src->param_num_vars()); + auto const n = src->param_num_vars(); for (int i = 0; i < n; ++i) { - dest->param[i] = src->param[i]; + assert(dest->param_array_dimension(i) == src->param_array_dimension(i)); + for (auto j = 0; j < src->param_array_dimension(i); ++j) { + dest->param(i, j) = src->param(i, j); + } } } else { Symbol* msym = memb_func[src->_type].sym; - int i, j, jmax, cnt = msym->s_varn; - for (i = 0; i < cnt; ++i) { + auto const cnt = msym->s_varn; + for (int i = 0; i < cnt; ++i) { Symbol* sym = msym->u.ppsym[i]; if (nrn_vartype(sym) == vartype) { - jmax = hoc_total_array_data(sym, 0); - n = sym->u.rng.index; - for (j = 0; j < jmax; ++j) { - dest->param[n + j] = src->param[n + j]; + auto const jmax = hoc_total_array_data(sym, 0); + auto const n = sym->u.rng.index; + assert(src->param_size() == dest->param_size()); + for (int j = 0; j < jmax; ++j) { + dest->param_legacy(n + j) = src->param_legacy(n + j); } } } @@ -214,20 +217,22 @@ Symbol* NrnProperty::find(const char* name) { } int NrnProperty::prop_index(const Symbol* s) const { assert(s); - if (s->type != RANGEVAR) { + if (s->type != RANGEVAR && s->type != RANGEOBJ) { hoc_execerror(s->name, "not a range variable"); } return s->u.rng.index; } -double* NrnProperty::prop_pval(const Symbol* s, int index) const { +neuron::container::data_handle NrnProperty::prop_pval(const Symbol* s, int index) const { if (npi_->p_->ob) { - return npi_->p_->ob->u.dataspace[prop_index(s)].pval + index; + return neuron::container::data_handle{ + npi_->p_->ob->u.dataspace[prop_index(s)].pval + index}; } else { if (s->subtype == NRNPOINTER) { - return npi_->p_->dparam[prop_index(s) + index].get(); + return static_cast>( + npi_->p_->dparam[prop_index(s) + index]); } else { - return npi_->p_->param + prop_index(s) + index; + return npi_->p_->param_handle_legacy(prop_index(s) + index); } } } diff --git a/src/nrniv/ndatclas.h b/src/nrniv/ndatclas.h index e68fbbe9df..b10e9c3b13 100644 --- 
a/src/nrniv/ndatclas.h +++ b/src/nrniv/ndatclas.h @@ -23,7 +23,7 @@ class NrnProperty { Symbol* find(const char* rangevar); Symbol* var(int); int prop_index(const Symbol*) const; - double* prop_pval(const Symbol*, int arrayindex = 0) const; + neuron::container::data_handle prop_pval(const Symbol*, int arrayindex = 0) const; Prop* prop() const; int var_type(Symbol*) const; diff --git a/src/nrniv/netpar.cpp b/src/nrniv/netpar.cpp index f31951bbe8..249d577a2b 100644 --- a/src/nrniv/netpar.cpp +++ b/src/nrniv/netpar.cpp @@ -24,9 +24,11 @@ using Gid2PreSyn = std::unordered_map; #include #include #include -#include #include "ivocvect.h" +#include +#include + static int n_multisend_interval; #if NRN_MUSIC @@ -41,6 +43,19 @@ static IvocVect* all_spiketvec = NULL; static IvocVect* all_spikegidvec = NULL; static double t_exchange_; static double dt1_; // 1/dt +static int localgid_size_; +static int ag_send_size_; +static int ag_send_nspike_; +static int ovfl_capacity_; +static int ovfl_; +static unsigned char* spfixout_; +static unsigned char* spfixin_; +static unsigned char* spfixin_ovfl_; +static int nout_; +static int* nin_; +static NRNMPI_Spike* spikeout_; +static NRNMPI_Spike* spikein_; +static int icapacity_; static void alloc_space(); extern NetCvode* net_cvode_instance; @@ -79,7 +94,7 @@ double nrn_multisend_receive_time(int) { } #endif -#if PARANEURON +#if NRNMPI extern void nrnmpi_split_clear(); #endif extern void nrnmpi_multisplit_clear(); @@ -213,7 +228,7 @@ Gid2PreSyn& nrn_gid2out() { #if NRN_ENABLE_THREADS static MUTDEC #endif - static int seqcnt_; +static std::atomic seqcnt_; static NrnThread* last_nt_; #endif @@ -225,6 +240,8 @@ NetParEvent::~NetParEvent() {} void NetParEvent::send(double tt, NetCvode* nc, NrnThread* nt) { nc->event(tt + usable_mindelay_, this, nt); } + + void NetParEvent::deliver(double tt, NetCvode* nc, NrnThread* nt) { int seq; if (nrn_use_selfqueue_) { // first handle pending flag=1 self events @@ -240,9 +257,7 @@ void NetParEvent::deliver(double tt, NetCvode* nc, NrnThread* nt) { nt->_t = tt; #if NRNMPI if (nrnmpi_numprocs > 0) { - MUTLOCK seq = ++seqcnt_; - MUTUNLOCK if (seq == nrn_nthread) { last_nt_ = nt; #if NRNMPI @@ -574,7 +589,7 @@ void nrn_spike_exchange(NrnThread* nt) { nrnmpi_barrier(); nrnmpi_step_wait_ += nrnmpi_wtime() - wt; } - n = nrnmpi_spike_exchange(); + n = nrnmpi_spike_exchange(&ovfl_, &nout_, nin_, spikeout_, &spikein_, &icapacity_); wt_ = nrnmpi_wtime() - wt; wt = nrnmpi_wtime(); TBUF @@ -670,7 +685,15 @@ void nrn_spike_exchange_compressed(NrnThread* nt) { nrnmpi_barrier(); nrnmpi_step_wait_ += nrnmpi_wtime() - wt; } - n = nrnmpi_spike_exchange_compressed(); + n = nrnmpi_spike_exchange_compressed(localgid_size_, + ag_send_size_, + ag_send_nspike_, + &ovfl_capacity_, + &ovfl_, + spfixout_, + spfixin_, + &spfixin_ovfl_, + nin_); wt_ = nrnmpi_wtime() - wt; wt = nrnmpi_wtime(); TBUF @@ -688,7 +711,7 @@ void nrn_spike_exchange_compressed(NrnThread* nt) { if (max_histogram_) { vector_vec(max_histogram_)[0] += 1.; } - t_exchange_ = nrn_threads->_t; + t_exchange_ = nt->_t; TBUF return; } @@ -785,7 +808,7 @@ void nrn_spike_exchange_compressed(NrnThread* nt) { } } } - t_exchange_ = nrn_threads->_t; + t_exchange_ = nt->_t; wt1_ = nrnmpi_wtime() - wt; TBUF } @@ -969,7 +992,7 @@ void nrn_cleanup_presyn(PreSyn* ps) { void nrnmpi_gid_clear(int arg) { if (arg == 0 || arg == 3 || arg == 4) { nrn_partrans_clear(); -#if PARANEURON +#if NRNMPI nrnmpi_split_clear(); #endif } diff --git a/src/nrniv/nmodlrandom.cpp b/src/nrniv/nmodlrandom.cpp new file mode 100644 
index 0000000000..8432c64e78 --- /dev/null +++ b/src/nrniv/nmodlrandom.cpp @@ -0,0 +1,141 @@ +#include <../../nrnconf.h> + +/* +HOC wrapper object for NMODL NEURON block RANDOM variables to give a HOC +pointprocess.ranvar.method(...) +sec.ranvar_mech(x).method(...) +and Python +poinprocess.ranvar.method(...) +sec(x).mech.ranvar.method(...) +syntax +*/ + +#include +#include +#include +#include +#include + +struct NMODLRandom { + NMODLRandom(Object*) {} + ~NMODLRandom() {} + nrnran123_State* r() { + return (nrnran123_State*) hr_.get(); + } + void chk() { + if (!prop_id_) { + hoc_execerr_ext("NMODLRandom wrapped handle is not valid"); + } + } + neuron::container::generic_data_handle hr_{}; + neuron::container::non_owning_identifier_without_container prop_id_{}; +}; + +static Symbol* nmodlrandom_sym{}; +#undef dmaxuint +#define dmaxuint 4294967295. + +static Object** set_ids(void* v) { // return this NMODLRandom instance + NMODLRandom* r = (NMODLRandom*) v; + r->chk(); + uint32_t id[3]; + for (int i = 0; i < 3; ++i) { + id[i] = (uint32_t) (chkarg(i + 1, 0., dmaxuint)); + } + nrnran123_setids(r->r(), id[0], id[1], id[2]); + return hoc_temp_objptr(nrn_get_gui_redirect_obj()); +} + +static Object** get_ids(void* v) { // return a Vector of size 3. + NMODLRandom* r = (NMODLRandom*) v; + r->chk(); + uint32_t id[3]{}; + nrnran123_getids3(r->r(), id, id + 1, id + 2); + IvocVect* vec = vector_new1(3); + double* data = vector_vec(vec); + for (int i = 0; i < 3; ++i) { + data[i] = double(id[i]); + } + return vector_temp_objvar(vec); +} + +static Object** set_seq(void* v) { // return this NModlRandom instance + NMODLRandom* r = (NMODLRandom*) v; + r->chk(); + double seq = *getarg(1); + nrnran123_setseq(r->r(), seq); + return hoc_temp_objptr(nrn_get_gui_redirect_obj()); +} + +static double get_seq(void* v) { // return the 34 bits (seq*4 + which) as double + NMODLRandom* r = (NMODLRandom*) v; + r->chk(); + uint32_t seq; + char which; + nrnran123_getseq(r->r(), &seq, &which); + return double(seq) * 4.0 + which; +} + +static double uniform(void* v) { + NMODLRandom* r = (NMODLRandom*) v; + r->chk(); + return nrnran123_uniform(r->r()); +} + +static Member_func members[] = {{"get_seq", get_seq}, {"uniform", uniform}, {nullptr, nullptr}}; + +static Member_ret_obj_func retobj_members[] = {{"set_ids", set_ids}, + {"get_ids", get_ids}, + {"set_seq", set_seq}, + {nullptr, nullptr}}; + +static void* nmodlrandom_cons(Object*) { + NMODLRandom* r = new NMODLRandom(nullptr); + return r; +} + +static void nmodlrandom_destruct(void* v) { + NMODLRandom* r = (NMODLRandom*) v; + delete r; +} + +void NMODLRandom_reg() { + class2oc("NMODLRandom", + nmodlrandom_cons, + nmodlrandom_destruct, + members, + nullptr, + retobj_members, + nullptr); + if (!nmodlrandom_sym) { + nmodlrandom_sym = hoc_lookup("NMODLRandom"); + assert(nmodlrandom_sym); + } +} + +Object* nrn_pntproc_nmodlrandom_wrap(void* v, Symbol* sym) { + auto* const pnt = static_cast(v); + if (!pnt->prop) { + if (nrn_inpython_ == 1) { /* python will handle the error */ + hoc_warning("point process not located in a section", nullptr); + nrn_inpython_ = 2; + return {}; + } else { + hoc_execerror("point process not located in a section", nullptr); + } + } + + return nrn_nmodlrandom_wrap(pnt->prop, sym); +} + +Object* nrn_nmodlrandom_wrap(Prop* prop, Symbol* sym) { + assert(sym->type == RANGEOBJ && sym->subtype == NMODLRANDOM); + auto& datum = prop->dparam[sym->u.rng.index]; + assert(datum.holds()); + + NMODLRandom* r = new NMODLRandom(nullptr); + r->hr_ = datum; + r->prop_id_ 
= prop->id(); + Object* wrap = hoc_new_object(nmodlrandom_sym, r); + return wrap; +} diff --git a/src/nrniv/nonlinz.cpp b/src/nrniv/nonlinz.cpp index 87d4d329f8..f0a24eac8d 100644 --- a/src/nrniv/nonlinz.cpp +++ b/src/nrniv/nonlinz.cpp @@ -6,58 +6,58 @@ #include "nrniv_mf.h" #include "nrnoc2iv.h" #include "nrnmpi.h" -#include "cspmatrix.h" #include "membfunc.h" +#include +#include + +using namespace std::complex_literals; + extern void v_setup_vectors(); -extern void nrn_rhs(NrnThread*); extern int nrndae_extra_eqn_count(); extern Symlist* hoc_built_in_symlist; extern void (*nrnthread_v_transfer_)(NrnThread*); -extern spREAL* spGetElement(char*, int, int); -extern void pargap_jacobi_rhs(double*, double*); +extern void pargap_jacobi_rhs(std::vector>&, + const std::vector>&); extern void pargap_jacobi_setup(int mode); class NonLinImpRep { public: NonLinImpRep(); - virtual ~NonLinImpRep(); void delta(double); + + // Functions to fill the matrix void didv(); void dids(); void dsdv(); void dsds(); + int gapsolve(); - char* m_; + // Matrix containing the non linear system to solve. + Eigen::SparseMatrix> m_{}; + // The solver of the matrix using the LU decomposition method. + Eigen::SparseLU>> lu_{}; int scnt_; // structure_change int n_v_, n_ext_, n_lin_, n_ode_, neq_v_, neq_; - double** pv_; - double** pvdot_; - int* v_index_; - double* rv_; - double* jv_; - double** diag_; - double* deltavec_; // just like cvode.atol*cvode.atolscale for ode's - double delta_; // slightly more efficient and easier for v. + std::vector> pv_, pvdot_; + std::vector> v_; + std::vector deltavec_; // just like cvode.atol*cvode.atolscale for ode's + double delta_; // slightly more efficient and easier for v. void current(int, Memb_list*, int); void ode(int, Memb_list*); double omega_; int iloc_; // current injection site of last solve - float* vsymtol_; - int maxiter_; + float* vsymtol_{}; + int maxiter_{500}; }; -NonLinImp::NonLinImp() { - rep_ = NULL; -} NonLinImp::~NonLinImp() { - if (rep_) { - delete rep_; - } + delete rep_; } + double NonLinImp::transfer_amp(int curloc, int vloc) { if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) { hoc_execerror( @@ -66,9 +66,7 @@ double NonLinImp::transfer_amp(int curloc, int vloc) { if (curloc != rep_->iloc_) { solve(curloc); } - double x = rep_->rv_[vloc]; - double y = rep_->jv_[vloc]; - return sqrt(x * x + y * y); + return std::abs(rep_->v_[vloc]); } double NonLinImp::input_amp(int curloc) { if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_) { @@ -80,9 +78,7 @@ double NonLinImp::input_amp(int curloc) { if (curloc < 0) { return 0.0; } - double x = rep_->rv_[curloc]; - double y = rep_->jv_[curloc]; - return sqrt(x * x + y * y); + return std::abs(rep_->v_[curloc]); } double NonLinImp::transfer_phase(int curloc, int vloc) { if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_ && curloc != rep_->iloc_) { @@ -92,9 +88,7 @@ double NonLinImp::transfer_phase(int curloc, int vloc) { if (curloc != rep_->iloc_) { solve(curloc); } - double x = rep_->rv_[vloc]; - double y = rep_->jv_[vloc]; - return atan2(y, x); + return std::arg(rep_->v_[vloc]); } double NonLinImp::input_phase(int curloc) { if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_) { @@ -106,9 +100,7 @@ double NonLinImp::input_phase(int curloc) { if (curloc < 0) { return 0.0; } - double x = rep_->rv_[curloc]; - double y = rep_->jv_[curloc]; - return atan2(y, x); + return std::arg(rep_->v_[curloc]); } double NonLinImp::ratio_amp(int clmploc, int vloc) { if (nrnmpi_numprocs > 1 && nrnthread_v_transfer_) { @@ 
-120,22 +112,14 @@ double NonLinImp::ratio_amp(int clmploc, int vloc) { if (clmploc != rep_->iloc_) { solve(clmploc); } - double ax, bx, cx, ay, by, cy, bb; - ax = rep_->rv_[vloc]; - ay = rep_->jv_[vloc]; - bx = rep_->rv_[clmploc]; - by = rep_->jv_[clmploc]; - bb = bx * bx + by * by; - cx = (ax * bx + ay * by) / bb; - cy = (ay * bx - ax * by) / bb; - return sqrt(cx * cx + cy * cy); + return std::abs(rep_->v_[vloc] * std::conj(rep_->v_[clmploc]) / std::norm(rep_->v_[clmploc])); } void NonLinImp::compute(double omega, double deltafac, int maxiter) { v_setup_vectors(); - nrn_rhs(nrn_threads); + nrn_rhs(nrn_ensure_model_data_are_sorted(), nrn_threads[0]); if (rep_ && rep_->scnt_ != structure_change_cnt) { delete rep_; - rep_ = NULL; + rep_ = nullptr; } if (!rep_) { rep_ = new NonLinImpRep(); @@ -153,8 +137,10 @@ void NonLinImp::compute(double omega, double deltafac, int maxiter) { rep_->omega_ = 1000. * omega; rep_->delta(deltafac); + + rep_->m_.setZero(); + // fill matrix - cmplx_spClear(rep_->m_); rep_->didv(); rep_->dsds(); #if 1 // when 0 equivalent to standard method @@ -162,18 +148,32 @@ void NonLinImp::compute(double omega, double deltafac, int maxiter) { rep_->dsdv(); #endif - // cmplx_spPrint(rep_->m_, 0, 1, 1); - // for (int i=0; i < rep_->neq_; ++i) { - // printf("i=%d %g %g\n", i, rep_->diag_[i][0], rep_->diag_[i][1]); - // } - int e = cmplx_spFactor(rep_->m_); - switch (e) { - case spZERO_DIAG: - hoc_execerror("cmplx_spFactor error:", "Zero Diagonal"); - case spNO_MEMORY: - hoc_execerror("cmplx_spFactor error:", "No Memory"); - case spSINGULAR: - hoc_execerror("cmplx_spFactor error:", "Singular"); + // Now that the matrix is filled we can compress it (mandatory for SparseLU) + rep_->m_.makeCompressed(); + + // Factorize the matrix so this is ready to solve + rep_->lu_.compute(rep_->m_); + switch (rep_->lu_.info()) { + case Eigen::Success: + // Everything fine + break; + case Eigen::NumericalIssue: + hoc_execerror( + "Eigen Sparse LU factorization failed with Eigen::NumericalIssue, please check the " + "input matrix:", + rep_->lu_.lastErrorMessage().c_str()); + break; + case Eigen::NoConvergence: + hoc_execerror( + "Eigen Sparse LU factorization reports Eigen::NonConvergence after calling compute():", + rep_->lu_.lastErrorMessage().c_str()); + break; + case Eigen::InvalidInput: + hoc_execerror( + "Eigen Sparse LU factorization failed with Eigen::InvalidInput, the input matrix seems " + "invalid:", + rep_->lu_.lastErrorMessage().c_str()); + break; } rep_->iloc_ = -2; @@ -186,20 +186,18 @@ int NonLinImp::solve(int curloc) { hoc_execerror("Must call Impedance.compute first", 0); } if (rep_->iloc_ != curloc) { - int i; rep_->iloc_ = curloc; - for (i = 0; i < rep_->neq_; ++i) { - rep_->rv_[i] = 0; - rep_->jv_[i] = 0; - } + rep_->v_ = std::vector>(rep_->neq_); if (curloc >= 0) { - rep_->rv_[curloc] = 1.e2 / NODEAREA(_nt->_v_node[curloc]); + rep_->v_[curloc] = 1.e2 / NODEAREA(_nt->_v_node[curloc]); } if (nrnthread_v_transfer_) { rval = rep_->gapsolve(); } else { - assert(rep_->m_); - cmplx_spSolve(rep_->m_, rep_->rv_ - 1, rep_->rv_ - 1, rep_->jv_ - 1, rep_->jv_ - 1); + auto v = + Eigen::Map, Eigen::Dynamic>>(rep_->v_.data(), + rep_->v_.size()); + v = rep_->lu_.solve(v); } } return rval; @@ -209,13 +207,8 @@ int NonLinImp::solve(int curloc) { // mapping is already done there. 
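[Editorial illustration, not part of the patch.] The migration above replaces the legacy cmplx_sp* routines with the standard Eigen sparse workflow: accumulate coefficients with coeffRef(), compress, factorize once with SparseLU, check info(), then reuse the factorization for every right-hand side. A minimal self-contained sketch of that workflow on an arbitrary small complex system (assumes only that Eigen is on the include path, as the patch already requires):

    #include <complex>
    #include <cstdio>
    #include <Eigen/Sparse>

    int main() {
        using cplx = std::complex<double>;
        const int n = 3;
        Eigen::SparseMatrix<cplx> m(n, n);
        // Accumulate entries the way didv()/dsds() do; coeffRef inserts on first use.
        m.coeffRef(0, 0) += cplx(2.0, 0.5);
        m.coeffRef(0, 1) += cplx(-1.0, 0.0);
        m.coeffRef(1, 0) += cplx(-1.0, 0.0);
        m.coeffRef(1, 1) += cplx(2.0, 0.5);
        m.coeffRef(1, 2) += cplx(-1.0, 0.0);
        m.coeffRef(2, 1) += cplx(-1.0, 0.0);
        m.coeffRef(2, 2) += cplx(2.0, 0.5);
        m.makeCompressed();  // mandatory before SparseLU::compute()

        Eigen::SparseLU<Eigen::SparseMatrix<cplx>> lu;
        lu.compute(m);  // pattern analysis + numerical factorization
        if (lu.info() != Eigen::Success) {
            std::fprintf(stderr, "factorization failed\n");
            return 1;
        }

        // One factorization, many solves: x = A^-1 * b.
        Eigen::VectorXcd b(n);
        b << cplx(1, 0), cplx(0, 0), cplx(0, 1);
        Eigen::VectorXcd x = lu.solve(b);
        std::printf("|x0| = %g, arg(x0) = %g\n", std::abs(x[0]), std::arg(x[0]));
        return 0;
    }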
NonLinImpRep::NonLinImpRep() { - int err; - int i, j, ieq, cnt; NrnThread* _nt = nrn_threads; - maxiter_ = 500; - m_ = NULL; - vsymtol_ = NULL; Symbol* vsym = hoc_table_lookup("v", hoc_built_in_symlist); if (vsym->extra) { vsymtol_ = &vsym->extra->tolerance; @@ -235,10 +228,10 @@ NonLinImpRep::NonLinImpRep() { n_ode_ = 0; for (NrnThreadMembList* tml = _nt->tml; tml; tml = tml->next) { Memb_list* ml = tml->ml; - i = tml->index; + int i = tml->index; nrn_ode_count_t s = memb_func[i].ode_count; if (s) { - cnt = (*s)(i); + int cnt = (*s)(i); n_ode_ += cnt * ml->nodecount; } } @@ -247,50 +240,23 @@ NonLinImpRep::NonLinImpRep() { if (neq_ == 0) { return; } - m_ = cmplx_spCreate(neq_, 1, &err); - assert(err == spOKAY); - pv_ = new double*[neq_]; - pvdot_ = new double*[neq_]; - v_index_ = new int[n_v_]; - rv_ = new double[neq_ + 1]; - rv_ += 1; - jv_ = new double[neq_ + 1]; - jv_ += 1; - diag_ = new double*[neq_]; - deltavec_ = new double[neq_]; - - for (i = 0; i < n_v_; ++i) { + m_.resize(neq_, neq_); + pv_.resize(neq_); + pvdot_.resize(neq_); + v_.resize(neq_); + deltavec_.resize(neq_); + + for (int i = 0; i < n_v_; ++i) { // utilize nd->eqn_index in case of use_sparse13 later Node* nd = _nt->_v_node[i]; - pv_[i] = &NODEV(nd); - pvdot_[i] = nd->_rhs; - v_index_[i] = i + 1; - } - for (i = 0; i < n_v_; ++i) { - diag_[i] = cmplx_spGetElement(m_, v_index_[i], v_index_[i]); - } - for (i = neq_v_; i < neq_; ++i) { - diag_[i] = cmplx_spGetElement(m_, i + 1, i + 1); + pv_[i] = nd->v_handle(); + pvdot_[i] = nd->rhs_handle(); } scnt_ = structure_change_cnt; } -NonLinImpRep::~NonLinImpRep() { - if (!m_) { - return; - } - cmplx_spDestroy(m_); - delete[] pv_; - delete[] pvdot_; - delete[] v_index_; - delete[](rv_ - 1); - delete[](jv_ - 1); - delete[] diag_; - delete[] deltavec_; -} - void NonLinImpRep::delta(double deltafac) { // also defines pv_,pvdot_ map for ode - int i, j, nc, cnt, ieq; + int i, nc, cnt, ieq; NrnThread* nt = nrn_threads; for (i = 0; i < neq_; ++i) { deltavec_[i] = deltafac; // all v's wasted but no matter. 
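[Editorial illustration, not part of the patch.] Storing each solution entry as a single std::complex<double> in v_ instead of the parallel rv_/jv_ arrays lets transfer_amp, input_phase and ratio_amp above rely on standard-library identities rather than hand-written (x, y) arithmetic. A short check of those equivalences, independent of NEURON (values are arbitrary):

    #include <cassert>
    #include <cmath>
    #include <complex>

    int main() {
        std::complex<double> v(3.0, -4.0), c(1.0, 2.0);
        // amplitude: sqrt(x*x + y*y) == std::abs(v)
        assert(std::fabs(std::abs(v) - std::hypot(v.real(), v.imag())) < 1e-12);
        // phase: atan2(y, x) == std::arg(v)
        assert(std::fabs(std::arg(v) - std::atan2(v.imag(), v.real())) < 1e-12);
        // ratio_amp: |v * conj(c) / |c|^2| is just |v / c|, i.e. the old
        // (ax, ay, bx, by, bb, cx, cy) formula in one expression
        double lhs = std::abs(v / c);
        double rhs = std::abs(v * std::conj(c) / std::norm(c));
        assert(std::fabs(lhs - rhs) < 1e-12);
        return 0;
    }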
@@ -300,11 +266,15 @@ void NonLinImpRep::delta(double deltafac) { // also defines pv_,pvdot_ map for Memb_list* ml = tml->ml; i = tml->index; nc = ml->nodecount; - nrn_ode_count_t s = memb_func[i].ode_count; - if (s && (cnt = (*s)(i)) > 0) { - nrn_ode_map_t m = memb_func[i].ode_map; - for (j = 0; j < nc; ++j) { - (*m)(ieq, pv_ + ieq, pvdot_ + ieq, ml->_data[j], ml->pdata[j], deltavec_ + ieq, i); + if (nrn_ode_count_t s = memb_func[i].ode_count; s && (cnt = s(i)) > 0) { + nrn_ode_map_t ode_map = memb_func[i].ode_map; + for (auto j = 0; j < nc; ++j) { + ode_map(ml->prop[j], + ieq, + pv_.data() + ieq, + pvdot_.data() + ieq, + deltavec_.data() + ieq, + i); ieq += cnt; } } @@ -321,20 +291,17 @@ void NonLinImpRep::didv() { for (i = _nt->ncell; i < n_v_; ++i) { nd = _nt->_v_node[i]; ip = _nt->_v_parent[i]->v_node_index; - double* a = cmplx_spGetElement(m_, v_index_[ip], v_index_[i]); - double* b = cmplx_spGetElement(m_, v_index_[i], v_index_[ip]); - *a += NODEA(nd); - *b += NODEB(nd); - *diag_[i] -= NODEB(nd); - *diag_[ip] -= NODEA(nd); + m_.coeffRef(ip, i) += NODEA(nd); + m_.coeffRef(i, ip) += NODEB(nd); + m_.coeffRef(i, i) -= NODEB(nd); + m_.coeffRef(ip, ip) -= NODEA(nd); } // jwC term Memb_list* mlc = _nt->tml->ml; int n = mlc->nodecount; for (i = 0; i < n; ++i) { - double* cd = mlc->_data[i]; j = mlc->nodelist[i]->v_node_index; - diag_[v_index_[j] - 1][1] += .001 * cd[0] * omega_; + m_.coeffRef(j, j) += .001 * mlc->data(i, 0) * omega_ * 1i; } // di/dv terms // because there may be several point processes of the same type @@ -364,18 +331,18 @@ void NonLinImpRep::didv() { NODERHS(nd) = 0; double x1 = NODEV(nd); // v+dv - NODEV(nd) += delta_; + nd->v() = x1 + delta_; current(i, ml, j); // save rhs // zero rhs // restore v x2 = NODERHS(nd); NODERHS(nd) = 0; - NODEV(nd) = x1; + nd->v() = x1; current(i, ml, j); // conductance // add to matrix - *diag_[v_index_[nd->v_node_index] - 1] -= (x2 - NODERHS(nd)) / delta_; + m_.coeffRef(nd->v_node_index, nd->v_node_index) -= (x2 - NODERHS(nd)) / delta_; } } } @@ -395,8 +362,6 @@ void NonLinImpRep::dids() { nrn_ode_count_t s = memb_func[i].ode_count; int cnt = (*s)(i); if (memb_func[i].current) { - double* x1 = rv_; // use as temporary storage - double* x2 = jv_; for (in = 0; in < ml->nodecount; ++in) { Node* nd = ml->nodelist[in]; // zero rhs @@ -404,22 +369,20 @@ void NonLinImpRep::dids() { // compute rhs current(i, ml, in); // save rhs - x2[in] = NODERHS(nd); + v_[in].imag(NODERHS(nd)); // each state incremented separately and restored for (iis = 0; iis < cnt; ++iis) { is = ieq + in * cnt + iis; // save s - x1[is] = *pv_[is]; + v_[is].real(*pv_[is]); // increment s and zero rhs *pv_[is] += deltavec_[is]; NODERHS(nd) = 0; current(i, ml, in); - *pv_[is] = x1[is]; // restore s - double g = (NODERHS(nd) - x2[in]) / deltavec_[is]; + *pv_[is] = v_[is].real(); // restore s + double g = (NODERHS(nd) - v_[in].imag()) / deltavec_[is]; if (g != 0.) 
{ - double* elm = - cmplx_spGetElement(m_, v_index_[nd->v_node_index], is + 1); - elm[0] = -g; + m_.coeffRef(nd->v_node_index, is) = -g; } } // don't know if this is necessary but make sure last @@ -444,22 +407,21 @@ void NonLinImpRep::dsdv() { nrn_ode_count_t s = memb_func[i].ode_count; int cnt = (*s)(i); if (memb_func[i].current) { - double* x1 = rv_; // use as temporary storage - double* x2 = jv_; // zero rhs, save v for (in = 0; in < ml->nodecount; ++in) { Node* nd = ml->nodelist[in]; for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { *pvdot_[is] = 0.; } - x1[in] = NODEV(nd); + v_[in].real(NODEV(nd)); } // increment v only once in case there are multiple // point processes at the same location for (in = 0; in < ml->nodecount; ++in) { Node* nd = ml->nodelist[in]; - if (x1[in] == NODEV(nd)) { - NODEV(nd) += delta_; + auto const v = nd->v(); + if (v_[in].real() == v) { + nd->v() = v + delta_; } } // compute rhs. this is the rhs(v+dv) @@ -468,10 +430,10 @@ void NonLinImpRep::dsdv() { for (in = 0; in < ml->nodecount; ++in) { Node* nd = ml->nodelist[in]; for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { - x2[is] = *pvdot_[is]; + v_[is].imag(*pvdot_[is]); *pvdot_[is] = 0; } - NODEV(nd) = x1[in]; + nd->v() = v_[in].real(); } // compute the rhs(v) ode(i, ml); @@ -479,11 +441,9 @@ void NonLinImpRep::dsdv() { for (in = 0; in < ml->nodecount; ++in) { Node* nd = ml->nodelist[in]; for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { - double ds = (x2[is] - *pvdot_[is]) / delta_; + double ds = (v_[is].imag() - *pvdot_[is]) / delta_; if (ds != 0.) { - double* elm = - cmplx_spGetElement(m_, is + 1, v_index_[nd->v_node_index]); - elm[0] = -ds; + m_.coeffRef(is, nd->v_node_index) = -ds; } } } @@ -498,7 +458,7 @@ void NonLinImpRep::dsds() { NrnThread* nt = nrn_threads; // jw term for (i = neq_v_; i < neq_; ++i) { - diag_[i][1] += omega_; + m_.coeffRef(i, i) += omega_ * 1i; } ieq = neq_v_; for (NrnThreadMembList* tml = nt->tml; tml; tml = tml->next) { @@ -508,13 +468,11 @@ void NonLinImpRep::dsds() { int nc = ml->nodecount; nrn_ode_count_t s = memb_func[i].ode_count; int cnt = (*s)(i); - double* x1 = rv_; // use as temporary storage - double* x2 = jv_; // zero rhs, save s for (in = 0; in < ml->nodecount; ++in) { for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { *pvdot_[is] = 0.; - x1[is] = *pv_[is]; + v_[is].real(*pv_[is]); } } // compute rhs. this is the rhs(s) @@ -522,7 +480,7 @@ void NonLinImpRep::dsds() { // save rhs for (in = 0; in < ml->nodecount; ++in) { for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { - x2[is] = *pvdot_[is]; + v_[is].imag(*pvdot_[is]); } } // iterate over the states @@ -542,12 +500,11 @@ void NonLinImpRep::dsds() { Node* nd = ml->nodelist[in]; ks = ieq + in * cnt + kks; for (is = ieq + in * cnt, iis = 0; iis < cnt; ++iis, ++is) { - double ds = (*pvdot_[is] - x2[is]) / deltavec_[is]; + double ds = (*pvdot_[is] - v_[is].imag()) / deltavec_[is]; if (ds != 0.) 
{ - double* elm = cmplx_spGetElement(m_, is + 1, ks + 1); - elm[0] = -ds; + m_.coeffRef(is, ks) = -ds; } - *pv_[ks] = x1[ks]; + *pv_[ks] = v_[ks].real(); } } // perhaps not necessary but ensures the last computation is with @@ -561,29 +518,27 @@ void NonLinImpRep::dsds() { void NonLinImpRep::current(int im, Memb_list* ml, int in) { // assume there is in fact a current // method - Pvmi s = memb_func[im].current; // fake a 1 element memb_list - Memb_list mfake; -#if CACHEVEC != 0 + Memb_list mfake{im}; mfake.nodeindices = ml->nodeindices + in; -#endif mfake.nodelist = ml->nodelist + in; - mfake._data = ml->_data + in; + mfake.set_storage_offset(ml->get_storage_offset()); mfake.pdata = ml->pdata + in; mfake.prop = ml->prop ? ml->prop + in : nullptr; mfake.nodecount = 1; mfake._thread = ml->_thread; - (*s)(nrn_threads, &mfake, im); + memb_func[im].current(nrn_ensure_model_data_are_sorted(), nrn_threads, &mfake, im); } void NonLinImpRep::ode(int im, Memb_list* ml) { // assume there is in fact an ode method - Pvmi s = memb_func[im].ode_spec; - (*s)(nrn_threads, ml, im); + memb_func[im].ode_spec(nrn_ensure_model_data_are_sorted(), nrn_threads, ml, im); } +// This function compute a solution of a converging system by iteration. +// The value returned is the number of iterations to reach a precision of "tol" (1e-9). int NonLinImpRep::gapsolve() { - // On entry, rv_ and jv_ contain the complex b for A*x = b. - // On return rv_ and jv_ contain complex solution, x. + // On entry, v_ contains the complex b for A*x = b. + // On return v_ contains complex solution, x. // m_ is the factored matrix for the trees without gap junctions // Jacobi method (easy for parallel) // A = D + R @@ -600,48 +555,40 @@ int NonLinImpRep::gapsolve() { } #endif - pargap_jacobi_setup(0); + pargap_jacobi_setup(0); // 0 means 'setup' - double *rx, *jx, *rx1, *jx1, *rb, *jb; - if (neq_) { - rx = new double[neq_]; - jx = new double[neq_]; - rx1 = new double[neq_]; - jx1 = new double[neq_]; - rb = new double[neq_]; - jb = new double[neq_]; - } - - // initialize for first iteration - for (int i = 0; i < neq_; ++i) { - rx[i] = jx[i] = 0.0; - rb[i] = rv_[i]; - jb[i] = jv_[i]; - } + std::vector> x_old(neq_); + std::vector> x(neq_); + std::vector> b(v_); // iterate till change in x is small double tol = 1e-9; - double delta; + double delta{}; int success = 0; int iter; for (iter = 1; iter <= maxiter_; ++iter) { if (neq_) { - cmplx_spSolve(m_, rb - 1, rx1 - 1, jb - 1, jx1 - 1); + auto b_ = Eigen::Map, Eigen::Dynamic>>(b.data(), + b.size()); + auto x_ = Eigen::Map, Eigen::Dynamic>>(x.data(), + x.size()); + x_ = lu_.solve(b_); } // if any change in x > tol, then do another iteration. success = 1; delta = 0.0; + // Do the substraction of the previous result (x_old) and current result (x). 
+ // If all differences are < tol stop the loop, otherwise continue to iterate for (int i = 0; i < neq_; ++i) { - double err = fabs(rx1[i] - rx[i]) + fabs(jx1[i] - jx[i]); + auto diff = x[i] - x_old[i]; + double err = std::abs(diff.real()) + std::abs(diff.imag()); if (err > tol) { success = 0; } - if (delta < err) { - delta = err; - } + delta = std::max(err, delta); } #if NRNMPI if (nrnmpi_numprocs > 1) { @@ -649,34 +596,17 @@ int NonLinImpRep::gapsolve() { } #endif if (success) { - for (int i = 0; i < neq_; ++i) { - rv_[i] = rx1[i]; - jv_[i] = jx1[i]; - } + v_ = x; break; } // setup for next iteration - for (int i = 0; i < neq_; ++i) { - rx[i] = rx1[i]; - jx[i] = jx1[i]; - rb[i] = rv_[i]; - jb[i] = jv_[i]; - } - pargap_jacobi_rhs(rb, rx); - pargap_jacobi_rhs(jb, jx); + x_old = x; + b = v_; + pargap_jacobi_rhs(b, x_old); } - pargap_jacobi_setup(1); // tear down - - if (neq_) { - delete[] rx; - delete[] jx; - delete[] rx1; - delete[] jx1; - delete[] rb; - delete[] jb; - } + pargap_jacobi_setup(1); // 1 means 'tear down' if (!success) { char buf[256]; @@ -686,7 +616,7 @@ int NonLinImpRep::gapsolve() { maxiter_, delta, tol); - execerror(buf, 0); + hoc_execerror(buf, nullptr); } return iter; } diff --git a/src/nrniv/nonlinz.h b/src/nrniv/nonlinz.h index 115a191b27..b477bf783a 100644 --- a/src/nrniv/nonlinz.h +++ b/src/nrniv/nonlinz.h @@ -3,20 +3,24 @@ class NonLinImpRep; +// A solver for non linear equation of complex numbers. +// Matrix should be squared. class NonLinImp { public: - NonLinImp(); - virtual ~NonLinImp(); + ~NonLinImp(); + // Prepare the matrix before solving it. void compute(double omega, double deltafac, int maxiter); - double transfer_amp(int curloc, int vloc); // v_node[arg] is the node + + double transfer_amp(int curloc, int vloc); double transfer_phase(int curloc, int vloc); double input_amp(int curloc); double input_phase(int curloc); double ratio_amp(int clmploc, int vloc); + int solve(int curloc); private: - NonLinImpRep* rep_; + NonLinImpRep* rep_{}; }; #endif diff --git a/src/nrniv/nrnclass.h b/src/nrniv/nrnclass.h index 73e89cec23..02659d7993 100644 --- a/src/nrniv/nrnclass.h +++ b/src/nrniv/nrnclass.h @@ -3,9 +3,9 @@ , Shape_reg(), PlotShape_reg(), PPShape_reg(), RangeVarPlot_reg(), SectionBrowser_reg(), MechanismStandard_reg(), MechanismType_reg(), NetCon_reg(), LinearMechanism_reg(), KSChan_reg(), Impedance_reg(), SaveState_reg(), BBSaveState_reg(), FInitializeHandler_reg(), - StateTransitionEvent_reg(), nrnpython_reg() + StateTransitionEvent_reg(), nrnpython_reg(), NMODLRandom_reg() #if USEDASPK - , + , Daspk_reg() #endif #if USECVODE @@ -26,7 +26,7 @@ , Shape_reg, PlotShape_reg, PPShape_reg, RangeVarPlot_reg, SectionBrowser_reg, MechanismStandard_reg, MechanismType_reg, NetCon_reg, LinearMechanism_reg, KSChan_reg, Impedance_reg, SaveState_reg, BBSaveState_reg, FInitializeHandler_reg, StateTransitionEvent_reg, - nrnpython_reg + nrnpython_reg, NMODLRandom_reg #if USEDASPK , Daspk_reg diff --git a/src/nrniv/nrncore_write.cpp b/src/nrniv/nrncore_write.cpp index d449977074..cd9550f0b7 100644 --- a/src/nrniv/nrncore_write.cpp +++ b/src/nrniv/nrncore_write.cpp @@ -141,7 +141,12 @@ bool corenrn_direct; // name of coreneuron mpi library to load std::string corenrn_mpi_library; -static size_t part1(); +struct part1_ret { + std::size_t rankbytes{}; + neuron::model_sorted_token sorted_token; +}; + +static part1_ret part1(); static void part2(const char*); /// dump neuron model to given directory path @@ -156,7 +161,7 @@ size_t write_corenrn_model(const std::string& path) { 
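[Editorial illustration, not part of the patch.] gapsolve() above is a Jacobi-style fixed-point iteration: the factored per-tree matrix plays the role of D in A = D + R, each pass solves D·x_new = b' where b' is the original b adjusted by the off-tree gap-junction coupling applied to x_old (pargap_jacobi_rhs does that exchange, across ranks when needed), and the loop stops once the largest |Re Δx| + |Im Δx| falls below tol. The following sketch reproduces the pattern on a tiny dense complex system with the coupling applied explicitly; all names and values are illustrative:

    #include <algorithm>
    #include <complex>
    #include <cstdio>
    #include <vector>

    using cplx = std::complex<double>;

    int main() {
        // A = D + R, a 2x2 diagonally dominant complex system.
        std::vector<cplx> D{cplx(4, 1), cplx(5, -1)};                    // "easy" diagonal part
        std::vector<std::vector<cplx>> R{{cplx(0, 0), cplx(1, 0)},       // off-diagonal coupling
                                         {cplx(0.5, 0.5), cplx(0, 0)}};
        std::vector<cplx> b{cplx(1, 0), cplx(0, 1)};
        std::vector<cplx> x(2), x_old(2);

        const double tol = 1e-9;
        int iter = 1;
        for (; iter <= 500; ++iter) {
            // b' = b - R * x_old, then solve the diagonal system D * x = b'.
            for (int i = 0; i < 2; ++i) {
                cplx rhs = b[i];
                for (int j = 0; j < 2; ++j) {
                    rhs -= R[i][j] * x_old[j];
                }
                x[i] = rhs / D[i];
            }
            // converged when every |Re dx| + |Im dx| <= tol, the same criterion as gapsolve()
            double delta = 0.0;
            for (int i = 0; i < 2; ++i) {
                cplx dx = x[i] - x_old[i];
                delta = std::max(delta, std::abs(dx.real()) + std::abs(dx.imag()));
            }
            if (delta <= tol) {
                break;
            }
            x_old = x;
        }
        std::printf("converged after %d iterations\n", iter);
        return 0;
    }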
create_dir_path(path); // calculate size of the model - auto const rankbytes = part1(); + auto const rankbytes = part1().rankbytes; // mechanism and global variables write_memb_mech_types(get_filename(path, "bbcore_mech.dat").c_str()); @@ -174,7 +179,11 @@ size_t nrncore_write() { return write_corenrn_model(path); } -static size_t part1() { +static part1_ret part1() { + // Need the NEURON model to be frozen and sorted in order to transfer it to + // CoreNEURON + auto sorted_token = nrn_ensure_model_data_are_sorted(); + size_t rankbytes = 0; static int bbcore_dparam_size_size = -1; @@ -191,23 +200,22 @@ static size_t part1() { for (int i = 0; i < n_memb_func; ++i) { int sz = nrn_prop_dparam_size_[i]; bbcore_dparam_size[i] = sz; - Memb_func* mf = memb_func + i; - if (mf && mf->dparam_semantics && sz && mf->dparam_semantics[sz - 1] == -3) { + const Memb_func& mf = memb_func[i]; + if (mf.dparam_semantics && sz && mf.dparam_semantics[sz - 1] == -3) { // cvode_ieq in NEURON but not CoreNEURON bbcore_dparam_size[i] = sz - 1; } } CellGroup::setup_nrn_has_net_event(); cellgroups_ = new CellGroup[nrn_nthread]; // here because following needs mlwithart - CellGroup::mk_tml_with_art(cellgroups_); + CellGroup::mk_tml_with_art(sorted_token, cellgroups_); rankbytes += CellGroup::get_mla_rankbytes(cellgroups_); rankbytes += nrncore_netpar_bytes(); // printf("%d bytes %ld\n", nrnmpi_myid, rankbytes); - CellGroup* cgs = CellGroup::mk_cellgroups(cellgroups_); - - CellGroup::datumtransform(cgs); - return rankbytes; + CellGroup::mk_cellgroups(sorted_token, cellgroups_); + CellGroup::datumtransform(cellgroups_); + return {rankbytes, std::move(sorted_token)}; } static void part2(const char* path) { @@ -267,8 +275,19 @@ int nrncore_run(const char* arg) { // using direct memory mode corenrn_direct = true; + // If "--simulate-only" argument is passed that means that the model is already dumped to disk + // and we just need to simulate it with CoreNEURON + // Avoid trying to check the NEURON model, passing any data between them and other bookeeping + // actions + bool corenrn_skip_write_model_to_disk = static_cast(arg).find( + "--skip-write-model-to-disk") != std::string::npos; + // check that model can be transferred - model_ready(); + // unless "--simulate-only" argument is passed that means that the model is already dumped to + // disk + if (!corenrn_skip_write_model_to_disk) { + model_ready(); + } // get coreneuron library handle void* handle = [] { @@ -293,8 +312,20 @@ int nrncore_run(const char* arg) { hoc_execerror("Could not get symbol corenrn_embedded_run from", NULL); } - // prepare the model - part1(); + if (nrnmpi_numprocs > 1 && t > 0.0) { + // In case t was reached by an fadvance on the NEURON side, + // it may be the case that there are spikes generated on other + // ranks that have not been enqueued on this rank. + nrn_spike_exchange(nrn_threads); + } + + // check that model can be transferred unless we only want to run the CoreNEURON simulation + // with prebuilt model + if (!corenrn_skip_write_model_to_disk) { + // prepare the model, the returned token will keep the NEURON-side copy of + // the model frozen until the end of nrncore_run. + auto sorted_token = part1().sorted_token; + } int have_gap = nrnthread_v_transfer_ ? 
1 : 0; #if !NRNMPI @@ -308,6 +339,11 @@ int nrncore_run(const char* arg) { // close handle and return result dlclose(handle); + // Simulation has finished after calling coreneuron_launcher so we can now return + if (!corenrn_skip_write_model_to_disk) { + return result; + } + // Note: possibly non-empty only if nrn_nthread > 1 CellGroup::clean_deferred_type2artml(); @@ -335,26 +371,53 @@ int nrncore_is_file_mode() { return 0; } +/** Find folder set for --datpath CLI option in CoreNEURON to dump the CoreNEURON data + * Note that it is expected to have the CLI option passed in the form of `--datpath ` + * All the logic to find the proper folder to dump the coreneuron files in file_mode is + * tightly coupled with the `coreneuron` Python class. + */ +std::string find_datpath_in_arguments(const std::string& coreneuron_arguments) { + std::string arg; + std::stringstream ss(coreneuron_arguments); + // Split the coreneuron arguments based on spaces + // and look for the `--datpath ` + getline(ss, arg, ' '); + while (arg != "--datpath") { + getline(ss, arg, ' '); + } + // Read the real path that follows `--datpath` + getline(ss, arg, ' '); + return arg; +} + /** Run coreneuron with arg string from neuron.coreneuron.nrncore_arg(tstop) * Return 0 on success */ int nrncore_psolve(double tstop, int file_mode) { if (nrnpy_nrncore_arg_p_) { - char* arg = (*nrnpy_nrncore_arg_p_)(tstop); - if (arg) { + char* args = (*nrnpy_nrncore_arg_p_)(tstop); + if (args) { + auto args_as_str = static_cast(args); // if file mode is requested then write model to a directory // note that CORENRN_DATA_DIR name is also used in module // file coreneuron.py - if (file_mode) { - const char* CORENRN_DATA_DIR = "corenrn_data"; + auto corenrn_skip_write_model_to_disk = + args_as_str.find("--skip-write-model-to-disk") != std::string::npos; + if (file_mode && !corenrn_skip_write_model_to_disk) { + std::string CORENRN_DATA_DIR = "corenrn_data"; + if (args_as_str.find("--datpath") != std::string::npos) { + CORENRN_DATA_DIR = find_datpath_in_arguments(args); + } write_corenrn_model(CORENRN_DATA_DIR); } - nrncore_run(arg); + nrncore_run(args); // data return nt._t so copy to t t = nrn_threads[0]._t; - free(arg); + free(args); // Really just want to get NetParEvent back onto queue. - nrn_spike_exchange_init(); + if (!corenrn_skip_write_model_to_disk) { + nrn_spike_exchange_init(); + } return 0; } } diff --git a/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.cpp b/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.cpp index 0c9b0a9016..ec88e782a7 100644 --- a/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.cpp +++ b/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.cpp @@ -74,8 +74,6 @@ int get_global_int_item(const char* name) { return secondorder; } else if (strcmp(name, "Random123_global_index") == 0) { return nrnran123_get_globalindex(); - } else if (strcmp(name, "_nrnunit_use_legacy_") == 0) { - return _nrnunit_use_legacy_; } return 0; } @@ -148,19 +146,21 @@ void nrnthreads_all_weights_return(std::vector& weights) { * The ARTIFICIAL_CELL type case is special as there is no thread specific * Memb_list for those. 
*/ -size_t nrnthreads_type_return(int type, int tid, double*& data, double**& mdata) { +size_t nrnthreads_type_return(int type, int tid, double*& data, std::vector& mdata) { size_t n = 0; data = NULL; - mdata = NULL; + mdata.clear(); if (tid >= nrn_nthread) { return n; } NrnThread& nt = nrn_threads[tid]; if (type == voltage) { - data = nt._actual_v; + auto const cache_token = nrn_ensure_model_data_are_sorted(); + data = nt.node_voltage_storage(); n = size_t(nt.end); } else if (type == i_membrane_) { // i_membrane_ - data = nt._nrn_fast_imem->_nrn_sav_rhs; + auto const cache_token = nrn_ensure_model_data_are_sorted(); + data = nt.node_sav_rhs_storage(); n = size_t(nt.end); } else if (type == 0) { // time data = &nt._t; @@ -168,13 +168,13 @@ size_t nrnthreads_type_return(int type, int tid, double*& data, double**& mdata) } else if (type > 0 && type < n_memb_func) { Memb_list* ml = nt._ml_list[type]; if (ml) { - mdata = ml->_data; + mdata = ml->data(); n = ml->nodecount; } else { // The single thread case is easy if (nrn_nthread == 1) { - ml = memb_list + type; - mdata = ml->_data; + ml = &memb_list[type]; + mdata = ml->data(); n = ml->nodecount; } else { // mk_tml_with_art() created a cgs[id].mlwithart which appended @@ -185,7 +185,7 @@ size_t nrnthreads_type_return(int type, int tid, double*& data, double**& mdata) // cellgroups_ portion (deleting it on return from nrncore_run). auto& ml = CellGroup::deferred_type2artml_[tid][type]; n = size_t(ml->nodecount); - mdata = ml->_data; + mdata = ml->data(); } } } @@ -203,7 +203,7 @@ void nrnthread_group_ids(int* grp) { int nrnthread_dat1(int tid, int& n_presyn, int& n_netcon, - int*& output_gid, + std::vector& output_gid, int*& netcon_srcgid, std::vector& netcon_negsrcgid_tid) { if (tid >= nrn_nthread) { @@ -212,8 +212,7 @@ int nrnthread_dat1(int tid, CellGroup& cg = cellgroups_[tid]; n_presyn = cg.n_presyn; n_netcon = cg.n_netcon; - output_gid = cg.output_gid; - cg.output_gid = NULL; + output_gid = std::move(cg.output_gid); netcon_srcgid = cg.netcon_srcgid; cg.netcon_srcgid = NULL; netcon_negsrcgid_tid = cg.netcon_negsrcgid_tid; @@ -259,7 +258,7 @@ int nrnthread_dat2_1(int tid, cg.ml_vdata_offset[j] = vdata_offset; int* ds = memb_func[type].dparam_semantics; for (int psz = 0; psz < bbcore_dparam_size[type]; ++psz) { - if (ds[psz] == -4 || ds[psz] == -6 || ds[psz] == -7 || ds[psz] == 0) { + if (ds[psz] == -4 || ds[psz] == -6 || ds[psz] == -7 || ds[psz] == -11 || ds[psz] == 0) { // printf("%s ds[%d]=%d vdata_offset=%d\n", memb_func[type].sym->name, psz, ds[psz], // vdata_offset); vdata_offset += ml->nodecount; @@ -295,19 +294,18 @@ int nrnthread_dat2_2(int tid, // If direct transfer, copy, because target space already allocated bool copy = corenrn_direct; if (copy) { - for (int i = 0; i < nt.end; ++i) { - v_parent_index[i] = nt._v_parent_index[i]; - a[i] = nt._actual_a[i]; - b[i] = nt._actual_b[i]; - area[i] = nt._actual_area[i]; - v[i] = nt._actual_v[i]; - } + std::copy_n(nt.node_a_storage(), nt.end, a); + std::copy_n(nt.node_b_storage(), nt.end, b); + std::copy_n(nt.node_area_storage(), nt.end, area); + std::copy_n(nt.node_voltage_storage(), nt.end, v); + std::copy_n(nt._v_parent_index, nt.end, v_parent_index); } else { v_parent_index = nt._v_parent_index; - a = nt._actual_a; - b = nt._actual_b; - area = nt._actual_area; - v = nt._actual_v; + auto const cache_token = nrn_ensure_model_data_are_sorted(); + a = nt.node_a_storage(); + area = nt.node_area_storage(); + b = nt.node_b_storage(); + v = nt.node_voltage_storage(); } if (cg.ndiam) { if 
(!copy) { @@ -318,7 +316,7 @@ int nrnthread_dat2_2(int tid, double diam = 0.0; for (Prop* p = nd->prop; p; p = p->next) { if (p->_type == MORPHOLOGY) { - diam = p->param[0]; + diam = p->param(0); break; } } @@ -334,6 +332,7 @@ int nrnthread_dat2_mech(int tid, int*& nodeindices, double*& data, int*& pdata, + std::vector& nmodlrandom, // 5 uint32_t per var per instance std::vector& pointer2type) { if (tid >= nrn_nthread) { return 0; @@ -350,13 +349,23 @@ int nrnthread_dat2_mech(int tid, int isart = nrn_is_artificial_[type]; int n = ml->nodecount; int sz = nrn_prop_param_size_[type]; - double* data1; - if (isart) { // data may not be contiguous - data1 = contiguous_art_data(ml->_data, n, sz); // delete after use + + // As the NEURON data is now transposed then for now always create a new + // copy in the format expected by CoreNEURON. + // TODO remove the need for this entirely + if (!copy) { + data = new double[n * sz]; + } + for (auto instance = 0, k = 0; instance < n; ++instance) { + for (auto variable = 0; variable < sz; ++variable) { + data[k++] = ml->data(instance, variable); + } + } + + if (isart) { // data may not be contiguous nodeindices = NULL; } else { nodeindices = ml->nodeindices; // allocated below if copy - data1 = ml->_data[0]; // do not delete after use } if (copy) { if (!isart) { @@ -365,15 +374,6 @@ int nrnthread_dat2_mech(int tid, nodeindices[i] = ml->nodeindices[i]; } } - int nn = n * sz; - for (int i = 0; i < nn; ++i) { - data[i] = data1[i]; - } - if (isart) { - delete[] data1; - } - } else { - data = data1; } sz = bbcore_dparam_size[type]; // nrn_prop_dparam_size off by 1 if cvode_ieq. @@ -393,6 +393,32 @@ int nrnthread_dat2_mech(int tid, pdata = NULL; } + // nmodlrandom: reserve 5 uint32 for each var of each instance + // id1, id2, id3, seq, uint32_t(which) + // Header is number of random variables followed by dparam indices + // if no destructor, skip. There are no random variables. 
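[Editorial illustration, not part of the patch.] The per-mechanism copy loop above flattens NEURON's per-variable (structure-of-arrays) storage into the instance-major layout that is written out or handed to CoreNEURON: data[instance*sz + variable]. A tiny standalone version of that transposition, with made-up values:

    #include <cstdio>
    #include <vector>

    int main() {
        const int n = 3;   // instances
        const int sz = 2;  // variables per instance
        // NEURON-side storage: one contiguous column per variable.
        std::vector<std::vector<double>> soa{{0.1, 0.2, 0.3},   // variable 0, instances 0..2
                                             {10., 20., 30.}};  // variable 1, instances 0..2

        // Flattened, instance-major buffer, i.e. the layout the copy loop above produces.
        std::vector<double> flat(n * sz);
        for (int instance = 0, k = 0; instance < n; ++instance) {
            for (int variable = 0; variable < sz; ++variable) {
                flat[k++] = soa[variable][instance];
            }
        }

        for (double v: flat) {
            std::printf("%g ", v);  // prints: 0.1 10 0.2 20 0.3 30
        }
        std::printf("\n");
        return 0;
    }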
+ if (nrn_mech_inst_destruct.count(type)) { + auto& indices = nrn_mech_random_indices(type); + nmodlrandom.reserve(1 + indices.size() + 5 * n * indices.size()); + nmodlrandom.push_back(indices.size()); + for (int ix: indices) { + nmodlrandom.push_back((uint32_t) ix); + } + for (int ix: indices) { + uint32_t data[5]; + char which; + for (int i = 0; i < n; ++i) { + auto& datum = ml->pdata[i][ix]; + nrnran123_State* r = (nrnran123_State*) datum.get(); + nrnran123_getids3(r, &data[0], &data[1], &data[2]); + nrnran123_getseq(r, &data[3], &which); + data[4] = uint32_t(which); + for (auto j: data) { + nmodlrandom.push_back(j); + } + } + } + } return 1; } @@ -477,8 +503,7 @@ int nrnthread_dat2_corepointer_mech(int tid, icnt = 0; // data size and allocate for (int i = 0; i < ml->nodecount; ++i) { - (*nrn_bbcore_write_[type])( - NULL, NULL, &dcnt, &icnt, ml->_data[i], ml->pdata[i], ml->_thread, &nt); + (*nrn_bbcore_write_[type])(NULL, NULL, &dcnt, &icnt, ml, i, ml->pdata[i], ml->_thread, &nt); } dArray = NULL; iArray = NULL; @@ -492,7 +517,7 @@ int nrnthread_dat2_corepointer_mech(int tid, // data values for (int i = 0; i < ml->nodecount; ++i) { (*nrn_bbcore_write_[type])( - dArray, iArray, &dcnt, &icnt, ml->_data[i], ml->pdata[i], ml->_thread, &nt); + dArray, iArray, &dcnt, &icnt, ml, i, ml->pdata[i], ml->_thread, &nt); } return 1; @@ -518,14 +543,44 @@ int core2nrn_corepointer_mech(int tid, int type, int icnt, int dcnt, int* iArray int dk = 0; // data values for (int i = 0; i < ml->nodecount; ++i) { - (*nrn_bbcore_read_[type])( - dArray, iArray, &dk, &ik, ml->_data[i], ml->pdata[i], ml->_thread, &nt); + (*nrn_bbcore_read_[type])(dArray, iArray, &dk, &ik, ml, i, ml->pdata[i], ml->_thread, &nt); } assert(dk == dcnt); assert(ik == icnt); return 1; } +// NMODL RANDOM seq34 data return from coreneuron +int core2nrn_nmodlrandom(int tid, + int type, + const std::vector& indices, + const std::vector& nmodlrandom) { + if (tid >= nrn_nthread) { + return 0; + } + NrnThread& nt = nrn_threads[tid]; + Memb_list* ml = nt._ml_list[type]; + // ARTIFICIAL_CELL are not in nt. 
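[Editorial illustration, not part of the patch.] The nmodlrandom vector built above is a flat uint32 stream: a header holding the number of RANDOM variables and their dparam indices, then, per variable and per instance, the five words id1, id2, id3, seq and which. The sketch below decodes such a stream on the receiving side without touching the Random123 API; the concrete numbers are invented for the example:

    #include <cstdint>
    #include <cstdio>
    #include <vector>

    int main() {
        const int nodecount = 2;  // instances of the mechanism
        // Example stream: 1 RANDOM variable stored at dparam index 3, followed by
        // 5 words (id1, id2, id3, seq, which) for each of the 2 instances.
        std::vector<uint32_t> nmodlrandom{1, 3,
                                          7, 8, 9, 42, 0,
                                          7, 8, 10, 43, 2};
        size_t k = 0;
        uint32_t nvar = nmodlrandom[k++];
        std::vector<uint32_t> indices(nmodlrandom.begin() + k, nmodlrandom.begin() + k + nvar);
        k += nvar;
        for (uint32_t ix: indices) {
            for (int i = 0; i < nodecount; ++i) {
                uint32_t id1 = nmodlrandom[k++], id2 = nmodlrandom[k++], id3 = nmodlrandom[k++];
                uint32_t seq = nmodlrandom[k++], which = nmodlrandom[k++];
                std::printf("dparam[%u] instance %d: ids=(%u,%u,%u) seq=%u which=%u\n",
                            ix, i, id1, id2, id3, seq, which);
            }
        }
        return 0;
    }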
+ if (!ml) { + ml = CellGroup::deferred_type2artml_[tid][type]; + assert(ml); + } + + auto& nrnindices = nrn_mech_random_indices(type); // for sanity checking + assert(nrnindices == indices); + assert(nmodlrandom.size() == indices.size() * ml->nodecount); + + int ir = 0; // into nmodlrandom + for (const auto ix: nrnindices) { + for (int i = 0; i < ml->nodecount; ++i) { + auto& datum = ml->pdata[i][ix]; + nrnran123_State* state = (nrnran123_State*) datum.get(); + nrnran123_setseq(state, nmodlrandom[ir++]); + } + } + return 1; +} + int* datum2int(int type, Memb_list* ml, NrnThread& nt, @@ -580,6 +635,8 @@ int* datum2int(int type, } else if (etype == -7) { // bbcorepointer pdata[jj] = ml_vdata_offset + eindex; // printf("etype %d jj=%d eindex=%d pdata=%d\n", etype, jj, eindex, pdata[jj]); + } else if (etype == -11) { // random + pdata[jj] = ml_vdata_offset + eindex; } else { // uninterpreted assert(eindex != -3); // avoided if last pdata[jj] = 0; @@ -590,8 +647,6 @@ int* datum2int(int type, } void part2_clean() { - CellGroup::clear_artdata2index(); - CellGroup::clean_art(cellgroups_); if (corenrn_direct) { @@ -602,23 +657,16 @@ void part2_clean() { cellgroups_ = NULL; } -std::vector CellGroup::deferred_netcons; +std::vector> CellGroup::deferred_netcons; void CellGroup::defer_clean_netcons(CellGroup* cgs) { clean_deferred_netcons(); for (int tid = 0; tid < nrn_nthread; ++tid) { - CellGroup& cg = cgs[tid]; - deferred_netcons.push_back(cg.netcons); - cg.netcons = nullptr; + deferred_netcons.push_back(std::move(cgs[tid].netcons)); } } void CellGroup::clean_deferred_netcons() { - for (auto ncs: deferred_netcons) { - if (ncs) { - delete[] ncs; - } - } deferred_netcons.clear(); } @@ -640,10 +688,10 @@ int nrnthread_dat2_vecplay(int tid, std::vector& indices) { // add the index of each instance in fixed_play_ for thread tid. 
// error if not a VecPlayContinuous with no discon vector - PlayRecList* fp = net_cvode_instance->fixed_play_; - for (int i = 0; i < fp->count(); ++i) { - if (fp->item(i)->type() == VecPlayContinuousType) { - VecPlayContinuous* vp = (VecPlayContinuous*) fp->item(i); + int i = 0; + for (auto& item: *net_cvode_instance->fixed_play_) { + if (item->type() == VecPlayContinuousType) { + auto* vp = static_cast(item); if (vp->discon_indices_ == NULL) { if (vp->ith_ == nt.id) { assert(vp->y_ && vp->t_); @@ -655,6 +703,7 @@ int nrnthread_dat2_vecplay(int tid, std::vector& indices) { } else { assert(0); } + ++i; } return 1; @@ -676,9 +725,9 @@ int nrnthread_dat2_vecplay_inst(int tid, } NrnThread& nt = nrn_threads[tid]; - PlayRecList* fp = net_cvode_instance->fixed_play_; - if (fp->item(i)->type() == VecPlayContinuousType) { - auto* const vp = static_cast(fp->item(i)); + auto* fp = net_cvode_instance->fixed_play_; + if (fp->at(i)->type() == VecPlayContinuousType) { + auto* const vp = static_cast(fp->at(i)); if (!vp->discon_indices_) { if (vp->ith_ == nt.id) { auto* pd = static_cast(vp->pd_); @@ -690,9 +739,10 @@ int nrnthread_dat2_vecplay_inst(int tid, } Memb_list* ml = tml->ml; int nn = nrn_prop_param_size_[tml->index] * ml->nodecount; - if (pd >= ml->_data[0] && pd < (ml->_data[0] + nn)) { + auto const legacy_index = ml->legacy_index(pd); + if (legacy_index >= 0) { mtype = tml->index; - ix = (pd - ml->_data[0]); + ix = legacy_index; sz = vector_capacity(vp->y_); yvec = vector_vec(vp->y_); tvec = vector_vec(vp->t_); @@ -718,9 +768,9 @@ void core2nrn_vecplay(int tid, int i, int last_index, int discon_index, int ubou if (tid >= nrn_nthread) { return; } - PlayRecList* fp = net_cvode_instance->fixed_play_; - assert(fp->item(i)->type() == VecPlayContinuousType); - VecPlayContinuous* vp = (VecPlayContinuous*) fp->item(i); + auto* fp = net_cvode_instance->fixed_play_; + assert(fp->at(i)->type() == VecPlayContinuousType); + VecPlayContinuous* vp = (VecPlayContinuous*) fp->at(i); vp->last_index_ = last_index; vp->discon_index_ = discon_index; vp->ubound_index_ = ubound_index; @@ -728,10 +778,9 @@ void core2nrn_vecplay(int tid, int i, int last_index, int discon_index, int ubou /** start the vecplay events **/ void core2nrn_vecplay_events() { - PlayRecList* fp = net_cvode_instance->fixed_play_; - for (int i = 0; i < fp->count(); ++i) { - if (fp->item(i)->type() == VecPlayContinuousType) { - VecPlayContinuous* vp = (VecPlayContinuous*) fp->item(i); + for (auto& item: *net_cvode_instance->fixed_play_) { + if (item->type() == VecPlayContinuousType) { + auto* vp = static_cast(item); NrnThread* nt = nrn_threads + vp->ith_; vp->e_->send(vp->t_->elem(vp->ubound_index_), net_cvode_instance, nt); } @@ -746,7 +795,7 @@ void nrn2core_transfer_WatchCondition(WatchCondition* wc, void (*cb)(int, int, i int pnttype = pnt->prop->_type; int watch_index = wc->watch_index_; int triggered = wc->flag_ ? 1 : 0; - int pntindex = CellGroup::nrncore_pntindex_for_queue(pnt->prop->param, tid, pnttype); + int pntindex = CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, pnttype); (*cb)(tid, pnttype, pntindex, watch_index, triggered); // This transfers CvodeThreadData activated WatchCondition @@ -844,10 +893,9 @@ static void set_info(TQItem* tqi, // On the other hand, if there is a non-null weight pointer, its index // can only be determined by sweeping over all NetCon. - double* data = pnt->prop->param; // Introduced the public static method below because ARTIFICIAL_CELL // are not located in NrnThread and are not cache efficient. 
- int index = CellGroup::nrncore_pntindex_for_queue(data, tid, type); + int index = CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, type); core_te->intdata.push_back(index); size_t iloc_wt = core_te->intdata.size(); @@ -1068,7 +1116,7 @@ static void core2nrn_SelfEvent_helper(int tid, // Needs to be tested when permuted on CoreNEURON side. assert(tar_type == pnt->prop->_type); - // assert(tar_index == CellGroup::nrncore_pntindex_for_queue(pnt->prop->param, tid, tar_type)); + assert(tar_index == CellGroup::nrncore_pntindex_for_queue(pnt->prop, tid, tar_type)); int const movable_index = type2movable[tar_type]; auto* const movable_arg = pnt->prop->dparam + movable_index; @@ -1163,7 +1211,7 @@ void nrn2core_PreSyn_flag(int tid, std::set& presyns_flag_true) { if (ps->flag_ && ps->thvar_) { int type = 0; int index_v = -1; - nrn_dblpntr2nrncore(static_cast(ps->thvar_), *ps->nt_, type, index_v); + nrn_dblpntr2nrncore(ps->thvar_, *ps->nt_, type, index_v); assert(type == voltage); presyns_flag_true.insert(index_v); } @@ -1229,9 +1277,14 @@ void nrn2core_subworld_info(int& cnt, int& subworld_rank, int& numprocs_subworld, int& numprocs_world) { - cnt = nrnmpi_subworld_change_cnt; - subworld_index = nrnmpi_subworld_id; - subworld_rank = nrnmpi_myid; - numprocs_subworld = nrnmpi_numprocs_subworld; - numprocs_world = nrnmpi_numprocs_world; +#ifdef NRNMPI + nrnmpi_get_subworld_info( + &cnt, &subworld_index, &subworld_rank, &numprocs_subworld, &numprocs_world); +#else + cnt = 0; + subworld_index = -1; + subworld_rank = 0; + numprocs_subworld = 1; + numprocs_world = 1; +#endif } diff --git a/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.h b/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.h index df07d8ef64..efce1359fb 100644 --- a/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.h +++ b/src/nrniv/nrncore_write/callbacks/nrncore_callbacks.h @@ -6,6 +6,8 @@ #include #include #include +#include + // includers need several pieces of info for nrn_get_partrans_setup_info #include "partrans.h" @@ -38,7 +40,7 @@ void nrnthread_group_ids(int* groupids); int nrnthread_dat1(int tid, int& n_presyn, int& n_netcon, - int*& output_gid, + std::vector& output_gid, int*& netcon_srcgid, std::vector& netcon_negsrcgid_tid); int nrnthread_dat2_1(int tid, @@ -66,6 +68,7 @@ int nrnthread_dat2_mech(int tid, int*& nodeindices, double*& data, int*& pdata, + std::vector& nmodlrandom, std::vector& pointer2type); int nrnthread_dat2_3(int tid, int nweight, @@ -121,8 +124,12 @@ extern "C" { int nrnthread_all_spike_vectors_return(std::vector& spiketvec, std::vector& spikegidvec); void nrnthreads_all_weights_return(std::vector& weights); -size_t nrnthreads_type_return(int type, int tid, double*& data, double**& mdata); +size_t nrnthreads_type_return(int type, int tid, double*& data, std::vector& mdata); int core2nrn_corepointer_mech(int tid, int type, int icnt, int dcnt, int* iarray, double* darray); +int core2nrn_nmodlrandom(int tid, + int type, + const std::vector& indices, + const std::vector& nmodlrandom); } // For direct transfer of event queue information from CoreNEURON @@ -227,6 +234,7 @@ static core2nrn_callback_t cnbs[] = { {"core2nrn_clear_queues_", (CNB) core2nrn_clear_queues}, {"core2nrn_corepointer_mech_", (CNB) core2nrn_corepointer_mech}, + {"core2nrn_nmodlrandom_", (CNB) core2nrn_nmodlrandom}, {"core2nrn_NetCon_event_", (CNB) core2nrn_NetCon_event}, {"core2nrn_SelfEvent_event_", (CNB) core2nrn_SelfEvent_event}, {"core2nrn_SelfEvent_event_noweight_", (CNB) core2nrn_SelfEvent_event_noweight}, diff --git 
a/src/nrniv/nrncore_write/data/cell_group.cpp b/src/nrniv/nrncore_write/data/cell_group.cpp index b9563ea152..fe25bc1793 100644 --- a/src/nrniv/nrncore_write/data/cell_group.cpp +++ b/src/nrniv/nrncore_write/data/cell_group.cpp @@ -18,16 +18,12 @@ extern int nrn_has_net_event_cnt_; extern int* nrn_has_net_event_; extern short* nrn_is_artificial_; -PVoid2Int CellGroup::artdata2index_; Deferred_Type2ArtMl CellGroup::deferred_type2artml_; int* CellGroup::has_net_event_; CellGroup::CellGroup() { n_output = n_real_output = n_presyn = n_netcon = n_mech = ntype = 0; group_id = -1; - output_gid = output_vindex = 0; - netcons = 0; - output_ps = 0; ndiam = 0; netcon_srcgid = netcon_pnttype = netcon_pntindex = 0; datumindices = 0; @@ -39,10 +35,6 @@ CellGroup::CellGroup() { } CellGroup::~CellGroup() { - if (output_gid) - delete[] output_gid; - if (output_vindex) - delete[] output_vindex; if (netcon_srcgid) delete[] netcon_srcgid; if (netcon_pnttype) @@ -51,17 +43,13 @@ CellGroup::~CellGroup() { delete[] netcon_pntindex; if (datumindices) delete[] datumindices; - if (netcons) - delete[] netcons; - if (output_ps) - delete[] output_ps; if (ml_vdata_offset) delete[] ml_vdata_offset; delete[] type2ml; } -CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { +void CellGroup::mk_cellgroups(neuron::model_sorted_token const& cache_token, CellGroup* cgs) { for (int i = 0; i < nrn_nthread; ++i) { auto& nt = nrn_threads[i]; cgs[i].n_real_cell = nt.ncell; // real cell count @@ -75,9 +63,13 @@ CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { hoc_Item* q; ITERATE(q, pth) { auto* ps = static_cast(VOIDITM(q)); - auto& pv = ps->thvar_; - assert(pv); - if (pv < nt._actual_v || pv >= (nt._actual_v + nt.end)) { + // The PreSyn should refer to a valid Node + assert(ps->thvar_); + // The old code says this should always be a voltage, and + // voltage is the thing we are moving to a new data structure, + // so we should not be hitting the backwards-compatibility layer + if (!ps->thvar_.refers_to( + neuron::model().node_data())) { hoc_execerr_ext("NetCon range variable reference source not a voltage"); } if (ps->gid_ < 0) { @@ -113,17 +105,12 @@ CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { } cgs[i].n_presyn = npre; - cgs[i].output_ps = new PreSyn*[npre]; - cgs[i].output_gid = new int[npre]; - cgs[i].output_vindex = new int[npre]; // in case some cells do not have voltage presyns (eg threshold detection // computed from a POINT_PROCESS NET_RECEIVE with WATCH and net_event) // initialize as unused. - for (int j = 0; j < npre; ++j) { - cgs[i].output_ps[j] = NULL; - cgs[i].output_gid[j] = -1; - cgs[i].output_vindex[j] = -1; - } + cgs[i].output_ps.resize(npre); + cgs[i].output_gid.resize(npre, -1); + cgs[i].output_vindex.resize(npre, -1); // fill in the output_ps, output_gid, and output_vindex for the real cells. npre = 0; @@ -131,10 +118,16 @@ CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { hoc_Item* q; ITERATE(q, pth) { auto* ps = static_cast(VOIDITM(q)); - auto& pv = ps->thvar_; - cgs[i].output_ps[npre] = ps; - cgs[i].output_gid[npre] = ps->output_index_; - cgs[i].output_vindex[npre] = pv - nt._actual_v; + assert(ps->thvar_); + assert(ps->thvar_.refers_to_a_modern_data_structure()); + assert(ps->thvar_.refers_to( + neuron::model().node_data())); + cgs[i].output_ps.at(npre) = ps; + cgs[i].output_gid.at(npre) = ps->output_index_; + // Convert back to an old-style index, i.e. 
the index of the + // voltage within this NrnThread after sorting + cgs[i].output_vindex.at(npre) = ps->thvar_.current_row() - + cache_token.thread_cache(i).node_data_offset; ++npre; } } @@ -147,25 +140,16 @@ CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { int type = mla[j].first; Memb_list* ml = mla[j].second; if (nrn_has_net_event(type)) { - for (int j = 0; j < ml->nodecount; ++j) { - auto* pnt = ml->pdata[j][1].get(); - PreSyn* ps = (PreSyn*) pnt->presyn_; - cgs[i].output_ps[npre] = ps; - long agid = -1; - if (nrn_is_artificial_[type]) { - // static_cast ensures the RHS is calculated with - // `long` precision, not `int` precision. This lets us - // check for overflow below. - agid = -(type + - 1000 * static_cast(nrncore_art2index(pnt->prop->param))); - } else { // POINT_PROCESS with net_event - int sz = nrn_prop_param_size_[type]; - double* d1 = ml->_data[0]; - double* d2 = pnt->prop->param; - assert(d2 >= d1 && d2 < (d1 + (sz * ml->nodecount))); - long ix{(d2 - d1) / sz}; - agid = -(type + 1000 * ix); - } + for (int instance = 0; instance < ml->nodecount; ++instance) { + auto* const pnt = ml->pdata[instance][1].get(); + auto* const ps = static_cast(pnt->presyn_); + auto const other_thread = static_cast(pnt->_vnt)->id; + assert(other_thread == i); + cgs[i].output_ps.at(npre) = ps; + auto const offset = cache_token.thread_cache(i).mechanism_offset.at(type); + auto const global_row = pnt->prop->id().current_row(); + assert(global_row >= offset); + long const agid = -(type + 1000 * static_cast(global_row - offset)); if (ps) { if (ps->output_index_ >= 0) { // has gid cgs[i].output_gid[npre] = ps->output_index_; @@ -215,9 +199,7 @@ CellGroup* CellGroup::mk_cellgroups(CellGroup* cgs) { // use the Hoc NetCon object list to segregate according to threads // and fill the CellGroup netcons, netcon_srcgid, netcon_pnttype, and // netcon_pntindex (and, if nrn_nthread > 1, netcon_negsrcgid_tid). - CellGroup::mk_cgs_netcon_info(cgs); - - return cgs; + CellGroup::mk_cgs_netcon_info(cache_token, cgs); } void CellGroup::datumtransform(CellGroup* cgs) { @@ -264,7 +246,6 @@ void CellGroup::datumtransform(CellGroup* cgs) { void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_list* ml) { NrnThread& nt = nrn_threads[ith]; - double* a = nt._actual_area; int nnode = nt.end; int mcnt = ml->nodecount; int dsize = bbcore_dparam_size[di.type]; @@ -277,7 +258,7 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l int vdata_size = 0; for (int i = 0; i < dsize; ++i) { int* ds = memb_func[di.type].dparam_semantics; - if (ds[i] == -4 || ds[i] == -6 || ds[i] == -7 || ds[i] == 0) { + if (ds[i] == -4 || ds[i] == -6 || ds[i] == -7 || ds[i] == -11 || ds[i] == 0) { ++vdata_size; } } @@ -287,41 +268,25 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l // Prop* datum instance arrays are not in cache efficient order // ie. ml->pdata[i] are not laid out end to end in memory. // Also, ml->_data for artificial cells is not in cache efficient order - // but in the artcell case there are no pointers to doubles and - // the _actual_area pointer should be left unfilled. 
+ // but in the artcell case there are no pointers to doubles Datum* dparam = ml->pdata[i]; int offset = i * dsize; int vdata_offset = i * vdata_size; for (int j = 0; j < dsize; ++j) { int etype = -100; // uninterpreted int eindex = -1; - if (dmap[j] == -1) { // double* into _actual_area + if (dmap[j] == -1) { // used to be a double* into _actual_area, now handled by soa<...> if (isart) { etype = -1; eindex = -1; // the signal to ignore in bbcore. } else { - const auto* dpj = dparam[j].get(); - if (dpj == &ml->nodelist[i]->_area) { - // possibility it points directly into Node._area instead of - // _actual_area. For our purposes we need to figure out the - // _actual_area index. - etype = -1; - eindex = ml->nodeindices[i]; - assert(a[ml->nodeindices[i]] == *dpj); - } else { - if (dpj < a || dpj >= (a + nnode)) { - printf("%s dparam=%p a=%p a+nnode=%p j=%d\n", - memb_func[di.type].sym->name, - dpj, - a, - a + nnode, - j); - abort(); - } - assert(dpj >= a && dpj < (a + nnode)); - etype = -1; - eindex = dpj - a; - } + auto area = static_cast>(dparam[j]); + assert(area.refers_to_a_modern_data_structure()); + auto const cache_token = nrn_ensure_model_data_are_sorted(); + etype = -1; + // current_row() refers to the global Node data, but we need + // to set eindex to something local to the NrnThread + eindex = area.current_row() - cache_token.thread_cache(ith).node_data_offset; } } else if (dmap[j] == -2) { // this is an ion and dparam[j][0].i is the iontype etype = -2; @@ -346,6 +311,9 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l } else if (dmap[j] == -10) { // fornetcon etype = -10; eindex = 0; + } else if (dmap[j] == -11) { // random + etype = -11; + eindex = vdata_offset++; } else if (dmap[j] == -9) { // diam cg.ndiam = nt.end; etype = -9; @@ -355,19 +323,19 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l // Need to determine this node and then simple to search its // mechanism list for MORPHOLOGY and then know the diam. Node* nd = ml->nodelist[i]; - double* pdiam = NULL; + neuron::container::data_handle pdiam{}; for (Prop* p = nd->prop; p; p = p->next) { if (p->_type == MORPHOLOGY) { - pdiam = p->param; + pdiam = p->param_handle(0); break; } } - assert(dparam[j].get() == pdiam); + assert(static_cast>(dparam[j]) == pdiam); eindex = ml->nodeindices[i]; } else if (dmap[j] == -5) { // POINTER // must be a pointer into nt->_data. Handling is similar to eion so // give proper index into the type. - double* pd = dparam[j].get(); + auto const pd = static_cast>(dparam[j]); nrn_dblpntr2nrncore(pd, nt, etype, eindex); if (etype == 0) { fprintf(stderr, @@ -377,34 +345,13 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l assert(etype != 0); // pointer into one of the tml types? 
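[Editorial illustration, not part of the patch.] datumindex_fill and datum2int above dispatch on the numeric dparam "semantics" codes attached to each mechanism datum. A compact summary of the codes visible in these hunks; the enumerator names are invented for readability, only the values and the grounded comments come from the surrounding code:

    // dparam semantics codes as handled in datumindex_fill()/datum2int().
    enum DparamSemantics : int {
        AreaSem      = -1,   // node area; since this patch resolved through a data_handle<double>
        IonSem       = -2,   // ion: dparam[j][0].i holds the ion type
        CvodeIeqSem  = -3,   // cvode_ieq slot; dropped when sizing dparam for CoreNEURON
        VdataSem4    = -4,   // opaque pointer slot, counted toward vdata
        PointerSem   = -5,   // POINTER into another range variable
        VdataSem6    = -6,   // opaque pointer slot, counted toward vdata
        BBCorePtrSem = -7,   // bbcorepointer, counted toward vdata
        DiamSem      = -9,   // diam; resolved through the node's MORPHOLOGY property
        ForNetconSem = -10,  // FOR_NETCONS bookkeeping
        RandomSem    = -11,  // NMODL RANDOM stream, counted toward vdata (new in this patch)
        // 0           : uninterpreted, counted toward vdata
        // 1 .. 999    : reference into that mechanism type's own (eion) data
        // > 1000      : ion style, int* into the ion's dparam
    };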
} else if (dmap[j] > 0 && dmap[j] < 1000) { // double* into eion type data - Memb_list* eml = cg.type2ml[dmap[j]]; + etype = dmap[j]; + Memb_list* eml = cg.type2ml[etype]; assert(eml); auto* const pval = dparam[j].get(); - if (pval < eml->_data[0]) { - printf("%s dparam=%p data=%p j=%d etype=%d %s\n", - memb_func[di.type].sym->name, - pval, - eml->_data[0], - j, - dmap[j], - memb_func[dmap[j]].sym->name); - abort(); - } - assert(pval >= eml->_data[0]); - etype = dmap[j]; - if (pval >= (eml->_data[0] + (nrn_prop_param_size_[etype] * eml->nodecount))) { - printf("%s dparam=%p data=%p j=%d psize=%d nodecount=%d etype=%d %s\n", - memb_func[di.type].sym->name, - pval, - eml->_data[0], - j, - nrn_prop_param_size_[etype], - eml->nodecount, - etype, - memb_func[etype].sym->name); - } - assert(pval < (eml->_data[0] + (nrn_prop_param_size_[etype] * eml->nodecount))); - eindex = pval - eml->_data[0]; + auto const legacy_index = eml->legacy_index(pval); + assert(legacy_index >= 0); + eindex = legacy_index; } else if (dmap[j] > 1000) { // int* into ion dparam[xxx][0] // store the actual ionstyle etype = dmap[j]; @@ -424,7 +371,7 @@ void CellGroup::datumindex_fill(int ith, CellGroup& cg, DatumIndices& di, Memb_l // use the Hoc NetCon object list to segregate according to threads // and fill the CellGroup netcons, netcon_srcgid, netcon_pnttype, and // netcon_pntindex (called at end of mk_cellgroups); -void CellGroup::mk_cgs_netcon_info(CellGroup* cgs) { +void CellGroup::mk_cgs_netcon_info(neuron::model_sorted_token const& cache_token, CellGroup* cgs) { // count the netcons for each thread int* nccnt = new int[nrn_nthread]; for (int i = 0; i < nrn_nthread; ++i) { @@ -446,7 +393,7 @@ void CellGroup::mk_cgs_netcon_info(CellGroup* cgs) { // allocate for (int i = 0; i < nrn_nthread; ++i) { cgs[i].n_netcon = nccnt[i]; - cgs[i].netcons = new NetCon*[nccnt[i] + 1]; + cgs[i].netcons.resize(nccnt[i] + 1); cgs[i].netcon_srcgid = new int[nccnt[i] + 1]; cgs[i].netcon_pnttype = new int[nccnt[i] + 1]; cgs[i].netcon_pntindex = new int[nccnt[i] + 1]; @@ -468,19 +415,11 @@ void CellGroup::mk_cgs_netcon_info(CellGroup* cgs) { if (nc->target_) { int type = nc->target_->prop->_type; + auto const target_thread = static_cast(nc->target_->_vnt)->id; + assert(target_thread == ith); cgs[ith].netcon_pnttype[i] = type; - if (nrn_is_artificial_[type]) { - cgs[ith].netcon_pntindex[i] = nrncore_art2index(nc->target_->prop->param); - } else { - // cache efficient so can calculate index from pointer - Memb_list* ml = cgs[ith].type2ml[type]; - int sz = nrn_prop_param_size_[type]; - double* d1 = ml->_data[0]; - double* d2 = nc->target_->prop->param; - assert(d2 >= d1 && d2 < (d1 + (sz * ml->nodecount))); - int ix = (d2 - d1) / sz; - cgs[ith].netcon_pntindex[i] = ix; - } + cgs[ith].netcon_pntindex[i] = nc->target_->prop->id().current_row() - + cache_token.thread_cache(ith).mechanism_offset.at(type); } else { cgs[ith].netcon_pnttype[i] = 0; cgs[ith].netcon_pntindex[i] = -1; @@ -504,21 +443,18 @@ void CellGroup::mk_cgs_netcon_info(CellGroup* cgs) { NULL); } } - Point_process* pnt = (Point_process*) ps->osrc_->u.this_pointer; + auto* const pnt = static_cast(ps->osrc_->u.this_pointer); int type = pnt->prop->_type; - if (nrn_is_artificial_[type]) { - int ix = nrncore_art2index(pnt->prop->param); - cgs[ith].netcon_srcgid[i] = -(type + 1000 * ix); - } else { - assert(nrn_has_net_event(type)); - Memb_list* ml = cgs[ith].type2ml[type]; - int sz = nrn_prop_param_size_[type]; - double* d1 = ml->_data[0]; - double* d2 = pnt->prop->param; - assert(d2 
>= d1 && d2 < (d1 + (sz * ml->nodecount))); - int ix = (d2 - d1) / sz; - cgs[ith].netcon_srcgid[i] = -(type + 1000 * ix); - } + auto const src_thread = static_cast(pnt->_vnt)->id; + auto const current = pnt->prop->id().current_row(); + auto const offset = + cache_token.thread_cache(src_thread).mechanism_offset.at(type); + // the resulting GID is different for "the same" pnt/source + // if the number of threads changes, because it encodes the + // offset of the source process into the thread that it + // lives in + cgs[ith].netcon_srcgid[i] = -(type + + 1000 * static_cast(current - offset)); } else { cgs[ith].netcon_srcgid[i] = -1; } @@ -542,7 +478,7 @@ void CellGroup::mk_cgs_netcon_info(CellGroup* cgs) { // so we assume there will be no POINTER usage into that data. // Also, note that ml.nodecount for artificial cell does not refer to // a list of voltage nodes but just to the count of instances. -void CellGroup::mk_tml_with_art(CellGroup* cgs) { +void CellGroup::mk_tml_with_art(neuron::model_sorted_token const& cache_token, CellGroup* cgs) { // copy NrnThread tml list and append ARTIFICIAL cell types // but do not include PatternStim if file mode. // For direct mode PatternStim is not treated specially except that @@ -554,7 +490,6 @@ void CellGroup::mk_tml_with_art(CellGroup* cgs) { // Now using cgs[tid].mlwithart instead of // tml_with_art = new NrnThreadMembList*[nrn_nthread]; // to allow fast retrieval of type and Memb_list* given index into the vector. - // copy from NrnThread for (int id = 0; id < nrn_nthread; ++id) { MlWithArt& mla = cgs[id].mlwithart; @@ -563,7 +498,6 @@ void CellGroup::mk_tml_with_art(CellGroup* cgs) { } } int* acnt = new int[nrn_nthread]; - for (int i = 0; i < n_memb_func; ++i) { if (nrn_is_artificial_[i] && memb_list[i].nodecount) { // skip PatternStim if file mode transfer. @@ -573,7 +507,7 @@ void CellGroup::mk_tml_with_art(CellGroup* cgs) { if (strcmp(memb_func[i].sym->name, "HDF5Reader") == 0) { continue; } - Memb_list* ml = memb_list + i; + Memb_list* ml = &memb_list[i]; // how many artificial in each thread for (int id = 0; id < nrn_nthread; ++id) { acnt[id] = 0; @@ -588,19 +522,18 @@ void CellGroup::mk_tml_with_art(CellGroup* cgs) { for (int id = 0; id < nrn_nthread; ++id) { if (acnt[id]) { MlWithArt& mla = cgs[id].mlwithart; - ml = new Memb_list; + ml = new Memb_list{i}; mla.push_back(MlWithArtItem(i, ml)); // need to delete ml when mla destroyed. 
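[Editorial illustration, not part of the patch.] Both the artificial-cell presyn gids and netcon_srcgid above use the same packed encoding: a (mechanism type, instance index) pair is stored as the negative value -(type + 1000*index), where index is now the Prop row minus the per-thread mechanism offset. A small round-trip check of that encoding (valid as long as type < 1000, which the scheme itself assumes):

    #include <cassert>
    #include <cstdio>

    int main() {
        const int type = 42;       // must be < 1000 for the encoding to be unambiguous
        const long index = 12345;  // thread-local instance index (row - mechanism_offset)
        const long agid = -(type + 1000 * index);

        // Recover the pair from the packed value.
        const long positive = -agid;
        const int recovered_type = static_cast<int>(positive % 1000);
        const long recovered_index = positive / 1000;
        assert(recovered_type == type && recovered_index == index);
        std::printf("agid %ld -> type %d, index %ld\n", agid, recovered_type, recovered_index);
        return 0;
    }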
ml->nodecount = acnt[id]; ml->nodelist = NULL; ml->nodeindices = NULL; ml->prop = NULL; ml->_thread = NULL; - ml->_data = new double*[acnt[id]]; + // ml->_data = new double*[acnt[id]]; ml->pdata = new Datum*[acnt[id]]; } } // fill data and pdata pointers - // and fill the artdata2index hash table for (int id = 0; id < nrn_nthread; ++id) { acnt[id] = 0; } @@ -608,9 +541,8 @@ void CellGroup::mk_tml_with_art(CellGroup* cgs) { auto* pnt = memb_list[i].pdata[j][1].get(); int id = ((NrnThread*) pnt->_vnt)->id; Memb_list* ml = cgs[id].mlwithart.back().second; - ml->_data[acnt[id]] = memb_list[i]._data[j]; + ml->set_storage_offset(cache_token.thread_cache(id).mechanism_offset.at(i)); ml->pdata[acnt[id]] = memb_list[i].pdata[j]; - artdata2index_.insert(std::pair(ml->_data[acnt[id]], acnt[id])); ++acnt[id]; } } @@ -628,9 +560,6 @@ size_t CellGroup::get_mla_rankbytes(CellGroup* cellgroups_) { size_t npnt = 0; size_t nart = 0; int ith = nt->id; - // printf("rank %d thread %d\n", nrnmpi_myid, ith); - // printf(" ncell=%d nnode=%d\n", nt->ncell, nt->end); - // v_parent_index, _actual_a, _actual_b, _actual_area nbytes = nt->end * (1 * sizeof(int) + 3 * sizeof(double)); threadbytes += nbytes; @@ -683,7 +612,7 @@ void CellGroup::clean_art(CellGroup* cgs) { if (!deferred_type2artml_.empty()) { deferred_type2artml_[ith][type] = ml; } else { - delete[] ml->_data; + // delete[] ml->_data; delete[] ml->pdata; delete ml; } diff --git a/src/nrniv/nrncore_write/data/cell_group.h b/src/nrniv/nrncore_write/data/cell_group.h index 7d8cde04d4..77962a3d5d 100644 --- a/src/nrniv/nrncore_write/data/cell_group.h +++ b/src/nrniv/nrncore_write/data/cell_group.h @@ -14,7 +14,6 @@ struct NrnThread; typedef std::pair MlWithArtItem; typedef std::vector MlWithArt; -typedef std::map PVoid2Int; typedef std::vector> Deferred_Type2ArtMl; class CellGroup { @@ -32,12 +31,12 @@ class CellGroup { int n_mech; int* ml_vdata_offset; // following three are parallel arrays - PreSyn** output_ps; // n_presyn of these, real are first, tml order for acell. - int* output_gid; // n_presyn of these, -(type + 1000*index) if no gid - int* output_vindex; // n_presyn of these. >=0 if associated with voltage, -(type + 1000*index) - // for acell. - int n_netcon; // all that have targets associated with this threads Point_process. - NetCon** netcons; + std::vector output_ps; // n_presyn of these, real are first, tml order for acell. + std::vector output_gid; // n_presyn of these, -(type + 1000*index) if no gid + std::vector output_vindex; // n_presyn of these. >=0 if associated with voltage, -(type + + // 1000*index) for acell. + int n_netcon; // all that have targets associated with this threads Point_process. + std::vector netcons; int* netcon_srcgid; // -(type + 1000*index) refers to acell with no gid // -1 means the netcon has no source std::vector netcon_negsrcgid_tid; // if some srcgid above are negative, @@ -50,26 +49,20 @@ class CellGroup { DatumIndices* datumindices; MlWithArt mlwithart; - static CellGroup* mk_cellgroups(CellGroup*); // gid, PreSyn, NetCon, Point_process relation. - static void datumtransform(CellGroup*); // Datum.pval to int + static void mk_cellgroups(neuron::model_sorted_token const& cache_token, + CellGroup*); // gid, PreSyn, NetCon, Point_process relation. 
+ static void datumtransform(CellGroup*); // Datum.pval to int static void datumindex_fill(int, CellGroup&, DatumIndices&, Memb_list*); // helper - static void mk_cgs_netcon_info(CellGroup* cgs); - static void mk_tml_with_art(CellGroup*); + static void mk_cgs_netcon_info(neuron::model_sorted_token const& cache_token, CellGroup* cgs); + static void mk_tml_with_art(neuron::model_sorted_token const& cache_token, CellGroup*); static size_t get_mla_rankbytes(CellGroup*); static void clean_art(CellGroup*); static void setup_nrn_has_net_event(); - static inline void clear_artdata2index() { - artdata2index_.clear(); - } - static inline void clean_deferred_type2artml() { for (auto& th: deferred_type2artml_) { for (auto& p: th) { Memb_list* ml = p.second; - if (ml->_data) { - delete[] ml->_data; - } if (ml->pdata) { delete[] ml->pdata; } @@ -80,33 +73,21 @@ class CellGroup { } static Deferred_Type2ArtMl deferred_type2artml_; - static std::vector deferred_netcons; + static std::vector> deferred_netcons; static void defer_clean_netcons(CellGroup*); static void clean_deferred_netcons(); private: - static PVoid2Int artdata2index_; - static int* has_net_event_; - - static inline int nrncore_art2index(double* d) { - assert(artdata2index_.find(d) != artdata2index_.end()); - return artdata2index_[d]; - } - static inline int nrn_has_net_event(int type) { return has_net_event_[type]; } public: - static inline int nrncore_pntindex_for_queue(double* d, int tid, int type) { - Memb_list* ml = nrn_threads[tid]._ml_list[type]; - if (ml) { - assert(d >= ml->_data[0] && - d < (ml->_data[0] + (ml->nodecount * nrn_prop_param_size_[type]))); - return (d - ml->_data[0]) / nrn_prop_param_size_[type]; - } - return nrncore_art2index(d); + static inline int nrncore_pntindex_for_queue(Prop* p, int tid, int type) { + assert(p->_type == type); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + return p->id().current_row() - sorted_token.thread_cache(tid).mechanism_offset.at(type); } }; diff --git a/src/nrniv/nrncore_write/data/datum_indices.h b/src/nrniv/nrncore_write/data/datum_indices.h index e26ee47361..705e7743a6 100644 --- a/src/nrniv/nrncore_write/data/datum_indices.h +++ b/src/nrniv/nrncore_write/data/datum_indices.h @@ -4,8 +4,7 @@ // assume all Datum.pval point into this cell. In practice, this holds because // they point either to the area or an ion property of the given node. // This is tightly coupled to cache_efficient -// NrnThread.NrnThreadMembList.Memb_List.data and pdata and -// NrnThread._actual_area +// NrnThread.NrnThreadMembList.Memb_List.data and pdata etc. class DatumIndices { public: DatumIndices(); diff --git a/src/nrniv/nrncore_write/io/nrncore_io.cpp b/src/nrniv/nrncore_write/io/nrncore_io.cpp index b53923fd6d..4b3fa7f709 100644 --- a/src/nrniv/nrncore_write/io/nrncore_io.cpp +++ b/src/nrniv/nrncore_write/io/nrncore_io.cpp @@ -22,7 +22,7 @@ extern NetCvode* net_cvode_instance; extern void (*nrnthread_v_transfer_)(NrnThread*); int chkpnt; -const char* bbcore_write_version = "1.6"; // Allow muliple gid and PreSyn per real cell. 
+const char* bbcore_write_version = "1.7"; // NMODLRandom /// create directory with given path void create_dir_path(const std::string& path) { @@ -108,7 +108,6 @@ void write_globals(const char* fname) { fprintf(f, "0 0\n"); fprintf(f, "secondorder %d\n", secondorder); fprintf(f, "Random123_globalindex %d\n", nrnran123_get_globalindex()); - fprintf(f, "_nrnunit_use_legacy_ %d\n", _nrnunit_use_legacy_); fclose(f); } @@ -130,13 +129,10 @@ void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) { // nrnthread_dat1(int tid, int& n_presyn, int& n_netcon, int*& output_gid, int*& netcon_srcgid); fprintf(f, "%d npresyn\n", cg.n_presyn); fprintf(f, "%d nnetcon\n", cg.n_netcon); - writeint(cg.output_gid, cg.n_presyn); + writeint(cg.output_gid.data(), cg.n_presyn); writeint(cg.netcon_srcgid, cg.n_netcon); - if (cg.output_gid) { - delete[] cg.output_gid; - cg.output_gid = NULL; - } + cg.output_gid.clear(); if (cg.netcon_srcgid) { delete[] cg.netcon_srcgid; cg.netcon_srcgid = NULL; @@ -190,10 +186,13 @@ void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) { double *a = NULL, *b = NULL, *area = NULL, *v = NULL, *diamvec = NULL; nrnthread_dat2_2(nt.id, v_parent_index, a, b, area, v, diamvec); writeint(nt._v_parent_index, nt.end); - writedbl(nt._actual_a, nt.end); - writedbl(nt._actual_b, nt.end); - writedbl(nt._actual_area, nt.end); - writedbl(nt._actual_v, nt.end); + // Warning: this is only correct if no modifications have been made to any + // Node since reorder_secorder() was last called. + auto const cache_token = nrn_ensure_model_data_are_sorted(); + writedbl(nt.node_a_storage(), nt.end); + writedbl(nt.node_b_storage(), nt.end); + writedbl(nt.node_area_storage(), nt.end); + writedbl(nt.node_voltage_storage(), nt.end); if (cg.ndiam) { writedbl(diamvec, nt.end); delete[] diamvec; @@ -207,7 +206,9 @@ void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) { int *nodeindices = NULL, *pdata = NULL; double* data = NULL; std::vector pointer2type; - nrnthread_dat2_mech(nt.id, i, dsz_inst, nodeindices, data, pdata, pointer2type); + std::vector nmodlrandom; + nrnthread_dat2_mech( + nt.id, i, dsz_inst, nodeindices, data, pdata, nmodlrandom, pointer2type); Memb_list* ml = mla[i].second; int n = ml->nodecount; int sz = nrn_prop_param_size_[type]; @@ -215,7 +216,7 @@ void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) { writeint(nodeindices, n); } writedbl(data, n * sz); - if (nrn_is_artificial_[type]) { + if (data) { delete[] data; } sz = bbcore_dparam_size[type]; @@ -228,6 +229,11 @@ void write_nrnthread(const char* path, NrnThread& nt, CellGroup& cg) { if (sz > 0) { writeint(pointer2type.data(), sz); } + + fprintf(f, "%d nmodlrandom\n", int(nmodlrandom.size())); + if (nmodlrandom.size()) { + write_uint32vec(nmodlrandom, f); + } } } @@ -301,30 +307,14 @@ void writedbl_(double* p, size_t size, FILE* f) { assert(n == size); } -#define writeint(p, size) writeint_(p, size, f) -#define writedbl(p, size) writedbl_(p, size, f) - -void write_contiguous_art_data(double** data, int nitem, int szitem, FILE* f) { +void write_uint32vec(std::vector& vec, FILE* f) { fprintf(f, "chkpnt %d\n", chkpnt++); - // the assumption is that an fwrite of nitem groups of szitem doubles can be - // fread as a single group of nitem*szitem doubles. 
- for (int i = 0; i < nitem; ++i) { - size_t n = fwrite(data[i], sizeof(double), szitem, f); - assert(n == szitem); - } -} - -double* contiguous_art_data(double** data, int nitem, int szitem) { - double* d1 = new double[nitem * szitem]; - int k = 0; - for (int i = 0; i < nitem; ++i) { - for (int j = 0; j < szitem; ++j) { - d1[k++] = data[i][j]; - } - } - return d1; + size_t n = fwrite(vec.data(), sizeof(uint32_t), vec.size(), f); + assert(n == vec.size()); } +#define writeint(p, size) writeint_(p, size, f) +#define writedbl(p, size) writedbl_(p, size, f) void nrnbbcore_vecplay_write(FILE* f, NrnThread& nt) { // Get the indices in NetCvode.fixed_play_ for this thread diff --git a/src/nrniv/nrncore_write/io/nrncore_io.h b/src/nrniv/nrncore_write/io/nrncore_io.h index 6d38b1f4a6..d191de5f89 100644 --- a/src/nrniv/nrncore_write/io/nrncore_io.h +++ b/src/nrniv/nrncore_write/io/nrncore_io.h @@ -31,12 +31,15 @@ void write_nrnthread(const char* fname, NrnThread& nt, CellGroup& cg); void writeint_(int* p, size_t size, FILE* f); void writedbl_(double* p, size_t size, FILE* f); +void write_uint32vec(std::vector& vec, FILE* f); + #define writeint(p, size) writeint_(p, size, f) #define writedbl(p, size) writedbl_(p, size, f) -typedef void (*bbcore_write_t)(double*, int*, int*, int*, double*, Datum*, Datum*, NrnThread*); +// also for read +struct Memb_list; +using bbcore_write_t = + void (*)(double*, int*, int*, int*, Memb_list*, std::size_t, Datum*, Datum*, NrnThread*); -void write_contiguous_art_data(double** data, int nitem, int szitem, FILE* f); -double* contiguous_art_data(double** data, int nitem, int szitem); void write_nrnthread_task(const char*, CellGroup* cgs, bool append); void nrnbbcore_vecplay_write(FILE* f, NrnThread& nt); diff --git a/src/nrniv/nrncore_write/utils/nrncore_utils.cpp b/src/nrniv/nrncore_write/utils/nrncore_utils.cpp index 3645d53846..9267bd0c96 100644 --- a/src/nrniv/nrncore_write/utils/nrncore_utils.cpp +++ b/src/nrniv/nrncore_write/utils/nrncore_utils.cpp @@ -11,20 +11,12 @@ #include "vrecitem.h" // for nrnbbcore_vecplay_write #include "parse.hpp" #include +#ifdef HAVE_UNISTD_H #include +#endif #include #include -#if __has_include() #include -namespace neuron::std { -namespace filesystem = ::std::filesystem; -} -#else -#include -namespace neuron::std { -namespace filesystem = ::std::experimental::filesystem; -} -#endif #include "nrnwrap_dlfcn.h" @@ -43,9 +35,6 @@ extern short* nrn_is_artificial_; // prerequisites for a NEURON model to be transferred to CoreNEURON. void model_ready() { - // Do the model type checks first as some of them prevent the success - // of cvode.cache_efficient(1) and the error message associated with - // !use_cachevec would be misleading. if (!nrndae_list_is_empty()) { hoc_execerror( "CoreNEURON cannot simulate a model that contains extra LinearMechanism or RxD " @@ -61,14 +50,10 @@ void model_ready() { hoc_execerror("CoreNEURON can only use fixed step method.", NULL); } } - - if (!use_cachevec) { - hoc_execerror("NEURON model for CoreNEURON requires cvode.cache_efficient(1)", NULL); - } if (tree_changed || v_structure_change || diam_changed) { hoc_execerror( "NEURON model internal structures for CoreNEURON are out of date. Make sure call to " - "finitialize(...) is after cvode.cache_efficient(1))", + "finitialize(...)", NULL); } } @@ -140,34 +125,39 @@ void nrnbbcore_register_mapping() { // This function is related to stdindex2ptr in CoreNeuron to determine which values should // be transferred from CoreNeuron. 
Types correspond to the value to be transferred based on // mech_type enum or non-artificial cell mechanisms. -// Limited to pointers to voltage, nt._nrn_fast_imem->_nrn_sav_rhs (fast_imem value) or +// Limited to pointers to voltage, nt.node_sav_rhs_storage() (fast_imem value) or // data of non-artificial cell mechanisms. -// Requires cache_efficient mode. // Input double* and NrnThread. Output type and index. // type == 0 means could not determine index. -extern "C" int nrn_dblpntr2nrncore(double* pd, NrnThread& nt, int& type, int& index) { - assert(use_cachevec); +int nrn_dblpntr2nrncore(neuron::container::data_handle dh, + NrnThread& nt, + int& type, + int& index) { int nnode = nt.end; type = 0; - if (pd >= nt._actual_v && pd < (nt._actual_v + nnode)) { - type = voltage; // signifies an index into voltage array portion of _data - index = pd - nt._actual_v; - } else if (nt._nrn_fast_imem && pd >= nt._nrn_fast_imem->_nrn_sav_rhs && - pd < (nt._nrn_fast_imem->_nrn_sav_rhs + nnode)) { + if (dh.refers_to(neuron::model().node_data())) { + auto const cache_token = nrn_ensure_model_data_are_sorted(); + type = voltage; + // In the CoreNEURON world this is an offset into the voltage array part + // of _data + index = dh.current_row() - cache_token.thread_cache(nt.id).node_data_offset; + return 0; + } + if (dh.refers_to(neuron::model().node_data())) { + auto const cache_token = nrn_ensure_model_data_are_sorted(); type = i_membrane_; // signifies an index into i_membrane_ array portion of _data - index = pd - nt._nrn_fast_imem->_nrn_sav_rhs; - } else { - for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) { - if (nrn_is_artificial_[tml->index]) { - continue; - } - Memb_list* ml1 = tml->ml; - int nn = nrn_prop_param_size_[tml->index] * ml1->nodecount; - if (pd >= ml1->_data[0] && pd < (ml1->_data[0] + nn)) { - type = tml->index; - index = pd - ml1->_data[0]; - break; - } + index = dh.current_row() - cache_token.thread_cache(nt.id).node_data_offset; + return 0; + } + auto* const pd = static_cast(dh); + for (NrnThreadMembList* tml = nt.tml; tml; tml = tml->next) { + if (nrn_is_artificial_[tml->index]) { + continue; + } + if (auto const maybe_index = tml->ml->legacy_index(pd); maybe_index >= 0) { + type = tml->index; + index = maybe_index; + break; } } return type == 0 ? 
1 : 0; @@ -176,7 +166,6 @@ extern "C" int nrn_dblpntr2nrncore(double* pd, NrnThread& nt, int& type, int& in #if defined(HAVE_DLFCN_H) -extern int nrn_use_fast_imem; extern char* neuron_home; /** Check if coreneuron is loaded into memory */ @@ -194,7 +183,7 @@ bool is_coreneuron_loaded() { /** Open library with given path and return dlopen handle **/ -void* get_handle_for_lib(neuron::std::filesystem::path const& path) { +void* get_handle_for_lib(std::filesystem::path const& path) { // On windows path.c_str() is wchar_t* void* handle = dlopen(path.string().c_str(), RTLD_NOW | RTLD_GLOBAL | RTLD_NODELETE); if (!handle) { @@ -213,15 +202,15 @@ void* get_coreneuron_handle() { } // record what we tried so we can give a helpful error message - std::vector paths_tried; + std::vector paths_tried; paths_tried.reserve(3); // env variable get highest preference const char* corenrn_lib = std::getenv("CORENEURONLIB"); if (corenrn_lib) { - neuron::std::filesystem::path const corenrn_lib_path{corenrn_lib}; + std::filesystem::path const corenrn_lib_path{corenrn_lib}; paths_tried.push_back(corenrn_lib_path); - if (neuron::std::filesystem::exists(corenrn_lib_path)) { + if (std::filesystem::exists(corenrn_lib_path)) { return get_handle_for_lib(corenrn_lib_path); } } @@ -233,17 +222,17 @@ void* get_coreneuron_handle() { // first check if coreneuron specific library exist in /.libs // note that we need to get full path especially for OSX { - auto const corenrn_lib_path = neuron::std::filesystem::current_path() / + auto const corenrn_lib_path = std::filesystem::current_path() / neuron::config::system_processor / corenrn_lib_name; paths_tried.push_back(corenrn_lib_path); - if (neuron::std::filesystem::exists(corenrn_lib_path)) { + if (std::filesystem::exists(corenrn_lib_path)) { return get_handle_for_lib(corenrn_lib_path); } } // last fallback is minimal library with internal mechanisms // named libcorenrnmech_internal - neuron::std::filesystem::path corenrn_lib_path{neuron_home}; + std::filesystem::path corenrn_lib_path{neuron_home}; auto const corenrn_internal_lib_name = std::string{neuron::config::shared_library_prefix} .append("corenrnmech_internal") .append(neuron::config::shared_library_suffix); @@ -252,7 +241,7 @@ void* get_coreneuron_handle() { #endif (corenrn_lib_path /= "lib") /= corenrn_internal_lib_name; paths_tried.push_back(corenrn_lib_path); - if (neuron::std::filesystem::exists(corenrn_lib_path)) { + if (std::filesystem::exists(corenrn_lib_path)) { return get_handle_for_lib(corenrn_lib_path); } // Nothing worked => error @@ -280,18 +269,6 @@ void check_coreneuron_compatibility(void* handle) { s_path << bbcore_write_version << " vs " << cn_bbcore_read_version; hoc_execerror("Incompatible NEURON and CoreNEURON versions :", s_path.str().c_str()); } - - // Make sure legacy vs modern units are consistent. - // Would be nice to check in coreneuron set_globals but that would abort - // if inconsistent. - void* cn_nrnunit_use_legacy_sym = dlsym(handle, "corenrn_units_use_legacy"); - if (!cn_nrnunit_use_legacy_sym) { - hoc_execerror("Could not get symbol corenrn_units_use_legacy from CoreNEURON", NULL); - } - bool cn_nrnunit_use_legacy = (*(bool (*)()) cn_nrnunit_use_legacy_sym)(); - if (cn_nrnunit_use_legacy != (_nrnunit_use_legacy_ == 1)) { - hoc_execerror("nrnunit_use_legacy() inconsistent with CORENRN_ENABLE_LEGACY_UNITS", NULL); - } } #endif //! 
HAVE_DLFCN_H diff --git a/src/nrniv/nrncore_write/utils/nrncore_utils.h b/src/nrniv/nrncore_write/utils/nrncore_utils.h index c51f98788a..e76b9a7ca7 100644 --- a/src/nrniv/nrncore_write/utils/nrncore_utils.h +++ b/src/nrniv/nrncore_write/utils/nrncore_utils.h @@ -1,4 +1,5 @@ #pragma once +#include "neuron/container/data_handle.hpp" #include @@ -7,7 +8,10 @@ struct NrnThread; void model_ready(); int count_distinct(double* data, int len); extern void nrnbbcore_register_mapping(); -extern "C" int nrn_dblpntr2nrncore(double* pd, NrnThread& nt, int& type, int& index); +int nrn_dblpntr2nrncore(neuron::container::data_handle pd, + NrnThread& nt, + int& type, + int& index); #include "nrnwrap_dlfcn.h" #if defined(HAVE_DLFCN_H) diff --git a/src/nrniv/nrndae.cpp b/src/nrniv/nrndae.cpp index 304b77c7a2..4bbe44d8f6 100644 --- a/src/nrniv/nrndae.cpp +++ b/src/nrniv/nrndae.cpp @@ -3,6 +3,8 @@ #include "nrndae.h" #include "nrndae_c.h" #include "nrnoc2iv.h" +#include "treeset.h" +#include "utils/enumerate.h" extern int secondorder; @@ -23,16 +25,18 @@ void nrndae_deregister(NrnDAE* n) { int nrndae_extra_eqn_count() { int neqn = 0; - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - neqn += (*m)->extra_eqn_count(); + for (NrnDAE* item: nrndae_list) { + neqn += item->extra_eqn_count(); } return neqn; } -void nrndae_update() { - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->update(); +void nrndae_update(NrnThread* _nt) { + update_sp13_rhs_based_on_actual_rhs(_nt); + for (NrnDAE* item: nrndae_list) { + item->update(); } + update_actual_rhs_based_on_sp13_rhs(_nt); } void nrndae_alloc() { @@ -42,46 +46,61 @@ void nrndae_alloc() { if (_nt->_ecell_memb_list) { neqn += _nt->_ecell_memb_list->nodecount * nlayer; } - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->alloc(neqn + 1); - neqn += (*m)->extra_eqn_count(); + for (NrnDAE* item: nrndae_list) { + item->alloc(neqn + 1); + neqn += item->extra_eqn_count(); } } void nrndae_init() { + for (int it = 0; it < nrn_nthread; ++it) { + auto* const nt = std::next(nrn_threads, it); + update_sp13_mat_based_on_actual_d(nt); + update_sp13_rhs_based_on_actual_rhs(nt); + } if ((!nrndae_list.empty()) && (secondorder > 0 || ((cvode_active_ > 0) && (nrn_use_daspk_ == 0)))) { hoc_execerror("NrnDAEs only work with secondorder==0 or daspk", 0); } - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->init(); + for (NrnDAE* item: nrndae_list) { + item->init(); + } + for (int it = 0; it < nrn_nthread; ++it) { + auto* const nt = std::next(nrn_threads, it); + update_actual_d_based_on_sp13_mat(nt); + update_actual_rhs_based_on_sp13_rhs(nt); } } -void nrndae_rhs() { - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->rhs(); +void nrndae_rhs(NrnThread* _nt) { + update_sp13_mat_based_on_actual_d(_nt); + update_sp13_rhs_based_on_actual_rhs(_nt); + for (NrnDAE* item: nrndae_list) { + item->rhs(); } + update_actual_d_based_on_sp13_mat(_nt); + update_actual_rhs_based_on_sp13_rhs(_nt); } void nrndae_lhs() { - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->lhs(); + for (NrnDAE* item: nrndae_list) { + item->lhs(); } } -void nrndae_dkmap(double** pv, double** pvdot) { - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->dkmap(pv, pvdot); +void nrndae_dkmap(std::vector>& pv, + std::vector>& pvdot) { + for (NrnDAE* item: 
nrndae_list) { + item->dkmap(pv, pvdot); } } void nrndae_dkres(double* y, double* yprime, double* delta) { // c*y' = f(y) so // delta = c*y' - f(y) - for (NrnDAEPtrListIterator m = nrndae_list.begin(); m != nrndae_list.end(); m++) { - (*m)->dkres(y, yprime, delta); + for (NrnDAE* item: nrndae_list) { + item->dkres(y, yprime, delta); } } @@ -192,13 +211,17 @@ int NrnDAE::extra_eqn_count() { return c_->nrow() - nnode_; } -void NrnDAE::dkmap(double** pv, double** pvdot) { +void NrnDAE::dkmap(std::vector>& pv, + std::vector>& pvdot) { // printf("NrnDAE::dkmap\n"); NrnThread* _nt = nrn_threads; for (int i = nnode_; i < size_; ++i) { // printf("bmap_[%d] = %d\n", i, bmap_[i]); - pv[bmap_[i] - 1] = y_.data() + i; - pvdot[bmap_[i] - 1] = _nt->_actual_rhs + bmap_[i]; + pv[bmap_[i] - 1] = neuron::container::data_handle{neuron::container::do_not_search, + y_.data() + i}; + pvdot[bmap_[i] - 1] = + neuron::container::data_handle{neuron::container::do_not_search, + _nt->_sp13_rhs + bmap_[i]}; } } @@ -208,7 +231,7 @@ void NrnDAE::update() { // note that the following is correct also for states that refer // to the internal potential of a segment. i.e rhs is v + vext[0] for (int i = 0; i < size_; ++i) { - y_[i] += _nt->_actual_rhs[bmap_[i]]; + y_[i] += _nt->_sp13_rhs[bmap_[i]]; } // for (int i=0; i < size_; ++i) printf(" i=%d bmap_[i]=%d y_[i]=%g\n", i, bmap_[i], // y_->elem(i)); @@ -288,7 +311,7 @@ void NrnDAE::rhs() { v2y(); f_(y_, yptmp_, size_); for (int i = 0; i < size_; ++i) { - _nt->_actual_rhs[bmap_[i]] += yptmp_[i]; + _nt->_sp13_rhs[bmap_[i]] += yptmp_[i]; } } diff --git a/src/nrniv/nrndae.h b/src/nrniv/nrndae.h index 5e7085c34a..bf0817e1f8 100644 --- a/src/nrniv/nrndae.h +++ b/src/nrniv/nrndae.h @@ -11,12 +11,11 @@ #ifndef nrndae_h #define nrndae_h -// this defines things needed by ocmatrix -#include - #include "ivocvect.h" #include "matrixmap.h" +#include "neuron/container/data_handle.hpp" + #include #include @@ -84,7 +83,8 @@ class NrnDAE { * @param pvdot pointers to voltage derivatives (set by this * function) */ - void dkmap(double** pv, double** pvdot); + void dkmap(std::vector>& pv, + std::vector>& pvdot); /** * Destructor. 
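Note (editorial, not part of the diff): the nrndae_* changes above all follow one pattern. The LinearMechanism/DAE code still reads and writes the sparse13 arrays (_sp13_rhs and the sparse matrix), so each entry point first copies the actual_d/actual_rhs values into the sparse13 storage, runs the DAE callbacks, and then copies the results back. A minimal sketch of that bracketing, reusing the treeset.h helpers the diff already calls; the wrapper name run_with_sp13_view is hypothetical and assumes the surrounding NEURON headers are included:

    template <typename Callback>
    void run_with_sp13_view(NrnThread* nt, Callback&& work) {
        update_sp13_mat_based_on_actual_d(nt);    // actual_d   -> sparse13 matrix
        update_sp13_rhs_based_on_actual_rhs(nt);  // actual_rhs -> sparse13 rhs
        work();                                   // legacy DAE code reads/writes _sp13_* in place
        update_actual_d_based_on_sp13_mat(nt);    // sparse13 matrix -> actual_d
        update_actual_rhs_based_on_sp13_rhs(nt);  // sparse13 rhs    -> actual_rhs
    }

nrndae_rhs() above is exactly this shape; nrndae_update() only needs the rhs half, and nrndae_init() performs the copies once per thread around the init loop.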
diff --git a/src/nrniv/nrnmenu.cpp b/src/nrniv/nrnmenu.cpp index 3127af986d..463ab45280 100644 --- a/src/nrniv/nrnmenu.cpp +++ b/src/nrniv/nrnmenu.cpp @@ -3,7 +3,6 @@ #include #include -#include #if HAVE_IV #include "secbrows.h" @@ -11,12 +10,10 @@ #endif #include "nrniv_mf.h" #include "nrnoc2iv.h" +#include "nrnpy.h" #include "nrnmenu.h" #include "classreg.h" #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - typedef void (*ReceiveFunc)(Point_process*, double*, double); extern int hoc_return_type_code; @@ -25,7 +22,6 @@ extern int hoc_return_type_code; #include "parse.hpp" extern Symlist* hoc_built_in_symlist; extern Symbol** pointsym; -extern double* point_process_pointer(Point_process*, Symbol*, int); extern ReceiveFunc* pnt_receive; extern int nrn_has_net_event_cnt_; extern int* nrn_has_net_event_; @@ -42,10 +38,8 @@ void nrnglobalmechmenu(); void nrnmechmenu(); void nrnpointmenu(); -Object* (*nrnpy_callable_with_args)(Object*, int narg); int (*nrnpy_ob_is_seg)(Object*); - #if HAVE_IV static void pnodemenu(Prop* p1, double, int type, const char* path, MechSelector* = NULL); static void mech_menu(Prop* p1, double, int type, const char* path, MechSelector* = NULL); @@ -148,7 +142,7 @@ void nrnglobalmechmenu() { break; Sprintf(buf, "%s[%d]", sp->name, i); Sprintf(n, "%s[%d]", sp->name, i); - hoc_ivpvalue(n, hoc_val_pointer(buf), false, sp->extra); + hoc_ivpvalue(n, hoc_val_handle(buf), false, sp->extra); } } else { hoc_ivvalue(sp->name, sp->name, 1); @@ -216,7 +210,9 @@ void section_menu(double x1, int type, MechSelector* ms) { } Sprintf(buf, "%s.Ra += 0", sname.string()); hoc_ivpvaluerun("Ra", - &(sec->prop->dparam[7].literal_value()), + neuron::container::data_handle{ + neuron::container::do_not_search, + &(sec->prop->dparam[7].literal_value())}, buf, 1, 0, @@ -224,7 +220,9 @@ void section_menu(double x1, int type, MechSelector* ms) { p = sec->prop; if (p->dparam[4].literal_value() != 1) { hoc_ivpvaluerun("Rall", - &(sec->prop->dparam[4].literal_value()), + neuron::container::data_handle{ + neuron::container::do_not_search, + &(sec->prop->dparam[4].literal_value())}, "diam_changed = 1", 1, 0, @@ -237,7 +235,7 @@ void section_menu(double x1, int type, MechSelector* ms) { hoc_ivvalue("v", buf); } else { Sprintf(buf, "v(%g)", x); - hoc_ivpvalue("v", hoc_val_pointer(buf), false, hoc_lookup("v")->extra); + hoc_ivpvalue("v", hoc_val_handle(buf), false, hoc_lookup("v")->extra); } } @@ -316,7 +314,7 @@ static void mech_menu(Prop* p1, double x, int type, const char* path, MechSelect } } else { Sprintf(buf, "%s[%d](%g)", vsym->name, i, x); - hoc_ivpvalue(n, hoc_val_pointer(buf), false, vsym->extra); + hoc_ivpvalue(n, hoc_val_handle(buf), false, vsym->extra); } } } else { @@ -335,9 +333,9 @@ static void mech_menu(Prop* p1, double x, int type, const char* path, MechSelect char buf2[200]; Sprintf(buf2, "%s.Ra += 0", secname(sec)); hoc_ivpvaluerun( - vsym->name, hoc_val_pointer(buf), buf2, 1, 0, vsym->extra); + vsym->name, hoc_val_handle(buf), buf2, 1, 0, vsym->extra); } else { - hoc_ivpvalue(vsym->name, hoc_val_pointer(buf), deflt, vsym->extra); + hoc_ivpvalue(vsym->name, hoc_val_handle(buf), deflt, vsym->extra); } } } @@ -471,17 +469,21 @@ static void point_menu(Object* ob, int make_label) { break; case STATE: Sprintf(buf,"%s[%d] (States)", psym->name, j); - break; - case 2: + break; + case 2: Sprintf(buf,"%s[%d] (Assigned)", psym->name, j); - break; + break; } #endif if (psym->s_varn) { for (k = 0; k 
< psym->s_varn; k++) { vsym = psym->u.ppsym[k]; - if (nrn_vartype(vsym) == nrnocCONST) { + int vartype = nrn_vartype(vsym); + if (vartype == NMODLRANDOM) { // skip + continue; + } + if (vartype == nrnocCONST) { deflt = true; #if defined(MikeNeubig) @@ -493,11 +495,10 @@ static void point_menu(Object* ob, int make_label) { if (ISARRAY(vsym)) { Arrayinfo* a = vsym->arayinfo; for (m = 0; m < vsym->arayinfo->sub[0]; m++) { - double* pd; if (m > 5) break; Sprintf(buf, "%s[%d]", vsym->name, m); - pd = point_process_pointer(pp, vsym, m); + auto pd = point_process_pointer(pp, vsym, m); if (pd) { hoc_ivpvalue(buf, pd, deflt, vsym->extra); } @@ -759,13 +760,13 @@ void MechanismStandard::panel(const char* label) { Object* pyactval = NULL; int size = hoc_total_array_data(sym, 0); if (pyact_) { - assert(nrnpy_callable_with_args); + assert(neuron::python::methods.callable_with_args); hoc_push_object(msobj_); hoc_pushx(double(i)); hoc_pushx(0.0); - pyactval = (*nrnpy_callable_with_args)(pyact_, 3); + pyactval = neuron::python::methods.callable_with_args(pyact_, 3); } else { - Sprintf(buf, "hoc_ac_ = %d %s", i, action_.string()); + Sprintf(buf, "hoc_ac_ = %d %s", i, action_.c_str()); } hoc_ivvaluerun_ex(sym->name, NULL, @@ -784,13 +785,13 @@ void MechanismStandard::panel(const char* label) { for (j = 1; j < size; ++j) { ++i; if (pyact_) { - assert(nrnpy_callable_with_args); + assert(neuron::python::methods.callable_with_args); hoc_push_object(msobj_); hoc_pushx(double(i)); hoc_pushx(double(j)); - pyactval = (*nrnpy_callable_with_args)(pyact_, 3); + pyactval = neuron::python::methods.callable_with_args(pyact_, 3); } else { - Sprintf(buf, "hoc_ac_ = %d %s", i, action_.string()); + Sprintf(buf, "hoc_ac_ = %d %s", i, action_.c_str()); } char buf2[200]; Sprintf(buf2, "%s[%d]", sym->name, j); @@ -836,7 +837,7 @@ double MechanismStandard::get(const char* name, int index) { if (!s) { hoc_execerror(name, "not in this property"); } - double* pval = np_->prop_pval(s, index); + auto const pval = np_->prop_pval(s, index); if (!pval) { return -1e300; } @@ -1051,6 +1052,12 @@ static double mt_is_artificial(void* v) { hoc_return_type_code = 2; return double(mt->is_artificial(int(chkarg(1, 0, mt->count())))); } +static double mt_is_ion(void* v) { + auto* mt = static_cast(v); + hoc_return_type_code = 2; + return double(mt->is_ion()); +} + static Object** mt_pp_begin(void* v) { MechanismType* mt = (MechanismType*) v; Point_process* pp = mt->pp_begin(); @@ -1115,6 +1122,7 @@ static Member_func mt_members[] = {{"select", mt_select}, {"is_netcon_target", mt_is_target}, {"has_net_event", mt_has_net_event}, {"is_artificial", mt_is_artificial}, + {"is_ion", mt_is_ion}, {"internal_type", mt_internal_type}, {0, 0}}; static Member_ret_obj_func mt_retobj_members[] = {{"pp_begin", mt_pp_begin}, @@ -1134,7 +1142,7 @@ void MechanismType_reg() { int* type_; int count_; int select_; - CopyString action_; + std::string action_; Object* pyact_; Section* sec_iter_; int inode_iter_; @@ -1243,6 +1251,10 @@ bool MechanismType::is_artificial(int i) { return (nrn_is_artificial_[j] ? 
true : false); } +bool MechanismType::is_ion() { + return nrn_is_ion(internal_type()); +} + void MechanismType::select(const char* name) { for (int i = 0; i < mti_->count_; ++i) { if (strcmp(name, memb_func[mti_->type_[i]].sym->name) == 0) { @@ -1300,18 +1312,15 @@ void MechanismType::menu() { Symbol* s = memb_func[mti_->type_[i]].sym; if (s->subtype != MORPHOLOGY) { if (mti_->pyact_) { - assert(nrnpy_callable_with_args); + assert(neuron::python::methods.callable_with_args); hoc_push_object(mtobj_); hoc_pushx(double(i)); - Object* pyactval = (*nrnpy_callable_with_args)(mti_->pyact_, 2); + Object* pyactval = neuron::python::methods.callable_with_args(mti_->pyact_, 2); hoc_ivbutton(s->name, NULL, pyactval); hoc_obj_unref(pyactval); } else { - Sprintf(buf, - "xbutton(\"%s\", \"hoc_ac_=%d %s\")\n", - s->name, - i, - mti_->action_.string()); + Sprintf( + buf, "xbutton(\"%s\", \"hoc_ac_=%d %s\")\n", s->name, i, mti_->action_.c_str()); oc.run(buf); } } diff --git a/src/nrniv/nrnmenu.h b/src/nrniv/nrnmenu.h index 4cc8345b8a..f310394990 100644 --- a/src/nrniv/nrnmenu.h +++ b/src/nrniv/nrnmenu.h @@ -1,7 +1,6 @@ #ifndef nrnmenu_h #define nrnmenu_h -#include #include "ndatclas.h" class MechTypeImpl; @@ -39,7 +38,7 @@ class MechanismStandard: public Resource { int name_cnt_; int offset_; int vartype_; - CopyString action_; + std::string action_; Object* pyact_; Symbol** glosym_; void mschk(const char*); @@ -53,6 +52,7 @@ class MechanismType: public Resource { bool is_netcon_target(int); bool has_net_event(int); bool is_artificial(int); + bool is_ion(); void select(const char*); const char* selected(); void insert(Section*); diff --git a/src/nrniv/nrnpy.cpp b/src/nrniv/nrnpy.cpp index b7dd38a0ff..9ce27f4dcd 100644 --- a/src/nrniv/nrnpy.cpp +++ b/src/nrniv/nrnpy.cpp @@ -1,10 +1,9 @@ +#include "nrnpy.h" #include <../../nrnconf.h> // For Linux and Max OS X, // Solve the problem of not knowing what version of Python the user has by // possibly deferring linking to libnrnpython.so to run time using the proper // Python interface - -#include <../nrnpython/nrnpython_config.h> #include #include #include @@ -15,396 +14,296 @@ #include #include +#include -// needed by nrnmusic.cpp but must exist if python is loaded. -#if USE_PYTHON -typedef struct _object PyObject; -PyObject* (*nrnpy_p_ho2po)(Object*); -Object* (*nrnpy_p_po2ho)(PyObject*); -#endif // USE_PYTHON +namespace neuron::python { +// Declared extern in nrnpy.h, defined here. +impl_ptrs methods; +} // namespace neuron::python +// Backwards-compatibility hack +int (*nrnpy_hoccommand_exec)(Object*); extern int nrn_nopython; -extern int nrnpy_nositeflag; -extern char* nrnpy_pyexe; +extern std::string nrnpy_pyexe; extern int nrn_is_python_extension; -int* nrnpy_site_problem_p; -extern int (*p_nrnpython_start)(int); -void nrnpython(); -static void (*p_nrnpython_real)(); -static void (*p_nrnpython_reg_real)(); -char* hoc_back2forward(char* s); -char* hoc_forward2back(char* s); +using nrnpython_reg_real_t = void (*)(neuron::python::impl_ptrs*); #if DARWIN extern void nrn_possible_mismatched_arch(const char*); #endif -// following is undefined or else has the value of sys.api_version -// at time of configure (using the python first in the PATH). 
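Note (editorial, not part of the diff): the nrnpy.cpp/nrnpy.h rewrite replaces the scattered nrnpy_* function pointers with the single neuron::python::impl_ptrs table defined just above (neuron::python::methods). A minimal sketch of the intended flow, where hoc_nrnpython_impl is a hypothetical function provided by a python-version-specific libnrnpythonX.Y:

    // Inside libnrnpythonX.Y: the exported registration hook fills in the table.
    extern "C" void nrnpython_reg_real(neuron::python::impl_ptrs* ptrs) {
        ptrs->hoc_nrnpython = hoc_nrnpython_impl;  // hypothetical Python-specific implementation
        // ... assign the remaining members the same way ...
    }

    // Inside version-agnostic NEURON code: call through the table and degrade
    // gracefully when no Python library was registered (mirrors nrnpython() below).
    void example_call_site() {
        if (neuron::python::methods.hoc_nrnpython) {
            neuron::python::methods.hoc_nrnpython();
        } else {
            hoc_retpushx(0.);
        }
    }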
-#if defined(NRNPYTHON_DYNAMICLOAD) - +#ifdef NRNPYTHON_DYNAMICLOAD #include "nrnwrap_dlfcn.h" -#if !defined(RTLD_NOLOAD) -#define RTLD_NOLOAD 0 -#endif // RTLD_NOLOAD - extern char* neuron_home; - -#if NRNPYTHON_DYNAMICLOAD >= 30 - -#ifdef MINGW -static const char* ver[] = {"3.8", 0}; -#else -static const char* ver[] = {"3.11", "3.10", "3.9", "3.8", 0}; -#endif // !MINGW - +static nrnpython_reg_real_t load_nrnpython(); #else - -static const char* ver[] = {0}; - +extern "C" void nrnpython_reg_real(neuron::python::impl_ptrs*); #endif -static int iver; // which python is loaded? -static void* python_already_loaded(); -static void* load_python(); -static void load_nrnpython(int, const char*); -#else //! defined(NRNPYTHON_DYNAMICLOAD) -extern "C" int nrnpython_start(int); -extern "C" void nrnpython_reg_real(); -extern "C" void nrnpython_real(); -#endif // defined(NRNPYTHON_DYNAMICLOAD) - -char* nrnpy_pyhome; - void nrnpython() { -#if USE_PYTHON - if (p_nrnpython_real) { - (*p_nrnpython_real)(); - return; + if (neuron::python::methods.hoc_nrnpython) { + neuron::python::methods.hoc_nrnpython(); + } else { + hoc_retpushx(0.); } -#endif - hoc_retpushx(0.); } // Stub class for when Python does not exist static void* p_cons(Object*) { - return 0; + return nullptr; } -static void p_destruct(void* v) {} -static Member_func p_members[] = {{0, 0}}; - -#if NRNPYTHON_DYNAMICLOAD -static char* nrnpy_pylib; - -static void siteprob(void) { - if (nrnpy_site_problem_p && (*nrnpy_site_problem_p)) { - printf("Py_Initialize exited. PYTHONHOME probably needs to be set correctly.\n"); - if (nrnpy_pyhome) { - printf( - "The value of PYTHONHOME or our automatic guess based on the output of " - "nrnpyenv.sh:\n export PYTHONHOME=%s\ndid not work.\n", - nrnpy_pyhome); - } - printf( - "It will help to examine the output of:\nnrnpyenv.sh\n\ -and set the indicated environment variables, or avoid python by adding\n\ -nopython: on\n\ -to %s/lib/nrn.defaults (or .nrn.defaults in your $HOME directory)\n", - neuron_home); +static void p_destruct(void*) {} +static Member_func p_members[] = {{nullptr, nullptr}}; + +#ifdef NRNPYTHON_DYNAMICLOAD +static std::string nrnpy_pylib{}, nrnpy_pyversion{}; + +/** + * @brief Wrapper that executes a command and captures stdout. + * + * Throws std::runtime_error if the command does not execute cleanly. + */ +static std::string check_output(std::string command) { + std::FILE* const p = popen(command.c_str(), "r"); + if (!p) { + throw std::runtime_error("popen(" + command + ", \"r\") failed"); + } + std::string output; + std::array buffer{}; + while (std::fgets(buffer.data(), buffer.size() - 1, p)) { + output += buffer.data(); + } + if (auto const code = pclose(p)) { + std::ostringstream err; + err << "'" << command << "' did not terminate cleanly, pclose returned non-zero (" << code + << ") after the following output had been read:\n" + << output; + throw std::runtime_error(err.str()); } + return output; } +// Included in C++20 +static bool starts_with(std::string_view str, std::string_view prefix) { + return str.substr(0, prefix.size()) == prefix; +} +static bool ends_with(std::string_view str, std::string_view suffix) { + return str.size() >= suffix.size() && + str.substr(str.size() - suffix.size(), std::string_view::npos) == suffix; +} + +/** + * @brief Figure out which Python to load. + * + * When dynamic Python support is enabled, NEURON needs to figure out which + * libpythonX.Y to load, and then load it followed by the corresponding + * libnrnpythonX.Y. 
This can be steered both using commandline options and by + * using environment variables. The logic is as follows: + * + * * the -pyexe argument to nrniv (special) takes precedence over NRN_PYTHONEXE + * and we have to assume that the other NRN_PY* environment variables are not + * compatible with it and use nrnpyenv.sh to find compatible values for them, + * i.e. -pyexe implies that NRN_PY* are ignored. + * * if -pyexe is *not* passed, then we examine the NRN_PY* environment + * variables: + * * if all of them are set, nrnpyenv.sh is not run and they are assumed to + * form a coherent set + * * if only some, or none, of them are set, nrnpyenv.sh is run to fill in + * the missing values. NRN_PYTHONEXE is an input to nrnpyenv.sh, so if this + * is set then we will not search $PATH + */ static void set_nrnpylib() { - nrnpy_pylib = getenv("NRN_PYLIB"); - nrnpy_pyhome = getenv("NRN_PYTHONHOME"); - if (nrnpy_pylib && nrnpy_pyhome) { - return; - } - // copy allows free of the copy if needed - if (nrnpy_pylib) { - nrnpy_pylib = strdup(nrnpy_pylib); - } - if (nrnpy_pyhome) { - nrnpy_pyhome = strdup(nrnpy_pyhome); + std::array, 3> params{ + {{nrnpy_pylib, "NRN_PYLIB"}, + {nrnpy_pyexe, "NRN_PYTHONEXE"}, + {nrnpy_pyversion, "NRN_PYTHONVERSION"}}}; + auto const all_set = [¶ms]() { + return std::all_of(params.begin(), params.end(), [](auto const& p) { + return !p.first.empty(); + }); + }; + if (nrnpy_pyexe.empty()) { + // -pyexe was not passed, read from the environment + for (auto& [glob_var, env_var]: params) { + if (const char* v = std::getenv(env_var)) { + glob_var = v; + } + } + if (all_set()) { + // the environment specified everything, nothing more to do + return; + } } - + // Populate missing values using nrnpyenv.sh. Pass the possibly-null value of nrnpy_pyexe, which + // may have come from -pyexe or NRN_PYTHONEXE, to nrnpyenv.sh. Do all of this on rank 0, and + // broadcast the results to other ranks afterwards. if (nrnmpi_myid_world == 0) { - int linesz = 1024 + (nrnpy_pyexe ? strlen(nrnpy_pyexe) : 0); + // Construct a command to execute + auto const command = []() -> std::string { #ifdef MINGW - linesz += 3 * strlen(neuron_home); - char* line = new char[linesz + 1]; - char* bnrnhome = strdup(neuron_home); - char* fnrnhome = strdup(neuron_home); - hoc_forward2back(bnrnhome); - hoc_back2forward(fnrnhome); - std::snprintf(line, - linesz + 1, - "%s\\mingw\\usr\\bin\\bash %s/bin/nrnpyenv.sh %s --NEURON_HOME=%s", - bnrnhome, - fnrnhome, - (nrnpy_pyexe && strlen(nrnpy_pyexe) > 0) ? nrnpy_pyexe : "", - fnrnhome); - free(fnrnhome); - free(bnrnhome); + std::string bnrnhome{neuron_home}, fnrnhome{neuron_home}; + std::replace(bnrnhome.begin(), bnrnhome.end(), '/', '\\'); + std::replace(fnrnhome.begin(), fnrnhome.end(), '\\', '/'); + return bnrnhome + R"(\mingw\usr\bin\bash )" + fnrnhome + "/bin/nrnpyenv.sh " + + nrnpy_pyexe + " --NEURON_HOME=" + fnrnhome; #else - char* line = new char[linesz + 1]; - std::snprintf(line, - linesz + 1, - "bash %s/../../bin/nrnpyenv.sh %s", - neuron_home, - (nrnpy_pyexe && strlen(nrnpy_pyexe) > 0) ? 
nrnpy_pyexe : ""); + return "bash " + std::string{neuron_home} + "/../../bin/nrnpyenv.sh " + nrnpy_pyexe; #endif - FILE* p = popen(line, "r"); - if (!p) { - printf("could not popen '%s'\n", line); - } else { - if (!fgets(line, linesz, p)) { - printf("failed: %s\n", line); - } - while (fgets(line, linesz, p)) { - char* cp; - // must get rid of beginning '"' and trailing '"\n' - if (!nrnpy_pyhome && (cp = strstr(line, "export NRN_PYTHONHOME="))) { - cp += strlen("export NRN_PYTHONHOME=") + 1; - cp[strlen(cp) - 2] = '\0'; - if (nrnpy_pyhome) { - free(nrnpy_pyhome); - } - nrnpy_pyhome = strdup(cp); - } else if (!nrnpy_pylib && (cp = strstr(line, "export NRN_PYLIB="))) { - cp += strlen("export NRN_PYLIB=") + 1; - cp[strlen(cp) - 2] = '\0'; - if (nrnpy_pylib) { - free(nrnpy_pylib); - } - nrnpy_pylib = strdup(cp); + }(); + // Execute the command, capture its stdout and wrap that in a C++ stream. This will throw if + // the commnand fails. + std::istringstream cmd_stdout{check_output(command)}; + std::string line; + // if line is of the form: + // export FOO="bar" + // then proc_line(x, "FOO") sets x to bar + auto const proc_line = [](std::string_view line, auto& glob_var, std::string_view env_var) { + std::string_view const suffix{"\""}; + auto const prefix = "export " + std::string{env_var} + "=\""; + if (starts_with(line, prefix) && ends_with(line, suffix)) { + line.remove_prefix(prefix.size()); + line.remove_suffix(suffix.size()); + if (!glob_var.empty() && glob_var != line) { + std::cout << "WARNING: overriding " << env_var << '=' << glob_var << " with " + << line << std::endl; } + glob_var = line; } - pclose(p); + }; + // Process the output of nrnpyenv.sh line by line + while (std::getline(cmd_stdout, line)) { + for (auto& [glob_var, env_var]: params) { + proc_line(line, glob_var, env_var); + } + } + // After having run nrnpyenv.sh, we should know everything about the Python library that is + // to be loaded. + if (!all_set()) { + std::ostringstream err; + err << "After running nrnpyenv.sh (" << command << ") with output:\n" + << cmd_stdout.str() + << "\nwe are still missing information about the Python to be loaded:\n" + << " nrnpy_pyexe=" << nrnpy_pyexe << '\n' + << " nrnpy_pylib=" << nrnpy_pylib << '\n' + << " nrnpy_pyversion=" << nrnpy_pyversion << '\n'; + throw std::runtime_error(err.str()); } - delete[] line; } #if NRNMPI if (nrnmpi_numprocs_world > 1) { // 0 broadcasts to everyone else. - nrnmpi_char_broadcast_world(&nrnpy_pylib, 0); - nrnmpi_char_broadcast_world(&nrnpy_pyhome, 0); + nrnmpi_str_broadcast_world(nrnpy_pyexe, 0); + nrnmpi_str_broadcast_world(nrnpy_pylib, 0); + nrnmpi_str_broadcast_world(nrnpy_pyversion, 0); } #endif } - -#if 0 -static void set_pythonhome(void* handle){ - if (nrnmpi_myid == 0) {atexit(siteprob);} -#ifdef MINGW -#else - if (getenv("PYTHONHOME") || nrnpy_nositeflag) { return; } - if (nrnpy_pyhome) { - int res = setenv("PYTHONHOME", nrnpy_pyhome, 1); - assert(res == 0); - return; - } - - Dl_info dl_info; - void* s = dlsym(handle, "Py_Initialize"); - assert(s != NULL); - int success = dladdr(s, &dl_info); - if (success) { - //printf("%s\n", dl_info.dli_fname); - nrnpy_pyhome = strdup(dl_info.dli_fname); - char* p = nrnpy_pyhome; - int n = strlen(p); - int seen = 0; - for (int i = n-1; i > 0; --i) { - if (p[i] == '/') { - if (++seen >= 2) { - p[i] = '\0' ; - break; - } - } - } - int res = setenv("PYTHONHOME", p, 1); - assert(res == 0); - } -#endif -} -#endif // if 0 #endif +/** + * @brief Load + register an nrnpython library for a specific Python version. 
+ * + * This finds the library (if needed because dynamic Python is enabled), opens it and gets + calls + * its nrnpython_reg_real method. This ensures that NEURON's global state knows about a Python + * implementation. + */ void nrnpython_reg() { - // printf("nrnpython_reg in nrnpy.cpp\n"); + nrnpython_reg_real_t reg_fn{}; #if USE_PYTHON - if (nrn_nopython) { - p_nrnpython_start = 0; - p_nrnpython_real = 0; - p_nrnpython_reg_real = 0; - } else { -#if NRNPYTHON_DYNAMICLOAD - void* handle = NULL; - + if (!nrn_nopython) { +#ifdef NRNPYTHON_DYNAMICLOAD + void* handle{}; if (!nrn_is_python_extension) { - // As last resort (or for python3) load $NRN_PYLIB - set_nrnpylib(); - // printf("nrnpy_pylib %s\n", nrnpy_pylib); - // printf("nrnpy_pyhome %s\n", nrnpy_pyhome); - if (nrnpy_pylib) { - handle = dlopen(nrnpy_pylib, RTLD_NOW | RTLD_GLOBAL); - if (!handle) { - fprintf(stderr, "Could not dlopen NRN_PYLIB: %s\n", nrnpy_pylib); -#if DARWIN - nrn_possible_mismatched_arch(nrnpy_pylib); -#endif - exit(1); - } + // find the details of the libpythonX.Y.so we are going to load. + try { + set_nrnpylib(); + } catch (std::exception const& e) { + std::cerr << "Could not determine Python library details: " << e.what() + << std::endl; + exit(1); } + handle = dlopen(nrnpy_pylib.c_str(), RTLD_NOW | RTLD_GLOBAL); if (!handle) { - python_already_loaded(); - } - if (!handle) { // embed python - handle = load_python(); - } -#if 0 - // No longer do this as Py_SetPythonHome is used - if (handle) { - // need to worry about the site.py problem - // can fix with a proper PYTHONHOME but need to know - // what path was used to load the python library. - set_pythonhome(handle); - } + std::cerr << "Could not dlopen NRN_PYLIB: " << nrnpy_pylib << std::endl; +#if DARWIN + nrn_possible_mismatched_arch(nrnpy_pylib.c_str()); #endif - } else { - // printf("nrn_is_python_extension = %d\n", nrn_is_python_extension); + exit(1); + } } - // for some mysterious reason on max osx 10.12 - // (perhaps due to System Integrity Protection?) when python is - // launched, python_already_loaded() returns a NULL handle unless - // the full path to the dylib is used. Since we know it is loaded - // in these circumstances, it is sufficient to go ahead and dlopen - // the nrnpython interface library if (handle || nrn_is_python_extension) { - load_nrnpython(nrn_is_python_extension, nrnpy_pylib); + // Load libnrnpython.X.Y.so + reg_fn = load_nrnpython(); } #else - p_nrnpython_start = nrnpython_start; - p_nrnpython_real = nrnpython_real; - p_nrnpython_reg_real = nrnpython_reg_real; + // Python enabled, but not dynamic + reg_fn = nrnpython_reg_real; #endif } - if (p_nrnpython_reg_real) { - (*p_nrnpython_reg_real)(); - if (nrnpy_site_problem_p) { - *nrnpy_site_problem_p = 1; - } + if (reg_fn) { + // Register Python-specific things in the NEURON global state + reg_fn(&neuron::python::methods); + // Compatibility hack for legacy MOD file in nrntest + nrnpy_hoccommand_exec = neuron::python::methods.hoccommand_exec; return; } #endif - class2oc("PythonObject", p_cons, p_destruct, p_members, NULL, NULL, NULL); + // Stub implementation of PythonObject if Python support was not enabled, or a nrnpython library + // could not be loaded. + class2oc("PythonObject", p_cons, p_destruct, p_members, nullptr, nullptr, nullptr); } -#if NRNPYTHON_DYNAMICLOAD // to end of file - -// important dlopen flags : -// RTLD_NOLOAD returns NULL if not open, or handle if it is resident. 
- -static void* ver_dlo(int flag) { - for (int i = 0; ver[i]; ++i) { - char name[100]; -#ifdef MINGW - Sprintf(name, "python%c%c.dll", ver[i][0], ver[i][2]); -#else -#if DARWIN - Sprintf(name, "libpython%s.dylib", ver[i]); -#else - Sprintf(name, "libpython%s.so", ver[i]); -#endif -#endif - void* handle = dlopen(name, flag); - iver = i; - if (handle) { - return handle; +#ifdef NRNPYTHON_DYNAMICLOAD // to end of file +static nrnpython_reg_real_t load_nrnpython() { + std::string pyversion{}; + if (auto const pv10 = nrn_is_python_extension; pv10 > 0) { + // pv10 is one of the packed integers like 310 (3.10) or 38 (3.8) + auto const factor = (pv10 >= 100) ? 100 : 10; + pyversion = std::to_string(pv10 / factor) + "." + std::to_string(pv10 % factor); + } else { + if (nrnpy_pylib.empty() || nrnpy_pyversion.empty()) { + std::cerr << "Do not know what Python to load [nrnpy_pylib=" << nrnpy_pylib + << " nrnpy_pyversion=" << nrnpy_pyversion << ']' << std::endl; + return nullptr; + } + pyversion = nrnpy_pyversion; + // It's possible to get this far with an incompatible version, if nrnpy_pyversion and + // friends were set from the environment to bypass nrnpyenv.sh, and nrniv -python was + // launched. + auto const& supported_versions = neuron::config::supported_python_versions; + auto const iter = + std::find(supported_versions.begin(), supported_versions.end(), pyversion); + if (iter == supported_versions.end()) { + std::cerr << "Python " << pyversion + << " is not supported by this NEURON installation (supported:"; + for (auto const& good_ver: supported_versions) { + std::cerr << ' ' << good_ver; + } + std::cerr << "). If you are seeing this message, your environment probably contains " + "NRN_PYLIB, NRN_PYTHONEXE and NRN_PYTHONVERSION settings that are " + "incompatible with this NEURON. Try unsetting them." + << std::endl; + return nullptr; } } - iver = -1; - return NULL; -} - -static void* python_already_loaded() { - void* handle = ver_dlo(RTLD_NOW | RTLD_GLOBAL | RTLD_NOLOAD); - // printf("python_already_loaded %d\n", iver); - return handle; -} - -static void* load_python() { - void* handle = ver_dlo(RTLD_NOW | RTLD_GLOBAL); - // printf("load_python %d\n", iver); - return handle; -} - -static void* load_sym(void* handle, const char* name) { - void* p = dlsym(handle, name); - if (!p) { - printf("Could not load %s\n", name); - exit(1); - } - return p; -} - -static void* load_nrnpython_helper(const char* npylib) { - char name[2048]; -#ifdef MINGW - Sprintf(name, "%s.dll", npylib); -#else // !MINGW -#if DARWIN - Sprintf(name, "%s/../../lib/%s.dylib", neuron_home, npylib); -#else // !DARWIN - Sprintf(name, "%s/../../lib/%s.so", neuron_home, npylib); -#endif // DARWIN -#endif // MINGW - void* handle = dlopen(name, RTLD_NOW); - return handle; -} - -// Get python version as integer from pythonlib path -static int pylib2pyver10(std::string pylib) { - // skip past last \ or / - const auto pos = pylib.find_last_of("/\\"); - if (pos != std::string::npos) { - pylib = pylib.substr(pos + 1); - } - - // erase nondigits - pylib.erase(std::remove_if(pylib.begin(), pylib.end(), [](char c) { return !std::isdigit(c); }), - pylib.end()); - - // parse number. 
0 is fine to return as error (no need for stoi) - return std::atoi(pylib.c_str()); -} - -static void load_nrnpython(int pyver10, const char* pylib) { - void* handle = NULL; -#if (defined(__MINGW32__) || \ - (defined(USE_LIBNRNPYTHON_MAJORMINOR) && USE_LIBNRNPYTHON_MAJORMINOR == 1)) - char name[256]; - int pv10 = pyver10; - if (pyver10 < 1 && pylib) { - pv10 = pylib2pyver10(pylib); - } - Sprintf(name, "libnrnpython%d", pv10); - handle = load_nrnpython_helper(name); + // Construct libnrnpythonX.Y.so (or other platforms' equivalent) + std::string name; + name.append(neuron::config::shared_library_prefix); + name.append("nrnpython"); + name.append(pyversion); + name.append(neuron::config::shared_library_suffix); +#ifndef MINGW + // Build a path from neuron_home on macOS and Linux + name = neuron_home + ("/../../lib/" + name); +#endif + auto* const handle = dlopen(name.c_str(), RTLD_NOW); if (!handle) { - printf("Could not load %s\n", name); - printf("pyver10=%d pylib=%s\n", pyver10, pylib ? pylib : "NULL"); - return; + std::cerr << "Could not load " << name << std::endl; + std::cerr << "nrn_is_python_extension=" << nrn_is_python_extension << std::endl; + return nullptr; } -#else - handle = load_nrnpython_helper("libnrnpython3"); - if (!handle) { - printf("Could not load libnrnpython3\n"); - printf("pyver10=%d pylib=%s\n", pyver10, pylib ? pylib : "NULL"); - return; + auto* const reg = reinterpret_cast(dlsym(handle, "nrnpython_reg_real")); + if (!reg) { + std::cerr << "Could not load registration function from " << name << std::endl; } -#endif - p_nrnpython_start = (int (*)(int)) load_sym(handle, "nrnpython_start"); - p_nrnpython_real = (void (*)()) load_sym(handle, "nrnpython_real"); - p_nrnpython_reg_real = (void (*)()) load_sym(handle, "nrnpython_reg_real"); + return reg; } - #endif diff --git a/src/nrniv/nrnpy.h b/src/nrniv/nrnpy.h new file mode 100644 index 0000000000..ce3cb89b83 --- /dev/null +++ b/src/nrniv/nrnpy.h @@ -0,0 +1,70 @@ +#pragma once +#include +/** + * Declarations of global symbols in NEURON that have to be populated with python-version-specific + * values when dynamic Python is enabled. These are set by the nrnpython_reg_real function, and + * defined in nrnpy.cpp. + */ +struct Object; +// PyObject is a typedef, so we can't forward-declare it as a type. This pattern is common enough in +// the wild that we hope Python won't dare change it. +struct _object; +typedef _object PyObject; +struct _ts; +typedef _ts PyThreadState; +struct Section; +struct Symbol; +namespace neuron::python { +/** + * @brief Collection of pointers to functions with python-version-specific implementations. + * + * When dynamic Python is enabled, these are filled in from a python-version-specific + * libnrnpythonX.Y library and then called from python-version-agnostic code inside NEURON. 
+ */ +struct impl_ptrs { + Object* (*callable_with_args)(Object*, int narg){}; + double (*call_func)(Object*, int, int*){}; + char* (*call_picklef)(char*, std::size_t size, int narg, std::size_t* retsize){}; + void (*call_python_with_section)(Object*, Section*){}; + void (*cmdtool)(Object*, int type, double x, double y, int kd){}; + int (*guigetstr)(Object*, char**){}; + double (*guigetval)(Object*){}; + Object** (*gui_helper)(const char* name, Object* obj){}; + Object** (*gui_helper3)(const char* name, Object* obj, int handle_strptr){}; + char** (*gui_helper3_str)(const char*, Object*, int){}; + void (*guisetval)(Object*, double){}; + int (*hoccommand_exec)(Object*){}; + int (*hoccommand_exec_strret)(Object*, char*, int){}; + void (*hoc_nrnpython)(){}; + PyObject* (*ho2po)(Object*){}; + void (*hpoasgn)(Object*, int){}; + void (*interpreter_set_path)(std::string_view){}; + int (*interpreter_start)(int){}; + Object* (*mpi_alltoall_type)(int, int){}; + double (*object_to_double)(Object*){}; + void* (*opaque_obj2pyobj)(Object*){}; + Object* (*pickle2po)(char*, std::size_t size){}; + Object* (*po2ho)(PyObject*){}; + char* (*po2pickle)(Object*, std::size_t* size){}; + double (*praxis_efun)(Object* pycallable, Object* hvec){}; + int (*pysame)(Object* o1, Object* o2){}; + void (*py2n_component)(Object*, Symbol*, int, int){}; + void (*restore_thread)(PyThreadState*){}; + PyThreadState* (*save_thread)(){}; + // Such a common pattern it gets a wrapper + Object** try_gui_helper(const char* name, Object* obj) const { + if (gui_helper) { + return gui_helper(name, obj); + } else { + return nullptr; + } + } +}; +/** + * @brief Collection of pointers to functions with python-version-specific implementations. + * + * This is defined in nrnpy.cpp. + */ +extern impl_ptrs methods; +} // namespace neuron::python +extern Symbol* nrnpy_pyobj_sym_; diff --git a/src/nrniv/nrnste.cpp b/src/nrniv/nrnste.cpp index 76a2b4f121..639137a90e 100644 --- a/src/nrniv/nrnste.cpp +++ b/src/nrniv/nrnste.cpp @@ -15,8 +15,8 @@ static double ste_transition(void* v) { auto* const ste = static_cast(v); int src = (int) chkarg(1, 0, ste->nstate() - 1); int dest = (int) chkarg(2, 0, ste->nstate() - 1); - double* var1 = hoc_pgetarg(3); - double* var2 = hoc_pgetarg(4); + auto var1 = hoc_hgetarg(3); + auto var2 = hoc_hgetarg(4); std::unique_ptr hc{}; if (ifarg(5)) { Object* obj = NULL; @@ -32,7 +32,7 @@ static double ste_transition(void* v) { hc = std::make_unique(obj); } } - ste->transition(src, dest, var1, var2, std::move(hc)); + ste->transition(src, dest, std::move(var1), std::move(var2), std::move(hc)); return 1.; } diff --git a/src/nrniv/nrnste.h b/src/nrniv/nrnste.h index f1e117d6d4..ff952f6bf7 100644 --- a/src/nrniv/nrnste.h +++ b/src/nrniv/nrnste.h @@ -1,4 +1,6 @@ #pragma once +#include "neuron/container/data_handle.hpp" + #include // StateTransitionEvent is a finite state machine in which a transtion occurs // when the transition condition is true. 
For speed the transition condition @@ -20,8 +22,7 @@ struct STETransition { void activate(); // add ste_ to watch list void deactivate(); // remove ste_ from watch list - double* var1_; - double* var2_; + neuron::container::data_handle var1_{}, var2_{}; std::unique_ptr hc_{}; StateTransitionEvent* ste_{}; std::unique_ptr stec_; @@ -37,7 +38,11 @@ struct STEState { struct StateTransitionEvent { StateTransitionEvent(int nstate, Point_process*); ~StateTransitionEvent(); - void transition(int src, int dest, double* var1, double* var, std::unique_ptr); + void transition(int src, + int dest, + neuron::container::data_handle var1, + neuron::container::data_handle var2, + std::unique_ptr); void state(int i); // set current state -- update watch list. int state() { return istate_; diff --git a/src/nrniv/nvector_nrnparallel_ld.cpp b/src/nrniv/nvector_nrnparallel_ld.cpp index 7c2448f04b..ffa410c0fe 100644 --- a/src/nrniv/nvector_nrnparallel_ld.cpp +++ b/src/nrniv/nvector_nrnparallel_ld.cpp @@ -21,23 +21,11 @@ #include #include -/* for NRNMPI_DYNAMICLOAD */ #include -#if NRNMPI_DYNAMICLOAD -extern "C" void nrnmpi_dbl_allreduce_vec(double* src, double* dest, int cnt, int type); -extern "C" void nrnmpi_longdbl_allreduce_vec(long double* src, - long double* dest, - int cnt, - int type); -extern "C" void nrnmpi_long_allreduce_vec(long* src, long* dest, int cnt, int type); +#include extern int nrnmpi_numprocs; -#endif #include "nvector_nrnparallel_ld.h" -#if NRNMPI_DYNAMICLOAD -#else -extern MPI_Comm nrnmpi_comm; -#endif #include "sundialsmath.h" #include "sundialstypes.h" @@ -97,12 +85,7 @@ N_Vector N_VNewEmpty_NrnParallelLD(MPI_Comm comm, long int local_length, long in /* Compute global length as sum of local lengths */ n = local_length; -#if NRNMPI_DYNAMICLOAD nrnmpi_long_allreduce_vec(&n, &Nsum, 1, 1); -#else - comm = nrnmpi_comm; - MPI_Allreduce(&n, &Nsum, 1, MPI_LONG, MPI_SUM, comm); -#endif if (Nsum != global_length) { printf(BAD_N); return (NULL); @@ -428,11 +411,7 @@ void N_VSpace_NrnParallelLD(N_Vector v, long int* lrw, long int* liw) { int npes; comm = NV_COMM_P_LD(v); -#if NRNMPI_DYNAMICLOAD npes = nrnmpi_numprocs; -#else - MPI_Comm_size(comm, &npes); -#endif *lrw = NV_GLOBLENGTH_P_LD(v); *liw = 2 * npes; @@ -905,31 +884,9 @@ static realtype VAllReduce_NrnParallelLD(realtype d, int op, MPI_Comm comm) { * min if op = 3. 
* The operation is over all processors in the communicator */ - realtype out = 0.0; - -#if NRNMPI_DYNAMICLOAD nrnmpi_dbl_allreduce_vec(&d, &out, 1, op); -#else - switch (op) { - case 1: - MPI_Allreduce(&d, &out, 1, MPI_DOUBLE, MPI_SUM, comm); - break; - - case 2: - MPI_Allreduce(&d, &out, 1, MPI_DOUBLE, MPI_MAX, comm); - break; - - case 3: - MPI_Allreduce(&d, &out, 1, MPI_DOUBLE, MPI_MIN, comm); - break; - - default: - break; - } -#endif - - return (out); + return out; } static realtype VAllReduce_long_NrnParallelLD(realtype d, int op, MPI_Comm comm) { @@ -942,11 +899,7 @@ static realtype VAllReduce_long_NrnParallelLD(realtype d, int op, MPI_Comm comm) */ assert(op == 1); long double ld_in{d}, ld_out{}; -#if NRNMPI_DYNAMICLOAD nrnmpi_longdbl_allreduce_vec(&ld_in, &ld_out, 1, op); -#else - MPI_Allreduce(&ld_in, &ld_out, 1, MPI_LONG_DOUBLE, MPI_SUM, comm); -#endif return ld_out; } diff --git a/src/nrniv/nvector_nrnparallel_ld.h b/src/nrniv/nvector_nrnparallel_ld.h index 89508a7c68..4684857d45 100644 --- a/src/nrniv/nvector_nrnparallel_ld.h +++ b/src/nrniv/nvector_nrnparallel_ld.h @@ -65,13 +65,7 @@ mv temp nvector_nrnparallel_ld.cpp #include -#if NRNMPI_DYNAMICLOAD -#define MPI_DOUBLE double -#define MPI_LONG long -#define MPI_Comm int -#else -#include -#endif +#define MPI_Comm int #include "nvector.h" #include "sundialstypes.h" diff --git a/src/nrniv/nvector_nrnthread.cpp b/src/nrniv/nvector_nrnthread.cpp index 311f74603b..7084489011 100644 --- a/src/nrniv/nvector_nrnthread.cpp +++ b/src/nrniv/nvector_nrnthread.cpp @@ -42,8 +42,8 @@ #if NRN_ENABLE_THREADS static MUTDEC #endif - /* argument passing between NrnThread and Serial */ - static N_Vector x_; +/* argument passing between NrnThread and Serial */ +static N_Vector x_; static N_Vector y_; static N_Vector z_; static N_Vector w_; diff --git a/src/nrniv/nvector_nrnthread_ld.cpp b/src/nrniv/nvector_nrnthread_ld.cpp index 01c1f98838..24e1b6e5b3 100644 --- a/src/nrniv/nvector_nrnthread_ld.cpp +++ b/src/nrniv/nvector_nrnthread_ld.cpp @@ -42,8 +42,8 @@ #if NRN_ENABLE_THREADS static MUTDEC #endif - /* argument passing between NrnThreadLD and Serial */ - static N_Vector x_; +/* argument passing between NrnThreadLD and Serial */ +static N_Vector x_; static N_Vector y_; static N_Vector z_; static N_Vector w_; diff --git a/src/nrniv/partrans.cpp b/src/nrniv/partrans.cpp index febdf073b7..a6779bad9e 100644 --- a/src/nrniv/partrans.cpp +++ b/src/nrniv/partrans.cpp @@ -1,7 +1,7 @@ #include <../../nrnconf.h> - #include "partrans.h" // sgid_t and SetupTransferInfo for CoreNEURON -#include "treeset.h" + +#include "neuron/container/data_handle.hpp" #include #include @@ -11,48 +11,60 @@ #include #include #include -#if defined(HAVE_STDINT_H) #include -#endif -#include // Introduced for NonVSrcUpdateInfo -#include // Replaces NrnHash for MapSgid2Int and MapNode2PDbl +#include +#include // Replaces NrnHash for MapSgid2Int #include #include +#if NRNMPI +#include "have2want.hpp" +#endif + #if NRNLONGSGID -#if PARANEURON -extern void sgid_alltoallv(sgid_t* s, int* scnt, int* sdispl, sgid_t* r, int* rcnt, int* rdispl) { +#if NRNMPI +static void sgid_alltoallv(Data& s, Data& r) { if (nrn_sparse_partrans > 0) { - nrnmpi_long_alltoallv_sparse(s, scnt, sdispl, r, rcnt, rdispl); + nrnmpi_long_alltoallv_sparse(s.data.data(), + s.cnt.data(), + s.displ.data(), + r.data.data(), + r.cnt.data(), + r.displ.data()); } else { - nrnmpi_long_alltoallv(s, scnt, sdispl, r, rcnt, rdispl); + nrnmpi_long_alltoallv(s.data.data(), + s.cnt.data(), + s.displ.data(), + r.data.data(), + 
r.cnt.data(), + r.displ.data()); } } -#endif // PARANEURON +#endif // NRNMPI #else // not NRNLONGSGID -#if PARANEURON -extern void sgid_alltoallv(sgid_t* s, int* scnt, int* sdispl, sgid_t* r, int* rcnt, int* rdispl) { +#if NRNMPI +static void sgid_alltoallv(Data& s, Data& r) { if (nrn_sparse_partrans > 0) { - nrnmpi_int_alltoallv_sparse(s, scnt, sdispl, r, rcnt, rdispl); + nrnmpi_int_alltoallv_sparse(s.data.data(), + s.cnt.data(), + s.displ.data(), + r.data.data(), + r.cnt.data(), + r.displ.data()); } else { - nrnmpi_int_alltoallv(s, scnt, sdispl, r, rcnt, rdispl); + nrnmpi_int_alltoallv(s.data.data(), + s.cnt.data(), + s.displ.data(), + r.data.data(), + r.cnt.data(), + r.displ.data()); } } -#endif // PARANEURON +#endif // NRNMPI #endif // not NRNLONGSGID -void nrnmpi_source_var(); -void nrnmpi_target_var(); -void nrnmpi_setup_transfer(); -void nrn_partrans_clear(); -static void mpi_transfer(); -static void thread_transfer(NrnThread*); -static void thread_vi_compute(NrnThread*); -static void mk_ttd(); -extern double t; -extern int nrn_node_ptr_change_cnt_; extern const char* bbcore_write_version; // see lengthy comment in ../nrnoc/fadvance.cpp // nrnmpi_v_transfer requires existence of nrnthread_v_transfer even if there @@ -126,30 +138,22 @@ file. */ -#if 1 || PARANEURON extern void (*nrnthread_v_transfer_)(NrnThread*); // before nonvint and BEFORE INITIAL extern void (*nrnthread_vi_compute_)(NrnThread*); extern void (*nrnmpi_v_transfer_)(); // before nrnthread_v_transfer and after update. Called by // thread 0. extern void (*nrn_mk_transfer_thread_data_)(); -#endif -#if PARANEURON +#if NRNMPI extern double nrnmpi_transfer_wait_; -extern void nrnmpi_barrier(); -extern void nrnmpi_int_allgather(int*, int*, int); -extern int nrnmpi_int_allmax(int); -extern void sgid_alltoallv(sgid_t*, int*, int*, sgid_t*, int*, int*); -extern void nrnmpi_int_alltoallv(int*, int*, int*, int*, int*, int*); -extern void nrnmpi_dbl_alltoallv(double*, int*, int*, double*, int*, int*); -extern void nrnmpi_dbl_alltoallv_sparse(double*, int*, int*, double*, int*, int*); #endif -void nrn_partrans_update_ptrs(); - struct TransferThreadData { int cnt; - double** tv; // pointers to the ParallelContext.target_var - double** sv; // pointers to the ParallelContext.source_var (or into MPI target buffer) + std::vector> tv; // pointers to the + // ParallelContext.target_var + std::vector> sv; // pointers to the + // ParallelContext.source_var (or into + // MPI target buffer) }; static TransferThreadData* transfer_thread_data_; static int n_transfer_thread_data_; @@ -157,15 +161,11 @@ static int n_transfer_thread_data_; // for the case where we need vi = v + vext as the source voltage struct SourceViBuf { int cnt; - Node** nd; - double* val; + std::vector nd; + std::vector val; }; -static SourceViBuf* source_vi_buf_; -static int n_source_vi_buf_; - +static std::vector source_vi_buf_; typedef std::unordered_map MapSgid2Int; -typedef std::unordered_map MapNode2PDbl; -typedef std::vector DblPList; typedef std::vector NodePList; #define PPList partrans_PPList typedef std::vector PPList; @@ -174,36 +174,35 @@ typedef std::vector SgidList; static double* insrc_buf_; // Receives the interprocessor data destined for other threads. 
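// --- Illustrative aside, not part of the patch: the `Data` aggregate taken by the new
// sgid_alltoallv() wrappers above is defined in have2want.hpp, which this diff does not
// show.  Judging only from the member accesses used here (s.data.data(), s.cnt.data(),
// s.displ.data(), and later send_to_want.displ[nhost]), it presumably looks roughly like
// the sketch below; the exact field types and the sgid_t stand-in are assumptions.
#include <cstdint>
#include <vector>
using sgid_t = std::int64_t;      // stand-in only; the real sgid_t comes from partrans.h
struct Data {                     // presumed shape, inferred from usage in this file
    std::vector<sgid_t> data;     // payload, grouped by peer rank
    std::vector<int> cnt;         // element count per rank
    std::vector<int> displ;       // prefix sum of cnt, length nhost + 1
};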
static double* outsrc_buf_; -static double** poutsrc_; // prior to mpi copy src value to proper place in outsrc_buf_ -static int* poutsrc_indices_; // for recalc pointers -static int insrc_buf_size_, *insrccnt_, *insrcdspl_; -static int outsrc_buf_size_, *outsrccnt_, *outsrcdspl_; +static std::vector> poutsrc_; // prior to mpi copy src value + // to proper place in + // outsrc_buf_ +static int* poutsrc_indices_; // for recalc pointers +static int insrc_buf_size_; +static std::vector insrccnt_; +static std::vector insrcdspl_; +static int outsrc_buf_size_; +static std::vector outsrccnt_; +static std::vector outsrcdspl_; static MapSgid2Int sid2insrc_; // received interprocessor sid data is // associated with which insrc_buf index. Created by nrnmpi_setup_transfer // and used by mk_ttd // ordered by calls to nrnmpi_target_var() -static DblPList targets_; // list of target double* -static SgidList sgid2targets_; // list of target sgid -static PPList target_pntlist_; // list of target Point_process -static IntList target_parray_index_; // to recompute targets_ for cache_efficint +static std::vector> targets_; // list of target variables +static SgidList sgid2targets_; // list of target sgid +static PPList target_pntlist_; // list of target Point_process // ordered by calls to nrnmpi_source_var() -typedef std::vector DblPVec; static NodePList visources_; // list of source Node*, (multiples possible) static SgidList sgids_; // source gids static MapSgid2Int sgid2srcindex_; // sgid2srcindex[sgids[i]] == i -typedef std::map> NonVSrcUpdateInfo; -static NonVSrcUpdateInfo non_vsrc_update_info_; // source ssid -> (type,parray_index) - +// source ssid -> (type,parray_index) +static std::unordered_map> + non_vsrc_update_info_; static int max_targets_; - -static int target_ptr_update_cnt_ = 0; -static int target_ptr_need_update_cnt_ = 0; -static int vptr_change_cnt_ = 0; - static bool is_setup_; // deleted when setup_transfer called @@ -222,29 +221,36 @@ static void delete_imped_info() { // pv2node extended to any range variable in the section // This helper searches over all the mechanisms in the node. -// If *pv exists, store mechtype and parray_index. -static bool non_vsrc_setinfo(sgid_t ssid, Node* nd, double const* pv) { +// If h refers to a RANGE variable inside a mechanism in the Node, store the mechanism type, the +// index of the RANGE variable, and the array index into that RANGE variable. 
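// --- Illustrative aside, not part of the patch: a self-contained toy analogue of the
// search performed by non_vsrc_setinfo() below.  The old code tested whether a raw pointer
// fell inside one contiguous p->param block; with data_handle the test becomes an identity
// comparison against every (variable, array-element) location, which keeps working when
// the storage is no longer one flat array.  All names here are illustrative only.
#include <cstddef>
#include <vector>
struct ToyProp {
    std::vector<std::vector<double>> vars;  // vars[i][j] plays the role of param_handle(i, j)
};
static bool refers_into(const ToyProp& p, const double* h, int& var, int& elem) {
    for (std::size_t i = 0; i < p.vars.size(); ++i) {
        for (std::size_t j = 0; j < p.vars[i].size(); ++j) {
            if (h == &p.vars[i][j]) {
                var = static_cast<int>(i);
                elem = static_cast<int>(j);
                return true;
            }
        }
    }
    return false;
}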
+static bool non_vsrc_setinfo(sgid_t ssid, + Node* nd, + neuron::container::data_handle const& h) { for (Prop* p = nd->prop; p; p = p->next) { - if (pv >= p->param && pv < (p->param + p->param_size)) { - non_vsrc_update_info_[ssid] = std::pair(p->_type, pv - p->param); - // printf("non_vsrc_setinfo %p %d %ld %s\n", pv, p->_type, pv-p->param, - // memb_func[p->_type].sym->name); - return true; + for (auto i = 0; i < p->param_num_vars(); ++i) { + for (auto j = 0; j < p->param_array_dimension(i); ++j) { + if (h == p->param_handle(i, j)) { + non_vsrc_update_info_[ssid] = {p->_type, {i, j}}; + return true; + } + } } } return false; } -static double* non_vsrc_update(Node* nd, int type, int ix) { +static neuron::container::data_handle non_vsrc_update(Node* nd, + int type, + neuron::container::field_index ix) { for (Prop* p = nd->prop; p; p = p->next) { if (type == p->_type) { - return p->param + ix; + return p->param_handle(ix); } } - hoc_execerr_ext("partrans update: could not find parameter index %d of %s", - ix, + hoc_execerr_ext("partrans update: could not find parameter index (%d, %d) of %s", + ix.field, + ix.array_index, memb_func[type].sym->name); - return NULL; // avoid coverage false negative as hoc_execerror does not return. } // Find the Node associated with the voltage. @@ -252,75 +258,45 @@ static double* non_vsrc_update(Node* nd, int type, int ix) { // Extended to any pointer to range variable in the section. // If not a voltage save pv associated with mechtype, p_array_index // in non_vsrc_update_info_ -static Node* pv2node(sgid_t ssid, double* pv) { - Section* sec = chk_access(); - Node* nd = sec->parentnode; - if (nd) { - if (&NODEV(nd) == pv || non_vsrc_setinfo(ssid, nd, pv)) { +static Node* pv2node(sgid_t ssid, neuron::container::data_handle const& v) { + Section* const sec = chk_access(); + if (auto* const nd = sec->parentnode; nd) { + if (v == nd->v_handle() || non_vsrc_setinfo(ssid, nd, v)) { return nd; } } for (int i = 0; i < sec->nnode; ++i) { - nd = sec->pnode[i]; - if (&NODEV(nd) == pv || non_vsrc_setinfo(ssid, nd, pv)) { + auto* const nd = sec->pnode[i]; + if (v == nd->v_handle() || non_vsrc_setinfo(ssid, nd, v)) { return nd; } } - hoc_execerr_ext("Pointer to src is not in the currently accessed section %s", secname(sec)); - return nullptr; // avoid coverage false negative. } +static void thread_transfer(NrnThread* _nt); void nrnmpi_source_var() { nrnthread_v_transfer_ = thread_transfer; // otherwise can't check is_setup_ is_setup_ = false; - double* psv = hoc_pgetarg(1); // but might not be a voltage - double x = *getarg(2); - if (x < 0) { - hoc_execerr_ext("source_var sgid must be >= 0: arg 2 is %g\n", x); - } - sgid_t sgid = (sgid_t) x; - if (sgid2srcindex_.find(sgid) != sgid2srcindex_.end()) { + // Get the source variable pointer and promote it to a data_handle if + // possible (i.e. 
if it is a Node voltage, for the moment) + auto const psv = hoc_hgetarg(1); + auto const sgid = []() -> sgid_t { + double const x{*hoc_getarg(2)}; + if (x < 0) { + hoc_execerr_ext("source_var sgid must be >= 0: arg 2 is %g\n", x); + } + return x; + }(); + auto const [_, inserted] = sgid2srcindex_.emplace(sgid, visources_.size()); + if (!inserted) { hoc_execerr_ext("source var sgid %lld already in use.", (long long) sgid); } - sgid2srcindex_[sgid] = visources_.size(); visources_.push_back(pv2node(sgid, psv)); sgids_.push_back(sgid); // printf("nrnmpi_source_var %p source_val=%g sgid=%ld\n", psv, *psv, (long)sgid); } -static int compute_parray_index(Point_process* pp, double* ptv) { - if (!pp) { - return -1; - } - size_t i = ptv - pp->prop->param; - assert(i >= 0 && i < size_t(pp->prop->param_size)); - return int(i); -} -static double* tar_ptr(Point_process* pp, int index) { - return pp->prop->param + index; -} - -static void target_ptr_update() { - // printf("target_ptr_update\n"); - if (targets_.size()) { - int n = targets_.size(); - for (int i = 0; i < n; ++i) { - Point_process* pp = target_pntlist_[i]; - if (!pp) { - hoc_execerr_ext( - "Do not know the POINT_PROCESS target for source id %zd (Hint: insert target " - "instance of the target ref as the first argument.", - size_t(sgid2targets_[i])); - } - double* pd = tar_ptr(target_pntlist_[i], target_parray_index_[i]); - targets_[i] = pd; - } - } - mk_ttd(); - target_ptr_update_cnt_ = target_ptr_need_update_cnt_; -} - void nrnmpi_target_var() { Point_process* pp{}; Object* ob{}; @@ -331,86 +307,43 @@ void nrnmpi_target_var() { ob = *hoc_objgetarg(iarg++); pp = ob2pntproc(ob); } - double* ptv = hoc_pgetarg(iarg++); + auto ptv = hoc_hgetarg(iarg++); double x = *hoc_getarg(iarg++); if (x < 0) { hoc_execerr_ext("target_var sgid must be >= 0: arg %d is %g\n", iarg - 1, x); } - if (pp && (ptv < pp->prop->param || ptv >= (pp->prop->param + pp->prop->param_size))) { + if (pp && !pp->prop->owns(ptv)) { hoc_execerr_ext("Target ref not in %s", hoc_object_name(ob)); } auto const sgid = static_cast(x); targets_.push_back(ptv); target_pntlist_.push_back(pp); - target_parray_index_.push_back(compute_parray_index(pp, ptv)); sgid2targets_.push_back(sgid); // printf("nrnmpi_target_var %p target_val=%g sgid=%ld\n", ptv, *ptv, (long)sgid); } -void nrn_partrans_update_ptrs() { - // These pointer changes require that the targets be range variables - // of a point process and the sources be range variables - - // update the poutsrc that have no extracellular - for (int i = 0; i < outsrc_buf_size_; ++i) { - int isrc = poutsrc_indices_[i]; - Node* nd = visources_[isrc]; - auto it = non_vsrc_update_info_.find(sgids_[isrc]); - if (it != non_vsrc_update_info_.end()) { - poutsrc_[i] = non_vsrc_update(nd, it->second.first, it->second.second); - } else if (!nd->extnode) { - poutsrc_[i] = &(NODEV(nd)); - } else { - // pointers into SourceViBuf updated when - // latter is (re-)created - } - } - vptr_change_cnt_ = nrn_node_ptr_change_cnt_; - // the target vgap pointers also need updating but they will not - // change til after this returns ... 
(verify this) - ++target_ptr_need_update_cnt_; -} - -// static FILE* xxxfile; - static void rm_ttd() { if (!transfer_thread_data_) { return; } - for (int i = 0; i < n_transfer_thread_data_; ++i) { - TransferThreadData& ttd = transfer_thread_data_[i]; - if (ttd.cnt) { - delete[] ttd.tv; - delete[] ttd.sv; - } - } - delete[] transfer_thread_data_; - transfer_thread_data_ = 0; + delete[] std::exchange(transfer_thread_data_, nullptr); n_transfer_thread_data_ = 0; - nrnthread_v_transfer_ = 0; + nrnthread_v_transfer_ = nullptr; } static void rm_svibuf() { - if (!source_vi_buf_) { + if (source_vi_buf_.empty()) { return; } - for (int i = 0; i < n_source_vi_buf_; ++i) { - SourceViBuf& svib = source_vi_buf_[i]; - if (svib.cnt) { - delete[] svib.nd; - delete[] svib.val; - } - } - delete[] source_vi_buf_; - source_vi_buf_ = 0; - n_source_vi_buf_ = 0; - nrnthread_vi_compute_ = 0; + source_vi_buf_.clear(); + nrnthread_vi_compute_ = nullptr; } -static MapNode2PDbl* mk_svibuf() { +static void thread_vi_compute(NrnThread* _nt); +static std::unordered_map mk_svibuf() { rm_svibuf(); if (visources_.empty()) { - return NULL; + return {}; } // any use of extracellular? int has_ecell = 0; @@ -421,12 +354,10 @@ static MapNode2PDbl* mk_svibuf() { } } if (!has_ecell) { - return NULL; + return {}; } - source_vi_buf_ = new SourceViBuf[nrn_nthread]; - n_source_vi_buf_ = nrn_nthread; - NonVSrcUpdateInfo::iterator it; + source_vi_buf_.resize(nrn_nthread); for (int tid = 0; tid < nrn_nthread; ++tid) { source_vi_buf_[tid].cnt = 0; @@ -434,7 +365,7 @@ static MapNode2PDbl* mk_svibuf() { // count for (size_t i = 0; i < visources_.size(); ++i) { Node* nd = visources_[i]; - it = non_vsrc_update_info_.find(sgids_[i]); + auto const it = non_vsrc_update_info_.find(sgids_[i]); if (nd->extnode && it == non_vsrc_update_info_.end()) { assert(nd->_nt >= nrn_threads && nd->_nt < (nrn_threads + nrn_nthread)); ++source_vi_buf_[nd->_nt->id].cnt; @@ -443,21 +374,18 @@ static MapNode2PDbl* mk_svibuf() { // allocate for (int tid = 0; tid < nrn_nthread; ++tid) { SourceViBuf& svib = source_vi_buf_[tid]; - if (svib.cnt) { - svib.nd = new Node*[svib.cnt]; - svib.val = new double[svib.cnt]; - } + svib.nd.resize(svib.cnt); + svib.val.resize(svib.cnt); svib.cnt = 0; // recount on fill } // fill for (size_t i = 0; i < visources_.size(); ++i) { Node* nd = visources_[i]; - it = non_vsrc_update_info_.find(sgids_[i]); + auto const it = non_vsrc_update_info_.find(sgids_[i]); if (nd->extnode && it == non_vsrc_update_info_.end()) { int tid = nd->_nt->id; SourceViBuf& svib = source_vi_buf_[tid]; - svib.nd[svib.cnt] = nd; - ++svib.cnt; + svib.nd[svib.cnt++] = nd; } } // now the only problem is how to get TransferThreadData and poutsrc_ @@ -467,22 +395,28 @@ static MapNode2PDbl* mk_svibuf() { // We can do the poutsrc_ now by creating a temporary Node* to // double* map .. The TransferThreadData can be done later // in mk_ttd using the same map and then deleted. - MapNode2PDbl* ndvi2pd = new MapNode2PDbl(1000); + std::unordered_map ndvi2pd{1000}; + // TODO can this be handle-ified? 
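// --- Illustrative aside, not part of the patch: rm_ttd() above (and nrn_partrans_clear()
// further below) use delete[] std::exchange(p, nullptr) to free an array and reset the
// owning pointer in a single expression, replacing the old
// "if (p) { delete[] p; p = 0; }" blocks.  Minimal self-contained demonstration:
#include <utility>
int exchange_demo() {
    double* buf = new double[8];
    delete[] std::exchange(buf, nullptr);   // frees the old array; buf is nullptr afterwards
    return buf == nullptr ? 0 : 1;          // returns 0
}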
for (int tid = 0; tid < nrn_nthread; ++tid) { SourceViBuf& svib = source_vi_buf_[tid]; + assert(svib.nd.size() == svib.cnt); + assert(svib.val.size() == svib.cnt); for (int i = 0; i < svib.cnt; ++i) { Node* nd = svib.nd[i]; - (*ndvi2pd)[nd] = svib.val + i; + ndvi2pd[nd] = &svib.val[i]; // pointer to a vector owned by source_vi_buf_ } } for (int i = 0; i < outsrc_buf_size_; ++i) { int isrc = poutsrc_indices_[i]; Node* nd = visources_[isrc]; - it = non_vsrc_update_info_.find(sgids_[isrc]); + auto const it = non_vsrc_update_info_.find(sgids_[isrc]); if (nd->extnode && it == non_vsrc_update_info_.end()) { - auto search = ndvi2pd->find(nd); - nrn_assert(ndvi2pd->find(nd) != ndvi2pd->end()); - poutsrc_[i] = search->second; + auto const search = ndvi2pd.find(nd); + nrn_assert(search != ndvi2pd.end()); + // olupton 2022-11-28: looks like search->second is always a pointer to a private vector + // in source_vi_buf_, so do_not_search makes sense. + poutsrc_[i] = neuron::container::data_handle{neuron::container::do_not_search, + search->second}; } } nrnthread_vi_compute_ = thread_vi_compute; @@ -491,12 +425,9 @@ static MapNode2PDbl* mk_svibuf() { static void mk_ttd() { int i, j, tid, n; - MapNode2PDbl* ndvi2pd = mk_svibuf(); + auto ndvi2pd = mk_svibuf(); rm_ttd(); if (targets_.empty()) { - if (ndvi2pd) { - delete ndvi2pd; - } // some MPI transfer code paths require that all ranks // have a nrn_thread_v_transfer. // As mentioned in http://static.msi.umn.edu/tutorial/scicomp/general/MPI/content3_new.html @@ -542,8 +473,8 @@ static void mk_ttd() { for (tid = 0; tid < nrn_nthread; ++tid) { TransferThreadData& ttd = transfer_thread_data_[tid]; if (ttd.cnt) { - ttd.tv = new double*[ttd.cnt]; - ttd.sv = new double*[ttd.cnt]; + ttd.tv.resize(ttd.cnt); + ttd.sv.resize(ttd.cnt); } ttd.cnt = 0; } @@ -556,7 +487,7 @@ static void mk_ttd() { } TransferThreadData& ttd = transfer_thread_data_[tid]; j = ttd.cnt++; - ttd.tv[j] = targets_[i]; + ttd.tv.at(j) = targets_.at(i); // perhaps inter- or intra-thread, perhaps interprocessor // if inter- or intra-thread, perhaps SourceViBuf sgid_t sid = sgid2targets_[i]; @@ -570,35 +501,38 @@ static void mk_ttd() { if (it != non_vsrc_update_info_.end()) { ttd.sv[j] = non_vsrc_update(nd, it->second.first, it->second.second); } else if (nd->extnode) { - auto search = ndvi2pd->find(nd); - nrn_assert(search != ndvi2pd->end()); - ttd.sv[j] = search->second; + auto search = ndvi2pd.find(nd); + nrn_assert(search != ndvi2pd.end()); + // olupton 2022-11-28: looks like search->second is always a pointer to a private + // vector in source_vi_buf_, so do_not_search makes sense. + ttd.sv[j] = neuron::container::data_handle{neuron::container::do_not_search, + search->second}; } else { - ttd.sv[j] = &(NODEV(nd)); + ttd.sv[j] = nd->v_handle(); } } else { auto search = sid2insrc_.find(sid); if (search != sid2insrc_.end()) { err = false; - ttd.sv[j] = insrc_buf_ + search->second; + // olupton 2022-11-28: insrc_buf_ is not part of the global model data structure, so + // do_not_search is appropriate + ttd.sv[j] = neuron::container::data_handle{neuron::container::do_not_search, + insrc_buf_ + search->second}; } } if (err == true) { hoc_execerr_ext("No source_var for target_var sid = %lld\n", (long long) sid); } } - if (ndvi2pd) { - delete ndvi2pd; - } nrnthread_v_transfer_ = thread_transfer; } -void thread_vi_compute(NrnThread* _nt) { +static void thread_vi_compute(NrnThread* _nt) { // vi+vext needed by either mpi or thread transfer copied into // the source value buffer for this thread. 
Note that relevant // poutsrc_ and ttd[_nt->id].sv items // point into this source value buffer - if (!source_vi_buf_) { + if (source_vi_buf_.empty()) { return; } SourceViBuf& svb = source_vi_buf_[_nt->id]; @@ -609,23 +543,28 @@ void thread_vi_compute(NrnThread* _nt) { } } -void mpi_transfer() { +static void mpi_transfer() { int i, n = outsrc_buf_size_; - if (nrn_node_ptr_change_cnt_ > vptr_change_cnt_) { - nrn_partrans_update_ptrs(); - } for (i = 0; i < n; ++i) { outsrc_buf_[i] = *poutsrc_[i]; } -#if PARANEURON +#if NRNMPI if (nrnmpi_numprocs > 1) { double wt = nrnmpi_wtime(); if (nrn_sparse_partrans > 0) { - nrnmpi_dbl_alltoallv_sparse( - outsrc_buf_, outsrccnt_, outsrcdspl_, insrc_buf_, insrccnt_, insrcdspl_); + nrnmpi_dbl_alltoallv_sparse(outsrc_buf_, + outsrccnt_.data(), + outsrcdspl_.data(), + insrc_buf_, + insrccnt_.data(), + insrcdspl_.data()); } else { - nrnmpi_dbl_alltoallv( - outsrc_buf_, outsrccnt_, outsrcdspl_, insrc_buf_, insrccnt_, insrcdspl_); + nrnmpi_dbl_alltoallv(outsrc_buf_, + outsrccnt_.data(), + outsrcdspl_.data(), + insrc_buf_, + insrccnt_.data(), + insrcdspl_.data()); } nrnmpi_transfer_wait_ += nrnmpi_wtime() - wt; errno = 0; @@ -634,15 +573,13 @@ void mpi_transfer() { // insrc_buf_ will get transferred to targets by thread_transfer } -void thread_transfer(NrnThread* _nt) { +static void thread_transfer(NrnThread* _nt) { if (!is_setup_) { hoc_execerror("ParallelContext.setup_transfer()", "needs to be called."); } if (targets_.empty()) { return; } - - // fprintf(xxxfile, "%g\n", t); // an edited old comment prior to allowing simultaneous threads and mpi. // for threads we do direct transfers under the assumption // that v is being transferred and they were set in a @@ -660,9 +597,6 @@ void thread_transfer(NrnThread* _nt) { // For now we presume we have dealt with these matters and // do the transfer. assert(n_transfer_thread_data_ == nrn_nthread); - if (target_ptr_need_update_cnt_ > target_ptr_update_cnt_) { - target_ptr_update(); - } TransferThreadData& ttd = transfer_thread_data_[_nt->id]; for (int i = 0; i < ttd.cnt; ++i) { *(ttd.tv[i]) = *(ttd.sv[i]); @@ -681,17 +615,8 @@ void thread_transfer(NrnThread* _nt) { // " But this was a mistake as many mpi implementations do not allow overlap // of send and receive buffers. -// 22-08-2014 For setup of the All2allv pattern, use the rendezvous rank -// idiom. 
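// --- Illustrative aside, not part of the patch: a single-process toy model of the
// gather -> exchange -> scatter flow implemented by mpi_transfer() and thread_transfer()
// above.  The data_handle elements are dereferenced exactly like the raw pointers they
// replace; plain double* is used here so the sketch stays self-contained, and the
// element-wise copy stands in for nrnmpi_dbl_alltoallv().  Names are illustrative only.
#include <cstddef>
#include <vector>
int transfer_demo() {
    std::vector<double> sources{10.0, 20.0, 30.0};
    std::vector<double*> poutsrc{&sources[2], &sources[0]};   // chosen once at setup time
    std::vector<double> outsrc(poutsrc.size()), insrc(poutsrc.size());
    std::vector<double> targets(poutsrc.size());
    std::vector<double*> sv{&insrc[0], &insrc[1]};            // per-target source locations
    std::vector<double*> tv{&targets[0], &targets[1]};        // per-target destinations
    for (std::size_t i = 0; i < poutsrc.size(); ++i) {
        outsrc[i] = *poutsrc[i];                              // gather sources into the send buffer
    }
    for (std::size_t i = 0; i < insrc.size(); ++i) {
        insrc[i] = outsrc[i];                                 // stand-in for the MPI exchange
    }
    for (std::size_t i = 0; i < tv.size(); ++i) {
        *tv[i] = *sv[i];                                      // scatter into the target variables
    }
    return (targets[0] == 30.0 && targets[1] == 10.0) ? 0 : 1;  // returns 0
}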
-#define HAVEWANT_t sgid_t -#define HAVEWANT_alltoallv sgid_alltoallv -#define HAVEWANT2Int MapSgid2Int -#if PARANEURON -#include "have2want.cpp" -#endif - void nrnmpi_setup_transfer() { -#if !PARANEURON +#if !NRNMPI if (nrnmpi_numprocs > 1) { hoc_execerror( "To use ParallelContext.setup_transfer when nhost > 1, NEURON must be configured with " @@ -701,49 +626,28 @@ void nrnmpi_setup_transfer() { #endif int nhost = nrnmpi_numprocs; is_setup_ = true; - // printf("nrnmpi_setup_transfer\n"); delete_imped_info(); - if (insrc_buf_) { - delete[] insrc_buf_; - insrc_buf_ = 0; - } - if (outsrc_buf_) { - delete[] outsrc_buf_; - outsrc_buf_ = 0; - } + delete[] std::exchange(insrc_buf_, nullptr); + delete[] std::exchange(outsrc_buf_, nullptr); + outsrc_buf_size_ = 0; sid2insrc_.clear(); - if (poutsrc_) { - delete[] poutsrc_; - poutsrc_ = 0; - } - if (poutsrc_indices_) { - delete[] poutsrc_indices_; - poutsrc_indices_ = 0; - } -#if PARANEURON + poutsrc_.clear(); + delete[] std::exchange(poutsrc_indices_, nullptr); +#if NRNMPI // if there are no targets anywhere, we do not need to do anything max_targets_ = nrnmpi_int_allmax(targets_.size()); if (max_targets_ == 0) { return; } if (nrnmpi_numprocs > 1) { - if (insrccnt_) { - delete[] insrccnt_; - insrccnt_ = NULL; - } - if (insrcdspl_) { - delete[] insrcdspl_; - insrcdspl_ = NULL; - } - if (outsrccnt_) { - delete[] outsrccnt_; - outsrccnt_ = NULL; - } - if (outsrcdspl_) { - delete[] outsrcdspl_; - outsrcdspl_ = NULL; - } - + insrccnt_.clear(); + insrccnt_.shrink_to_fit(); + insrcdspl_.clear(); + insrcdspl_.shrink_to_fit(); + outsrccnt_.clear(); + outsrccnt_.shrink_to_fit(); + outsrcdspl_.clear(); + outsrcdspl_.shrink_to_fit(); // This is an old comment prior to using the want_to_have rendezvous // rank function in want2have.cpp. The old method did not scale // to more sgids than could fit on a single rank, because @@ -778,105 +682,73 @@ void nrnmpi_setup_transfer() { // sids needed by this machine. The 'seen' table values are unused // but the keys are all the (unique) sgid needed by this process. // At the end seen is in fact what we want for sid2insrc_. - int needsrc_cnt = 0; int szalloc = targets_.size(); szalloc = szalloc ? szalloc : 1; // At the moment sid2insrc_ is serving as 'seen' sid2insrc_.clear(); - sid2insrc_.reserve(szalloc); // for single counting - sgid_t* needsrc = new sgid_t[szalloc]; // more than we need + sid2insrc_.reserve(szalloc); // for single counting + std::vector needsrc{}; for (size_t i = 0; i < sgid2targets_.size(); ++i) { sgid_t sid = sgid2targets_[i]; auto search = sid2insrc_.find(sid); if (search == sid2insrc_.end()) { sid2insrc_[sid] = 0; // at the moment, value does not matter - needsrc[needsrc_cnt++] = sid; + needsrc.push_back(sid); } } // 1 continued) Create an array of sources this rank owns. // This already exists as a vector in the SgidList sgids_ but // that is private so go ahead and copy. - sgid_t* ownsrc = new sgid_t[sgids_.size() + 1]; // not 0 length if count is 0 - for (size_t i = 0; i < sgids_.size(); ++i) { - ownsrc[i] = sgids_[i]; - } + std::vector ownsrc = sgids_; // 2) Call the have_to_want function. 
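// --- Illustrative aside, not part of the patch: have_to_want() comes from have2want.hpp,
// which this diff does not show.  From the single call site just below,
//     auto [send_to_want, recv_from_have] = have_to_want(ownsrc, needsrc, sgid_alltoallv);
// its result decomposes into two Data values: what this rank must send to the ranks that
// want its sids, and what it will receive from the ranks that have the sids it needs.
// The declaration sketched here (return type, parameter qualifiers, functor type) is an
// inference from that call, not the authoritative signature:
//
//     std::pair<Data, Data> have_to_want(std::vector<sgid_t>& have,
//                                        std::vector<sgid_t>& want,
//                                        void (*alltoallv)(Data&, Data&));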
- sgid_t* send_to_want; - int *send_to_want_cnt, *send_to_want_displ; - sgid_t* recv_from_have; - int *recv_from_have_cnt, *recv_from_have_displ; - - have_to_want(ownsrc, - sgids_.size(), - needsrc, - needsrc_cnt, - send_to_want, - send_to_want_cnt, - send_to_want_displ, - recv_from_have, - recv_from_have_cnt, - recv_from_have_displ, - default_rendezvous); + auto [send_to_want, recv_from_have] = have_to_want(ownsrc, needsrc, sgid_alltoallv); // sanity check. all the sgids we are asked to send, we actually have - int n = send_to_want_displ[nhost]; -#if 0 // done in passing in step 3 below - for (int i=0; i < n; ++i) { - sgid_t sgid = send_to_want[i]; - nrn_assert(sgid2srcindex_.find(sgid) != sgid2srcindex_.end()); - } -#endif + int n = send_to_want.displ[nhost]; // sanity check. all the sgids we receive, we actually need. // also set the sid2insrc_ value to the proper recv_from_have index. - n = recv_from_have_displ[nhost]; + n = recv_from_have.displ[nhost]; for (int i = 0; i < n; ++i) { - sgid_t sgid = recv_from_have[i]; + sgid_t sgid = recv_from_have.data[i]; nrn_assert(sid2insrc_.find(sgid) != sid2insrc_.end()); sid2insrc_[sgid] = i; } - // clean up a little - delete[] std::exchange(ownsrc, nullptr); - delete[] std::exchange(needsrc, nullptr); - delete[] std::exchange(recv_from_have, nullptr); - // 3) First return triple creates the proper outsrc_buf_. // Now that we know what machines are interested in our sids... // construct outsrc_buf, outsrc_buf_size, outsrccnt_, outsrcdspl_ // and poutsrc_; - outsrccnt_ = send_to_want_cnt; - outsrcdspl_ = send_to_want_displ; + std::swap(outsrccnt_, send_to_want.cnt); + std::swap(outsrcdspl_, send_to_want.displ); outsrc_buf_size_ = outsrcdspl_[nrnmpi_numprocs]; - szalloc = outsrc_buf_size_ ? outsrc_buf_size_ : 1; + szalloc = std::max(1, outsrc_buf_size_); outsrc_buf_ = new double[szalloc]; - poutsrc_ = new double*[szalloc]; + poutsrc_.resize(szalloc); poutsrc_indices_ = new int[szalloc]; for (int i = 0; i < outsrc_buf_size_; ++i) { - sgid_t sid = send_to_want[i]; + sgid_t sid = send_to_want.data[i]; auto search = sgid2srcindex_.find(sid); nrn_assert(search != sgid2srcindex_.end()); Node* nd = visources_[search->second]; - NonVSrcUpdateInfo::iterator it; - it = non_vsrc_update_info_.find(sid); + auto const it = non_vsrc_update_info_.find(sid); if (it != non_vsrc_update_info_.end()) { poutsrc_[i] = non_vsrc_update(nd, it->second.first, it->second.second); } else if (!nd->extnode) { - poutsrc_[i] = &(NODEV(nd)); + poutsrc_[i] = nd->v_handle(); } else { // the v+vext case can only be done after mk_svib() } poutsrc_indices_[i] = search->second; outsrc_buf_[i] = double(sid); // see step 5 } - delete[] send_to_want; // 4) The second triple is creates the insrc_buf_. // From the recv_from_have and sid2insrc_ table, construct the insrc... - insrccnt_ = recv_from_have_cnt; - insrcdspl_ = recv_from_have_displ; + std::swap(insrccnt_, recv_from_have.cnt); + std::swap(insrcdspl_, recv_from_have.displ); insrc_buf_size_ = insrcdspl_[nrnmpi_numprocs]; szalloc = insrc_buf_size_ ? 
insrc_buf_size_ : 1; insrc_buf_ = new double[szalloc]; @@ -884,7 +756,7 @@ void nrnmpi_setup_transfer() { nrnmpi_v_transfer_ = mpi_transfer; } -#endif // PARANEURON +#endif // NRNMPI nrn_mk_transfer_thread_data_ = mk_ttd; if (!v_structure_change) { mk_ttd(); @@ -892,38 +764,26 @@ void nrnmpi_setup_transfer() { } void nrn_partrans_clear() { - nrnthread_v_transfer_ = NULL; - nrnthread_vi_compute_ = NULL; - nrnmpi_v_transfer_ = NULL; + nrnthread_v_transfer_ = nullptr; + nrnthread_vi_compute_ = nullptr; + nrnmpi_v_transfer_ = nullptr; sgid2srcindex_.clear(); sgids_.resize(0); visources_.resize(0); sgid2targets_.resize(0); target_pntlist_.resize(0); - target_parray_index_.resize(0); - targets_.resize(0); + targets_.clear(); max_targets_ = 0; rm_svibuf(); rm_ttd(); - if (insrc_buf_) { - delete[] insrc_buf_; - insrc_buf_ = NULL; - } - if (outsrc_buf_) { - delete[] outsrc_buf_; - outsrc_buf_ = NULL; - } + delete[] std::exchange(insrc_buf_, nullptr); + delete[] std::exchange(outsrc_buf_, nullptr); + outsrc_buf_size_ = 0; sid2insrc_.clear(); - if (poutsrc_) { - delete[] poutsrc_; - poutsrc_ = NULL; - } - if (poutsrc_indices_) { - delete[] poutsrc_indices_; - poutsrc_indices_ = NULL; - } + poutsrc_.clear(); + delete[] std::exchange(poutsrc_indices_, nullptr); non_vsrc_update_info_.clear(); - nrn_mk_transfer_thread_data_ = 0; + nrn_mk_transfer_thread_data_ = nullptr; } // assume one thread and no extracellular @@ -942,7 +802,7 @@ void pargap_jacobi_setup(int mode) { delete_imped_info(); imped_change_cnt = structure_change_cnt; } - if (imped_current_type_count_ == 0 && targets_.size() > 0) { + if (imped_current_type_count_ == 0 && !targets_.empty()) { for (size_t i = 0; i < targets_.size(); ++i) { Point_process* pp = target_pntlist_[i]; if (!pp) { @@ -992,9 +852,6 @@ void pargap_jacobi_setup(int mode) { } } } - if (target_ptr_need_update_cnt_ > target_ptr_update_cnt_) { - target_ptr_update(); - } TransferThreadData* ttd = transfer_thread_data_; if (mode == 0) { // setup if (visources_.size()) { @@ -1012,63 +869,71 @@ void pargap_jacobi_setup(int mode) { } } else { // tear down for (size_t i = 0; i < visources_.size(); ++i) { - NODEV(visources_[i]) = vgap1[i]; + visources_[i]->v() = vgap1[i]; } - if (ttd) + if (ttd) { for (int i = 0; i < ttd->cnt; ++i) { *(ttd->tv[i]) = vgap2[i]; } - if (vgap1) { - delete[] vgap1; - vgap1 = NULL; - } - if (vgap2) { - delete[] vgap2; - vgap2 = NULL; } + delete[] std::exchange(vgap1, nullptr); + delete[] std::exchange(vgap2, nullptr); } } -void pargap_jacobi_rhs(double* b, double* x) { - // helper for complex impedance with parallel gap junctions - // b = b - R*x R are the off diagonal gap elements of the jacobian. - // we presume 1 thread. First nrn_thread[0].end equations are in node order. - if (!nrnthread_v_transfer_) { - return; - } - - NrnThread* _nt = nrn_threads; +void pargap_jacobi_rhs(std::vector>& b, + const std::vector>& x) { + // First loop for real, second for imag + for (int real_imag = 0; real_imag < 2; ++real_imag) { + // helper for complex impedance with parallel gap junctions + // b = b - R*x R are the off diagonal gap elements of the jacobian. + // we presume 1 thread. First nrn_thread[0].end equations are in node order. 
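// --- Illustrative aside, not part of the patch: the complex overload of
// pargap_jacobi_rhs() introduced here runs the real-valued transfer and current machinery
// twice, once with Re(x) loaded into the gap nodes and once with Im(x), because the gap
// coupling coefficients are real, so Re(R*x) = R*Re(x) and Im(R*x) = R*Im(x).
// Minimal self-contained check of that identity:
#include <complex>
int two_pass_demo() {
    double R = 2.5;                                        // a real coupling coefficient
    std::complex<double> x{3.0, -1.0};
    std::complex<double> two_pass{R * x.real(), R * x.imag()};
    return (two_pass == R * x) ? 0 : 1;                    // returns 0: the two real passes agree
}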
+ if (!nrnthread_v_transfer_) { + return; + } - // transfer gap node voltages to gap vpre - for (size_t i = 0; i < visources_.size(); ++i) { - Node* nd = visources_[i]; - NODEV(nd) = x[nd->v_node_index]; - } - mpi_transfer(); - thread_transfer(_nt); + NrnThread* _nt = nrn_threads; - // set gap node voltages to 0 so we can use nrn_cur to set rhs - for (size_t i = 0; i < visources_.size(); ++i) { - Node* nd = visources_[i]; - NODEV(nd) = 0.0; - } - // Initialize rhs to 0. - for (int i = 0; i < _nt->end; ++i) { - VEC_RHS(i) = 0.0; - } + // transfer gap node voltages to gap vpre + for (size_t i = 0; i < visources_.size(); ++i) { + Node* nd = visources_[i]; + if (real_imag == 0) { + nd->v() = x[nd->v_node_index].real(); + } else { + nd->v() = x[nd->v_node_index].imag(); + } + } + mpi_transfer(); + thread_transfer(_nt); - for (int k = 0; k < imped_current_type_count_; ++k) { - int type = imped_current_type_[k]; - Memb_list* ml = imped_current_ml_[k]; - (*memb_func[type].current)(_nt, ml, type); - } + // set gap node voltages to 0 so we can use nrn_cur to set rhs + for (size_t i = 0; i < visources_.size(); ++i) { + Node* nd = visources_[i]; + nd->v() = 0.0; + } + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + auto* const vec_rhs = _nt->node_rhs_storage(); + // Initialize rhs to 0. + for (int i = 0; i < _nt->end; ++i) { + vec_rhs[i] = 0.0; + } + for (int k = 0; k < imped_current_type_count_; ++k) { + int type = imped_current_type_[k]; + Memb_list* ml = imped_current_ml_[k]; + memb_func[type].current(sorted_token, _nt, ml, type); + } - // possibly many gap junctions in same node (and possible even different - // types) but rhs is the accumulation of all those instances at each node - // so ... The only thing that can go wrong is if there are intances of - // gap junctions that are not being used (not in the target list). - for (int i = 0; i < _nt->end; ++i) { - b[i] += VEC_RHS(i); + // possibly many gap junctions in same node (and possible even different + // types) but rhs is the accumulation of all those instances at each node + // so ... The only thing that can go wrong is if there are intances of + // gap junctions that are not being used (not in the target list). + for (int i = 0; i < _nt->end; ++i) { + if (real_imag == 0) { + b[i] += vec_rhs[i]; + } else { + b[i] += std::complex(0, vec_rhs[i]); + } + } } } @@ -1181,7 +1046,8 @@ static SetupTransferInfo* nrncore_transfer_info(int cn_nthread) { int tid = nt ? nt->id : 0; int type = pp->prop->_type; Memb_list& ml = *(nrn_threads[tid]._ml_list[type]); - int ix = targets_[i] - ml._data[0]; + int ix = ml.legacy_index(targets_[i]); + assert(ix >= 0); auto& g = gi[tid]; g.tar_sid.push_back(sid); @@ -1198,18 +1064,25 @@ static SetupTransferInfo* nrncore_transfer_info(int cn_nthread) { int tid = nd->_nt ? nd->_nt->id : 0; int type = -1; // default voltage int ix = 0; // fill below - NonVSrcUpdateInfo::iterator it = non_vsrc_update_info_.find(sid); + auto const it = non_vsrc_update_info_.find(sid); if (it != non_vsrc_update_info_.end()) { // not a voltage source type = it->second.first; - ix = it->second.second; // this entire context needs to be reworked. If the source is a // point process, then if more than one in this nd, it is an error. - double* d = non_vsrc_update(nd, type, ix); + auto d = non_vsrc_update(nd, type, it->second.second); NrnThread* nt = nd->_nt ? 
nd->_nt : nrn_threads; Memb_list& ml = *nt->_ml_list[type]; - ix = d - ml._data[0]; + ix = ml.legacy_index(d); + assert(ix >= 0); } else { // is a voltage source - ix = nd->_v - nrn_threads[tid]._actual_v; + // Calculate the offset of the Node voltage in the section of + // the underlying storage vector that is dedicated to NrnThread + // number `tid`. Warning: this is only correct if no + // modifications have been made to any Node since + // reorder_secorder() was last called. + auto const cache_token = nrn_ensure_model_data_are_sorted(); + ix = nd->_node_handle.current_row() - + cache_token.thread_cache(tid).node_data_offset; assert(nd->extnode == NULL); // only if v assert(ix >= 0 && ix < nrn_threads[tid].end); } diff --git a/src/nrniv/ppshape.cpp b/src/nrniv/ppshape.cpp index 91a19e2432..a9303b70fa 100644 --- a/src/nrniv/ppshape.cpp +++ b/src/nrniv/ppshape.cpp @@ -9,9 +9,6 @@ #endif // HAVE_IV #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - // ppshape registration static double pp_append(void* v) { diff --git a/src/nrniv/prcellstate.cpp b/src/nrniv/prcellstate.cpp index 606e8e0176..840d22f306 100644 --- a/src/nrniv/prcellstate.cpp +++ b/src/nrniv/prcellstate.cpp @@ -4,17 +4,13 @@ #include "nrniv_mf.h" #include "netcon.h" #include -#include "OS/list.h" #include "neuron.h" +#include "utils/enumerate.h" #define precision 15 void nrn_prcellstate(int gid, const char* filesuffix); -declarePtrList(NetConList, NetCon); // NetCons in same order as Point_process -implementPtrList(NetConList, NetCon); // and there may be several per pp. - - static void pr_memb(int type, Memb_list* ml, int* cellnodes, @@ -37,7 +33,7 @@ static void pr_memb(int type, pnt2index.emplace(pp, pnt2index.size()); } for (int j = 0; j < size; ++j) { - fprintf(f, " %d %d %.*g\n", cellnodes[inode], j, precision, ml->_data[i][j]); + fprintf(f, " %d %d %.*g\n", cellnodes[inode], j, precision, ml->data(i, j)); } } } @@ -51,10 +47,7 @@ static void pr_netcon(NrnThread& nt, FILE* f, const std::map& pnt2in // List of NetCon for each of the NET_RECEIVE point process instances // ... all NetCon list in the hoc NetCon cTemplate - NetConList** nclist = new NetConList*[pnt2index.size()]; - for (size_t i = 0; i < pnt2index.size(); ++i) { - nclist[i] = new NetConList(1); - } + std::vector> nclist(pnt2index.size()); int nc_cnt = 0; Symbol* ncsym = hoc_lookup("NetCon"); hoc_Item* q; @@ -64,17 +57,15 @@ static void pr_netcon(NrnThread& nt, FILE* f, const std::map& pnt2in Point_process* pp = nc->target_; const auto& it = pnt2index.find(pp); if (it != pnt2index.end()) { - nclist[it->second]->append(nc); + nclist[it->second].push_back(nc); ++nc_cnt; } } fprintf(f, "netcons %d\n", nc_cnt); fprintf(f, " pntindex srcgid active delay weights\n"); - for (size_t i = 0; i < pnt2index.size(); ++i) { - for (int j = 0; j < nclist[i]->count(); ++j) { - NetCon* nc = nclist[i]->item(j); - int srcgid = -3; - srcgid = (nc->src_) ? nc->src_->gid_ : -3; + for (const auto&& [i, ncl]: enumerate(nclist)) { + for (const auto& nc: ncl) { + int srcgid = (nc->src_) ? nc->src_->gid_ : -3; if (srcgid < 0 && nc->src_ && nc->src_->osrc_) { const char* name = nc->src_->osrc_->ctemplate->sym->name; fprintf(f, "%zd %s %d %.*g", i, name, nc->active_ ? 
1 : 0, precision, nc->delay_); @@ -90,40 +81,19 @@ static void pr_netcon(NrnThread& nt, FILE* f, const std::map& pnt2in fprintf(f, "\n"); } } - // cleanup - for (size_t i = 0; i < pnt2index.size(); ++i) { - delete nclist[i]; - } - delete[] nclist; } static void pr_realcell(PreSyn& ps, NrnThread& nt, FILE* f) { + assert(ps.thvar_); // threshold variable is a voltage - printf("thvar=%p actual_v=%p end=%p\n", ps.thvar_, nt._actual_v, nt._actual_v + nt.end); - int inode = -1; - if (ps.thvar_ < nt._actual_v || ps.thvar_ >= (nt._actual_v + nt.end)) { - if (ps.ssrc_) { /* not cache efficient, search the nodes in this section */ - printf("%s\n", ps.ssrc_ ? secname(ps.ssrc_) : "unknown"); - for (int i = 0; i < ps.ssrc_->nnode; ++i) { - Node* nd = ps.ssrc_->pnode[i]; - if (ps.thvar_ == nd->_v) { - inode = nd->v_node_index; - break; - } - } - if (inode < 0) { /* check parent node */ - Node* nd = ps.ssrc_->parentnode; - if (ps.thvar_ == nd->_v) { - inode = nd->v_node_index; - } - } - } - if (inode < 0) { - hoc_execerror("gid not associated with a voltage", 0); - } - } else { - inode = ps.thvar_ - nt._actual_v; - } + + // If the "modern" data is "sorted" then the order should match the "legacy" + // data structures that still live alongside it + auto const cache_token = nrn_ensure_model_data_are_sorted(); + assert( + ps.thvar_.refers_to(neuron::model().node_data())); + int const inode = ps.thvar_.current_row() - cache_token.thread_cache(nt.id).node_data_offset; + // hoc_execerror("gid not associated with a voltage", 0); // and the root node is ... int rnode = inode; @@ -149,7 +119,7 @@ static void pr_realcell(PreSyn& ps, NrnThread& nt, FILE* f) { fprintf(f, "inode parent area a b\n"); for (int i = 0; i < nt.end; ++i) if (cellnodes[i] >= 0) { - Node* nd = nt._v_node[i]; // if not cach_efficient then _actual_area=NULL + Node* nd = nt._v_node[i]; fprintf(f, "%d %d %.*g %.*g %.*g\n", cellnodes[i], @@ -157,9 +127,9 @@ static void pr_realcell(PreSyn& ps, NrnThread& nt, FILE* f) { precision, NODEAREA(nd), precision, - nt._actual_a[i], + nd->a(), precision, - nt._actual_b[i]); + nd->b()); } fprintf(f, "inode v\n"); for (int i = 0; i < nt.end; ++i) diff --git a/src/nrniv/pysecname2sec.cpp b/src/nrniv/pysecname2sec.cpp index 1da45e9b5d..2f0d4a0201 100644 --- a/src/nrniv/pysecname2sec.cpp +++ b/src/nrniv/pysecname2sec.cpp @@ -2,13 +2,13 @@ #include #include -#include #include #include #include "nrnsymdiritem.h" +#include "utils/enumerate.h" #include #include @@ -246,29 +246,26 @@ void nrnpy_pysecname2sec_remove(Section* sec) { #endif } -void nrn_symdir_load_pysec(SymbolList& sl, void* v) { +void nrn_symdir_load_pysec(std::vector& sl, void* v) { activate(); if (!v) { // top level items are any of the four types - for (Name2CellorSec::iterator it = n2cs.begin(); it != n2cs.end(); ++it) { - CellorSec& cs = it->second; + for (auto&& [symbol, cs]: n2cs) { if (cs.first != NONETYPE && cs.first != OVERLOADCOUNT) { - SymbolItem* si = new SymbolItem(it->first.c_str(), 0); + SymbolItem* si = new SymbolItem(symbol.c_str(), 0); si->pysec_type_ = cs.first == CELLTYPE ? 
PYSECOBJ : PYSECNAME; si->pysec_ = (Section*) cs.second; - sl.append(si); + sl.push_back(si); } } } else { // in cell items are either OVERLOADCOUNT or SECTYPE - Name2CellorSec* n2s = (Name2CellorSec*) v; - for (Name2CellorSec::iterator it = n2s->begin(); it != n2s->end(); ++it) { - CellorSec& cs = it->second; + for (auto&& [symbol, cs]: *static_cast(v)) { if (cs.first == SECTYPE) { - SymbolItem* si = new SymbolItem(it->first.c_str(), 0); + auto* si = new SymbolItem(symbol.c_str(), 0); si->pysec_type_ = PYSECNAME; si->pysec_ = (Section*) cs.second; - sl.append(si); + sl.push_back(si); } } } diff --git a/src/nrniv/savstate.cpp b/src/nrniv/savstate.cpp index 46f453d054..eeb6ca81bd 100644 --- a/src/nrniv/savstate.cpp +++ b/src/nrniv/savstate.cpp @@ -13,6 +13,7 @@ #include "tqueue.h" #include "netcon.h" #include "vrecitem.h" +#include "utils/enumerate.h" typedef void (*ReceiveFunc)(Point_process*, double*, double); @@ -24,7 +25,7 @@ extern ReceiveFunc* pnt_receive; extern NetCvode* net_cvode_instance; extern TQueue* net_cvode_instance_event_queue(NrnThread*); extern hoc_Item* net_cvode_instance_psl(); -extern PlayRecList* net_cvode_instance_prl(); +extern std::vector* net_cvode_instance_prl(); extern double t; extern short* nrn_is_artificial_; static void tqcallback(const TQItem* tq, int i); @@ -249,7 +250,7 @@ void SaveState::ssi_def() { // param array including PARAMETERs. if (pnt_receive[im]) { ssi[im].offset = 0; - ssi[im].size = np->prop()->param_size; + ssi[im].size = np->prop()->param_size(); // sum over array dimensions } else { int type = STATE; for (Symbol* sym = np->first_var(); np->more_var(); sym = np->next_var()) { @@ -257,6 +258,10 @@ void SaveState::ssi_def() { sym->subtype == _AMBIGUOUS) { if (ssi[im].offset < 0) { ssi[im].offset = np->prop_index(sym); + } else { + // assert what we assume: that after this code the variables we want are + // `size` contiguous legacy indices starting at `offset` + assert(ssi[im].offset + ssi[im].size == np->prop_index(sym)); } ssi[im].size += hoc_total_array_data(sym, 0); } @@ -475,8 +480,8 @@ void SaveState::alloc() { allocacell(acell_[j], i); ++j; } - PlayRecList* prl = net_cvode_instance_prl(); - nprs_ = prl->count(); + std::vector* prl = net_cvode_instance_prl(); + nprs_ = prl->size(); if (nprs_) { prs_ = new PlayRecordSave*[nprs_]; } @@ -602,11 +607,10 @@ void SaveState::save() { assert(t == nt->_t); } t_ = t; - int inode; for (int isec = 0; isec < nsec_; ++isec) { SecState& ss = ss_[isec]; Section* sec = ss.sec; - for (inode = 0; inode < ss.nnode; ++inode) { + for (int inode = 0; inode < ss.nnode; ++inode) { NodeState& ns = ss.ns[inode]; Node* nd = sec->pnode[inode]; savenode(ns, nd); @@ -623,11 +627,10 @@ void SaveState::save() { ++j; } if (nprs_) { - PlayRecList* prl = net_cvode_instance_prl(); - int i; - assert(nprs_ == prl->count()); - for (i = 0; i < nprs_; ++i) { - prs_[i] = prl->item(i)->savestate_save(); + std::vector* prl = net_cvode_instance_prl(); + assert(nprs_ == prl->size()); + for (auto&& [i, e]: enumerate(*prl)) { + prs_[i] = e->savestate_save(); } } savenet(); @@ -642,8 +645,7 @@ void SaveState::save() { void SaveState::savenode(NodeState& ns, Node* nd) { ns.v = NODEV(nd); int istate = 0; - Prop* p; - for (p = nd->prop; p; p = p->next) { + for (Prop* p = nd->prop; p; p = p->next) { if (ssi[p->_type].size == 0) { continue; } @@ -658,7 +660,7 @@ void SaveState::savenode(NodeState& ns, Node* nd) { #endif { for (int ip = ssi[type].offset; ip < max; ++ip) { - ns.state[istate++] = p->param[ip]; + ns.state[istate++] = 
p->param_legacy(ip); } } } @@ -669,9 +671,8 @@ void SaveState::saveacell(ACellState& ac, int type) { int sz = ssi[type].size; double* p = ac.state; for (int i = 0; i < ml.nodecount; ++i) { - double* d = ml._data[i]; for (int j = 0; j < sz; ++j) { - (*p++) = d[j]; + (*p++) = ml.data(i, j); } } } @@ -685,11 +686,10 @@ void SaveState::restore(int type) { FOR_THREADS(nt) { nt->_t = t_; } - int inode; for (int isec = 0; isec < nsec_; ++isec) { SecState& ss = ss_[isec]; Section* sec = ss.sec; - for (inode = 0; inode < ss.nnode; ++inode) { + for (int inode = 0; inode < ss.nnode; ++inode) { NodeState& ns = ss.ns[inode]; Node* nd = sec->pnode[inode]; restorenode(ns, nd); @@ -708,12 +708,11 @@ void SaveState::restore(int type) { if (type == 1) { return; } - PlayRecList* prl = net_cvode_instance_prl(); - // during a local step the PlayRecList is augmented with GLineRecord + std::vector* prl = net_cvode_instance_prl(); + // during a local step prl is augmented with GLineRecord // assert(nprs_ == prl->count()); - assert(nprs_ <= prl->count()); - int i; - for (i = 0; i < nprs_; ++i) { + assert(nprs_ <= prl->size()); + for (int i = 0; i < nprs_; ++i) { prs_[i]->savestate_restore(); } restorenet(); @@ -726,11 +725,9 @@ void SaveState::restore(int type) { } void SaveState::restorenode(NodeState& ns, Node* nd) { - NODEV(nd) = ns.v; - ; + nd->v() = ns.v; int istate = 0; - Prop* p; - for (p = nd->prop; p; p = p->next) { + for (Prop* p = nd->prop; p; p = p->next) { if (ssi[p->_type].size == 0) { continue; } @@ -745,7 +742,7 @@ void SaveState::restorenode(NodeState& ns, Node* nd) { #endif { for (int ip = ssi[type].offset; ip < max; ++ip) { - p->param[ip] = ns.state[istate++]; + p->param_legacy(ip) = ns.state[istate++]; } } } @@ -756,9 +753,8 @@ void SaveState::restoreacell(ACellState& ac, int type) { int sz = ssi[type].size; double* p = ac.state; for (int i = 0; i < ml.nodecount; ++i) { - double* d = ml._data[i]; for (int j = 0; j < sz; ++j) { - d[j] = (*p++); + ml.data(i, j) = (*p++); } } } @@ -933,27 +929,21 @@ void SaveState::write(OcFile* ocf, bool close) { } void SaveState::savenet() { - int i, j, n; - double* w; hoc_Item* q; - Object* ob; - NetCon* d; - PreSyn* ps; - i = 0; + int i = 0; ITERATE(q, nct->olist) { - ob = OBJ(q); - d = (NetCon*) ob->u.this_pointer; - n = ncs_[i].nstate; - w = ncs_[i].state; - for (j = 0; j < n; ++j) { + Object* ob = OBJ(q); + const NetCon* d = (NetCon*) ob->u.this_pointer; + int n = ncs_[i].nstate; + double* w = ncs_[i].state; + for (int j = 0; j < n; ++j) { w[j] = d->weight_[j]; } ++i; } - i = 0; - if (net_cvode_instance_psl()) + if (int i = 0; net_cvode_instance_psl()) { ITERATE(q, net_cvode_instance_psl()) { - ps = (PreSyn*) VOIDITM(q); + auto* ps = static_cast(VOIDITM(q)); ps->hi_index_ = i; pss_[i].flag = ps->flag_; pss_[i].valthresh = ps->valthresh_; @@ -961,6 +951,7 @@ void SaveState::savenet() { pss_[i].told = ps->told_; ++i; } + } alloc_tq(); tqcnt_ = 0; NrnThread* nt; @@ -984,29 +975,23 @@ void SaveState::tqsave(const TQItem* q, int) { } void SaveState::restorenet() { - int i, j, n; - double* w; - hoc_Item* q; - Object* ob; - NetCon* d; - PreSyn* ps; // NetCon's - i = 0; + int i = 0; + hoc_Item* q; ITERATE(q, nct->olist) { - ob = OBJ(q); - d = (NetCon*) ob->u.this_pointer; - n = ncs_[i].nstate; - w = ncs_[i].state; - for (j = 0; j < n; ++j) { + Object* ob = OBJ(q); + NetCon* d = (NetCon*) ob->u.this_pointer; + int n = ncs_[i].nstate; + const double* w = ncs_[i].state; + for (int j = 0; j < n; ++j) { d->weight_[j] = w[j]; } ++i; } // PreSyn's - i = 0; - if 
(net_cvode_instance_psl()) + if (int i = 0; net_cvode_instance_psl()) ITERATE(q, net_cvode_instance_psl()) { - ps = (PreSyn*) VOIDITM(q); + auto* ps = static_cast(VOIDITM(q)); ps->hi_index_ = i; ps->flag_ = pss_[i].flag; ps->valthresh_ = pss_[i].valthresh; @@ -1019,8 +1004,7 @@ void SaveState::restorenet() { // clear it clear_event_queue(); // restore it - n = tqs_->nstate; - for (i = 0; i < n; ++i) { + for (int i = 0; i < tqs_->nstate; ++i) { tqs_->items[i]->savestate_restore(tqs_->tdeliver[i], net_cvode_instance); } } @@ -1033,8 +1017,7 @@ void SaveState::readnet(FILE* f) { if (nncs_ != 0) { ncs_ = new NetConState[nncs_]; } - int i, n, type; - for (i = 0; i < nncs_; ++i) { + for (int i = 0; i < nncs_; ++i) { ASSERTfgets(buf, 200, f); sscanf(buf, "%d %d\n", &ncs_[i].object_index, &ncs_[i].nstate); if (ncs_[i].nstate) { @@ -1049,7 +1032,7 @@ void SaveState::readnet(FILE* f) { pss_ = new PreSynState[npss_]; ASSERTfread(pss_, sizeof(PreSynState), npss_, f); PreSyn* ps; - i = 0; + int i = 0; hoc_Item* q; if (net_cvode_instance_psl()) ITERATE(q, net_cvode_instance_psl()) { @@ -1060,6 +1043,7 @@ void SaveState::readnet(FILE* f) { assert(npss_ == i); } + int n = 0; ASSERTfgets(buf, 200, f); sscanf(buf, "%d\n", &n); tqs_->nstate = n; @@ -1067,9 +1051,10 @@ void SaveState::readnet(FILE* f) { tqs_->items = new DiscreteEvent*[n]; tqs_->tdeliver = new double[n]; ASSERTfread(tqs_->tdeliver, sizeof(double), n, f); - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { DiscreteEvent* de = NULL; ASSERTfgets(buf, 200, f); + int type = 0; sscanf(buf, "%d\n", &type); switch (type) { case DiscreteEventType: @@ -1104,8 +1089,7 @@ void SaveState::readnet(FILE* f) { void SaveState::writenet(FILE* f) { fprintf(f, "%d\n", nncs_); - int i, n; - for (i = 0; i < nncs_; ++i) { + for (int i = 0; i < nncs_; ++i) { fprintf(f, "%d %d\n", ncs_[i].object_index, ncs_[i].nstate); if (ncs_[i].nstate) { ASSERTfwrite(ncs_[i].state, sizeof(double), ncs_[i].nstate, f); @@ -1115,11 +1099,11 @@ void SaveState::writenet(FILE* f) { if (npss_) { ASSERTfwrite(pss_, sizeof(PreSynState), npss_, f); } - n = tqs_->nstate; + int n = tqs_->nstate; fprintf(f, "%d\n", n); if (n) { ASSERTfwrite(tqs_->tdeliver, sizeof(double), n, f); - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { tqs_->items[i]->savestate_write(f); } } @@ -1135,14 +1119,11 @@ bool SaveState::checknet(bool warn) { } return false; } - int i; hoc_Item* q; - Object* ob; - NetCon* d; - i = 0; + int i = 0; ITERATE(q, nct->olist) { - ob = OBJ(q); - d = (NetCon*) ob->u.this_pointer; + Object* ob = OBJ(q); + const auto* d = static_cast(ob->u.this_pointer); if (ob->index != ncs_[i].object_index) { if (warn) { fprintf(stderr, @@ -1187,14 +1168,11 @@ void SaveState::allocnet() { if (nncs_ != 0) { ncs_ = new NetConState[nncs_]; } - int i, n; hoc_Item* q; - Object* ob; - NetCon* d; - i = 0; + int i = 0; ITERATE(q, nct->olist) { - ob = OBJ(q); - d = (NetCon*) ob->u.this_pointer; + Object* ob = OBJ(q); + const auto* d = static_cast(ob->u.this_pointer); ncs_[i].object_index = ob->index; ncs_[i].nstate = d->cnt_; if (d->cnt_) { @@ -1202,11 +1180,10 @@ void SaveState::allocnet() { } ++i; } - PreSyn* ps; npss_ = 0; if (net_cvode_instance_psl()) ITERATE(q, net_cvode_instance_psl()) { - ps = (PreSyn*) VOIDITM(q); + auto* ps = static_cast(VOIDITM(q)); ps->hi_index_ = npss_; ++npss_; } @@ -1218,9 +1195,8 @@ void SaveState::allocnet() { // The event TQueue is highly volatile so it needs to be freed and allocated // on every save and fread void SaveState::free_tq() { - int i; if 
(tqs_->nstate) { - for (i = 0; i < tqs_->nstate; ++i) { + for (int i = 0; i < tqs_->nstate; ++i) { delete tqs_->items[i]; } tqs_->nstate = 0; @@ -1229,7 +1205,6 @@ void SaveState::free_tq() { } } void SaveState::alloc_tq() { - int n; free_tq(); tqcnt_ = 0; NrnThread* nt; @@ -1239,7 +1214,7 @@ void SaveState::alloc_tq() { callback_mode = 0; tq->forall_callback(tqcallback); } - n = tqcnt_; + int n = tqcnt_; tqs_->nstate = n; if (n) { tqs_->items = new DiscreteEvent*[n]; diff --git a/src/nrniv/secbrows.cpp b/src/nrniv/secbrows.cpp index 1e87e30fe2..89194dc299 100644 --- a/src/nrniv/secbrows.cpp +++ b/src/nrniv/secbrows.cpp @@ -16,10 +16,8 @@ #endif #include "nrnoc2iv.h" +#include "nrnpy.h" #include "membfunc.h" -void (*nrnpy_call_python_with_section)(Object*, Section*) = NULL; -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); //----------------------------------------- static double sb_select(void* v) { @@ -167,8 +165,8 @@ void OcSectionBrowser::accept() { } nrn_pushsec(psec_[i]); if (accept_is_pycallback_) { - if (nrnpy_call_python_with_section) { - (*nrnpy_call_python_with_section)(accept_pycallback_, psec_[i]); + if (neuron::python::methods.call_python_with_section) { + neuron::python::methods.call_python_with_section(accept_pycallback_, psec_[i]); } else { // should not be able to get here } @@ -224,8 +222,8 @@ void OcSectionBrowser::select(GlyphIndex i) { if (psec_[i]->prop) { nrn_pushsec(psec_[i]); if (select_is_pycallback_) { - if (nrnpy_call_python_with_section) { - (*nrnpy_call_python_with_section)(select_pycallback_, psec_[i]); + if (neuron::python::methods.call_python_with_section) { + neuron::python::methods.call_python_with_section(select_pycallback_, psec_[i]); } else { // should not be able to get here } diff --git a/src/nrniv/shape.cpp b/src/nrniv/shape.cpp index 01b94875c1..389c17b1e7 100644 --- a/src/nrniv/shape.cpp +++ b/src/nrniv/shape.cpp @@ -38,7 +38,6 @@ #include "ocobserv.h" #include "parse.hpp" #include "ivoc.h" -#include "treeset.h" #define Shape_Section_ "Section PlotShape" #define Shape_Rotate_ "Rotate3D PlotShape" @@ -181,9 +180,6 @@ bool OcShapeHandler::event(Event&) { } #endif // HAVE_IV -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - // Shape class registration for oc static double sh_view(void* v) { TRY_GUI_REDIRECT_ACTUAL_DOUBLE("Shape.view", v); @@ -882,7 +878,6 @@ ShapeScene::ShapeScene(SectionList* sl) r3b_ = new Rotate3Band(NULL, new RubberCallback(ShapeScene)(this, &ShapeScene::transform3d)); r3b_->ref(); observe(sl); - var_name_ = NULL; wk.style()->find_attribute("shape_beveljoin", beveljoin_); MenuItem* mi; @@ -965,9 +960,6 @@ ShapeScene::~ShapeScene() { Resource::unref(sg_); Resource::unref(r3b_); delete shape_changed_; - if (var_name_) { - delete var_name_; - } } void ShapeScene::erase_all() { @@ -1044,24 +1036,17 @@ PolyGlyph* ShapeScene::shape_section_list() { } void ShapeScene::name(const char* s) { - if (!var_name_) { - var_name_ = new CopyString(s); - } else { - *var_name_ = s; - } + var_name_ = s; } void ShapeScene::save_phase2(std::ostream& o) { - char buf[256]; - if (var_name_) { - if ((var_name_->string())[var_name_->length() - 1] == '.') { - Sprintf(buf, "%sappend(save_window_)", var_name_->string()); + if (!var_name_.empty()) { + if (var_name_.back() == '.') { + o << var_name_ << "append(save_window_)" << std::endl; } else { - Sprintf(buf, "%s = save_window_", var_name_->string()); + o << 
var_name_ << " = save_window_" << std::endl; } - o << buf << std::endl; - Sprintf(buf, "save_window_.save_name(\"%s\")", var_name_->string()); - o << buf << std::endl; + o << "save_window_.save_name(\"" << var_name_ << "\")" << std::endl; } Graph::save_phase2(o); } @@ -1274,8 +1259,6 @@ ShapeSection::ShapeSection(Section* sec) { section_ref(sec_); color_ = Scene::default_foreground(); color_->ref(); - old_ = NULL; - pvar_ = NULL; colorseg_ = NULL; colorseg_size_ = 0; scale(1.); @@ -1500,50 +1483,26 @@ bool ShapeSection::good() const { return sec_->prop != 0; } -void ShapeSection::update_ptrs() { - if (!pvar_) { - return; - } - int i, n = section()->nnode - 1; - for (i = 0; i < n; ++i) { - pvar_[i] = nrn_recalc_ptr(pvar_[i]); - } -} - void ShapeSection::set_range_variable(Symbol* sym) { clear_variable(); if (!good()) { return; } - int i, n = section()->nnode - 1; - pvar_ = new double*[n]; - old_ = new const Color*[n]; - bool any = false; - if (nrn_exists(sym, section()->pnode[0])) { - for (i = 0; i < n; ++i) { - pvar_[i] = static_cast( - nrn_rangepointer(section(), sym, nrn_arc_position(section(), section()->pnode[i]))); - old_[i] = NULL; - if (pvar_[i]) { - any = true; - } - } - } else { - for (i = 0; i < n; ++i) { - pvar_[i] = 0; - old_[i] = NULL; + auto* const sec = section(); + auto const n = sec->nnode - 1; + pvar_.clear(); + old_.clear(); + pvar_.resize(n); + old_.resize(n); + if (nrn_exists(sym, sec->pnode[0])) { + for (int i = 0; i < n; ++i) { + pvar_[i] = nrn_rangepointer(sec, sym, nrn_arc_position(sec, sec->pnode[i])); } } } void ShapeSection::clear_variable() { - if (pvar_) { - delete[] pvar_; - pvar_ = NULL; - } - if (old_) { - delete[] old_; - old_ = NULL; - } + pvar_.clear(); + old_.clear(); if (colorseg_) { for (int i = 0; i < colorseg_size_; ++i) { colorseg_[i]->unref(); @@ -1578,10 +1537,10 @@ xmin_, a.left(),ymin_,a.bottom(),xmax_,a.right()); void ShapeSection::fast_draw(Canvas* c, Coord x, Coord y, bool b) const { Section* sec = section(); IfIdraw(pict()); - if (pvar_ || (colorseg_ && colorseg_size_ == sec_->nnode - 1)) { + if (!pvar_.empty() || (colorseg_ && colorseg_size_ == sec_->nnode - 1)) { const Color* color; ColorValue* cv; - if (pvar_) { + if (!pvar_.empty()) { cv = ShapeScene::current_draw_scene()->color_value(); } if (sec->nnode == 2) { @@ -1595,7 +1554,7 @@ void ShapeSection::fast_draw(Canvas* c, Coord x, Coord y, bool b) const { } if (color != old_[0] || b) { b = true; - ((ShapeSection*) this)->old_[0] = color; + const_cast(this)->old_[0] = color; } } if (b) { @@ -1639,7 +1598,7 @@ void ShapeSection::fast_draw(Canvas* c, Coord x, Coord y, bool b) const { color = cv->no_value(); } if (color != old_[iseg] || b) { - ((ShapeSection*) this)->old_[iseg] = color; + const_cast(this)->old_[iseg] = color; b = true; } } @@ -1683,7 +1642,7 @@ void ShapeSection::fast_draw(Canvas* c, Coord x, Coord y, bool b) const { color = cv->no_value(); } if (color != old_[iseg] || b) { - ((ShapeSection*) this)->old_[iseg] = color; + const_cast(this)->old_[iseg] = color; b = true; } } diff --git a/src/nrniv/shape.h b/src/nrniv/shape.h index fc1658e8fe..fdf0ccfc67 100644 --- a/src/nrniv/shape.h +++ b/src/nrniv/shape.h @@ -80,7 +80,7 @@ class ShapeScene: public Graph { // entire neuron SectionHandler* section_handler_; PolyGlyph* sg_; Rotate3Band* r3b_; - CopyString* var_name_; + std::string var_name_; ShapeChangeObserver* shape_changed_; }; @@ -134,7 +134,6 @@ class ShapeSection: public FastShape { // single section Coord scale() { return len_scale_; } - void update_ptrs(); private: 
void trapezoid(Canvas*, const Color*, int i) const; @@ -146,11 +145,11 @@ class ShapeSection: public FastShape { // single section void fastidious_draw(Canvas*, const Color*, int, float, float) const; #endif private: - double** pvar_; + std::vector> pvar_; Section* sec_; Coord len_scale_; const Color* color_; - const Color** old_; + std::vector old_; const Color** colorseg_; int colorseg_size_; // so know when to unref colorseg_ items. Coord xmin_, xmax_, ymin_, ymax_; diff --git a/src/nrniv/shapeplt.cpp b/src/nrniv/shapeplt.cpp index 55483e759d..994cdddb11 100644 --- a/src/nrniv/shapeplt.cpp +++ b/src/nrniv/shapeplt.cpp @@ -1,6 +1,7 @@ #include <../../nrnconf.h> #include "classreg.h" #include "gui-redirect.h" +#include "ocnotify.h" #if HAVE_IV @@ -40,8 +41,8 @@ extern Symlist* hoc_built_in_symlist; #endif // HAVE_IV -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); +extern int hoc_return_type_code; + void* (*nrnpy_get_pyobj)(Object* obj) = 0; void (*nrnpy_decref)(void* pyobj) = 0; @@ -212,11 +213,29 @@ static double sh_printfile(void* v) { static double sh_show(void* v) { TRY_GUI_REDIRECT_ACTUAL_DOUBLE("PlotShape.show", v); + hoc_return_type_code = 1; #if HAVE_IV IFGUI ShapeScene* s = (ShapeScene*) v; - s->shape_type(int(chkarg(1, 0., 2.))); + if (ifarg(1)) { + s->shape_type(int(chkarg(1, 0., 2.))); + } else { + return s->shape_type(); + } +} +else { + if (ifarg(1)) { + ((ShapePlotData*) v)->set_mode(int(chkarg(1, 0., 2.))); + } else { + return ((ShapePlotData*) v)->get_mode(); + } ENDGUI +#else + if (ifarg(1)) { + ((ShapePlotData*) v)->set_mode(int(chkarg(1, 0., 2.))); + } else { + return ((ShapePlotData*) v)->get_mode(); + } #endif return 1.; } @@ -250,7 +269,7 @@ static double sh_hinton(void* v) { #if HAVE_IV IFGUI ShapeScene* ss = (ShapeScene*) v; - double* pd = hoc_pgetarg(1); + neuron::container::data_handle pd = hoc_hgetarg(1); double xsize = chkarg(4, 1e-9, 1e9); double ysize = xsize; if (ifarg(5)) { @@ -530,15 +549,6 @@ void ShapePlot::observe(SectionList* sl) { } } -void ShapePlot::update_ptrs() { - PolyGlyph* pg = shape_section_list(); - GlyphIndex i, cnt = pg->count(); - for (i = 0; i < cnt; ++i) { - ShapeSection* ss = (ShapeSection*) pg->component(i); - ss->update_ptrs(); - } -} - void ShapePlot::erase_all() { Resource::unref(spi_->colorbar_); spi_->colorbar_ = NULL; @@ -637,7 +647,7 @@ extern void mswin_delete_object(void*); void ShapePlot::draw(Canvas* c, const Allocation& a) const { if (spi_->fast_) { -#if defined(WIN32) || MAC +#if defined(WIN32) // win32 clipping is much more strict than X11 clipping even though the // implementations seem to agree that clipping is the intersection of // all clip requests on the clip stack in canvas. 
Clipping is originally @@ -655,9 +665,6 @@ void ShapePlot::draw(Canvas* c, const Allocation& a) const { XYView* v = XYView::current_draw_view(); c->push_clipping(true); -#if MAC - c->clip_rect(v->left(), v->bottom(), v->right(), v->top()); -#endif #if defined(WIN32) // Consider the commit message: // ------- @@ -695,13 +702,10 @@ void ShapePlot::draw(Canvas* c, const Allocation& a) const { ((FastShape*) (gi->body()))->fast_draw(c, x, y, false); } } -#if defined(WIN32) || MAC - c->pop_clipping(); #if defined(WIN32) + c->pop_clipping(); mswin_delete_object(new_clip); -#endif v->damage_all(); - ; #endif spi_->fast_ = false; } else { @@ -764,7 +768,7 @@ void ShapePlotImpl::select_variable() { sc->ref(); while (sc->post_for(XYView::current_pick_view()->canvas()->window())) { Symbol* s; - s = hoc_table_lookup(sc->selected()->string(), hoc_built_in_symlist); + s = hoc_table_lookup(sc->selected().c_str(), hoc_built_in_symlist); if (s) { sp_->variable(s); break; @@ -1158,21 +1162,23 @@ FastGraphItem::FastGraphItem(FastShape* g, bool s, bool p) FastShape::FastShape() {} FastShape::~FastShape() {} -Hinton::Hinton(double* pd, Coord xsize, Coord ysize, ShapeScene* ss) { +Hinton::Hinton(neuron::container::data_handle pd, + Coord xsize, + Coord ysize, + ShapeScene* ss) { pd_ = pd; old_ = NULL; // not referenced xsize_ = xsize / 2; ysize_ = ysize / 2; ss_ = ss; - Oc oc; - oc.notify_when_freed(pd_, this); + neuron::container::notify_when_handle_dies(pd_, this); } Hinton::~Hinton() { Oc oc; oc.notify_pointer_disconnect(this); } void Hinton::update(Observable*) { - pd_ = NULL; + pd_ = {}; ss_->remove(ss_->glyph_index(this)); } void Hinton::request(Requisition& req) const { @@ -1220,6 +1226,7 @@ ShapePlotData::ShapePlotData(Symbol* sym, Object* sl) { ++sl_->refcount; } varobj(NULL); + show_mode = 1; } ShapePlotData::~ShapePlotData() { @@ -1240,6 +1247,14 @@ float ShapePlotData::high() { return hi; } +int ShapePlotData::get_mode() { + return show_mode; +} + +void ShapePlotData::set_mode(int mode) { + show_mode = mode; +} + void ShapePlotData::scale(float min, float max) { lo = min; hi = max; @@ -1252,7 +1267,7 @@ void ShapePlotData::variable(Symbol* sym) { const char* ShapePlotData::varname() const { if (sym_ == NULL) { - return "v"; + return ""; } return sym_->name; } diff --git a/src/nrniv/shapeplt.h b/src/nrniv/shapeplt.h index 40b78ea71b..b488b35cbb 100644 --- a/src/nrniv/shapeplt.h +++ b/src/nrniv/shapeplt.h @@ -37,12 +37,15 @@ class ShapePlotData: public ShapePlotInterface { virtual float high(); virtual Object* neuron_section_list(); virtual bool has_iv_view(); + int get_mode(); + void set_mode(int mode); private: Symbol* sym_; float lo, hi; Object* sl_; void* py_var_; + int show_mode; }; #if HAVE_IV @@ -72,7 +75,6 @@ class ShapePlot: public ShapeScene, public ShapePlotInterface { virtual float high(); virtual bool has_iv_view(); virtual Object* neuron_section_list(); - void update_ptrs(); void has_iv_view(bool); private: @@ -107,7 +109,7 @@ class ColorValue: public Resource, public Observable { class Hinton: public Observer, public FastShape { public: - Hinton(double*, Coord xsize, Coord ysize, ShapeScene*); + Hinton(neuron::container::data_handle, Coord xsize, Coord ysize, ShapeScene*); virtual ~Hinton(); virtual void request(Requisition&) const; virtual void allocate(Canvas*, const Allocation&, Extension&); @@ -116,7 +118,7 @@ class Hinton: public Observer, public FastShape { virtual void update(Observable*); private: - double* pd_; + neuron::container::data_handle pd_{}; const Color* old_; Coord 
xsize_, ysize_; ShapeScene* ss_; diff --git a/src/nrniv/spaceplt.cpp b/src/nrniv/spaceplt.cpp index fde510c5c8..11fa8ab7c4 100644 --- a/src/nrniv/spaceplt.cpp +++ b/src/nrniv/spaceplt.cpp @@ -4,7 +4,6 @@ #include -#include #include #if HAVE_IV #include "graph.h" @@ -51,58 +50,29 @@ class RangeExpr { }; #if !HAVE_IV -class NoIVGraphVector { - public: - NoIVGraphVector(const char*); - virtual ~NoIVGraphVector(); +struct NoIVGraphVector { + NoIVGraphVector(const char* /* name */) {} + virtual ~NoIVGraphVector() {} void begin(); - void add(float, double*); + void add(float, neuron::container::data_handle); int count(); - CopyString name_; - int count_, size_; - double** py_; - float* x_; + std::vector x_{}; + std::vector> py_{}; }; -NoIVGraphVector::NoIVGraphVector(const char* name) { - name_ = name; - size_ = 0; - count_ = 0; - py_ = NULL; - x_ = NULL; -} -NoIVGraphVector::~NoIVGraphVector() { - if (py_) { - delete[] py_; - delete[] x_; - } -} int NoIVGraphVector::count() { - return count_; + auto const s = x_.size(); + assert(s == py_.size()); + return s; } void NoIVGraphVector::begin() { - count_ = 0; - if (!size_) { - size_ = 20; - py_ = new double*[size_]; - x_ = new float[size_]; - } -} -void NoIVGraphVector::add(float x, double* y) { - if (count_ == size_) { - size_ *= 2; - double** py = new double*[size_]; - float* px = new float[size_]; - for (int i = 0; i < count_; i++) { - py[i] = py_[i]; - px[i] = x_[i]; - } - delete[] py_; - delete[] x_; - py_ = py; - x_ = px; - } - py_[count_] = y; - x_[count_++] = x; + x_.clear(); + py_.clear(); + x_.reserve(20); + py_.reserve(20); +} +void NoIVGraphVector::add(float x, neuron::container::data_handle y) { + x_.push_back(x); + py_.push_back(std::move(y)); } #endif @@ -141,7 +111,7 @@ class RangeVarPlot: public NoIVGraphVector { Section *begin_section_, *end_section_; float x_begin_, x_end_, origin_; SecPosList* sec_list_; - CopyString expr_; + std::string expr_; int shape_changed_; int struc_changed_; double d2root_; // distance to root of closest point to root @@ -470,7 +440,7 @@ void RangeVarPlot::request(Requisition& req) const { void RangeVarPlot::save(std::ostream& o) { char buf[256]; o << "objectvar rvp_" << std::endl; - Sprintf(buf, "rvp_ = new RangeVarPlot(\"%s\")", expr_.string()); + Sprintf(buf, "rvp_ = new RangeVarPlot(\"%s\")", expr_.c_str()); o << buf << std::endl; Sprintf(buf, "%s rvp_.begin(%g)", hoc_section_pathname(begin_section_), x_begin_); o << buf << std::endl; @@ -544,16 +514,16 @@ void RangeVarPlot::fill_pointers() { if (rexp_) { rexp_->fill(); } else { - sscanf(expr_.string(), "%[^[]", buf); + sscanf(expr_.c_str(), "%[^[]", buf); sym = hoc_lookup(buf); if (!sym) { return; } - Sprintf(buf, "%s(hoc_ac_)", expr_.string()); + Sprintf(buf, "%s(hoc_ac_)", expr_.c_str()); } int noexist = 0; // don't plot single points that don't exist bool does_exist; - double* pval = NULL; + neuron::container::data_handle pval{}; for (long i = 0; i < xcnt; ++i) { Section* sec = (*sec_list_)[i].sec; hoc_ac_ = (*sec_list_)[i].x; @@ -566,12 +536,13 @@ void RangeVarPlot::fill_pointers() { } if (does_exist) { if (rexp_) { - pval = rexp_->pval(int(i)); + // TODO avoid conversion + pval = neuron::container::data_handle{rexp_->pval(int(i))}; } else { - pval = hoc_val_pointer(buf); + pval = hoc_val_handle(buf); } if (noexist > 1) { - add((*sec_list_)[i - 1].len + origin_, 0); + add((*sec_list_)[i - 1].len + origin_, {}); add((*sec_list_)[i - 1].len + origin_, pval); } if (i == 1 && noexist == 1) { @@ -582,7 +553,7 @@ void RangeVarPlot::fill_pointers() { 
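The NoIVGraphVector rewrite above applies the same idea to the non-InterViews build: the hand-rolled doubling arrays (py_, x_, count_, size_) collapse into two vectors, begin() clears and reserves, add() push_backs, and count() is just the size. A small illustrative sketch, with plain double* instead of the real data_handle element type:

```cpp
// Sketch of the NoIVGraphVector simplification: std::vector replaces the
// manual grow-by-doubling storage. Element type is simplified to double*.
#include <cassert>
#include <vector>

struct GraphVectorLike {
    std::vector<float> x_;
    std::vector<double*> py_;

    void begin() {
        x_.clear();
        py_.clear();
        x_.reserve(20);
        py_.reserve(20);
    }
    void add(float x, double* y) {
        x_.push_back(x);
        py_.push_back(y);
    }
    int count() const {
        assert(x_.size() == py_.size());
        return static_cast<int>(x_.size());
    }
};

int main() {
    double v[3]{};
    GraphVectorLike g;
    g.begin();
    for (int i = 0; i < 3; ++i) {
        g.add(static_cast<float>(i), &v[i]);
    }
    return g.count() == 3 ? 0 : 1;
}
```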
} else { if (noexist == 1) { add((*sec_list_)[i - 1].len + origin_, pval); - add((*sec_list_)[i - 1].len + origin_, 0); + add((*sec_list_)[i - 1].len + origin_, {}); } if (i == xcnt - 1 && noexist == 0) { add((*sec_list_)[i].len + origin_, pval); diff --git a/src/nrniv/splitcell.cpp b/src/nrniv/splitcell.cpp index b21bbf7fc5..24ea32b034 100644 --- a/src/nrniv/splitcell.cpp +++ b/src/nrniv/splitcell.cpp @@ -18,7 +18,7 @@ setting up and transfer of matrix information. Note that gid information about the subtrees is no longer required by this implementation. */ -#if PARANEURON +#if NRNMPI void nrnmpi_split_clear(); extern void (*nrnmpi_splitcell_compute_)(); extern void nrnmpi_send_doubles(double*, int cnt, int dest, int tag); @@ -46,7 +46,7 @@ static double* transfer_p_[4]; // that_host must be adjacent to nrnmpi_myid void nrnmpi_splitcell_connect(int that_host) { -#if PARANEURON +#if NRNMPI Section* rootsec = chk_access(); if (std::abs(nrnmpi_myid - that_host) != 1) { hoc_execerror("cells may be split only on adjacent hosts", 0); @@ -76,7 +76,7 @@ void nrnmpi_splitcell_connect(int that_host) { #endif } -#if PARANEURON +#if NRNMPI void nrnmpi_split_clear() { if (nrnmpi_splitcell_compute_ == splitcell_compute) { diff --git a/src/nrniv/vrecord.cpp b/src/nrniv/vrecord.cpp index aa6ddef222..131595070c 100644 --- a/src/nrniv/vrecord.cpp +++ b/src/nrniv/vrecord.cpp @@ -1,7 +1,5 @@ #include <../../nrnconf.h> -#include -#include #if HAVE_IV #include "ivoc.h" #endif @@ -30,7 +28,6 @@ void nrn_vecsim_remove(void* v) { void nrn_vecsim_add(void* v, bool record) { IvocVect *yvec, *tvec, *dvec; extern short* nrn_is_artificial_; - double* pvar = NULL; char* s = NULL; double ddt; Object* ppobj = NULL; @@ -46,6 +43,7 @@ void nrn_vecsim_add(void* v, bool record) { hoc_execerror("Optional first arg is not a POINT_PROCESS", 0); } } + neuron::container::data_handle dh{}; if (record == false && hoc_is_str_arg(iarg + 1)) { // statement involving $1 // Vector.play("proced($1)", ...) s = gargstr(iarg + 1); @@ -63,7 +61,7 @@ void nrn_vecsim_add(void* v, bool record) { } else { // Vector.play(&SEClamp[0].amp1, ...) // Vector.record(&SEClamp[0].i, ...) - pvar = hoc_pgetarg(iarg + 1); + dh = hoc_hgetarg(iarg + 1); } tvec = NULL; dvec = NULL; @@ -95,13 +93,13 @@ void nrn_vecsim_add(void* v, bool record) { nrn_vecsim_remove(yvec); } if (tvec) { - new VecRecordDiscrete(pvar, yvec, tvec, ppobj); + new VecRecordDiscrete(std::move(dh), yvec, tvec, ppobj); } else if (ddt > 0.) { - new VecRecordDt(pvar, yvec, ddt, ppobj); - } else if (pvar == &t) { + new VecRecordDt(std::move(dh), yvec, ddt, ppobj); + } else if (static_cast(dh) == &t) { new TvecRecord(chk_access(), yvec, ppobj); } else { - new YvecRecord(pvar, yvec, ppobj); + new YvecRecord(std::move(dh), yvec, ppobj); } } else { if (con) { @@ -112,7 +110,7 @@ void nrn_vecsim_add(void* v, bool record) { if (s) { new VecPlayContinuous(s, yvec, tvec, dvec, ppobj); } else { - new VecPlayContinuous(pvar, yvec, tvec, dvec, ppobj); + new VecPlayContinuous(std::move(dh), yvec, tvec, dvec, ppobj); } } else { if (!tvec && ddt == -1.) 
{ @@ -121,20 +119,24 @@ void nrn_vecsim_add(void* v, bool record) { if (s) { new VecPlayStep(s, yvec, tvec, ddt, ppobj); } else { - new VecPlayStep(pvar, yvec, tvec, ddt, ppobj); + new VecPlayStep(std::move(dh), yvec, tvec, ddt, ppobj); } } } } -VecPlayStep::VecPlayStep(double* pd, IvocVect* y, IvocVect* t, double dt, Object* ppobj) - : PlayRecord(pd, ppobj) { +VecPlayStep::VecPlayStep(neuron::container::data_handle dh, + IvocVect* y, + IvocVect* t, + double dt, + Object* ppobj) + : PlayRecord(std::move(dh), ppobj) { // printf("VecPlayStep\n"); init(y, t, dt); } VecPlayStep::VecPlayStep(const char* s, IvocVect* y, IvocVect* t, double dt, Object* ppobj) - : PlayRecord(&NODEV(chk_access()->pnode[0]), ppobj) { + : PlayRecord(chk_access()->pnode[0]->v_handle(), ppobj) { // printf("VecPlayStep\n"); init(y, t, dt); si_ = new StmtInfo(s); @@ -204,7 +206,14 @@ void VecPlayStep::deliver(double tt, NetCvode* ns) { si_->play_one(y_->elem(current_index_++)); nrn_hoc_unlock(); } else { - *pd_ = y_->elem(current_index_++); + auto const val = y_->elem(current_index_++); + if (pd_) { + *pd_ = val; + } else { + std::ostringstream oss; + oss << "VecPlayStep::deliver: invalid " << pd_; + throw std::runtime_error(std::move(oss).str()); + } } if (current_index_ < y_->size()) { if (t_) { @@ -223,12 +232,12 @@ void VecPlayStep::pr() { Printf("%s.x[%d]\n", hoc_object_name(y_->obj_), current_index_); } -VecPlayContinuous::VecPlayContinuous(double* pd, +VecPlayContinuous::VecPlayContinuous(neuron::container::data_handle pd, IvocVect* y, IvocVect* t, IvocVect* discon, Object* ppobj) - : PlayRecord(pd, ppobj) { + : PlayRecord(std::move(pd), ppobj) { // printf("VecPlayContinuous\n"); init(y, t, discon); } @@ -238,7 +247,7 @@ VecPlayContinuous::VecPlayContinuous(const char* s, IvocVect* t, IvocVect* discon, Object* ppobj) - : PlayRecord(&NODEV(chk_access()->pnode[0]), ppobj) { + : PlayRecord(chk_access()->pnode[0]->v_handle(), ppobj) { // printf("VecPlayContinuous\n"); init(y, t, discon); si_ = new StmtInfo(s); diff --git a/src/nrnmpi/bbsmpipack.cpp b/src/nrnmpi/bbsmpipack.cpp index c939cb852d..0112a9530c 100644 --- a/src/nrnmpi/bbsmpipack.cpp +++ b/src/nrnmpi/bbsmpipack.cpp @@ -12,9 +12,7 @@ #include #if NRNMPI -#if HAVE_STRING_H #include -#endif #include #include #include @@ -22,18 +20,7 @@ #include #include -#if 0 -#define guard(f) nrn_assert(f == MPI_SUCCESS) -#else -#define guard(f) \ - { \ - int _i = f; \ - if (_i != MPI_SUCCESS) { \ - printf("%s %d\n", #f, _i); \ - assert(0); \ - } \ - } -#endif +#define nrn_mpi_assert(arg) nrn_assert(arg == MPI_SUCCESS) #define nrnmpidebugleak 0 #define debug 0 @@ -71,7 +58,7 @@ static void unpack(void* buf, int count, int my_datatype, bbsmpibuf* r, const ch r->size); #endif assert(r->upkpos >= 0 && r->size >= r->upkpos); - guard(MPI_Unpack(r->buf, r->size, &r->upkpos, type, 2, MPI_INT, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Unpack(r->buf, r->size, &r->upkpos, type, 2, MPI_INT, nrn_bbs_comm)); #if debug printf("%d unpack r=%p size=%d upkpos=%d type[0]=%d datatype=%d type[1]=%d count=%d\n", nrnmpi_myid_bbs, @@ -95,7 +82,8 @@ static void unpack(void* buf, int count, int my_datatype, bbsmpibuf* r, const ch } assert(type[0] == my_datatype); assert(type[1] == count); - guard(MPI_Unpack(r->buf, r->size, &r->upkpos, buf, count, mytypes[my_datatype], nrn_bbs_comm)); + nrn_mpi_assert( + MPI_Unpack(r->buf, r->size, &r->upkpos, buf, count, mytypes[my_datatype], nrn_bbs_comm)); } void nrnmpi_upkbegin(bbsmpibuf* r) { @@ -113,12 +101,12 @@ void nrnmpi_upkbegin(bbsmpibuf* r) { 
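VecPlayStep::deliver now checks the target handle before writing through it and raises a descriptive std::runtime_error instead of dereferencing a dead pointer. Below is a hedged, self-contained sketch of that defensive pattern; Handle is a stand-in type, not NEURON's data_handle, and the message text is illustrative:

```cpp
// Sketch of the validity check added to deliver(): write through the handle
// if it is still valid, otherwise throw with a descriptive message.
#include <sstream>
#include <stdexcept>

struct Handle {
    double* raw = nullptr;
    explicit operator bool() const { return raw != nullptr; }
    double& operator*() const { return *raw; }
};

inline void deliver_value(Handle& pd, double val) {
    if (pd) {
        *pd = val;
    } else {
        std::ostringstream oss;
        oss << "deliver: invalid target handle";
        throw std::runtime_error(oss.str());
    }
}

int main() {
    double x = 0.0;
    Handle good{&x};
    deliver_value(good, 42.0);  // writes through the handle
    try {
        Handle bad{};
        deliver_value(bad, 1.0);
    } catch (const std::runtime_error&) {
        // expected: invalid handle reported instead of a crash
    }
    return x == 42.0 ? 0 : 1;
}
```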
hoc_execerror("subworld process with nhost > 0 cannot use", "the bulletin board"); } r->upkpos = 0; - guard(MPI_Unpack(r->buf, r->size, &r->upkpos, &p, 1, MPI_INT, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Unpack(r->buf, r->size, &r->upkpos, &p, 1, MPI_INT, nrn_bbs_comm)); if (p > r->size) { printf("\n %d nrnmpi_upkbegin keypos=%d size=%d\n", nrnmpi_myid_bbs, p, r->size); } assert(p <= r->size); - guard(MPI_Unpack(r->buf, r->size, &p, &type, 1, MPI_INT, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Unpack(r->buf, r->size, &p, &type, 1, MPI_INT, nrn_bbs_comm)); #if debug printf("%d nrnmpi_upkbegin type=%d keypos=%d\n", nrnmpi_myid_bbs, type, p); #endif @@ -232,7 +220,7 @@ void nrnmpi_pkbegin(bbsmpibuf* r) { printf( "%d nrnmpi_pkbegin %p size=%d pkposition=%d\n", nrnmpi_myid_bbs, r, r->size, r->pkposition); #endif - guard(MPI_Pack(&type, 1, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Pack(&type, 1, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); } void nrnmpi_enddata(bbsmpibuf* r) { @@ -242,7 +230,7 @@ void nrnmpi_enddata(bbsmpibuf* r) { #if debug printf("%d nrnmpi_enddata %p size=%d pkposition=%d\n", nrnmpi_myid_bbs, r, r->size, p); #endif - guard(MPI_Pack_size(1, MPI_INT, nrn_bbs_comm, &isize)); + nrn_mpi_assert(MPI_Pack_size(1, MPI_INT, nrn_bbs_comm, &isize)); oldsize = r->size; resize(r, r->pkposition + isize); #if debug @@ -250,7 +238,7 @@ void nrnmpi_enddata(bbsmpibuf* r) { printf("%d %p need %d more. end up with total of %d\n", nrnmpi_myid_bbs, r, isize, r->size); } #endif - guard(MPI_Pack(&type, 1, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Pack(&type, 1, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); #if debug printf("%d nrnmpi_enddata buf=%p size=%d pkposition=%d\n", nrnmpi_myid_bbs, @@ -258,7 +246,7 @@ void nrnmpi_enddata(bbsmpibuf* r) { r->size, r->pkposition); #endif - guard(MPI_Pack(&p, 1, MPI_INT, r->buf, r->size, &type, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Pack(&p, 1, MPI_INT, r->buf, r->size, &type, nrn_bbs_comm)); #if debug printf("%d after nrnmpi_enddata, %d was packed at beginning and 0 was packed before %d\n", nrnmpi_myid_bbs, @@ -280,8 +268,8 @@ static void pack(void* inbuf, int incount, int my_datatype, bbsmpibuf* r, const r->pkposition, e); #endif - guard(MPI_Pack_size(incount, mytypes[my_datatype], nrn_bbs_comm, &dsize)); - guard(MPI_Pack_size(2, MPI_INT, nrn_bbs_comm, &isize)); + nrn_mpi_assert(MPI_Pack_size(incount, mytypes[my_datatype], nrn_bbs_comm, &dsize)); + nrn_mpi_assert(MPI_Pack_size(2, MPI_INT, nrn_bbs_comm, &isize)); oldsize = r->size; resize(r, r->pkposition + dsize + isize); #if debug @@ -295,8 +283,8 @@ static void pack(void* inbuf, int incount, int my_datatype, bbsmpibuf* r, const #endif type[0] = my_datatype; type[1] = incount; - guard(MPI_Pack(type, 2, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); - guard(MPI_Pack( + nrn_mpi_assert(MPI_Pack(type, 2, MPI_INT, r->buf, r->size, &r->pkposition, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Pack( inbuf, incount, mytypes[my_datatype], r->buf, r->size, &r->pkposition, nrn_bbs_comm)); #if debug printf("%d pack done pkposition=%d\n", nrnmpi_myid_bbs, r->pkposition); @@ -359,9 +347,9 @@ void nrnmpi_bbssend(int dest, int tag, bbsmpibuf* r) { if (r) { assert(r->buf && r->keypos <= r->size); - guard(MPI_Send(r->buf, r->size, MPI_PACKED, dest, tag, nrn_bbs_comm)); + nrn_mpi_assert(MPI_Send(r->buf, r->size, MPI_PACKED, dest, tag, nrn_bbs_comm)); } else { - guard(MPI_Send(NULL, 0, MPI_PACKED, dest, tag, nrn_bbs_comm)); + 
nrn_mpi_assert(MPI_Send(nullptr, 0, MPI_PACKED, dest, tag, nrn_bbs_comm)); } errno = 0; #if debug @@ -378,8 +366,8 @@ int nrnmpi_bbsrecv(int source, bbsmpibuf* r) { #if debug printf("%d nrnmpi_bbsrecv %p\n", nrnmpi_myid_bbs, r); #endif - guard(MPI_Probe(source, MPI_ANY_TAG, nrn_bbs_comm, &status)); - guard(MPI_Get_count(&status, MPI_PACKED, &size)); + nrn_mpi_assert(MPI_Probe(source, MPI_ANY_TAG, nrn_bbs_comm, &status)); + nrn_mpi_assert(MPI_Get_count(&status, MPI_PACKED, &size)); #if debug printf("%d nrnmpi_bbsrecv probe size=%d source=%d tag=%d\n", nrnmpi_myid_bbs, @@ -388,7 +376,8 @@ int nrnmpi_bbsrecv(int source, bbsmpibuf* r) { status.MPI_TAG); #endif resize(r, size); - guard(MPI_Recv(r->buf, r->size, MPI_PACKED, source, MPI_ANY_TAG, nrn_bbs_comm, &status)); + nrn_mpi_assert( + MPI_Recv(r->buf, r->size, MPI_PACKED, source, MPI_ANY_TAG, nrn_bbs_comm, &status)); errno = 0; /* Some MPI implementations limit tags to be less than full MPI_INT domain In the past we allowed TODO mesages to have tags > 20 (FIRSTID of src/parallel/bbssrv.h) @@ -427,14 +416,14 @@ int nrnmpi_bbssendrecv(int dest, int tag, bbsmpibuf* s, bbsmpibuf* r) { int nrnmpi_iprobe(int* size, int* tag, int* source) { int flag = 0; MPI_Status status; - guard(MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, nrn_bbs_comm, &flag, &status)); + nrn_mpi_assert(MPI_Iprobe(MPI_ANY_SOURCE, MPI_ANY_TAG, nrn_bbs_comm, &flag, &status)); if (flag) { if (source) *source = status.MPI_SOURCE; if (tag) *tag = status.MPI_TAG; if (size) - guard(MPI_Get_count(&status, MPI_PACKED, size)); + nrn_mpi_assert(MPI_Get_count(&status, MPI_PACKED, size)); } return flag; } @@ -442,13 +431,13 @@ int nrnmpi_iprobe(int* size, int* tag, int* source) { void nrnmpi_probe(int* size, int* tag, int* source) { int flag = 0; MPI_Status status; - guard(MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, nrn_bbs_comm, &status)); + nrn_mpi_assert(MPI_Probe(MPI_ANY_SOURCE, MPI_ANY_TAG, nrn_bbs_comm, &status)); if (source) *source = status.MPI_SOURCE; if (tag) *tag = status.MPI_TAG; if (size) - guard(MPI_Get_count(&status, MPI_PACKED, size)); + nrn_mpi_assert(MPI_Get_count(&status, MPI_PACKED, size)); } bbsmpibuf* nrnmpi_newbuf(int size) { diff --git a/src/nrnmpi/memory_usage.cpp b/src/nrnmpi/memory_usage.cpp new file mode 100644 index 0000000000..0583c693b7 --- /dev/null +++ b/src/nrnmpi/memory_usage.cpp @@ -0,0 +1,50 @@ +/* do not want the redef in the dynamic load case */ +#include + +#if NRNMPI_DYNAMICLOAD +#include +#endif + +#include + +#if NRNMPI +#include +#include +#include "nrnmpi_impl.h" + +#include + +#include "neuron/container/memory_usage.hpp" + +static void sum_reduce_memory_usage(void* invec, void* inoutvec, int* len_, MPI_Datatype*) { + int len = *len_; + + auto a = static_cast(invec); + auto ab = static_cast(inoutvec); + + for (int i = 0; i < len; ++i) { + ab[i] += a[i]; + } +} + +void nrnmpi_memory_stats(neuron::container::MemoryStats& stats, + neuron::container::MemoryUsage const& local_memory_usage) { + MPI_Op op; + MPI_Op_create(sum_reduce_memory_usage, /* commute = */ 1, &op); + + MPI_Datatype memory_usage_mpitype; + MPI_Type_contiguous(sizeof(neuron::container::MemoryUsage), MPI_BYTE, &memory_usage_mpitype); + MPI_Type_commit(&memory_usage_mpitype); + + MPI_Allreduce(&local_memory_usage, &stats.total, 1, memory_usage_mpitype, op, nrnmpi_comm); + + MPI_Op_free(&op); + MPI_Type_free(&memory_usage_mpitype); +} + +void nrnmpi_print_memory_stats(neuron::container::MemoryStats const& memory_stats) { + if (nrnmpi_myid_world == 0) { + std::cout << 
format_memory_usage(memory_stats.total) << "\n"; + } +} +#endif diff --git a/src/nrnmpi/mkdynam.sh b/src/nrnmpi/mkdynam.sh index 4d196c95fb..dd063dc6ff 100644 --- a/src/nrnmpi/mkdynam.sh +++ b/src/nrnmpi/mkdynam.sh @@ -1,5 +1,7 @@ #!/usr/bin/env bash +set -e + names=`sed -n ' /extern /s/extern [a-z*]* \(nrnmpi_[a-zA-Z0-9_]*\)(.*);/\1/p ' nrnmpidec.h` @@ -10,9 +12,12 @@ sed -n ' /extern [^v]/s/extern \([a-z*]*\) \(nrnmpi_[a-zA-Z0-9_]*\)\(.*\);/\1 \2\3 {@ return (*p_\2)\3;@}/p ' nrnmpidec.h | tr '@' '\n' | sed ' /p_nrnmpi/ { -s/, [a-zA-Z0-9_*]* /, /g -s/)([a-zA-Z_0-9*]* /)(/ +s/, [a-zA-Z0-9_:*&]* /, /g +s/)([a-zA-Z0-9_:*&]* /)(/ +s/const& //g s/char\* //g +s/char\*\* //g +s/std::string& //g } '> nrnmpi_dynam_wrappers.inc diff --git a/src/nrnmpi/mpispike.cpp b/src/nrnmpi/mpispike.cpp index 0dd9a58b41..c84e2d0ae1 100644 --- a/src/nrnmpi/mpispike.cpp +++ b/src/nrnmpi/mpispike.cpp @@ -20,6 +20,11 @@ #include "mpispike.h" #include +#include +#include + +#define nrn_mpi_assert(arg) nrn_assert(arg == MPI_SUCCESS) + extern void nrnbbs_context_wait(); static int np; @@ -63,7 +68,7 @@ void nrnmpi_spike_initialize() { static MPI_Datatype spikebuf_type; -static void make_spikebuf_type() { +static void make_spikebuf_type(int* nout_) { NRNMPI_Spikebuf s; int block_lengths[3]; MPI_Aint displacements[3]; @@ -92,7 +97,12 @@ static void make_spikebuf_type() { } #endif -int nrnmpi_spike_exchange() { +int nrnmpi_spike_exchange(int* ovfl, + int* nout_, + int* nin_, + NRNMPI_Spike* spikeout_, + NRNMPI_Spike** spikein_, + int* icapacity_) { int i, n, novfl, n1; if (!displs) { np = nrnmpi_numprocs; @@ -100,26 +110,26 @@ int nrnmpi_spike_exchange() { hoc_malchk(); displs[0] = 0; #if nrn_spikebuf_size > 0 - make_spikebuf_type(); + make_spikebuf_type(nout_); #endif } nrnbbs_context_wait(); #if nrn_spikebuf_size == 0 - MPI_Allgather(&nout_, 1, MPI_INT, nin_, 1, MPI_INT, nrnmpi_comm); + MPI_Allgather(nout_, 1, MPI_INT, nin_, 1, MPI_INT, nrnmpi_comm); n = nin_[0]; for (i = 1; i < np; ++i) { displs[i] = n; n += nin_[i]; } if (n) { - if (icapacity_ < n) { - icapacity_ = n + 10; - free(spikein_); - spikein_ = (NRNMPI_Spike*) hoc_Emalloc(icapacity_ * sizeof(NRNMPI_Spike)); + if (*icapacity_ < n) { + *icapacity_ = n + 10; + free(*spikein_); + *spikein_ = (NRNMPI_Spike*) hoc_Emalloc(*icapacity_ * sizeof(NRNMPI_Spike)); hoc_malchk(); } MPI_Allgatherv( - spikeout_, nout_, spike_type, spikein_, nin_, displs, spike_type, nrnmpi_comm); + spikeout_, *nout_, spike_type, *spikein_, nin_, displs, spike_type, nrnmpi_comm); } #else MPI_Allgather(spbufout_, 1, spikebuf_type, spbufin_, 1, spikebuf_type, nrnmpi_comm); @@ -143,16 +153,16 @@ int nrnmpi_spike_exchange() { } } if (novfl) { - if (icapacity_ < novfl) { - icapacity_ = novfl + 10; - free(spikein_); - spikein_ = (NRNMPI_Spike*) hoc_Emalloc(icapacity_ * sizeof(NRNMPI_Spike)); + if (*icapacity_ < novfl) { + *icapacity_ = novfl + 10; + free(*spikein_); + *spikein_ = (NRNMPI_Spike*) hoc_Emalloc(*icapacity_ * sizeof(NRNMPI_Spike)); hoc_malchk(); } - n1 = (nout_ > nrn_spikebuf_size) ? nout_ - nrn_spikebuf_size : 0; - MPI_Allgatherv(spikeout_, n1, spike_type, spikein_, nin_, displs, spike_type, nrnmpi_comm); + n1 = (*nout_ > nrn_spikebuf_size) ? *nout_ - nrn_spikebuf_size : 0; + MPI_Allgatherv(spikeout_, n1, spike_type, *spikein_, nin_, displs, spike_type, nrnmpi_comm); } - ovfl_ = novfl; + *ovfl = novfl; #endif return n; } @@ -175,7 +185,15 @@ a sequence of spiketime, localgid pairs. There are nspike of them. 
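The new memory_usage.cpp reduces a per-rank usage struct across ranks by pairing a user-defined MPI_Op with an MPI_Type_contiguous datatype built from raw bytes. The sketch below shows that combination with a placeholder struct rather than neuron::container::MemoryUsage; field names and sizes are assumptions for illustration only:

```cpp
// Sketch: sum a POD struct over all ranks with MPI_Op_create and a
// contiguous MPI_BYTE datatype, as in nrnmpi_memory_stats above.
#include <mpi.h>
#include <cstddef>
#include <cstdio>

struct Usage {
    std::size_t bytes_allocated;
    std::size_t bytes_used;
};

static void sum_usage(void* invec, void* inoutvec, int* len, MPI_Datatype*) {
    auto* in = static_cast<Usage*>(invec);
    auto* inout = static_cast<Usage*>(inoutvec);
    for (int i = 0; i < *len; ++i) {
        inout[i].bytes_allocated += in[i].bytes_allocated;
        inout[i].bytes_used += in[i].bytes_used;
    }
}

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);

    MPI_Op op;
    MPI_Op_create(sum_usage, /* commute = */ 1, &op);
    MPI_Datatype usage_type;
    MPI_Type_contiguous(sizeof(Usage), MPI_BYTE, &usage_type);
    MPI_Type_commit(&usage_type);

    Usage local{std::size_t(rank + 1) * 100, std::size_t(rank + 1) * 50};
    Usage total{};
    MPI_Allreduce(&local, &total, 1, usage_type, op, MPI_COMM_WORLD);
    if (rank == 0) {
        std::printf("allocated=%zu used=%zu\n", total.bytes_allocated, total.bytes_used);
    }
    MPI_Op_free(&op);
    MPI_Type_free(&usage_type);
    MPI_Finalize();
}
```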
The allgather sends the first part of the buf and the allgatherv buffer sends any overflow. */ -int nrnmpi_spike_exchange_compressed() { +int nrnmpi_spike_exchange_compressed(int localgid_size, + int ag_send_size, + int ag_send_nspike, + int* ovfl_capacity, + int* ovfl, + unsigned char* spfixout, + unsigned char* spfixin, + unsigned char** spfixin_ovfl, + int* nin_) { int i, novfl, n, ntot, idx, bs, bstot; /* n is #spikes, bs is #byte overflow */ if (!displs) { np = nrnmpi_numprocs; @@ -189,52 +207,51 @@ int nrnmpi_spike_exchange_compressed() { } nrnbbs_context_wait(); - MPI_Allgather( - spfixout_, ag_send_size_, MPI_BYTE, spfixin_, ag_send_size_, MPI_BYTE, nrnmpi_comm); + MPI_Allgather(spfixout, ag_send_size, MPI_BYTE, spfixin, ag_send_size, MPI_BYTE, nrnmpi_comm); novfl = 0; ntot = 0; bstot = 0; for (i = 0; i < np; ++i) { displs[i] = bstot; - idx = i * ag_send_size_; - n = spfixin_[idx++] * 256; - n += spfixin_[idx++]; + idx = i * ag_send_size; + n = spfixin[idx++] * 256; + n += spfixin[idx++]; ntot += n; nin_[i] = n; - if (n > ag_send_nspike_) { - bs = 2 + n * (1 + localgid_size_) - ag_send_size_; + if (n > ag_send_nspike) { + bs = 2 + n * (1 + localgid_size) - ag_send_size; byteovfl[i] = bs; bstot += bs; - novfl += n - ag_send_nspike_; + novfl += n - ag_send_nspike; } else { byteovfl[i] = 0; } } if (novfl) { - if (ovfl_capacity_ < novfl) { - ovfl_capacity_ = novfl + 10; - free(spfixin_ovfl_); - spfixin_ovfl_ = (unsigned char*) hoc_Emalloc(ovfl_capacity_ * (1 + localgid_size_) * + if (*ovfl_capacity < novfl) { + *ovfl_capacity = novfl + 10; + free(*spfixin_ovfl); + *spfixin_ovfl = (unsigned char*) hoc_Emalloc(*ovfl_capacity * (1 + localgid_size) * sizeof(unsigned char)); hoc_malchk(); } bs = byteovfl[nrnmpi_myid]; /* - note that the spfixout_ buffer is one since the overflow - is contiguous to the first part. But the spfixin_ovfl_ is - completely separate from the spfixin_ since the latter + note that the spfixout buffer is one since the overflow + is contiguous to the first part. But the spfixin_ovfl is + completely separate from the spfixin since the latter dynamically changes its size during a run. 
*/ - MPI_Allgatherv(spfixout_ + ag_send_size_, + MPI_Allgatherv(spfixout + ag_send_size, bs, MPI_BYTE, - spfixin_ovfl_, + *spfixin_ovfl, byteovfl, displs, MPI_BYTE, nrnmpi_comm); } - ovfl_ = novfl; + *ovfl = novfl; return ntot; } @@ -271,13 +288,10 @@ static int MPI_Alltoallv_sparse(void* sendbuf, int* rdispls, MPI_Datatype recvtype, MPI_Comm comm) { - int status; int myrank; int nranks; - status = MPI_Comm_rank(comm, &myrank); - assert(status == MPI_SUCCESS); - status = MPI_Comm_size(comm, &nranks); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Comm_rank(comm, &myrank)); + nrn_mpi_assert(MPI_Comm_size(comm, &nranks)); int rankp; for (rankp = 0; nranks > (1 << rankp); rankp++) @@ -287,10 +301,8 @@ static int MPI_Alltoallv_sparse(void* sendbuf, ptrdiff_t send_elsize; ptrdiff_t recv_elsize; - status = MPI_Type_get_extent(sendtype, &lb, &send_elsize); - assert(status == MPI_SUCCESS); - status = MPI_Type_get_extent(recvtype, &lb, &recv_elsize); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Type_get_extent(sendtype, &lb, &send_elsize)); + nrn_mpi_assert(MPI_Type_get_extent(recvtype, &lb, &recv_elsize)); MPI_Request* requests = (MPI_Request*) hoc_Emalloc(nranks * 2 * sizeof(MPI_Request)); hoc_malchk(); @@ -306,18 +318,16 @@ static int MPI_Alltoallv_sparse(void* sendbuf, continue; if (recvcnts[target] == 0) continue; - status = MPI_Irecv((static_cast(recvbuf)) + recv_elsize * rdispls[target], - recvcnts[target], - recvtype, - target, - ALLTOALLV_SPARSE_TAG, - comm, - &requests[n_requests++]); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Irecv((static_cast(recvbuf)) + recv_elsize * rdispls[target], + recvcnts[target], + recvtype, + target, + ALLTOALLV_SPARSE_TAG, + comm, + &requests[n_requests++])); } - status = MPI_Barrier(comm); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Barrier(comm)); for (ngrp = 0; ngrp < (1 << rankp); ngrp++) { int target = myrank ^ ngrp; @@ -325,22 +335,19 @@ static int MPI_Alltoallv_sparse(void* sendbuf, continue; if (sendcnts[target] == 0) continue; - status = MPI_Isend((static_cast(sendbuf)) + send_elsize * sdispls[target], - sendcnts[target], - sendtype, - target, - ALLTOALLV_SPARSE_TAG, - comm, - &requests[n_requests++]); - assert(status == MPI_SUCCESS); - } - - status = MPI_Waitall(n_requests, requests, MPI_STATUSES_IGNORE); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Isend((static_cast(sendbuf)) + send_elsize * sdispls[target], + sendcnts[target], + sendtype, + target, + ALLTOALLV_SPARSE_TAG, + comm, + &requests[n_requests++])); + } + + nrn_mpi_assert(MPI_Waitall(n_requests, requests, MPI_STATUSES_IGNORE)); free(requests); - status = MPI_Barrier(comm); - assert(status == MPI_SUCCESS); + nrn_mpi_assert(MPI_Barrier(comm)); return MPI_SUCCESS; } @@ -478,22 +485,15 @@ void nrnmpi_char_broadcast(char* buf, int cnt, int root) { MPI_Bcast(buf, cnt, MPI_CHAR, root, nrnmpi_comm); } -void nrnmpi_char_broadcast_world(char** pstr, int root) { - int sz; - sz = *pstr ? 
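As the comments in nrnmpi_spike_exchange_compressed explain, the exchange stays two-phase: a fixed-size block per rank goes out with MPI_Allgather (its header carries the true count), and only counts exceeding the block capacity are gathered in a second MPI_Allgatherv. A simplified, runnable sketch of that scheme, using plain ints and an arbitrary capacity rather than packed spike records:

```cpp
// Sketch of the allgather + allgatherv-overflow exchange. CAP, the item
// encoding and the per-rank counts are all illustrative assumptions.
#include <mpi.h>
#include <cstdio>
#include <vector>

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank, np;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    MPI_Comm_size(MPI_COMM_WORLD, &np);

    const int CAP = 4;                   // items that fit in the fixed block
    int n = rank + 3;                    // this rank's true item count
    std::vector<int> fixed(1 + CAP, 0);  // [count, item0..item3]
    std::vector<int> overflow;
    fixed[0] = n;
    for (int i = 0; i < n; ++i) {
        int item = rank * 100 + i;
        if (i < CAP) {
            fixed[1 + i] = item;
        } else {
            overflow.push_back(item);    // contiguous tail, like spfixout
        }
    }

    // phase 1: every rank receives every fixed block
    std::vector<int> all_fixed(np * (1 + CAP));
    MPI_Allgather(fixed.data(), 1 + CAP, MPI_INT,
                  all_fixed.data(), 1 + CAP, MPI_INT, MPI_COMM_WORLD);

    // phase 2: overflow counts/displacements derived from the headers
    std::vector<int> counts(np), displs(np);
    int total_ovfl = 0;
    for (int r = 0; r < np; ++r) {
        int cnt = all_fixed[r * (1 + CAP)];
        counts[r] = cnt > CAP ? cnt - CAP : 0;
        displs[r] = total_ovfl;
        total_ovfl += counts[r];
    }
    std::vector<int> all_ovfl(total_ovfl);
    if (total_ovfl) {
        int dummy = 0;
        int* sendptr = overflow.empty() ? &dummy : overflow.data();
        MPI_Allgatherv(sendptr, static_cast<int>(overflow.size()), MPI_INT,
                       all_ovfl.data(), counts.data(), displs.data(), MPI_INT,
                       MPI_COMM_WORLD);
    }
    if (rank == 0) {
        std::printf("gathered %d overflow items from %d ranks\n", total_ovfl, np);
    }
    MPI_Finalize();
}
```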
(strlen(*pstr) + 1) : 0; +void nrnmpi_str_broadcast_world(std::string& str, int root) { + assert(str.size() <= std::numeric_limits::max()); + // broadcast the size from `root` to everyone + int sz = str.size(); MPI_Bcast(&sz, 1, MPI_INT, root, nrnmpi_world_comm); - if (nrnmpi_myid_world != root) { - if (*pstr) { - free(*pstr); - *pstr = NULL; - } - if (sz) { - *pstr = static_cast(hoc_Emalloc(sz * sizeof(char))); - hoc_malchk(); - } - } + // resize to the size we received from root + str.resize(sz); if (sz) { - MPI_Bcast(*pstr, sz, MPI_CHAR, root, nrnmpi_world_comm); + MPI_Bcast(str.data(), sz, MPI_CHAR, root, nrnmpi_world_comm); } } @@ -617,83 +617,58 @@ void nrnmpi_barrier() { MPI_Barrier(nrnmpi_comm); } -double nrnmpi_dbl_allreduce(double x, int type) { - double result; - MPI_Op t; - if (nrnmpi_numprocs < 2) { - return x; - } +static MPI_Op type2OP(int type) { if (type == 1) { - t = MPI_SUM; + return MPI_SUM; } else if (type == 2) { - t = MPI_MAX; + return MPI_MAX; } else { - t = MPI_MIN; + return MPI_MIN; } - MPI_Allreduce(&x, &result, 1, MPI_DOUBLE, t, nrnmpi_comm); +} + +double nrnmpi_dbl_allreduce(double x, int type) { + if (nrnmpi_numprocs < 2) { + return x; + } + double result; + MPI_Allreduce(&x, &result, 1, MPI_DOUBLE, type2OP(type), nrnmpi_comm); return result; } extern "C" void nrnmpi_dbl_allreduce_vec(double* src, double* dest, int cnt, int type) { - int i; - MPI_Op t; assert(src != dest); if (nrnmpi_numprocs < 2) { - for (i = 0; i < cnt; ++i) { + for (int i = 0; i < cnt; ++i) { dest[i] = src[i]; } return; } - if (type == 1) { - t = MPI_SUM; - } else if (type == 2) { - t = MPI_MAX; - } else { - t = MPI_MIN; - } - MPI_Allreduce(src, dest, cnt, MPI_DOUBLE, t, nrnmpi_comm); + MPI_Allreduce(src, dest, cnt, MPI_DOUBLE, type2OP(type), nrnmpi_comm); return; } void nrnmpi_longdbl_allreduce_vec(longdbl* src, longdbl* dest, int cnt, int type) { - int i; - MPI_Op t; assert(src != dest); if (nrnmpi_numprocs < 2) { - for (i = 0; i < cnt; ++i) { + for (int i = 0; i < cnt; ++i) { dest[i] = src[i]; } return; } - if (type == 1) { - t = MPI_SUM; - } else if (type == 2) { - t = MPI_MAX; - } else { - t = MPI_MIN; - } - MPI_Allreduce(src, dest, cnt, MPI_LONG_DOUBLE, t, nrnmpi_comm); + MPI_Allreduce(src, dest, cnt, MPI_LONG_DOUBLE, type2OP(type), nrnmpi_comm); return; } void nrnmpi_long_allreduce_vec(long* src, long* dest, int cnt, int type) { - int i; - MPI_Op t; assert(src != dest); if (nrnmpi_numprocs < 2) { - for (i = 0; i < cnt; ++i) { + for (int i = 0; i < cnt; ++i) { dest[i] = src[i]; } return; } - if (type == 1) { - t = MPI_SUM; - } else if (type == 2) { - t = MPI_MAX; - } else { - t = MPI_MIN; - } - MPI_Allreduce(src, dest, cnt, MPI_LONG, t, nrnmpi_comm); + MPI_Allreduce(src, dest, cnt, MPI_LONG, type2OP(type), nrnmpi_comm); return; } diff --git a/src/nrnmpi/mpispike.h b/src/nrnmpi/mpispike.h index b96263cf4a..d9d031810a 100644 --- a/src/nrnmpi/mpispike.h +++ b/src/nrnmpi/mpispike.h @@ -13,35 +13,6 @@ typedef struct { } NRNMPI_Spikebuf; #endif - -#define icapacity_ nrnmpi_i_capacity_ -#define spikeout_ nrnmpi_spikeout_ -#define spikein_ nrnmpi_spikein_ -#define nout_ nrnmpi_nout_ -#define nin_ nrnmpi_nin_ -extern int nout_; -extern int* nin_; -extern int icapacity_; -extern NRNMPI_Spike* spikeout_; -extern NRNMPI_Spike* spikein_; - -#define spfixout_ nrnmpi_spikeout_fixed_ -#define spfixin_ nrnmpi_spikein_fixed_ -#define spfixin_ovfl_ nrnmpi_spikein_fixed_ovfl_ -#define localgid_size_ nrnmpi_localgid_size_ -#define ag_send_size_ nrnmpi_ag_send_size_ -#define ag_send_nspike_ 
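nrnmpi_str_broadcast_world replaces the char** broadcast with a std::string: the root first broadcasts the length, every rank resizes its string, then the character data is broadcast in place. A minimal sketch of that two-step pattern (not the NEURON function itself, just the same scheme on an arbitrary communicator):

```cpp
// Sketch: broadcast a std::string by first agreeing on its length and then
// broadcasting the bytes into the resized buffer. Requires C++17 for the
// non-const str.data().
#include <mpi.h>
#include <iostream>
#include <string>

static void broadcast_string(std::string& str, int root, MPI_Comm comm) {
    int sz = static_cast<int>(str.size());
    MPI_Bcast(&sz, 1, MPI_INT, root, comm);  // everyone learns the length
    str.resize(sz);                          // no-op on root, alloc elsewhere
    if (sz) {
        MPI_Bcast(str.data(), sz, MPI_CHAR, root, comm);
    }
}

int main(int argc, char** argv) {
    MPI_Init(&argc, &argv);
    int rank;
    MPI_Comm_rank(MPI_COMM_WORLD, &rank);
    std::string msg = (rank == 0) ? "hello from root" : "";
    broadcast_string(msg, 0, MPI_COMM_WORLD);
    std::cout << "rank " << rank << ": " << msg << "\n";
    MPI_Finalize();
}
```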
nrnmpi_send_nspike_ -#define ovfl_capacity_ nrnmpi_ovfl_capacity_ -#define ovfl_ nrnmpi_ovfl_ -extern int localgid_size_; /* bytes */ -extern int ag_send_size_; /* bytes */ -extern int ag_send_nspike_; /* spikes */ -extern int ovfl_capacity_; /* spikes */ -extern int ovfl_; /* spikes */ -extern unsigned char* spfixout_; -extern unsigned char* spfixin_; -extern unsigned char* spfixin_ovfl_; - #if nrn_spikebuf_size > 0 #define spbufout_ nrnmpi_spbufout_ #define spbufin_ nrnmpi_spbufin_ diff --git a/src/nrnmpi/nrnmpi.cpp b/src/nrnmpi/nrnmpi.cpp index 5cce25fd6e..386e96ac89 100644 --- a/src/nrnmpi/nrnmpi.cpp +++ b/src/nrnmpi/nrnmpi.cpp @@ -24,11 +24,7 @@ extern double nrn_timeus(); #if NRNMPI #include -#define asrt(arg) nrn_assert(arg == MPI_SUCCESS) -#define USE_HPM 0 -#if USE_HPM -#include -#endif +#define nrn_mpi_assert(arg) nrn_assert(arg == MPI_SUCCESS) #if NRN_MUSIC #include "nrnmusicapi.h" @@ -42,6 +38,10 @@ MPI_Comm nrn_bbs_comm; static MPI_Group grp_bbs; static MPI_Group grp_net; +static int nrnmpi_numprocs_subworld = 1; +static int nrnmpi_subworld_id = -1; +static int nrnmpi_subworld_change_cnt = 0; + extern void nrnmpi_spike_initialize(); #define nrnmpidebugleak 0 @@ -114,12 +114,12 @@ for (i=0; i < *pargc; ++i) { #if (NRN_ENABLE_THREADS) int required = MPI_THREAD_SERIALIZED; int provided; - asrt(MPI_Init_thread(pargc, pargv, required, &provided)); + nrn_mpi_assert(MPI_Init_thread(pargc, pargv, required, &provided)); if (required > provided) { nrn_cannot_use_threads_and_mpi = 1; } #else - asrt(MPI_Init(pargc, pargv)); + nrn_mpi_assert(MPI_Init(pargc, pargv)); #endif nrnmpi_under_nrncontrol_ = 1; #if NRN_MUSIC @@ -130,20 +130,20 @@ for (i=0; i < *pargc; ++i) { #if NRN_MUSIC if (nrnmusic) { - asrt(MPI_Comm_dup(nrnmusic_comm, &nrnmpi_world_comm)); + nrn_mpi_assert(MPI_Comm_dup(nrnmusic_comm, &nrnmpi_world_comm)); } else { #else { #endif - asrt(MPI_Comm_dup(MPI_COMM_WORLD, &nrnmpi_world_comm)); + nrn_mpi_assert(MPI_Comm_dup(MPI_COMM_WORLD, &nrnmpi_world_comm)); } } grp_bbs = MPI_GROUP_NULL; grp_net = MPI_GROUP_NULL; - asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm)); - asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm)); - asrt(MPI_Comm_rank(nrnmpi_world_comm, &nrnmpi_myid_world)); - asrt(MPI_Comm_size(nrnmpi_world_comm, &nrnmpi_numprocs_world)); + nrn_mpi_assert(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm)); + nrn_mpi_assert(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm)); + nrn_mpi_assert(MPI_Comm_rank(nrnmpi_world_comm, &nrnmpi_myid_world)); + nrn_mpi_assert(MPI_Comm_size(nrnmpi_world_comm, &nrnmpi_numprocs_world)); nrnmpi_numprocs = nrnmpi_numprocs_bbs = nrnmpi_numprocs_world; nrnmpi_myid = nrnmpi_myid_bbs = nrnmpi_myid_world; nrnmpi_spike_initialize(); @@ -235,44 +235,44 @@ void nrnmpi_subworld_size(int n) { return; } if (nrnmpi_comm != MPI_COMM_NULL) { - asrt(MPI_Comm_free(&nrnmpi_comm)); + nrn_mpi_assert(MPI_Comm_free(&nrnmpi_comm)); nrnmpi_comm = MPI_COMM_NULL; } if (nrn_bbs_comm != MPI_COMM_NULL) { - asrt(MPI_Comm_free(&nrn_bbs_comm)); + nrn_mpi_assert(MPI_Comm_free(&nrn_bbs_comm)); nrn_bbs_comm = MPI_COMM_NULL; } if (grp_bbs != MPI_GROUP_NULL) { - asrt(MPI_Group_free(&grp_bbs)); + nrn_mpi_assert(MPI_Group_free(&grp_bbs)); grp_bbs = MPI_GROUP_NULL; } if (grp_net != MPI_GROUP_NULL) { - asrt(MPI_Group_free(&grp_net)); + nrn_mpi_assert(MPI_Group_free(&grp_net)); grp_net = MPI_GROUP_NULL; } MPI_Group wg; - asrt(MPI_Comm_group(nrnmpi_world_comm, &wg)); + nrn_mpi_assert(MPI_Comm_group(nrnmpi_world_comm, &wg)); int r = nrnmpi_myid_world; /* special cases */ if (n == 1) { - 
asrt(MPI_Group_incl(wg, 1, &r, &grp_net)); - asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm)); - asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm)); - asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); - asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); - asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); - asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); + nrn_mpi_assert(MPI_Group_incl(wg, 1, &r, &grp_net)); + nrn_mpi_assert(MPI_Comm_dup(nrnmpi_world_comm, &nrn_bbs_comm)); + nrn_mpi_assert(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm)); + nrn_mpi_assert(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); + nrn_mpi_assert(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); + nrn_mpi_assert(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); + nrn_mpi_assert(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); nrnmpi_subworld_id = nrnmpi_myid_bbs; nrnmpi_numprocs_subworld = nrnmpi_numprocs_bbs; } else if (n == nrnmpi_numprocs_world) { - asrt(MPI_Group_incl(wg, 1, &r, &grp_bbs)); - asrt(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm)); - asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm)); - asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); - asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); + nrn_mpi_assert(MPI_Group_incl(wg, 1, &r, &grp_bbs)); + nrn_mpi_assert(MPI_Comm_dup(nrnmpi_world_comm, &nrnmpi_comm)); + nrn_mpi_assert(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm)); + nrn_mpi_assert(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); + nrn_mpi_assert(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); if (r == 0) { - asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); - asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); + nrn_mpi_assert(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); + nrn_mpi_assert(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); } else { nrnmpi_myid_bbs = -1; nrnmpi_numprocs_bbs = -1; @@ -296,21 +296,21 @@ void nrnmpi_subworld_size(int n) { range[1] = nw - 1; } range[2] = 1; /* stride */ - asrt(MPI_Group_range_incl(wg, 1, &range, &grp_net)); - asrt(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm)); - asrt(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); - asrt(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); + nrn_mpi_assert(MPI_Group_range_incl(wg, 1, &range, &grp_net)); + nrn_mpi_assert(MPI_Comm_create(nrnmpi_world_comm, grp_net, &nrnmpi_comm)); + nrn_mpi_assert(MPI_Comm_rank(nrnmpi_comm, &nrnmpi_myid)); + nrn_mpi_assert(MPI_Comm_size(nrnmpi_comm, &nrnmpi_numprocs)); /* nrn_bbs_com ranks stride is nrnmpi_numprocs */ /* only rank 0 of each subworld participates in nrn_bbs_comm */ range[0] = 0; /* first world rank in nrn_bbs_comm */ range[1] = (nb - 1) * n; /* last world rank in nrn_bbs_comm */ range[2] = n; /* stride */ - asrt(MPI_Group_range_incl(wg, 1, &range, &grp_bbs)); - asrt(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm)); + nrn_mpi_assert(MPI_Group_range_incl(wg, 1, &range, &grp_bbs)); + nrn_mpi_assert(MPI_Comm_create(nrnmpi_world_comm, grp_bbs, &nrn_bbs_comm)); if (r % n == 0) { /* only rank 0 participates in nrn_bbs_comm */ - asrt(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); - asrt(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); + nrn_mpi_assert(MPI_Comm_rank(nrn_bbs_comm, &nrnmpi_myid_bbs)); + nrn_mpi_assert(MPI_Comm_size(nrn_bbs_comm, &nrnmpi_numprocs_bbs)); } else { nrnmpi_myid_bbs = -1; nrnmpi_numprocs_bbs = -1; @@ -322,7 +322,7 @@ void nrnmpi_subworld_size(int n) { } } nrnmpi_subworld_change_cnt++; - asrt(MPI_Group_free(&wg)); + nrn_mpi_assert(MPI_Group_free(&wg)); } /* so 
src/nrnpython/inithoc.cpp does not have to include a c++ mpi.h */ @@ -330,4 +330,12 @@ int nrnmpi_wrap_mpi_init(int* flag) { return MPI_Initialized(flag); } +void nrnmpi_get_subworld_info(int* cnt, int* index, int* rank, int* numprocs, int* numprocs_world) { + *cnt = nrnmpi_subworld_change_cnt; + *index = nrnmpi_subworld_id; + *rank = nrnmpi_myid; + *numprocs = nrnmpi_numprocs_subworld; + *numprocs_world = nrnmpi_numprocs_world; +} + #endif diff --git a/src/nrnmpi/nrnmpi_def_cinc b/src/nrnmpi/nrnmpi_def_cinc index d470eb34fb..9a446670c0 100644 --- a/src/nrnmpi/nrnmpi_def_cinc +++ b/src/nrnmpi/nrnmpi_def_cinc @@ -5,23 +5,5 @@ int nrnmpi_numprocs_world = 1; int nrnmpi_myid_world = 0; int nrnmpi_numprocs_bbs = 1; int nrnmpi_myid_bbs = 0; -// increment from within void nrnmpi_subworld_size(int n) -int nrnmpi_subworld_change_cnt = 0; -int nrnmpi_subworld_id = -1; -int nrnmpi_numprocs_subworld = 1; -int nrnmpi_nout_; -int* nrnmpi_nin_; -int nrnmpi_i_capacity_; -NRNMPI_Spike* nrnmpi_spikeout_; -NRNMPI_Spike* nrnmpi_spikein_; - -int nrnmpi_localgid_size_; -int nrnmpi_ag_send_size_; -int nrnmpi_send_nspike_; -int nrnmpi_ovfl_capacity_; -int nrnmpi_ovfl_; -unsigned char* nrnmpi_spikeout_fixed_; -unsigned char* nrnmpi_spikein_fixed_; -unsigned char* nrnmpi_spikein_fixed_ovfl_; -int nrn_cannot_use_threads_and_mpi; +int nrn_cannot_use_threads_and_mpi = 0; diff --git a/src/nrnmpi/nrnmpi_dynam.cpp b/src/nrnmpi/nrnmpi_dynam.cpp index e929f619d8..8ae0e4222f 100644 --- a/src/nrnmpi/nrnmpi_dynam.cpp +++ b/src/nrnmpi/nrnmpi_dynam.cpp @@ -29,6 +29,7 @@ extern const char* path_prefix_to_libnrniv(); #endif #include +#include // for nrnmpi_str_broadcast_world #include "mpispike.h" #include "nrnmpi_def_cinc" /* nrnmpi global variables */ @@ -92,7 +93,7 @@ static void* load_nrnmpi(const char* name, std::string& mes) { return handle; } -std::string nrnmpi_load(int is_python) { +std::string nrnmpi_load() { std::string pmes; void* handle = nullptr; // If libmpi already in memory, find name and dlopen that. @@ -231,9 +232,16 @@ std::string nrnmpi_load(int is_python) { return name; }; auto const nrn_mpi_library = mpi_path("nrnmpi_"); - // TODO this will be wrong if CoreNEURON is installed externally corenrn_mpi_library = mpi_path("corenrnmpi_"); + // This env variable is only needed in usage like neurodamus where + // `solve_core()` is directly called by MOD file and it doesn't have + // an easy way to know which MPI library to load. + // TODO: remove when BlueBrain/neurodamus/issues/17 is fixed. +#if defined(HAVE_SETENV) + setenv("NRN_CORENRN_MPI_LIB", corenrn_mpi_library.c_str(), 0); +#endif + if (!load_nrnmpi(nrn_mpi_library.c_str(), pmes)) { return pmes; } @@ -247,8 +255,8 @@ std::string nrnmpi_load(int is_python) { // nrnmpi_load cannot safely be called from nrnmpi.cpp because of pre/post-C++11 // ABI compatibility issues with std::string. See // https://github.com/neuronsimulator/nrn/issues/1963 for more information. 
-void nrnmpi_load_or_exit(bool is_python) { - auto const err = nrnmpi_load(is_python); +void nrnmpi_load_or_exit() { + auto const err = nrnmpi_load(); if (!err.empty()) { std::cout << err << std::endl; std::exit(1); diff --git a/src/nrnmpi/nrnmpidec.h b/src/nrnmpi/nrnmpidec.h index 3141db5585..34dc84782d 100644 --- a/src/nrnmpi/nrnmpidec.h +++ b/src/nrnmpi/nrnmpidec.h @@ -1,15 +1,17 @@ /* -This file is processed by mkdynam.sh and so it is important that -the prototypes be of the form "type foo(type arg, ...)" +This file is processed by mkdynam.sh and so it is important that the prototypes +be of the form "type foo(type arg, ...)". Moreover, the * needs to be attached +to the type, e.g. `T*` is valid, but `T *` isn't. */ #ifndef nrnmpidec_h #define nrnmpidec_h #include #include -typedef long double longdbl; +using longdbl = long double; #if NRNMPI #include +#include /* from bbsmpipack.cpp */ typedef struct bbsmpibuf { @@ -21,6 +23,13 @@ typedef struct bbsmpibuf { int refcount; } bbsmpibuf; +struct NRNMPI_Spike; + +namespace neuron::container { +struct MemoryStats; +struct MemoryUsage; +} // namespace neuron::container + // olupton 2022-07-06: dynamic MPI needs to dlopen some of these (slightly // redefined) symbol names, so keep C linkage for simplicity extern "C" { @@ -60,12 +69,16 @@ extern double nrnmpi_wtime(); extern void nrnmpi_terminate(); extern void nrnmpi_abort(int errcode); extern void nrnmpi_subworld_size(int n); +extern void nrnmpi_get_subworld_info(int* cnt, int* index, int* rank, int* numprocs, int* numprocs_world); +/* from memory_usage.cpp */ +extern void nrnmpi_memory_stats(neuron::container::MemoryStats& stats, neuron::container::MemoryUsage const& usage); +extern void nrnmpi_print_memory_stats(neuron::container::MemoryStats const& stats); /* from mpispike.cpp */ extern void nrnmpi_spike_initialize(); -extern int nrnmpi_spike_exchange(); -extern int nrnmpi_spike_exchange_compressed(); +extern int nrnmpi_spike_exchange(int* ovfl, int* nout, int* nin, NRNMPI_Spike* spikeout, NRNMPI_Spike** spikein, int* icapacity_); +extern int nrnmpi_spike_exchange_compressed(int localgid_size, int ag_send_size, int ag_send_nspike, int* ovfl_capacity, int* ovfl, unsigned char* spfixout, unsigned char* spfixin, unsigned char** spfixin_ovfl, int* nin_); extern double nrnmpi_mindelay(double maxdel); extern int nrnmpi_int_allmax(int i); extern void nrnmpi_int_gather(int* s, int* r, int cnt, int root); @@ -93,7 +106,7 @@ extern void nrnmpi_char_alltoallv(char* s, int* scnt, int* sdispl, char* r, int* extern void nrnmpi_dbl_broadcast(double* buf, int cnt, int root); extern void nrnmpi_int_broadcast(int* buf, int cnt, int root); extern void nrnmpi_char_broadcast(char* buf, int cnt, int root); -extern void nrnmpi_char_broadcast_world(char** pstr, int root); +extern void nrnmpi_str_broadcast_world(std::string& str, int root); extern int nrnmpi_int_sum_reduce(int in); extern void nrnmpi_assert_opstep(int opstep, double t); extern double nrnmpi_dbl_allmin(double x); diff --git a/src/nrnoc/cabcode.cpp b/src/nrnoc/cabcode.cpp index d49d61f55e..8cc9ac7a07 100644 --- a/src/nrnoc/cabcode.cpp +++ b/src/nrnoc/cabcode.cpp @@ -1,11 +1,12 @@ #include <../../nrnconf.h> /* /local/src/master/nrn/src/nrnoc/cabcode.cpp,v 1.37 1999/07/08 14:24:59 hines Exp */ -#define HOC_L_LIST 1 +#include #include #include #include -#include + +#define HOC_L_LIST 1 #include "section.h" #include "nrn_ansi.h" #include "nrniv_mf.h" @@ -14,6 +15,36 @@ #include "hocparse.h" #include "membdef.h" +static char* escape_bracket(const char* 
s) { + static char* b; + const char* p1; + char* p2; + if (!b) { + b = new char[256]; + } + for (p1 = s, p2 = b; *p1; ++p1, ++p2) { + switch (*p1) { + case '<': + *p2 = '['; + break; + case '>': + *p2 = ']'; + break; + case '[': + case ']': + *p2 = '\\'; + *(++p2) = *p1; + break; + default: + *p2 = *p1; + break; + } + } + *p2 = '\0'; + return b; +} + + extern int hoc_execerror_messages; #define symlist hoc_symlist @@ -126,19 +157,7 @@ void nrn_popsec(void) { if (!sec) { return; } -#if 0 - if (sec->prop && sec->prop->dparam[0].sym) { - printf("popsec %s\n", sec->prop->dparam[0].sym->name); - }else{ - printf("popsec unnamed or with no properties\n"); - } -#endif - if (--sec->refcount <= 0) { -#if 0 - printf("sec freed after pop\n"); -#endif - nrn_section_free(sec); - } + section_unref(sec); } } @@ -161,7 +180,7 @@ void clear_sectionlist(void) /* merely change all SECTION to UNDEF */ printf("clear_sectionlist not fixed yet, doing nothing\n"); return; Symbol *s; - + free_point_process(); if (symlist) for (s=symlist->first; s; s = s->next) { if (s->type == SECTION) { @@ -268,9 +287,9 @@ static Section* new_section(Object* ob, Symbol* sym, int i) { sec = sec_alloc(); section_ref(sec); prop = prop_alloc(&(sec->prop), CABLESECTION, (Node*) 0); - prop->dparam[0] = sym; + prop->dparam[0] = {neuron::container::do_not_search, sym}; prop->dparam[5] = i; - prop->dparam[6] = ob; + prop->dparam[6] = {neuron::container::do_not_search, ob}; #if USE_PYTHON prop->dparam[PROP_PY_INDEX] = nullptr; #endif @@ -296,7 +315,7 @@ void new_sections(Object* ob, Symbol* sym, Item** pitm, int size) { } else { pitm[i] = lappendsec(section_list, sec); } - sec->prop->dparam[8] = pitm[i]; + sec->prop->dparam[8] = {neuron::container::do_not_search, pitm[i]}; } } @@ -411,22 +430,18 @@ void cab_alloc(Prop* p) { pd[4] = DEF_rallbranch; pd[7] = DEF_Ra; p->dparam = pd; - p->param_size = CAB_SIZE; /* this one is special since it refers to dparam */ + p->dparam_size = CAB_SIZE; /* this one is special since it refers to dparam */ } void morph_alloc(Prop* p) { - double* pd; - pd = nrn_prop_data_alloc(MORPHOLOGY, 1, p); - pd[0] = DEF_diam; /* microns */ + assert(p->param_size() == 1); + p->param(0) = DEF_diam; /* microns */ diam_changed = 1; - p->param = pd; - p->param_size = 1; } double nrn_diameter(Node* nd) { - Prop* p; - p = nrn_mechanism(MORPHOLOGY, nd); - return p->param[0]; + Prop* p = nrn_mechanism(MORPHOLOGY, nd); + return p->param(0); } void nrn_chk_section(Symbol* s) { @@ -604,6 +619,7 @@ void nrn_disconnect(Section* sec) { } section_unref(oldpsec); tree_changed = 1; + neuron::model().node_data().mark_as_unsorted(); } static void connectsec_impl(Section* parent, Section* sec) { @@ -649,7 +665,7 @@ static void connectsec_impl(Section* parent, Section* sec) { if (oldpsec) { section_unref(oldpsec); } else if (oldpnode) { - nrn_node_destruct1(oldpnode); + delete oldpnode; } tree_changed = 1; diam_changed = 1; @@ -924,11 +940,11 @@ void mech_uninsert1(Section* sec, Symbol* s) { } } -void nrn_rangeconst(Section* sec, Symbol* s, double* pd, int op) { +void nrn_rangeconst(Section* sec, Symbol* s, neuron::container::data_handle pd, int op) { short n, i; Node* nd; int indx; - double* dpr; + neuron::container::data_handle dpr{}; double d = *pd; n = sec->nnode - 1; if (s->u.rng.type == VINDEX) { @@ -936,17 +952,17 @@ void nrn_rangeconst(Section* sec, Symbol* s, double* pd, int op) { if (op) { *pd = hoc_opasgn(op, NODEV(nd), d); } - NODEV(nd) = *pd; + nd->v() = *pd; nd = node_ptr(sec, 1., (double*) 0); if (op) { *pd = hoc_opasgn(op, 
NODEV(nd), d); } - NODEV(nd) = *pd; + nd->v() = *pd; for (i = 0; i < n; i++) { if (op) { *pd = hoc_opasgn(op, NODEV(sec->pnode[i]), d); } - NODEV(sec->pnode[i]) = *pd; + sec->pnode[i]->v() = *pd; } } else { if (s->u.rng.type == IMEMFAST) { @@ -979,7 +995,8 @@ void nrn_rangeconst(Section* sec, Symbol* s, double* pd, int op) { if (s->u.rng.index == 0) { diam_changed = 1; } - dpr = nrn_vext_pd(s, indx, node_ptr(sec, 0., (double*) 0)); + dpr = neuron::container::data_handle{ + nrn_vext_pd(s, indx, node_ptr(sec, 0., nullptr))}; if (dpr) { if (op) { *dpr = hoc_opasgn(op, *dpr, d); @@ -987,7 +1004,8 @@ void nrn_rangeconst(Section* sec, Symbol* s, double* pd, int op) { *dpr = d; } } - dpr = nrn_vext_pd(s, indx, node_ptr(sec, 1., (double*) 0)); + dpr = neuron::container::data_handle{ + nrn_vext_pd(s, indx, node_ptr(sec, 1., nullptr))}; if (dpr) { if (op) { *dpr = hoc_opasgn(op, *dpr, d); @@ -1000,17 +1018,16 @@ void nrn_rangeconst(Section* sec, Symbol* s, double* pd, int op) { } } -void range_const(void) /* rangevariable symbol at pc, value on stack */ -{ - Section* sec; - double d; - int op; - Symbol* s = (pc++)->sym; - - op = (pc++)->i; - d = xpop(); - sec = nrn_sec_pop(); - nrn_rangeconst(sec, s, &d, op); +// rangevariable symbol at pc, value on stack +void range_const() { + Symbol* s = (hoc_pc++)->sym; + int const op{(hoc_pc++)->i}; + double d{hoc_xpop()}; + auto* const sec = nrn_sec_pop(); + nrn_rangeconst(sec, + s, + neuron::container::data_handle{neuron::container::do_not_search, &d}, + op); hoc_pushx(d); } @@ -1071,31 +1088,17 @@ static Datum* pdprop(Symbol* s, int indx, Section* sec, short inode) { return m->dparam + s->u.rng.index + indx; } -void connectpointer(void) { /* pointer symbol at pc, target variable on stack, maybe - range variable location on stack */ - Datum* dat; - double* pd; - double d; - Symbol* s = (pc++)->sym; - pd = hoc_pxpop(); +// pointer symbol at pc, target variable on stack, maybe range variable location on stack +void connectpointer() { + auto* const s = (hoc_pc++)->sym; + auto const pd = hoc_pop_handle(); if (s->subtype != NRNPOINTER) { hoc_execerror(s->name, "not a model variable POINTER"); } -#if 0 -/* can't be since parser sees object syntax and generates different code. */ - if (s->type == NRNPNTVAR) { - dat = ppnrnpnt(s); - }else -#endif - { - short i; - Section* sec; - - d = hoc_xpop(); - sec = nrn_sec_pop(); - i = node_index(sec, d); - dat = pdprop(s, range_vec_indx(s), sec, i); - } + auto const d = hoc_xpop(); + auto* const sec = nrn_sec_pop(); + auto const i = node_index(sec, d); + auto* const dat = pdprop(s, range_vec_indx(s), sec, i); *dat = pd; } @@ -1145,7 +1148,7 @@ void range_interpolate(void) /*symbol at pc, 4 values on stack*/ short i, i1, i2, di; Section* sec; double y1, y2, x1, x2, x, dx, thet, y; - double* dpr; + neuron::container::data_handle dpr{}; Symbol* s = (pc++)->sym; int indx, op; Node* nd; @@ -1168,17 +1171,17 @@ void range_interpolate(void) /*symbol at pc, 4 values on stack*/ if (x1 == 0. || x1 == 1.) { nd = node_ptr(sec, x1, (double*) 0); if (op) { - NODEV(nd) = hoc_opasgn(op, NODEV(nd), y1); + nd->v() = hoc_opasgn(op, NODEV(nd), y1); } else { - NODEV(nd) = y1; + nd->v() = y1; } } if (x2 == 1. || x2 == 0.) 
{ nd = node_ptr(sec, x2, (double*) 0); if (op) { - NODEV(nd) = hoc_opasgn(op, NODEV(nd), y2); + nd->v() = hoc_opasgn(op, NODEV(nd), y2); } else { - NODEV(nd) = y2; + nd->v() = y2; } } for (i = i1; i != i2; i += di) { @@ -1191,9 +1194,9 @@ void range_interpolate(void) /*symbol at pc, 4 values on stack*/ if (thet >= -1e-9 && thet <= 1. + 1e-9) { y = y1 * (1. - thet) + y2 * thet; if (op) { - NODEV(nd) = hoc_opasgn(op, NODEV(nd), y); + nd->v() = hoc_opasgn(op, NODEV(nd), y); } else { - NODEV(nd) = y; + nd->v() = y; } } } @@ -1235,7 +1238,8 @@ void range_interpolate(void) /*symbol at pc, 4 values on stack*/ } if (s->u.rng.type == EXTRACELL) { if (x1 == 0. || x1 == 1.) { - dpr = nrn_vext_pd(s, indx, node_ptr(sec, x1, (double*) 0)); + dpr = neuron::container::data_handle{ + nrn_vext_pd(s, indx, node_ptr(sec, x1, nullptr))}; if (dpr) { if (op) { *dpr = hoc_opasgn(op, *dpr, y1); @@ -1245,7 +1249,8 @@ void range_interpolate(void) /*symbol at pc, 4 values on stack*/ } } if (x2 == 1. || x2 == 0.) { - dpr = nrn_vext_pd(s, indx, node_ptr(sec, x2, (double*) 0)); + dpr = neuron::container::data_handle{ + nrn_vext_pd(s, indx, node_ptr(sec, x2, nullptr))}; if (dpr) { if (op) { *dpr = hoc_opasgn(op, *dpr, y2); @@ -1274,7 +1279,7 @@ int nrn_exists(Symbol* s, Node* node) { } } -double* nrn_rangepointer(Section* sec, Symbol* s, double d) { +neuron::container::data_handle nrn_rangepointer(Section* sec, Symbol* s, double d) { /* if you change this change nrnpy_rangepointer as well */ short i; Node* nd; @@ -1282,16 +1287,12 @@ double* nrn_rangepointer(Section* sec, Symbol* s, double d) { if (s->u.rng.type == VINDEX) { nd = node_ptr(sec, d, nullptr); - return &NODEV(nd); + return nd->v_handle(); } if (s->u.rng.type == IMEMFAST) { if (nrn_use_fast_imem) { nd = node_ptr(sec, d, nullptr); - if (!nd->_nt) { - v_setup_vectors(); - assert(nd->_nt); - } - return nd->_nt->_nrn_fast_imem->_nrn_sav_rhs + nd->v_node_index; + return nd->sav_rhs_handle(); } else { hoc_execerror( "cvode.use_fast_imem(1) has not been executed so i_membrane_ does not exist", 0); @@ -1300,10 +1301,9 @@ double* nrn_rangepointer(Section* sec, Symbol* s, double d) { indx = range_vec_indx(s); #if EXTRACELLULAR if (s->u.rng.type == EXTRACELL) { - double* pd; - pd = nrn_vext_pd(s, indx, node_ptr(sec, d, (double*) 0)); + double* const pd{nrn_vext_pd(s, indx, node_ptr(sec, d, (double*) 0))}; if (pd) { - return pd; + return neuron::container::data_handle{pd}; } } #endif @@ -1311,25 +1311,26 @@ double* nrn_rangepointer(Section* sec, Symbol* s, double d) { return dprop(s, indx, sec, i); } -/* return nil if failure instead of hoc_execerror +/* return nullptr if failure instead of hoc_execerror and return pointer to the 0 element if an array */ -double* nrnpy_rangepointer(Section* sec, Symbol* s, double d, int* err, int idx) { +neuron::container::data_handle nrnpy_rangepointer(Section* sec, + Symbol* s, + double d, + int* err, + int idx) { /* if you change this change nrn_rangepointer as well */ *err = 0; if (s->u.rng.type == VINDEX) { - return &NODEV(node_ptr(sec, d, nullptr)); + auto* nd = node_ptr(sec, d, nullptr); + return nd->v_handle(); } if (s->u.rng.type == IMEMFAST) { if (nrn_use_fast_imem) { auto* nd = node_ptr(sec, d, nullptr); - if (!nd->_nt) { - v_setup_vectors(); - assert(nd->_nt); - } - return nd->_nt->_nrn_fast_imem->_nrn_sav_rhs + nd->v_node_index; + return nd->sav_rhs_handle(); } else { - return nullptr; + return {}; } } #if EXTRACELLULAR @@ -1337,7 +1338,7 @@ double* nrnpy_rangepointer(Section* sec, Symbol* s, double d, int* err, int idx) 
auto* nd = node_ptr(sec, d, nullptr); double* pd{nrn_vext_pd(s, 0, nd)}; if (pd) { - return pd; + return neuron::container::data_handle{pd}; } } #endif @@ -1352,17 +1353,13 @@ void rangevarevalpointer() { Section* sec{nrn_sec_pop()}; if (s->u.rng.type == VINDEX) { auto* const nd = node_ptr(sec, d, nullptr); - hoc_pushpx(&NODEV(nd)); + hoc_push(nd->v_handle()); return; } if (s->u.rng.type == IMEMFAST) { if (nrn_use_fast_imem) { auto* nd = node_ptr(sec, d, nullptr); - if (!nd->_nt) { - v_setup_vectors(); - assert(nd->_nt); - } - hoc_pushpx(nd->_nt->_nrn_fast_imem->_nrn_sav_rhs + nd->v_node_index); + hoc_push(nd->sav_rhs_handle()); } else { hoc_execerror( "cvode.use_fast_imem(1) has not been executed so i_membrane_ does not exist", 0); @@ -1381,7 +1378,7 @@ void rangevarevalpointer() { } } auto const i = node_index(sec, d); - hoc_pushpx(dprop(s, indx, sec, i)); + hoc_push(dprop(s, indx, sec, i)); } void rangevareval(void) /* symbol at pc, location on stack, return value on stack */ @@ -1399,6 +1396,24 @@ void rangepoint(void) /* symbol at pc, return value on stack */ rangevareval(); } +void rangeobjeval(void) /* symbol at pc, section location on stack, return object on stack*/ +{ + Symbol* s{(pc++)->sym}; + assert(s->subtype == NMODLRANDOM); // the only possibility at the moment + double d = xpop(); + Section* sec{nrn_sec_pop()}; + auto const i = node_index(sec, d); + Prop* m = nrn_mechanism_check(s->u.rng.type, sec, i); + Object* ob = nrn_nmodlrandom_wrap(m, s); + hoc_push_object(ob); +} + +void rangeobjevalmiddle(void) /* symbol at pc, return object on stack*/ +{ + hoc_pushx(0.5); + rangeobjeval(); +} + int node_index(Section* sec, double x) /* returns nearest index to x */ { int i; @@ -1561,8 +1576,8 @@ int nrn_at_beginning(Section* sec) { static void nrn_rootnode_alloc(Section* sec) { Extnode* nde; - extern Node* nrn_node_construct1(); - sec->parentnode = nrn_node_construct1(); + sec->parentnode = new Node{}; + sec->parentnode->sec_node_index_ = 0; sec->parentnode->sec = sec; #if EXTRACELLULAR if (sec->pnode[0]->extnode) { @@ -1618,7 +1633,7 @@ void nrn_parent_info(Section* s) { } if (true_parent == (Section*) 0) { if (sec->parentnode) { - /* non nil parent node in section without a parent is + /* non nullptr parent node in section without a parent is definitely valid */ pnode = sec->parentnode; @@ -1871,16 +1886,9 @@ double* nrn_vext_pd(Symbol* s, int indx, Node* nd) { if (s->u.rng.type != EXTRACELL) { return (double*) 0; } -#if I_MEMBRANE - if (s->u.rng.index != 3 * (nlayer) + 2) { - return (double*) 0; + if (s->u.rng.index != neuron::extracellular::vext_pseudoindex()) { + return nullptr; } -#else /* not I_MEMBRANE */ - if (s->u.rng.index != 3 * (nlayer) + 1) { - return (double*) 0; - } -#endif - zero = 0.; if (nd->extnode) { return nd->extnode->v + indx; @@ -1900,28 +1908,24 @@ double* nrn_vext_pd(Symbol* s, int indx, Node* nd) { /* if you change this then change nrnpy_dprop as well */ /* returns location of property symbol */ -double* dprop(Symbol* s, int indx, Section* sec, short inode) { - Prop* m; - - m = nrn_mechanism_check(s->u.rng.type, sec, inode); +neuron::container::data_handle dprop(Symbol* s, int indx, Section* sec, short inode) { + auto* const m = nrn_mechanism_check(s->u.rng.type, sec, inode); #if EXTRACELLULAR -/* this does not handle vext(0) and vext(1) properly at this time */ -#if I_MEMBRANE - if (m->_type == EXTRACELL && s->u.rng.index == 3 * (nlayer) + 2) { -#else - if (m->_type == EXTRACELL && s->u.rng.index == 3 * (nlayer) + 1) { -#endif - return 
sec->pnode[inode]->extnode->v + indx; + // old comment: this does not handle vext(0) and vext(1) properly at this time + if (m->_type == EXTRACELL && s->u.rng.index == neuron::extracellular::vext_pseudoindex()) { + return neuron::container::data_handle{neuron::container::do_not_search, + sec->pnode[inode]->extnode->v + indx}; } #endif if (s->subtype != NRNPOINTER) { if (m->ob) { - return m->ob->u.dataspace[s->u.rng.index].pval + indx; + return neuron::container::data_handle{m->ob->u.dataspace[s->u.rng.index].pval + + indx}; } else { - return &(m->param[s->u.rng.index]) + indx; + return m->param_handle_legacy(s->u.rng.index + indx); } } else { - auto* const p = m->dparam[s->u.rng.index + indx].get(); + neuron::container::data_handle const p{m->dparam[s->u.rng.index + indx]}; if (!p) { hoc_execerror(s->name, "wasn't made to point to anything"); } @@ -1929,34 +1933,33 @@ double* dprop(Symbol* s, int indx, Section* sec, short inode) { } } -/* return nil instead of hoc_execerror. */ +/* return nullptr instead of hoc_execerror. */ /* returns location of property symbol */ -double* nrnpy_dprop(Symbol* s, int indx, Section* sec, short inode, int* err) { - Prop* m; - - m = nrn_mechanism(s->u.rng.type, sec->pnode[inode]); +neuron::container::data_handle nrnpy_dprop(Symbol* s, + int indx, + Section* sec, + short inode, + int* err) { + auto* const m = nrn_mechanism(s->u.rng.type, sec->pnode[inode]); if (!m) { *err = 1; - return (double*) 0; + return {}; } #if EXTRACELLULAR -/* this does not handle vext(0) and vext(1) properly at this time */ -#if I_MEMBRANE - if (m->_type == EXTRACELL && s->u.rng.index == 3 * (nlayer) + 2) { -#else - if (m->_type == EXTRACELL && s->u.rng.index == 3 * (nlayer) + 1) { -#endif - return sec->pnode[inode]->extnode->v + indx; + /* this does not handle vext(0) and vext(1) properly at this time */ + if (m->_type == EXTRACELL && s->u.rng.index == neuron::extracellular::vext_pseudoindex()) { + return neuron::container::data_handle{sec->pnode[inode]->extnode->v + indx}; } #endif if (s->subtype != NRNPOINTER) { if (m->ob) { - return m->ob->u.dataspace[s->u.rng.index].pval + indx; + return neuron::container::data_handle{m->ob->u.dataspace[s->u.rng.index].pval + + indx}; } else { - return &(m->param[s->u.rng.index]) + indx; + return m->param_handle_legacy(s->u.rng.index + indx); } } else { - auto* const p = m->dparam[s->u.rng.index + indx].get(); + neuron::container::data_handle const p{m->dparam[s->u.rng.index + indx]}; if (!p) { *err = 2; } @@ -2026,8 +2029,8 @@ void forall_section(void) { Section* sec = hocSEC(qsec); qsec = qsec->next; if (buf[0]) { - hoc_regexp_compile(buf); - if (!hoc_regexp_search(secname(sec))) { + std::regex pattern(escape_bracket(buf)); + if (!std::regex_match(secname(sec), pattern)) { continue; } } @@ -2058,8 +2061,8 @@ void hoc_ifsec(void) { s = hoc_strpop(); Sprintf(buf, ".*%s.*", *s); - hoc_regexp_compile(buf); - if (hoc_regexp_search(secname(chk_access()))) { + std::regex pattern(escape_bracket(buf)); + if (std::regex_match(secname(chk_access()), pattern)) { hoc_execute(relative(savepc)); } if (!hoc_returning) @@ -2067,8 +2070,8 @@ void hoc_ifsec(void) { } void issection(void) { /* returns true if string is the access section */ - hoc_regexp_compile(gargstr(1)); - if (hoc_regexp_search(secname(chk_access()))) { + std::regex pattern(escape_bracket(gargstr(1))); + if (std::regex_match(secname(chk_access()), pattern)) { hoc_retpushx(1.); } else { hoc_retpushx(0.); diff --git a/src/nrnoc/cabvars.h b/src/nrnoc/cabvars.h index 436239454d..9c9d6c879d 100644 
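// The cabcode.cpp hunks above replace double* returns with neuron::container::data_handle
// because a raw pointer into structure-of-arrays model data dangles when the underlying
// storage is reallocated or permuted, while a handle records which column and which
// logical row it refers to and resolves to the current address on each use. A minimal
// standalone sketch of that idea (hypothetical names, not the actual data_handle
// implementation):
#include <cstddef>
#include <vector>

struct soa_handle {
    std::vector<double>* column{};  // which column of the storage
    std::size_t row{};              // logical row, still meaningful after reallocation
    double& operator*() const { return (*column)[row]; }
    explicit operator bool() const { return column != nullptr; }
};

int main() {
    std::vector<double> voltages(4, -65.0);
    soa_handle h{&voltages, 2};
    voltages.resize(1024);  // a raw double* taken earlier would now dangle
    *h = -70.0;             // the handle still resolves to element 2
}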
--- a/src/nrnoc/cabvars.h +++ b/src/nrnoc/cabvars.h @@ -73,6 +73,6 @@ extern void morph_alloc(Prop*); #endif -extern Memb_func* memb_func; +extern std::vector memb_func; -#endif // NRN_CABVARS_H \ No newline at end of file +#endif // NRN_CABVARS_H diff --git a/src/nrnoc/capac.cpp b/src/nrnoc/capac.cpp index aa52691e26..7ffbb3629e 100644 --- a/src/nrnoc/capac.cpp +++ b/src/nrnoc/capac.cpp @@ -3,24 +3,28 @@ #include "section.h" #include "membdef.h" +#include "neuron/cache/mechanism_range.hpp" #include "nrniv_mf.h" static const char* mechanism[] = {"0", "capacitance", "cm", 0, "i_cap", 0, 0}; static void cap_alloc(Prop*); -static void cap_init(NrnThread*, Memb_list*, int); +static void cap_init(neuron::model_sorted_token const&, NrnThread*, Memb_list*, int); -#define nparm 2 +static constexpr auto nparm = 2; +static constexpr auto ndparm = 0; extern "C" void capac_reg_(void) { int mechtype; /* all methods deal with capacitance in special ways */ - register_mech(mechanism, cap_alloc, (Pvmi) 0, (Pvmi) 0, (Pvmi) 0, cap_init, -1, 1); + register_mech(mechanism, cap_alloc, nullptr, nullptr, nullptr, cap_init, -1, 1); mechtype = nrn_get_mechtype(mechanism[1]); + using neuron::mechanism::field; + neuron::mechanism::register_data_fields(mechtype, field{"cm"}, field{"i_cap"}); hoc_register_prop_size(mechtype, nparm, 0); } -#define cm vdata[i][0] -#define i_cap vdata[i][1] +static constexpr auto cm_index = 0; +static constexpr auto i_cap_index = 1; /* cj is analogous to 1/dt for cvode and daspk @@ -29,106 +33,74 @@ for pure implicit fixed step it is 1/dt It used to be static but is now a thread data variable */ -void nrn_cap_jacob(NrnThread* _nt, Memb_list* ml) { +void nrn_cap_jacob(neuron::model_sorted_token const& sorted_token, NrnThread* _nt, Memb_list* ml) { + neuron::cache::MechanismRange ml_cache{sorted_token, *_nt, *ml, ml->type()}; + auto* const vec_d = _nt->node_d_storage(); int count = ml->nodecount; - Node** vnode = ml->nodelist; - double** vdata = ml->_data; - int i; double cfac = .001 * _nt->cj; -#if CACHEVEC - if (use_cachevec) { - int* ni = ml->nodeindices; - for (i = 0; i < count; i++) { - VEC_D(ni[i]) += cfac * cm; - } - } else -#endif /* CACHEVEC */ - { - for (i = 0; i < count; ++i) { - NODED(vnode[i]) += cfac * cm; - } + int* ni = ml->nodeindices; + for (int i = 0; i < count; i++) { + vec_d[ni[i]] += cfac * ml_cache.fpfield(i); } } -static void cap_init(NrnThread* _nt, Memb_list* ml, int type) { +static void cap_init(neuron::model_sorted_token const& sorted_token, + NrnThread* _nt, + Memb_list* ml, + int type) { + neuron::cache::MechanismRange ml_cache{sorted_token, *_nt, *ml, type}; int count = ml->nodecount; - double** vdata = ml->_data; - int i; - for (i = 0; i < count; ++i) { - i_cap = 0; + for (int i = 0; i < count; ++i) { + ml_cache.fpfield(i) = 0; } } -void nrn_capacity_current(NrnThread* _nt, Memb_list* ml) { +void nrn_capacity_current(neuron::model_sorted_token const& sorted_token, + NrnThread* _nt, + Memb_list* ml) { + neuron::cache::MechanismRange ml_cache{sorted_token, *_nt, *ml, ml->type()}; + auto* const vec_rhs = _nt->node_rhs_storage(); int count = ml->nodecount; Node** vnode = ml->nodelist; - double** vdata = ml->_data; - int i; double cfac = .001 * _nt->cj; /* since rhs is dvm for a full or half implicit step */ /* (nrn_update_2d() replaces dvi by dvi-dvx) */ /* no need to distinguish secondorder */ -#if CACHEVEC - if (use_cachevec) { - int* ni = ml->nodeindices; - for (i = 0; i < count; i++) { - i_cap = cfac * cm * VEC_RHS(ni[i]); - } - } else -#endif /* CACHEVEC */ 
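// Units behind cfac = .001 * _nt->cj in nrn_cap_jacob / nrn_capacity_current above:
// cm is stored in uF/cm2, cj is roughly 1/dt in 1/ms, and voltages are in mV, so
// cm * cj * dv comes out in uA/cm2; the 0.001 rescales it to mA/cm2, the unit used
// by the other current terms in the tree matrix. A tiny standalone check with
// illustrative values only:
int main() {
    constexpr double cm = 1.0;          // uF/cm2, a typical membrane capacitance
    constexpr double cj = 1.0 / 0.025;  // ~1/dt for dt = 0.025 ms
    const double cfac = 0.001 * cj;     // uF/cm2 * mV/ms -> mA/cm2
    const double i_cap = cfac * cm * 10.0;  // 10 mV step -> 0.4 mA/cm2
    return i_cap > 0.0 ? 0 : 1;
}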
- { - for (i = 0; i < count; ++i) { - i_cap = cfac * cm * NODERHS(vnode[i]); - } + int* ni = ml->nodeindices; + for (int i = 0; i < count; i++) { + ml_cache.fpfield(i) = cfac * ml_cache.fpfield(i) * vec_rhs[ni[i]]; } } -void nrn_mul_capacity(NrnThread* _nt, Memb_list* ml) { +void nrn_mul_capacity(neuron::model_sorted_token const& sorted_token, + NrnThread* _nt, + Memb_list* ml) { + neuron::cache::MechanismRange ml_cache{sorted_token, *_nt, *ml, ml->type()}; + auto* const vec_rhs = _nt->node_rhs_storage(); int count = ml->nodecount; - Node** vnode = ml->nodelist; - double** vdata = ml->_data; - int i; double cfac = .001 * _nt->cj; -#if CACHEVEC - if (use_cachevec) { - int* ni = ml->nodeindices; - for (i = 0; i < count; i++) { - VEC_RHS(ni[i]) *= cfac * cm; - } - } else -#endif /* CACHEVEC */ - { - for (i = 0; i < count; ++i) { - NODERHS(vnode[i]) *= cfac * cm; - } + int* ni = ml->nodeindices; + for (int i = 0; i < count; i++) { + vec_rhs[ni[i]] *= cfac * ml_cache.fpfield(i); } } -void nrn_div_capacity(NrnThread* _nt, Memb_list* ml) { +void nrn_div_capacity(neuron::model_sorted_token const& sorted_token, + NrnThread* _nt, + Memb_list* ml) { + neuron::cache::MechanismRange ml_cache{sorted_token, *_nt, *ml, ml->type()}; + auto* const vec_rhs = _nt->node_rhs_storage(); int count = ml->nodecount; Node** vnode = ml->nodelist; - double** vdata = ml->_data; - int i; -#if CACHEVEC - if (use_cachevec) { - int* ni = ml->nodeindices; - for (i = 0; i < count; i++) { - i_cap = VEC_RHS(ni[i]); - VEC_RHS(ni[i]) /= 1.e-3 * cm; - } - } else -#endif /* CACHEVEC */ - { - for (i = 0; i < count; ++i) { - i_cap = NODERHS(vnode[i]); - NODERHS(vnode[i]) /= 1.e-3 * cm; - } + int* ni = ml->nodeindices; + for (int i = 0; i < count; i++) { + ml_cache.fpfield(i) = vec_rhs[ni[i]]; + vec_rhs[ni[i]] /= 1.e-3 * ml_cache.fpfield(i); } - if (_nt->_nrn_fast_imem) { - double* p = _nt->_nrn_fast_imem->_nrn_sav_rhs; - for (i = 0; i < count; ++i) { - p[vnode[i]->v_node_index] += i_cap; + if (auto const vec_sav_rhs = _nt->node_sav_rhs_storage(); vec_sav_rhs) { + for (int i = 0; i < count; ++i) { + vec_sav_rhs[vnode[i]->v_node_index] += ml_cache.fpfield(i); } } } @@ -137,9 +109,7 @@ void nrn_div_capacity(NrnThread* _nt, Memb_list* ml) { /* the rest can be constructed automatically from the above info*/ static void cap_alloc(Prop* p) { - double* pd; - pd = nrn_prop_data_alloc(CAP, nparm, p); - pd[0] = DEF_cm; /*default capacitance/cm^2*/ - p->param = pd; - p->param_size = nparm; + assert(p->param_size() == nparm); + assert(p->param_num_vars() == nparm); + p->param(0) = DEF_cm; // default capacitance/cm^2 } diff --git a/src/nrnoc/container.cpp b/src/nrnoc/container.cpp new file mode 100644 index 0000000000..a493a2f5b5 --- /dev/null +++ b/src/nrnoc/container.cpp @@ -0,0 +1,152 @@ +#include "membfunc.h" +#include "neuron/container/generic_data_handle.hpp" +#include "neuron/container/soa_container.hpp" +#include "neuron/model_data.hpp" +#include "section.h" + +#include +#include + +namespace { +void invalidate_cache() { + neuron::cache::model.reset(); +} +} // namespace +namespace neuron { +Model::Model() { + m_node_data.set_unsorted_callback(invalidate_cache); + // needs some re-organisation if we ever want to support multiple Model instances + assert(!container::detail::defer_delete_storage); + container::detail::defer_delete_storage = &m_ptrs_for_deferred_deletion; +} +Model::~Model() { + assert(container::detail::defer_delete_storage == &m_ptrs_for_deferred_deletion); + container::detail::defer_delete_storage = nullptr; + 
std::for_each(m_ptrs_for_deferred_deletion.begin(), + m_ptrs_for_deferred_deletion.end(), + [](void* ptr) { operator delete[](ptr); }); +} +std::unique_ptr Model::find_container_info(void const* cont) const { + if (auto maybe_info = m_node_data.find_container_info(cont); maybe_info) { + return maybe_info; + } + for (auto& mech_data: m_mech_data) { + if (!mech_data) { + continue; + } + if (auto maybe_info = mech_data->find_container_info(cont); maybe_info) { + return maybe_info; + } + } + return {}; +} + +void Model::set_unsorted_callback(container::Mechanism::storage& mech_data) { + mech_data.set_unsorted_callback(invalidate_cache); + // This is called when a new Mechanism storage struct is created, i.e. when + // a new Mechanism type is registered. When that happens the cache + // implicitly becomes invalid, because it does not contain entries for the + // newly-added Mechanism. If this proves to be a bottleneck then we could + // handle this more efficiently. + invalidate_cache(); +} +} // namespace neuron +namespace neuron::detail { +// See neuron/model_data.hpp +Model model_data; +} // namespace neuron::detail +namespace neuron::cache { +std::optional model{}; +} +namespace neuron::container { +std::ostream& operator<<(std::ostream& os, generic_data_handle const& dh) { + os << "generic_data_handle{"; + if (!dh.m_offset.has_always_been_null()) { + // modern and valid or once-valid data handle + auto* const container_data = *static_cast(dh.m_container); + auto const maybe_info = utils::find_container_info(container_data); + if (maybe_info) { + if (!maybe_info->container().empty()) { + os << "cont=" << maybe_info->container() << ' '; + } + os << maybe_info->field() << ' ' << dh.m_offset << '/' << maybe_info->size(); + } else { + // couldn't find which container it points into; if container_data is null that will be + // because the relevant container/column was deleted + os << "cont=" << (container_data ? 
"unknown " : "deleted ") << dh.m_offset + << "/unknown"; + } + } else { + // legacy data handle + os << "raw="; + if (dh.m_container) { + // This shouldn't crash, but it might contain some garbage if + // we're wrapping a literal value + os << dh.m_container; + } else { + os << "nullptr"; + } + } + return os << " type=" << dh.type_name() << '}'; +} +} // namespace neuron::container +namespace neuron::container::detail { +// See neuron/container/soa_container.hpp +std::vector* defer_delete_storage{}; +} // namespace neuron::container::detail +namespace neuron::container::Mechanism { +storage::storage(short mech_type, std::string name, std::vector floating_point_fields) + : base_type{field::FloatingPoint{std::move(floating_point_fields)}} + , m_mech_name{std::move(name)} + , m_mech_type{mech_type} {} +double& storage::fpfield(std::size_t instance, int field, int array_index) { + return get_field_instance(instance, field, array_index); +} +double const& storage::fpfield(std::size_t instance, int field, int array_index) const { + return get_field_instance(instance, field, array_index); +} +data_handle storage::fpfield_handle(non_owning_identifier_without_container id, + int field, + int array_index) { + return get_field_instance_handle(id, field, array_index); +} +std::string_view storage::name() const { + return m_mech_name; +} +short storage::type() const { + return m_mech_type; +} +std::ostream& operator<<(std::ostream& os, storage const& data) { + return os << data.name() << "::storage{type=" << data.type() << ", " + << data.get_tag().num_variables() << " fields}"; +} +} // namespace neuron::container::Mechanism +namespace neuron::container::utils { +namespace detail { +generic_data_handle promote_or_clear(generic_data_handle gdh) { + // The whole point of this method is that it receives a raw pointer + assert(!gdh.refers_to_a_modern_data_structure()); + auto& model = neuron::model(); + if (auto h = model.node_data().find_data_handle(gdh); h.refers_to_a_modern_data_structure()) { + return h; + } + bool done{false}; + model.apply_to_mechanisms([&done, &gdh](auto& mech_data) { + if (done) { + return; + } + if (auto h = mech_data.find_data_handle(gdh); h.refers_to_a_modern_data_structure()) { + gdh = std::move(h); + done = true; + } + }); + if (done) { + return gdh; + } + return {}; +} +} // namespace detail +std::unique_ptr find_container_info(void const* c) { + return model().find_container_info(c); +} +} // namespace neuron::container::utils diff --git a/src/nrnoc/eion.cpp b/src/nrnoc/eion.cpp index 190c842134..602eac5e58 100644 --- a/src/nrnoc/eion.cpp +++ b/src/nrnoc/eion.cpp @@ -4,11 +4,12 @@ #include #include "section.h" #include "neuron.h" +#include "neuron/cache/mechanism_range.hpp" #include "membfunc.h" #include "parse.hpp" #include "membdef.h" #include "nrniv_mf.h" -#include "nrnunits_modern.h" +#include "nrnunits.h" #include #include @@ -21,7 +22,8 @@ extern Section* nrn_noerr_access(); extern void hoc_register_prop_size(int, int, int); -#define nparm 5 +static constexpr auto nparm = 5; +static constexpr auto ndparam = 1; static const char* mechanism[] = {/*just a template*/ "0", "na_ion", @@ -40,9 +42,9 @@ static DoubScal scdoub[] = {/* just a template*/ static void ion_alloc(Prop*); -static void ion_cur(NrnThread*, Memb_list*, int); +static void ion_cur(neuron::model_sorted_token const&, NrnThread*, Memb_list*, int); -static void ion_init(NrnThread*, Memb_list*, int); +static void ion_init(neuron::model_sorted_token const&, NrnThread*, Memb_list*, int); static int na_ion, k_ion, 
ca_ion; /* will get type for these special ions */ @@ -174,7 +176,15 @@ void ion_reg(const char* name, double valence) { hoc_symbol_units(hoc_lookup(buf[6].c_str()), "S/cm2"); s = hoc_lookup(buf[0].c_str()); mechtype = nrn_get_mechtype(mechanism[1]); - hoc_register_prop_size(mechtype, nparm, 1); + using neuron::mechanism::field; + neuron::mechanism::register_data_fields(mechtype, + field{buf[1]}, // erev + field{buf[2]}, // conci + field{buf[3]}, // conco + field{buf[5]}, // cur + field{buf[6]}, // dcurdv + field{"iontype", "iontype"}); + hoc_register_prop_size(mechtype, nparm, ndparam); hoc_register_dparam_semantics(mechtype, 0, "iontype"); nrn_writes_conc(mechtype, 1); if (ion_global_map_size <= s->subtype) { @@ -252,12 +262,7 @@ at least one model using this ion\n", } } -#define FARADAY _faraday_[_nrnunit_use_legacy_] -static double _faraday_[2] = {_faraday_codata2018, 96485.309}; -#define gasconstant _gasconstant_[_nrnunit_use_legacy_] -static double _gasconstant_[2] = {_gasconstant_codata2018, 8.3134}; - -#define ktf (1000. * gasconstant * (celsius + 273.15) / FARADAY) +#define ktf (1000. * _gasconstant_codata2018 * (celsius + 273.15) / _faraday_codata2018) double nrn_nernst(double ci, double co, double z) { /*printf("nrn_nernst %g %g %g\n", ci, co, z);*/ if (z == 0) { @@ -272,9 +277,9 @@ double nrn_nernst(double ci, double co, double z) { } } -void nrn_wrote_conc(Symbol* sym, double* pe, int it) { +void nrn_wrote_conc(Symbol* sym, double& erev, double ci, double co, int it) { if (it & 040) { - pe[0] = nrn_nernst(pe[1], pe[2], nrn_ion_charge(sym)); + erev = nrn_nernst(ci, co, nrn_ion_charge(sym)); } } @@ -333,7 +338,7 @@ double nrn_ghk(double v, double ci, double co, double z) { temp = z * v / ktf; eco = co * efun(temp); eci = ci * efun(-temp); - return (.001) * z * FARADAY * (eci - eco); + return (.001) * z * _faraday_codata2018 * (eci - eco); } void ghk(void) { @@ -341,11 +346,12 @@ void ghk(void) { hoc_retpushx(val); } -#define erev pd[i][0] /* From Eion */ -#define conci pd[i][1] -#define conco pd[i][2] -#define cur pd[i][3] -#define dcurdv pd[i][4] +static constexpr auto iontype_index_dparam = 0; +static constexpr auto erev_index = 0; /* From Eion */ +static constexpr auto conci_index = 1; +static constexpr auto conco_index = 2; +static constexpr auto cur_index = 3; +static constexpr auto dcurdv_index = 4; /* handle erev, conci, conc0 "in the right way" according to ion_style @@ -362,17 +368,6 @@ ion_style("name_ion", [c_style, e_style, einit, eadvance, cinit]) and models. */ -#define iontype ppd[i][0].get() /* how _AMBIGUOUS is to be handled */ -/*the bitmap is -03 concentration unused, nrnocCONST, DEP, STATE -04 initialize concentrations -030 reversal potential unused, nrnocCONST, DEP, STATE -040 initialize reversal potential -0100 calc reversal during fadvance -0200 ci being written by a model -0400 co being written by a model -*/ - #define charge global_charge(type) #define conci0 global_conci(type) #define conco0 global_conco(type) @@ -422,7 +417,7 @@ void nrn_check_conc_write(Prop* p_ok, Prop* pion, int i) { } chk_conc_[2 * p_ok->_type + i] |= ion_bit_[pion->_type]; - if (pion->dparam[0].get() & flag) { + if (pion->dparam[iontype_index_dparam].get() & flag) { /* now comes the hard part. 
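// The ktf macro above is RT/F expressed in mV: with the CODATA-2018 constants
// R ~ 8.314462618 J/(mol*K) and F ~ 96485.33212 C/mol, nrn_nernst() evaluates
// E = (RT/zF) * ln(co/ci) in millivolts. A standalone sketch of the same arithmetic
// (constants and concentrations are illustrative, copied here rather than taken from
// NEURON's headers):
#include <cmath>
#include <cstdio>

int main() {
    const double R = 8.314462618;   // J/(mol K), CODATA 2018
    const double F = 96485.33212;   // C/mol, CODATA 2018
    const double celsius = 6.3;     // NEURON's default temperature
    const double ktf = 1000. * R * (celsius + 273.15) / F;  // RT/F in mV
    // e.g. potassium with ki = 54.4 mM, ko = 2.5 mM, z = 1 (squid-style defaults)
    const double ek = ktf / 1. * std::log(2.5 / 54.4);
    std::printf("RT/F = %.3f mV, ek = %.1f mV\n", ktf, ek);  // ek is about -74 mV
}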
Is the possibility in fact actual.*/ for (p = pion->next; p; p = p->next) { if (p == p_ok) { @@ -441,9 +436,9 @@ void nrn_check_conc_write(Prop* p_ok, Prop* pion, int i) { } } } - auto ii = pion->dparam[0].get(); + auto ii = pion->dparam[iontype_index_dparam].get(); ii |= flag; - pion->dparam[0] = ii; + pion->dparam[iontype_index_dparam] = ii; } void ion_style(void) { @@ -461,7 +456,7 @@ void ion_style(void) { p = nrn_mechanism(s->subtype, sec->pnode[0]); oldstyle = -1; if (p) { - oldstyle = p->dparam[0].get(); + oldstyle = p->dparam[iontype_index_dparam].get(); } if (ifarg(2)) { @@ -470,31 +465,15 @@ void ion_style(void) { istyle += 040 * (int) chkarg(4, 0., 1.); /* einit */ istyle += 0100 * (int) chkarg(5, 0., 1.); /* eadvance */ istyle += 04 * (int) chkarg(6, 0., 1.); /* cinit*/ - -#if 0 /* global effect */ - { - int count; - Datum** ppd; - v_setup_vectors(); - count = memb_list[s->subtype].nodecount; - ppd = memb_list[s->subtype].pdata; - for (i=0; i < count; ++i) { - iontype = (iontype&(0200+0400)) + istyle; - } - } -#else /* currently accessed section */ - { - for (i = 0; i < sec->nnode; ++i) { - p = nrn_mechanism(s->subtype, sec->pnode[i]); - if (p) { - auto ii = p->dparam[0].get(); - ii &= (0200 + 0400); - ii += istyle; - p->dparam[0] = ii; - } + for (i = 0; i < sec->nnode; ++i) { + p = nrn_mechanism(s->subtype, sec->pnode[i]); + if (p) { + auto ii = p->dparam[iontype_index_dparam].get(); + ii &= (0200 + 0400); + ii += istyle; + p->dparam[iontype_index_dparam] = ii; } } -#endif } hoc_retpushx((double) oldstyle); } @@ -511,7 +490,7 @@ int nrn_vartype(Symbol* sym) { } p = nrn_mechanism(sym->u.rng.type, sec->pnode[0]); if (p) { - auto it = p->dparam[0].get(); + auto it = p->dparam[iontype_index_dparam].get(); if (sym->u.rng.index == 0) { /* erev */ i = (it & 030) >> 3; /* unused, nrnocCONST, DEP, or STATE */ } else { /* concentration */ @@ -524,10 +503,9 @@ int nrn_vartype(Symbol* sym) { /* the ion mechanism it flag defines how _AMBIGUOUS is to be interpreted */ void nrn_promote(Prop* p, int conc, int rev) { - int oldconc, oldrev; - int it = p->dparam[0].get(); - oldconc = (it & 03); - oldrev = (it & 030) >> 3; + int it = p->dparam[iontype_index_dparam].get(); + int oldconc = (it & 03); + int oldrev = (it & 030) >> 3; /* precedence */ if (oldconc < conc) { oldconc = conc; @@ -550,22 +528,35 @@ void nrn_promote(Prop* p, int conc, int rev) { if (oldconc > 0 && oldrev == 2) { /*einit*/ it += 040; } - p->dparam[0] = it; + p->dparam[iontype_index_dparam] = it; // this sets iontype to 8 } +/*the bitmap is +03 concentration unused, nrnocCONST, DEP, STATE +04 initialize concentrations +030 reversal potential unused, nrnocCONST, DEP, STATE +040 initialize reversal potential +0100 calc reversal during fadvance +0200 ci being written by a model +0400 co being written by a model +*/ + /* Must be called prior to any channels which update the currents */ -static void ion_cur(NrnThread* nt, Memb_list* ml, int type) { - int count = ml->nodecount; - Node** vnode = ml->nodelist; - double** pd = ml->_data; - Datum** ppd = ml->pdata; - int i; +static void ion_cur(neuron::model_sorted_token const& sorted_token, + NrnThread* nt, + Memb_list* ml, + int type) { + neuron::cache::MechanismRange ml_cache{sorted_token, *nt, *ml, type}; + auto const count = ml->nodecount; /*printf("ion_cur %s\n", memb_func[type].sym->name);*/ - for (i = 0; i < count; ++i) { - dcurdv = 0.; - cur = 0.; + for (int i = 0; i < count; ++i) { + ml_cache.fpfield(i) = 0.0; + ml_cache.fpfield(i) = 0.0; + auto const iontype = 
ml->pdata[i][iontype_index_dparam].get(); if (iontype & 0100) { - erev = nrn_nernst(conci, conco, charge); + ml_cache.fpfield(i) = nrn_nernst(ml_cache.fpfield(i), + ml_cache.fpfield(i), + charge); } }; } @@ -573,56 +564,55 @@ static void ion_cur(NrnThread* nt, Memb_list* ml, int type) { /* Must be called prior to other models which possibly also initialize concentrations based on their own states */ -static void ion_init(NrnThread* nt, Memb_list* ml, int type) { - int count = ml->nodecount; - Node** vnode = ml->nodelist; - double** pd = ml->_data; - Datum** ppd = ml->pdata; +static void ion_init(neuron::model_sorted_token const& sorted_token, + NrnThread* nt, + Memb_list* ml, + int type) { int i; + neuron::cache::MechanismRange ml_cache{sorted_token, *nt, *ml, type}; + int count = ml->nodecount; /*printf("ion_init %s\n", memb_func[type].sym->name);*/ for (i = 0; i < count; ++i) { + auto const iontype = ml->pdata[i][iontype_index_dparam].get(); if (iontype & 04) { - conci = conci0; - conco = conco0; + ml_cache.fpfield(i) = conci0; + ml_cache.fpfield(i) = conco0; } } for (i = 0; i < count; ++i) { + auto const iontype = ml->pdata[i][iontype_index_dparam].get(); if (iontype & 040) { - erev = nrn_nernst(conci, conco, charge); + ml_cache.fpfield(i) = nrn_nernst(ml_cache.fpfield(i), + ml_cache.fpfield(i), + charge); } } } static void ion_alloc(Prop* p) { - double* pd[1]; - int i = 0; - - pd[0] = nrn_prop_data_alloc(p->_type, nparm, p); - p->param_size = nparm; - - cur = 0.; - dcurdv = 0.; + assert(p->param_size() == nparm); + assert(p->param_num_vars() == nparm); + p->param(cur_index) = 0.; + p->param(dcurdv_index) = 0.; if (p->_type == na_ion) { - erev = DEF_ena; - conci = DEF_nai; - conco = DEF_nao; + p->param(erev_index) = DEF_ena; + p->param(conci_index) = DEF_nai; + p->param(conco_index) = DEF_nao; } else if (p->_type == k_ion) { - erev = DEF_ek; - conci = DEF_ki; - conco = DEF_ko; + p->param(erev_index) = DEF_ek; + p->param(conci_index) = DEF_ki; + p->param(conco_index) = DEF_ko; } else if (p->_type == ca_ion) { - erev = DEF_eca; - conci = DEF_cai; - conco = DEF_cao; + p->param(erev_index) = DEF_eca; + p->param(conci_index) = DEF_cai; + p->param(conco_index) = DEF_cao; } else { - erev = DEF_eion; - conci = DEF_ioni; - conco = DEF_iono; + p->param(erev_index) = DEF_eion; + p->param(conci_index) = DEF_ioni; + p->param(conco_index) = DEF_iono; } - p->param = pd[0]; - - p->dparam = nrn_prop_datum_alloc(p->_type, 1, p); - p->dparam[0] = 0; + p->dparam = nrn_prop_datum_alloc(p->_type, ndparam, p); + p->dparam[iontype_index_dparam] = 0; } void second_order_cur(NrnThread* nt) { @@ -630,15 +620,15 @@ void second_order_cur(NrnThread* nt) { NrnThreadMembList* tml; Memb_list* ml; int j, i, i2; -#define c 3 -#define dc 4 + constexpr auto c = 3; + constexpr auto dc = 4; if (secondorder == 2) { for (tml = nt->tml; tml; tml = tml->next) if (memb_func[tml->index].alloc == ion_alloc) { ml = tml->ml; i2 = ml->nodecount; for (i = 0; i < i2; ++i) { - ml->_data[i][c] += ml->_data[i][dc] * (NODERHS(ml->nodelist[i])); + ml->data(i, c) += ml->data(i, dc) * (NODERHS(ml->nodelist[i])); } } } diff --git a/src/nrnoc/extcelln.cpp b/src/nrnoc/extcelln.cpp index fc62e8582c..674e89474d 100644 --- a/src/nrnoc/extcelln.cpp +++ b/src/nrnoc/extcelln.cpp @@ -10,7 +10,6 @@ extern int nrn_use_daspk_; -extern void nrn_delete_prop_pool(int type); #if EXTRACELLULAR @@ -19,21 +18,21 @@ int nrn_nlayer_extracellular = EXTRACELLULAR; /* the N index is a keyword in the following. 
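// A standalone sketch of how the octal iontype bitmap documented in eion.cpp above is
// decoded; only the masks and flags come from the comment in the patch, the variable
// names here are illustrative:
#include <cstdio>

int main() {
    const int iontype = 0144;  // example value: 04 | 040 | 0100
    const int conc_style = iontype & 03;          // unused/nrnocCONST/DEP/STATE
    const int erev_style = (iontype & 030) >> 3;  // unused/nrnocCONST/DEP/STATE
    const bool init_conc = iontype & 04;          // initialize concentrations
    const bool init_erev = iontype & 040;         // initialize reversal potential
    const bool nernst_during_fadvance = iontype & 0100;
    const bool ci_written = iontype & 0200;       // ci being written by a model
    const bool co_written = iontype & 0400;       // co being written by a model
    std::printf("conc=%d erev=%d init_conc=%d init_erev=%d adv=%d ci=%d co=%d\n",
                conc_style,
                erev_style,
                init_conc,
                init_erev,
                nernst_during_fadvance,
                ci_written,
                co_written);
}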
See init.cpp for implementation.*/ /* if nlayer is changed the symbol table arayinfo->sub[0] must be updated for xraxial, xg, xc, and vext */ -static const char* mechanism[] = { - "0", - "extracellular", - "xraxial[N]", - "xg[N]", - "xc[N]", - "e_extracellular", - 0, +static const char* mechanism[] = {"0", + "extracellular", + "xraxial[N]", + "xg[N]", + "xc[N]", + "e_extracellular", + nullptr, #if I_MEMBRANE - "i_membrane", + "i_membrane", + "sav_g", + "sav_rhs", #endif - 0, - "vext[N]", - 0, -}; + nullptr, + "vext[N]", // not handled with other mech data + nullptr}; static HocParmLimits limits[] = {{"xraxial", {1e-9, 1e15}}, {"xg", {0., 1e15}}, {"xc", {0., 1e15}}, @@ -48,7 +47,7 @@ static HocParmUnits units[] = {{"xraxial", "MOhm/cm"}, {0, 0}}; static void extcell_alloc(Prop*); -static void extcell_init(NrnThread* nt, Memb_list* ml, int type); +static void extcell_init(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type); #if 0 static void printnode(const char* s); #endif @@ -63,29 +62,48 @@ static int _ode_count(int type) { return 0; } +// 4: xraxial[nlayer], xg[nlayer], xc[nlayer], e_extracellular +constexpr static auto nparm = 4 +#if I_MEMBRANE + + 3 // i_membrane, sav_g, and sav_rhs +#endif + ; +// it seems that one past the end (i.e. index nparm) is used to refer to vext, but that is +// allocated/managed separately; see neuron::extracellular::vext_pseudoindex() and its usage + +static void update_parmsize() { + using neuron::mechanism::field; + // clang-format off + neuron::mechanism::register_data_fields(EXTRACELL + , field{"xraxial", nrn_nlayer_extracellular} + , field{"xg", nrn_nlayer_extracellular} + , field{"xc", nrn_nlayer_extracellular} + , field{"e_extracellular"} +#if I_MEMBRANE + , field{"i_membrane"} + , field{"sav_g"} + , field{"sav_rhs"} +#endif + ); + // clang-format on + hoc_register_prop_size(EXTRACELL, nparm, 0); +} extern "C" void extracell_reg_(void) { - int i; - register_mech(mechanism, extcell_alloc, (Pvmi) 0, (Pvmi) 0, (Pvmi) 0, extcell_init, -1, 1); - i = nrn_get_mechtype(mechanism[1]); - hoc_register_cvode(i, _ode_count, 0, 0, 0); + register_mech(mechanism, extcell_alloc, nullptr, nullptr, nullptr, extcell_init, -1, 1); + int const i = nrn_get_mechtype(mechanism[1]); + assert(i == EXTRACELL); + hoc_register_cvode(i, _ode_count, nullptr, nullptr, nullptr); hoc_register_limits(i, limits); hoc_register_units(i, units); + update_parmsize(); } /* solving is done with sparse13 */ /* interface between hoc and extcell */ -#define xraxial pd /* From Eion */ -#define xg (pd + (nlayer)) -#define xc (pd + 2 * (nlayer)) -#define e_extracellular pd[3 * (nlayer)] -#if I_MEMBRANE -#define i_membrane pd[1 + 3 * (nlayer)] -#define sav_g pd[2 + 3 * (nlayer)] -#define sav_rhs pd[3 + 3 * (nlayer)] -#endif +using namespace neuron::extracellular; /* based on update() in fadvance.cpp */ /* update has already been called so modify nd->v based on dvi @@ -97,7 +115,6 @@ void nrn_update_2d(NrnThread* nt) { extern int secondorder; Node *nd, **ndlist; Extnode* nde; - double* pd; double cfac; Memb_list* ml = nt->_ecell_memb_list; if (!ml) { @@ -113,15 +130,15 @@ void nrn_update_2d(NrnThread* nt) { for (il = 0; il < nlayer; ++il) { nde->v[il] += *nde->_rhs[il]; } - NODEV(nd) -= *nde->_rhs[0]; + nd->v() -= *nde->_rhs[0]; } #if I_MEMBRANE for (i = 0; i < cnt; ++i) { - pd = ml->_data[i]; nd = ndlist[i]; NODERHS(nd) -= *nd->extnode->_rhs[0]; - i_membrane = sav_g * (NODERHS(nd)) + sav_rhs; + ml->data(i, i_membrane_index) = ml->data(i, sav_g_index) * (NODERHS(nd)) + + 
ml->data(i, sav_rhs_index); #if 1 /* i_membrane is a current density (mA/cm2). However it contains contributions from Non-ELECTRODE_CURRENT @@ -141,7 +158,7 @@ void nrn_update_2d(NrnThread* nt) { insert them at the points x=0 or x=1 */ #else - i_membrane *= NODEAREA(nd); + ml->data(i, i_membrane_index) *= NODEAREA(nd); /* i_membrane is nA for every segment. This is different from all other continuous mechanism currents and same as PointProcess currents since it contains @@ -153,55 +170,33 @@ void nrn_update_2d(NrnThread* nt) { #endif } -static int nparm() { /* number of doubles for property data */ - /* 3 are the nlayer size arrays xg, xc, xraxial */ -#if I_MEMBRANE - /* 4 is for e_extracellular, i_membrane, sav_g, and sav_rhs */ - return 3 * (nlayer) + 4; -#else - /* 1 is for e_extracellular */ - return 3 * (nlayer) + 1; -#endif -} - static void extcell_alloc(Prop* p) { - double* pd; - int i; - - pd = nrn_prop_data_alloc(EXTRACELL, nparm(), p); - p->param_size = nparm(); - - for (i = 0; i < nlayer; ++i) { - xraxial[i] = 1.e9; - xg[i] = 1.e9; - xc[i] = 0.; + assert(p->param_size() == (nparm - 3) + 3 * nrn_nlayer_extracellular); + assert(p->param_num_vars() == nparm); + for (auto i = 0; i < nrn_nlayer_extracellular; ++i) { + p->param(xraxial_index, i) = 1e9; + p->param(xg_index, i) = 1e9; + p->param(xc_index, i) = 0.0; } - e_extracellular = 0.; -#if 0 - i_membrane = 0.; - sav_g = 0.; - sav_rhs = 0.; -#endif - p->param = pd; + p->param(e_extracellular_index) = 0.0; } /*ARGSUSED*/ -static void extcell_init(NrnThread* nt, Memb_list* ml, int type) { +static void extcell_init(neuron::model_sorted_token const&, + NrnThread* nt, + Memb_list* ml, + int type) { int ndcount = ml->nodecount; Node** ndlist = ml->nodelist; - double** data = ml->_data; - int i, j; - double* pd; if ((cvode_active_ > 0) && (nrn_use_daspk_ == 0)) { hoc_execerror("Extracellular mechanism only works with fixed step methods and daspk", 0); } - for (i = 0; i < ndcount; ++i) { - for (j = 0; j < nlayer; ++j) { + for (int i = 0; i < ndcount; ++i) { + for (int j = 0; j < nrn_nlayer_extracellular; ++j) { ndlist[i]->extnode->v[j] = 0.; } #if I_MEMBRANE - pd = data[i]; - i_membrane = 0.; + ml->data(i, i_membrane_index) = 0.0; #endif } } @@ -274,9 +269,8 @@ void nlayer_extracellular() { if (nrn_nlayer_extracellular == old) { return; } - check_if_extracellular_in_use(); - nrn_delete_prop_pool(EXTRACELL); + update_parmsize(); /*global nlayer is the new value. 
Following needs to know the previous */ update_extracellular_reg(old); update_existing_extnode(old); @@ -306,16 +300,20 @@ void extcell_node_create(Node* nd) { Prop* p; /* may be a nnode increase so some may already be allocated */ if (!nd->extnode) { - nde = (Extnode*) ecalloc(1, sizeof(Extnode)); + nde = new Extnode{}; extnode_alloc_elements(nde); nd->extnode = nde; for (j = 0; j < nlayer; ++j) { nde->v[j] = 0.; } - nde->param = (double*) 0; for (p = nd->prop; p; p = p->next) { if (p->_type == EXTRACELL) { - nde->param = p->param; + for (auto i = 0; i < p->param_num_vars(); ++i) { + for (auto array_index = 0; array_index < p->param_array_dimension(i); + ++array_index) { + nde->param.push_back(p->param_handle(i, array_index)); + } + } break; } } @@ -323,23 +321,6 @@ void extcell_node_create(Node* nd) { } } -void nrn_extcell_update_param(void) { - int i; - NrnThread* nt; - FOR_THREADS(nt) { - Memb_list* ml = nt->_ecell_memb_list; - if (ml) { - int cnt = ml->nodecount; - Node** ndlist = ml->nodelist; - for (i = 0; i < cnt; ++i) { - Node* nd = ndlist[i]; - assert(nd->extnode); - nd->extnode->param = ml->_data[i]; - } - } - } -} - void extcell_2d_alloc(Section* sec) { int i, j; Node* nd; @@ -359,7 +340,6 @@ void extcell_2d_alloc(Section* sec) { void nrn_rhs_ext(NrnThread* _nt) { int i, j, cnt; Node *nd, *pnd, **ndlist; - double* pd; Extnode *nde, *pnde; Memb_list* ml = _nt->_ecell_memb_list; if (!ml) { @@ -375,8 +355,7 @@ void nrn_rhs_ext(NrnThread* _nt) { nde = nd->extnode; *nde->_rhs[0] -= NODERHS(nd); #if I_MEMBRANE - pd = ml->_data[i]; - sav_rhs = *nde->_rhs[0]; + ml->data(i, sav_rhs_index) = *nde->_rhs[0]; /* and for daspk this is the ionic current which can be combined later with i_cap before return from solve. */ #endif @@ -387,10 +366,9 @@ void nrn_rhs_ext(NrnThread* _nt) { pnd = _nt->_v_parent[nd->v_node_index]; if (pnd) { pnde = pnd->extnode; - pd = nde->param; /* axial contributions */ if (pnde) { /* parent sec may not be extracellular */ - for (j = 0; j < nlayer; ++j) { + for (j = 0; j < nrn_nlayer_extracellular; ++j) { double dv = pnde->v[j] - nde->v[j]; *nde->_rhs[j] -= nde->_b[j] * dv; *pnde->_rhs[j] += nde->_a[j] * dv; @@ -415,10 +393,11 @@ void nrn_rhs_ext(NrnThread* _nt) { /* series resistance and battery to ground */ /* between nlayer-1 and ground */ - j = nlayer - 1; - *nde->_rhs[j] -= xg[j] * (nde->v[j] - e_extracellular); + j = nrn_nlayer_extracellular - 1; + *nde->_rhs[j] -= *nde->param[xg_index_ext(j)] * + (nde->v[j] - *nde->param[e_extracellular_index_ext()]); for (--j; j >= 0; --j) { /* between j and j+1 layer */ - double x = xg[j] * (nde->v[j] - nde->v[j + 1]); + double x = *nde->param[xg_index_ext(j)] * (nde->v[j] - nde->v[j + 1]); *nde->_rhs[j] -= x; *nde->_rhs[j + 1] += x; } @@ -464,8 +443,7 @@ void nrn_setup_ext(NrnThread* _nt) { *nde->_x12[0] -= d; *nde->_x21[0] -= d; #if I_MEMBRANE - pd = ml->_data[i]; - sav_g = d; + ml->data(i, sav_g_index) = d; #endif } /* series resistance, capacitance, and axial terms. 
*/ @@ -474,14 +452,13 @@ void nrn_setup_ext(NrnThread* _nt) { nde = nd->extnode; pnd = _nt->_v_parent[nd->v_node_index]; if (pnd) { - pd = nde->param; /* series resistance and capacitance to ground */ j = 0; for (;;) { /* between j and j+1 layer */ - mfac = (xg[j] + xc[j] * cfac); + mfac = (*nde->param[xg_index_ext(j)] + *nde->param[xc_index_ext(j)] * cfac); *nde->_d[j] += mfac; ++j; - if (j == nlayer) { + if (j == nrn_nlayer_extracellular) { break; } *nde->_d[j] += mfac; @@ -491,13 +468,11 @@ void nrn_setup_ext(NrnThread* _nt) { pnde = pnd->extnode; /* axial connections */ if (pnde) { /* parent sec may not be extracellular */ - for (j = 0; j < nlayer; ++j) { + for (j = 0; j < nrn_nlayer_extracellular; ++j) { *nde->_d[j] -= nde->_b[j]; *pnde->_d[j] -= nde->_a[j]; - ; *nde->_a_matelm[j] += nde->_a[j]; *nde->_b_matelm[j] += nde->_b[j]; - ; } } } @@ -523,27 +498,25 @@ void ext_con_coef(void) /* setup a and b */ dx = section_length(sec) / ((double) (sec->nnode - 1)); for (j = 0; j < sec->nnode - 1; j++) { nde = sec->pnode[j]->extnode; - pd = nde->param; - for (k = 0; k < nlayer; ++k) { - *nde->_rhs[k] = 1e-4 * xraxial[k] * (dx / 2.); /*Megohms*/ + for (k = 0; k < nrn_nlayer_extracellular; ++k) { + *nde->_rhs[k] = 1e-4 * *nde->param[xraxial_index_ext(k)] * + (dx / 2.); /*Megohms*/ } } /* last segment has 0 length. */ nde = sec->pnode[j]->extnode; - pd = nde->param; - for (k = 0; k < nlayer; ++k) { + for (k = 0; k < nrn_nlayer_extracellular; ++k) { *nde->_rhs[k] = 0.; - xc[k] = 0.; - xg[k] = 0.; + *nde->param[xc_index_ext(k)] = 0.; + *nde->param[xg_index_ext(k)] = 0.; } /* if owns a rootnode */ if (!sec->parentsec) { nde = sec->parentnode->extnode; - pd = nde->param; - for (k = 0; k < nlayer; ++k) { + for (k = 0; k < nrn_nlayer_extracellular; ++k) { *nde->_rhs[k] = 0.; - xc[k] = 0.; - xg[k] = 0.; + *nde->param[xc_index_ext(k)] = 0.; + *nde->param[xg_index_ext(k)] = 0.; } } } diff --git a/src/nrnoc/fadvance.cpp b/src/nrnoc/fadvance.cpp index dda8342cfd..11dc9b89df 100644 --- a/src/nrnoc/fadvance.cpp +++ b/src/nrnoc/fadvance.cpp @@ -50,11 +50,10 @@ extern double nrnmpi_wtime(); extern double* nrn_mech_wtime_; extern double t, dt; extern double chkarg(int, double low, double high); -extern void nrn_fixed_step(); -extern void nrn_fixed_step_group(int); -static void* nrn_fixed_step_thread(NrnThread*); -static void* nrn_fixed_step_group_thread(NrnThread* nth); -extern void nonvint(NrnThread* nt); +static void nrn_fixed_step_thread(neuron::model_sorted_token const&, NrnThread&); +static void nrn_fixed_step_group_thread(neuron::model_sorted_token const&, NrnThread&); +extern void nrn_solve(NrnThread*); +static void nonvint(neuron::model_sorted_token const&, NrnThread&); extern void nrncvode_set_t(double t); static void* nrn_ms_treeset_through_triang(NrnThread*); @@ -74,8 +73,8 @@ extern double hoc_epsilon; #define NONVINT_ODE_COUNT 5 #if NRNCTIME -#define CTBEGIN double wt = nrnmpi_wtime(); -#define CTADD nth->_ctime += nrnmpi_wtime() - wt; +#define CTBEGIN double wt = nrnmpi_wtime() +#define CTADD nth->_ctime += nrnmpi_wtime() - wt #else #define CTBEGIN /**/ #define CTADD /**/ @@ -111,7 +110,7 @@ void nrn_chk_ndt() { /* There are (too) many variants of nrn_fixed_step depending on nrnmpi_numprocs 1 or > 1, nrn_nthread 1 or > 1, -nrnmpi_v_transfer nil or callable, nrn_multisplit_setup nil or callable, +nrnmpi_v_transfer nullptr or callable, nrn_multisplit_setup nullptr or callable, and whether one step with fadvance or possibly many with ParallelContext.psolve before synchronizing with NetParEvent. 
The combination of simultaneous nrnmpi_numprocs > 1 and @@ -134,7 +133,7 @@ is only done by thread 0. Fixed step and global variable step logic is limited to the case where an nrnmpi_v_transfer requires existence of nrnthread_v_transfer (even if one thread). */ -#if 1 || PARANEURON +#if 1 || NRNMPI void (*nrnmpi_v_transfer_)(); /* called by thread 0 */ void (*nrnthread_v_transfer_)(NrnThread* nt); /* if at least one gap junction has a source voltage with extracellular inserted */ @@ -144,12 +143,12 @@ void (*nrnthread_vi_compute_)(NrnThread* nt); int cvode_active_; int stoprun; -int nrn_use_fast_imem; +bool nrn_use_fast_imem; #define PROFILE 0 #include "profile.h" -void fadvance(void) { +void fadvance() { nrn::Instrumentor::phase p_fadvance("fadvance"); tstopunset; if (cvode_active_) { @@ -167,7 +166,8 @@ void fadvance(void) { if (diam_changed) { recalc_diam(); } - nrn_fixed_step(); + auto const cache_token = nrn_ensure_model_data_are_sorted(); + nrn_fixed_step(cache_token); tstopunset; hoc_retpushx(1.); } @@ -252,6 +252,7 @@ void batch_run(void) /* avoid interpreter overhead */ } batch_open(filename, tstop, tstep, comment); batch_out(); + auto const cache_token = nrn_ensure_model_data_are_sorted(); if (cvode_active_) { while (t < tstop) { cvode_fadvance(t + tstep); @@ -262,7 +263,7 @@ void batch_run(void) /* avoid interpreter overhead */ tstop -= dt / 4.; tnext = t + tstep; while (t < tstop) { - nrn_fixed_step(); + nrn_fixed_step(cache_token); if (t > tnext) { batch_out(); tnext = t + tstep; @@ -294,13 +295,12 @@ static void dt2thread(double adt) { } static int _upd; -static void* daspk_init_step_thread(NrnThread* nt) { - setup_tree_matrix(nt); - nrn_solve(nt); +static void daspk_init_step_thread(neuron::model_sorted_token const& cache_token, NrnThread& nt) { + setup_tree_matrix(cache_token, nt); + nrn_solve(&nt); if (_upd) { - nrn_update_voltage(nt); + nrn_update_voltage(cache_token, nt); } - return nullptr; } void nrn_daspk_init_step(double tt, double dteps, int upd) { @@ -311,16 +311,17 @@ void nrn_daspk_init_step(double tt, double dteps, int upd) { t = tt; secondorder = 0; dt2thread(dteps); - nrn_thread_table_check(); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + nrn_thread_table_check(sorted_token); _upd = upd; - nrn_multithread_job(daspk_init_step_thread); + nrn_multithread_job(sorted_token, daspk_init_step_thread); dt = dtsav; secondorder = so; dt2thread(dtsav); - nrn_thread_table_check(); + nrn_thread_table_check(sorted_token); } -void nrn_fixed_step() { +void nrn_fixed_step(neuron::model_sorted_token const& cache_token) { nrn::Instrumentor::phase p_timestep("timestep"); int i; #if ELIMINATE_T_ROUNDOFF @@ -331,7 +332,7 @@ void nrn_fixed_step() { } else { dt2thread(dt); } - nrn_thread_table_check(); + nrn_thread_table_check(cache_token); if (nrn_multisplit_setup_) { nrn_multithread_job(nrn_ms_treeset_through_triang); // remove to avoid possible deadlock where some ranks do a @@ -346,11 +347,11 @@ void nrn_fixed_step() { nrn::Instrumentor::phase p_gap("gap-v-transfer"); (*nrnmpi_v_transfer_)(); } - nrn_multithread_job(nrn_fixed_step_lastpart); + nrn_multithread_job(cache_token, nrn_fixed_step_lastpart); } //} } else { - nrn_multithread_job(nrn_fixed_step_thread); + nrn_multithread_job(cache_token, nrn_fixed_step_thread); /* if there is no nrnthread_v_transfer then there cannot be a nrnmpi_v_transfer and lastpart will be done in above call. 
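// The model_sorted_token threaded through the fadvance.cpp signatures above follows a
// simple pattern: establish the sorted/cached state once per step on the controlling
// thread, then pass the proof of sortedness by const reference into the per-thread
// kernels so they can use the cached views without re-checking. A generic standalone
// sketch of that pattern (names are illustrative, not NEURON's API):
#include <vector>

struct sorted_token {};  // stands in for neuron::model_sorted_token

sorted_token ensure_sorted(std::vector<double>& data) {
    // ... sort storage / rebuild caches here ...
    return {};
}

void per_thread_kernel(sorted_token const&, std::vector<double>& data) {
    // safe to assume contiguous, sorted storage in here
    for (auto& x: data) {
        x += 1.0;
    }
}

int main() {
    std::vector<double> data(10, 0.0);
    auto const token = ensure_sorted(data);  // computed once
    per_thread_kernel(token, data);          // passed down, never recomputed per call
}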
@@ -360,7 +361,7 @@ void nrn_fixed_step() { nrn::Instrumentor::phase p_gap("gap-v-transfer"); (*nrnmpi_v_transfer_)(); } - nrn_multithread_job(nrn_fixed_step_lastpart); + nrn_multithread_job(cache_token, nrn_fixed_step_lastpart); } } t = nrn_threads[0]._t; @@ -376,13 +377,13 @@ static int step_group_n; static int step_group_begin; static int step_group_end; -void nrn_fixed_step_group(int n) { +void nrn_fixed_step_group(neuron::model_sorted_token const& cache_token, int n) { int i; #if ELIMINATE_T_ROUNDOFF nrn_chk_ndt(); #endif dt2thread(dt); - nrn_thread_table_check(); + nrn_thread_table_check(cache_token); if (nrn_multisplit_setup_) { int b = 0; nrn_multithread_job(nrn_ms_treeset_through_triang); @@ -420,7 +421,7 @@ void nrn_fixed_step_group(int n) { step_group_end = 0; while (step_group_end < step_group_n) { /*printf("step_group_end=%d step_group_n=%d\n", step_group_end, step_group_n);*/ - nrn_multithread_job(nrn_fixed_step_group_thread); + nrn_multithread_job(cache_token, nrn_fixed_step_group_thread); if (nrn_allthread_handle) { (*nrn_allthread_handle)(); } @@ -433,43 +434,44 @@ void nrn_fixed_step_group(int n) { t = nrn_threads[0]._t; } -void* nrn_fixed_step_group_thread(NrnThread* nth) { +static void nrn_fixed_step_group_thread(neuron::model_sorted_token const& cache_token, + NrnThread& nt) { + auto* const nth = &nt; int i; nth->_stop_stepping = 0; for (i = step_group_begin; i < step_group_n; ++i) { nrn::Instrumentor::phase p_timestep("timestep"); - nrn_fixed_step_thread(nth); + nrn_fixed_step_thread(cache_token, nt); if (nth->_stop_stepping) { if (nth->id == 0) { step_group_end = i + 1; } nth->_stop_stepping = 0; - return nullptr; + return; } } if (nth->id == 0) { step_group_end = step_group_n; } - return nullptr; } -void* nrn_fixed_step_thread(NrnThread* nth) { - double wt; +static void nrn_fixed_step_thread(neuron::model_sorted_token const& cache_token, NrnThread& nt) { + auto* const nth = &nt; { nrn::Instrumentor::phase p("deliver-events"); deliver_net_events(nth); } - wt = nrnmpi_wtime(); + CTBEGIN; nrn_random_play(); #if ELIMINATE_T_ROUNDOFF - nth->nrn_ndt_ += .5; - nth->_t = nrn_tbase_ + nth->nrn_ndt_ * nrn_dt_; + nt.nrn_ndt_ += .5; + nt._t = nrn_tbase_ + nt.nrn_ndt_ * nrn_dt_; #else - nth->_t += .5 * nth->_dt; + nt._t += .5 * nt._dt; #endif fixed_play_continuous(nth); - setup_tree_matrix(nth); + setup_tree_matrix(cache_token, nt); { nrn::Instrumentor::phase p("matrix-solver"); nrn_solve(nth); @@ -480,23 +482,23 @@ void* nrn_fixed_step_thread(NrnThread* nth) { } { nrn::Instrumentor::phase p("update"); - nrn_update_voltage(nth); + nrn_update_voltage(cache_token, *nth); } - CTADD + CTADD; /* To simplify the logic, if there is no nrnthread_v_transfer then there cannot be an nrnmpi_v_transfer. 
*/ if (!nrnthread_v_transfer_) { - nrn_fixed_step_lastpart(nth); + nrn_fixed_step_lastpart(cache_token, nt); } - return nullptr; } extern void nrn_extra_scatter_gather(int direction, int tid); -void* nrn_fixed_step_lastpart(NrnThread* nth) { - CTBEGIN +void nrn_fixed_step_lastpart(neuron::model_sorted_token const& cache_token, NrnThread& nt) { + auto* const nth = &nt; + CTBEGIN; #if ELIMINATE_T_ROUNDOFF nth->nrn_ndt_ += .5; nth->_t = nrn_tbase_ + nth->nrn_ndt_ * nrn_dt_; @@ -505,22 +507,21 @@ void* nrn_fixed_step_lastpart(NrnThread* nth) { #endif fixed_play_continuous(nth); nrn_extra_scatter_gather(0, nth->id); - nonvint(nth); - nrn_ba(nth, AFTER_SOLVE); - fixed_record_continuous(nth); - CTADD { + nonvint(cache_token, nt); + nrn_ba(cache_token, nt, AFTER_SOLVE); + fixed_record_continuous(cache_token, nt); + CTADD; + { nrn::Instrumentor::phase p("deliver-events"); nrn_deliver_events(nth); /* up to but not past texit */ } - return nullptr; } /* nrn_fixed_step_thread is split into three pieces */ void* nrn_ms_treeset_through_triang(NrnThread* nth) { - double wt; deliver_net_events(nth); - wt = nrnmpi_wtime(); + CTBEGIN; nrn_random_play(); #if ELIMINATE_T_ROUNDOFF nth->nrn_ndt_ += .5; @@ -529,9 +530,9 @@ void* nrn_ms_treeset_through_triang(NrnThread* nth) { nth->_t += .5 * nth->_dt; #endif fixed_play_continuous(nth); - setup_tree_matrix(nth); + setup_tree_matrix(nrn_ensure_model_data_are_sorted(), *nth); nrn_multisplit_triang(nth); - CTADD + CTADD; return nullptr; } void* nrn_ms_reduce_solve(NrnThread* nth) { @@ -539,14 +540,15 @@ void* nrn_ms_reduce_solve(NrnThread* nth) { return nullptr; } void* nrn_ms_bksub(NrnThread* nth) { - CTBEGIN + CTBEGIN; nrn_multisplit_bksub(nth); second_order_cur(nth); - nrn_update_voltage(nth); - CTADD + auto const cache_token = nrn_ensure_model_data_are_sorted(); + nrn_update_voltage(cache_token, *nth); + CTADD; /* see above comment in nrn_fixed_step_thread */ if (!nrnthread_v_transfer_) { - nrn_fixed_step_lastpart(nth); + nrn_fixed_step_lastpart(cache_token, *nth); } return nullptr; } @@ -564,38 +566,26 @@ void* nrn_ms_bksub_through_triang(NrnThread* nth) { } -void nrn_update_voltage(NrnThread* _nt) { +void nrn_update_voltage(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { + auto* const vec_rhs = nt.node_rhs_storage(); + auto* const vec_v = nt.node_voltage_storage(); + auto* const _nt = &nt; int i, i1, i2; i1 = 0; i2 = _nt->end; -#if CACHEVEC - if (use_cachevec) { - /* do not need to worry about linmod or extracellular*/ - if (secondorder) { - for (i = i1; i < i2; ++i) { - VEC_V(i) += 2. * VEC_RHS(i); - } - } else { - for (i = i1; i < i2; ++i) { - VEC_V(i) += VEC_RHS(i); - } + /* do not need to worry about linmod or extracellular*/ + if (secondorder) { + for (i = i1; i < i2; ++i) { + vec_v[i] += 2. * vec_rhs[i]; } - } else -#endif - { /* use original non-vectorized update */ - if (secondorder) { - for (i = i1; i < i2; ++i) { - NODEV(_nt->_v_node[i]) += 2. 
* NODERHS(_nt->_v_node[i]); - } - } else { - for (i = i1; i < i2; ++i) { - NODEV(_nt->_v_node[i]) += NODERHS(_nt->_v_node[i]); - } - if (use_sparse13) { - nrndae_update(); - } + } else { + for (i = i1; i < i2; ++i) { + vec_v[i] += vec_rhs[i]; } - } /* end of non-vectorized update */ + } + if (use_sparse13) { + nrndae_update(_nt); + } #if EXTRACELLULAR nrn_update_2d(_nt); @@ -606,7 +596,7 @@ void nrn_update_voltage(NrnThread* _nt) { #if I_MEMBRANE if (_nt->tml) { assert(_nt->tml->index == CAP); - nrn_capacity_current(_nt, _nt->tml->ml); + nrn_capacity_current(sorted_token, _nt, _nt->tml->ml); } #endif if (nrn_use_fast_imem) { @@ -615,20 +605,16 @@ void nrn_update_voltage(NrnThread* _nt) { } void nrn_calc_fast_imem(NrnThread* _nt) { - int i; - int i1 = 0; - int i3 = _nt->end; - double* pd = _nt->_nrn_fast_imem->_nrn_sav_d; - double* prhs = _nt->_nrn_fast_imem->_nrn_sav_rhs; - if (use_cachevec) { - for (i = i1; i < i3; ++i) { - prhs[i] = (pd[i] * VEC_RHS(i) + prhs[i]) * VEC_AREA(i) * 0.01; - } - } else { - for (i = i1; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - prhs[i] = (pd[i] * NODERHS(nd) + prhs[i]) * NODEAREA(nd) * 0.01; - } + constexpr int i1 = 0; + auto const i3 = _nt->end; + auto const vec_area = _nt->node_area_storage(); + auto const vec_rhs = _nt->node_rhs_storage(); + auto const vec_sav_d = _nt->node_sav_d_storage(); + auto const vec_sav_rhs = _nt->node_sav_rhs_storage(); + assert(vec_sav_d); + assert(vec_sav_rhs); + for (int i = i1; i < i3; ++i) { + vec_sav_rhs[i] = (vec_sav_d[i] * vec_rhs[i] + vec_sav_rhs[i]) * vec_area[i] * 0.01; } } @@ -642,19 +628,13 @@ void nrn_calc_fast_imem_fixedstep_init(NrnThread* _nt) { // Warning: Have not thought deeply about extracellular or LinearMechanism. // But there is a good chance things are ok. But needs testing. // I don't believe this is used by Cvode or IDA. 
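// Units behind the * 0.01 in nrn_calc_fast_imem above: sav_d*rhs + sav_rhs is a
// membrane current density in mA/cm2, node areas are stored in um2, and i_membrane_
// is reported in nA; 1 mA/cm2 * 1 um2 = 1e-8 mA = 0.01 nA, hence the 0.01 factor.
// A one-line standalone check with illustrative numbers:
#include <cstdio>

int main() {
    const double density_mA_per_cm2 = 1.0;
    const double area_um2 = 100.0;  // a plausible segment surface area
    const double i_membrane_nA = density_mA_per_cm2 * area_um2 * 0.01;  // = 1 nA
    std::printf("i_membrane_ = %g nA\n", i_membrane_nA);
}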
- int i; - int i1 = 0; + constexpr auto i1 = 0; int i3 = _nt->end; - double* prhs = _nt->_nrn_fast_imem->_nrn_sav_rhs; - if (use_cachevec) { - for (i = i1; i < i3; ++i) { - prhs[i] = (VEC_RHS(i) + prhs[i]) * VEC_AREA(i) * 0.01; - } - } else { - for (i = i1; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - prhs[i] = (NODERHS(nd) + prhs[i]) * NODEAREA(nd) * 0.01; - } + auto const vec_area = _nt->node_area_storage(); + auto const vec_rhs = _nt->node_rhs_storage(); + auto const vec_sav_rhs = _nt->node_sav_rhs_storage(); + for (int i = i1; i < i3; ++i) { + vec_sav_rhs[i] = (vec_rhs[i] + vec_sav_rhs[i]) * vec_area[i] * 0.01; } } @@ -671,9 +651,10 @@ void fcurrent(void) { } dt2thread(-1.); - nrn_thread_table_check(); + auto const sorted_token = nrn_ensure_model_data_are_sorted(); + nrn_thread_table_check(sorted_token); state_discon_allowed_ = 0; - nrn_multithread_job(setup_tree_matrix); + nrn_multithread_job(sorted_token, setup_tree_matrix); state_discon_allowed_ = 1; hoc_retpushx(1.); } @@ -691,7 +672,7 @@ void nrn_print_matrix(NrnThread* _nt) { int i, n = spGetSize(_nt->_sp13mat, 0); spPrint(_nt->_sp13mat, 1, 1, 1); for (i = 1; i <= n; ++i) { - Printf("%d %g\n", i, _nt->_actual_rhs[i]); + Printf("%d %g\n", i, _nt->actual_rhs(i)); } } } else if (_nt) { @@ -699,8 +680,8 @@ void nrn_print_matrix(NrnThread* _nt) { nd = _nt->_v_node[inode]; Printf("%d %g %g %g %g\n", inode, - ClassicalNODEB(nd), - ClassicalNODEA(nd), + *nrn_classicalNodeB(nd), + *nrn_classicalNodeA(nd), NODED(nd), NODERHS(nd)); } @@ -712,8 +693,8 @@ void nrn_print_matrix(NrnThread* _nt) { Printf("%d %d %g %g %g %g\n", isec, inode, - ClassicalNODEB(nd), - ClassicalNODEA(nd), + *nrn_classicalNodeB(nd), + *nrn_classicalNodeA(nd), NODED(nd), NODERHS(nd)); } @@ -753,51 +734,38 @@ void fmatrix(void) { return; } -void nonvint(NrnThread* _nt) { - int i = 0; - double w; - int measure = 0; - NrnThreadMembList* tml; -#if 1 || PARANEURON +static void nonvint(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { /* nrnmpi_v_transfer if needed was done earlier */ if (nrnthread_v_transfer_) { nrn::Instrumentor::phase p_gap("gap-v-transfer"); - (*nrnthread_v_transfer_)(_nt); + nrnthread_v_transfer_(&nt); } -#endif nrn::Instrumentor::phase_begin("state-update"); - if (_nt->id == 0 && nrn_mech_wtime_) { - measure = 1; - } + bool const measure{nt.id == 0 && nrn_mech_wtime_}; errno = 0; - for (tml = _nt->tml; tml; tml = tml->next) + for (auto* tml = nt.tml; tml; tml = tml->next) { if (memb_func[tml->index].state) { std::string mechname("state-"); mechname += memb_func[tml->index].sym->name; + auto const w = measure ? 
nrnmpi_wtime() : -1.0; nrn::Instrumentor::phase_begin(mechname.c_str()); - Pvmi s = memb_func[tml->index].state; + memb_func[tml->index].state(sorted_token, &nt, tml->ml, tml->index); nrn::Instrumentor::phase_end(mechname.c_str()); - if (measure) { - w = nrnmpi_wtime(); - } - (*s)(_nt, tml->ml, tml->index); if (measure) { nrn_mech_wtime_[tml->index] += nrnmpi_wtime() - w; } - if (errno) { - if (nrn_errno_check(i)) { - hoc_warning("errno set during calculation of states", (char*) 0); - } + if (errno && nrn_errno_check(0)) { + hoc_warning("errno set during calculation of states", nullptr); } } - long_difus_solve(0, _nt); /* if any longitudinal diffusion */ - nrn_nonvint_block_fixed_step_solve(_nt->id); + } + long_difus_solve(sorted_token, 0, nt); /* if any longitudinal diffusion */ + nrn_nonvint_block_fixed_step_solve(nt.id); nrn::Instrumentor::phase_end("state-update"); } int nrn_errno_check(int i) { - int ierr; - ierr = hoc_errno_check(); + int ierr = hoc_errno_check(); if (ierr) { fprintf(stderr, "%d errno=%d at t=%g during call to mechanism %s\n", @@ -815,7 +783,7 @@ void frecord_init(void) { /* useful when changing states after an finitialize() nrn_record_init(); if (!cvode_active_) { for (i = 0; i < nrn_nthread; ++i) { - fixed_record_continuous(nrn_threads + i); + fixed_record_continuous(nrn_ensure_model_data_are_sorted(), nrn_threads[i]); } } hoc_retpushx(1.); @@ -844,6 +812,8 @@ void nrn_finitialize(int setv, double v) { nrn::Instrumentor::phase_begin("finitialize"); nrn_fihexec(3); /* model structure changes can be made */ verify_structure(); + // Is this the right place to call this? + auto const sorted_token = nrn_ensure_model_data_are_sorted(); #if ELIMINATE_T_ROUNDOFF nrn_ndt_ = 0.; nrn_dt_ = dt; @@ -855,7 +825,7 @@ void nrn_finitialize(int setv, double v) { if (cvode_active_) { nrncvode_set_t(t); } - nrn_thread_table_check(); + nrn_thread_table_check(sorted_token); clear_event_queue(); nrn_spike_exchange_init(); nrn_random_play(); @@ -864,12 +834,12 @@ void nrn_finitialize(int setv, double v) { nrn_deliver_events(nrn_threads + i); /* The play events at t=0 */ } if (setv) { - FOR_THREADS(_nt) - for (i = 0; i < _nt->end; ++i) { - NODEV(_nt->_v_node[i]) = v; + FOR_THREADS(_nt) { + auto const vec_v = _nt->node_voltage_storage(); + std::fill_n(vec_v, _nt->end, v); } } -#if 1 || PARANEURON +#if 1 || NRNMPI if (nrnthread_vi_compute_) FOR_THREADS(_nt) { (*nrnthread_vi_compute_)(_nt); @@ -887,7 +857,7 @@ void nrn_finitialize(int setv, double v) { #endif nrn_fihexec(0); /* after v is set but before INITIAL blocks are called*/ for (i = 0; i < nrn_nthread; ++i) { - nrn_ba(nrn_threads + i, BEFORE_INITIAL); + nrn_ba(sorted_token, nrn_threads[i], BEFORE_INITIAL); } /* the INITIAL blocks are ordered so that mechanisms that write concentrations are after ions and before mechanisms that read @@ -901,7 +871,7 @@ void nrn_finitialize(int setv, double v) { NrnThreadMembList* tml; for (tml = nt->tml; tml; tml = tml->next) { if (memb_func[tml->index].has_initialize()) { - memb_func[tml->index].invoke_initialize(nt, tml->ml, tml->index); + memb_func[tml->index].invoke_initialize(sorted_token, nt, tml->ml, tml->index); } } } @@ -912,7 +882,10 @@ void nrn_finitialize(int setv, double v) { if (nrn_is_artificial_[i]) if (memb_func[i].has_initialize()) { if (memb_list[i].nodecount) { - memb_func[i].invoke_initialize(nrn_threads, memb_list + i, i); + // initialize all artificial cells in all threads at once + auto& ml = memb_list[i]; + ml.set_storage_offset(0); + memb_func[i].invoke_initialize(sorted_token, 
nrn_threads, &ml, i); } if (errno) { if (nrn_errno_check(i)) { @@ -927,7 +900,7 @@ void nrn_finitialize(int setv, double v) { init_net_events(); for (i = 0; i < nrn_nthread; ++i) { - nrn_ba(nrn_threads + i, AFTER_INITIAL); + nrn_ba(sorted_token, nrn_threads[i], AFTER_INITIAL); } nrn_fihexec(1); /* after INITIAL blocks, before fcurrent*/ @@ -940,7 +913,7 @@ void nrn_finitialize(int setv, double v) { } else { state_discon_allowed_ = 0; for (i = 0; i < nrn_nthread; ++i) { - setup_tree_matrix(nrn_threads + i); + setup_tree_matrix(sorted_token, nrn_threads[i]); if (nrn_use_fast_imem) { nrn_calc_fast_imem_fixedstep_init(nrn_threads + i); } @@ -948,7 +921,7 @@ void nrn_finitialize(int setv, double v) { state_discon_allowed_ = 1; nrn_record_init(); for (i = 0; i < nrn_nthread; ++i) { - fixed_record_continuous(nrn_threads + i); + fixed_record_continuous(sorted_token, nrn_threads[i]); } } for (i = 0; i < nrn_nthread; ++i) { @@ -1014,12 +987,13 @@ void batch_save(void) { hoc_retpushx(1.); } -void nrn_ba(NrnThread* nt, int bat) { - for (NrnThreadBAList* tbl = nt->tbl[bat]; tbl; tbl = tbl->next) { +void nrn_ba(neuron::model_sorted_token const& cache_token, NrnThread& nt, int bat) { + for (NrnThreadBAList* tbl = nt.tbl[bat]; tbl; tbl = tbl->next) { nrn_bamech_t const f{tbl->bam->f}; Memb_list* const ml{tbl->ml}; + // TODO move this loop into the translated MOD file code for (int i = 0; i < ml->nodecount; ++i) { - (*f)(ml->nodelist[i], ml->_data[i], ml->pdata[i], ml->_thread, nt); + f(ml->nodelist[i], ml->pdata[i], ml->_thread, &nt, ml, i, cache_token); } } } @@ -1083,3 +1057,16 @@ int nrn_nonvint_block_helper(int method, int size, double* pd1, double* pd2, int } return rval; } + +// nrn_ensure_model_data_are_sorted_opaque() can be used in circumstances where +// neuron:model_sorted_token const& is a forward ref and nrn_ensure_model_data_are_sorted() cannot +// be used +namespace neuron { +opaque_model_sorted_token::opaque_model_sorted_token(model_sorted_token&& token) + : m_ptr{std::make_unique(std::move(token))} {} +opaque_model_sorted_token::~opaque_model_sorted_token() {} +} // namespace neuron + +neuron::opaque_model_sorted_token nrn_ensure_model_data_are_sorted_opaque() { + return nrn_ensure_model_data_are_sorted(); +} diff --git a/src/nrnoc/gui-redirect.h b/src/nrnoc/gui-redirect.h index 3d2ed635c2..e2eba69b28 100644 --- a/src/nrnoc/gui-redirect.h +++ b/src/nrnoc/gui-redirect.h @@ -2,116 +2,92 @@ #define gui_redirect_h #include "hocdec.h" +#include "nrnpy.h" extern Object* nrn_get_gui_redirect_obj(); -extern Object** (*nrnpy_gui_helper_)(const char*, Object*); -extern double (*nrnpy_object_to_double_)(Object*); -#define TRY_GUI_REDIRECT_OBJ(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper_(name, (Object*) obj); \ - if (ngh_result) { \ - return (void*) *ngh_result; \ - } \ - } \ +#define TRY_GUI_REDIRECT_OBJ(name, obj) \ + if (auto* const ngh_result = \ + neuron::python::methods.try_gui_helper(name, static_cast(obj))) { \ + return static_cast(*ngh_result); \ } -#define TRY_GUI_REDIRECT_METHOD_ACTUAL_DOUBLE(name, sym, v) \ - { \ - Object** guiredirect_result = NULL; \ - if (nrnpy_gui_helper_) { \ - Object* obj = nrn_get_gui_redirect_obj(); \ - guiredirect_result = nrnpy_gui_helper_(name, obj); \ - if (guiredirect_result) { \ - return (nrnpy_object_to_double_(*guiredirect_result)); \ - } \ - } \ +#define TRY_GUI_REDIRECT_METHOD_ACTUAL_DOUBLE(name, sym, v) \ + { \ + Object** guiredirect_result = NULL; \ + if (neuron::python::methods.gui_helper) { 
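The opaque_model_sorted_token addition above is a pimpl-style wrapper: a move-only token is parked behind a std::unique_ptr so that headers which only forward-declare the token type can still receive, store and return one. A minimal, self-contained sketch of that wrapping pattern, using placeholder names (Token, OpaqueToken, make_opaque_token) rather than NEURON's actual types:

    #include <memory>
    #include <utility>

    // Stand-in for the fully defined, move-only token type; in the real use case
    // consumers of OpaqueToken would only see a forward declaration of it.
    struct Token {
        Token() = default;
        Token(Token&&) = default;
    };

    class OpaqueToken {
      public:
        explicit OpaqueToken(Token&& t);
        ~OpaqueToken();  // must be defined where Token is a complete type
        OpaqueToken(OpaqueToken&&) noexcept = default;

      private:
        std::unique_ptr<Token> m_ptr;  // pimpl: the token lives behind a pointer
    };

    OpaqueToken::OpaqueToken(Token&& t)
        : m_ptr{std::make_unique<Token>(std::move(t))} {}
    OpaqueToken::~OpaqueToken() = default;

    // Factory that hands the token across an interface that never spells out Token.
    OpaqueToken make_opaque_token() {
        return OpaqueToken{Token{}};
    }

    int main() {
        auto tok = make_opaque_token();
        (void) tok;
    }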
\ + Object* obj = nrn_get_gui_redirect_obj(); \ + guiredirect_result = neuron::python::methods.gui_helper(name, obj); \ + if (guiredirect_result) { \ + return neuron::python::methods.object_to_double(*guiredirect_result); \ + } \ + } \ } -#define TRY_GUI_REDIRECT_METHOD_ACTUAL_OBJ(name, sym, v) \ - { \ - Object** guiredirect_result = NULL; \ - if (nrnpy_gui_helper_) { \ - Object* obj = nrn_get_gui_redirect_obj(); \ - guiredirect_result = nrnpy_gui_helper_(name, obj); \ - if (guiredirect_result) { \ - return (guiredirect_result); \ - } \ - } \ +#define TRY_GUI_REDIRECT_METHOD_ACTUAL_OBJ(name, sym, v) \ + { \ + Object** guiredirect_result = NULL; \ + if (neuron::python::methods.gui_helper) { \ + Object* obj = nrn_get_gui_redirect_obj(); \ + guiredirect_result = neuron::python::methods.gui_helper(name, obj); \ + if (guiredirect_result) { \ + return (guiredirect_result); \ + } \ + } \ } -#define TRY_GUI_REDIRECT_NO_RETURN(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper_(name, (Object*) obj); \ - if (ngh_result) { \ - return; \ - } \ - } \ +#define TRY_GUI_REDIRECT_NO_RETURN(name, obj) \ + if (auto* const ngh_result = \ + neuron::python::methods.try_gui_helper(name, static_cast(obj)); \ + ngh_result) { \ + return; \ } -#define TRY_GUI_REDIRECT_DOUBLE(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper_(name, (Object*) obj); \ - if (ngh_result) { \ - hoc_ret(); \ - hoc_pushx(nrnpy_object_to_double_(*ngh_result)); \ - return; \ - } \ - } \ +#define TRY_GUI_REDIRECT_DOUBLE(name, obj) \ + if (auto* const ngh_result = \ + neuron::python::methods.try_gui_helper(name, static_cast(obj))) { \ + hoc_ret(); \ + hoc_pushx(neuron::python::methods.object_to_double(*ngh_result)); \ + return; \ } -#define TRY_GUI_REDIRECT_ACTUAL_DOUBLE(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper_(name, (Object*) obj); \ - if (ngh_result) { \ - return (nrnpy_object_to_double_(*ngh_result)); \ - } \ - } \ +#define TRY_GUI_REDIRECT_ACTUAL_DOUBLE(name, obj) \ + if (auto* const ngh_result = \ + neuron::python::methods.try_gui_helper(name, static_cast(obj))) { \ + return neuron::python::methods.object_to_double(*ngh_result); \ } -#define TRY_GUI_REDIRECT_ACTUAL_STR(name, obj) \ - { \ - char** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper3_str_(name, (Object*) obj, 0); \ - if (ngh_result) { \ - return ((const char**) ngh_result); \ - } \ - } \ +#define TRY_GUI_REDIRECT_ACTUAL_STR(name, obj) \ + { \ + char** ngh_result; \ + if (neuron::python::methods.gui_helper3_str) { \ + ngh_result = \ + neuron::python::methods.gui_helper3_str(name, static_cast(obj), 0); \ + if (ngh_result) { \ + return ((const char**) ngh_result); \ + } \ + } \ } -#define TRY_GUI_REDIRECT_ACTUAL_OBJ(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper_(name, (Object*) obj); \ - if (ngh_result) { \ - return ngh_result; \ - } \ - } \ +#define TRY_GUI_REDIRECT_ACTUAL_OBJ(name, obj) \ + if (auto* const ngh_result = \ + neuron::python::methods.try_gui_helper(name, static_cast(obj))) { \ + return ngh_result; \ } -#define TRY_GUI_REDIRECT_DOUBLE_SEND_STRREF(name, obj) \ - { \ - Object** ngh_result; \ - if (nrnpy_gui_helper_) { \ - ngh_result = nrnpy_gui_helper3_(name, (Object*) obj, 1); \ - if (ngh_result) { \ - hoc_ret(); \ - hoc_pushx(nrnpy_object_to_double_(*ngh_result)); \ - return; \ - } \ - } \ +#define 
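The rewritten TRY_GUI_REDIRECT_* macros above rely on C++17 if-with-initializer so the helper's result only exists inside the branch that uses it. A rough before/after sketch of that shape; gui_helper and try_gui_helper below are stand-ins for the real hooks, not NEURON's API:

    #include <cstdio>

    struct Object;  // opaque handle, as in the real headers

    // Optional hook installed by a GUI/Python layer; stays null when absent.
    static double* (*gui_helper)(const char* name, Object* obj) = nullptr;

    static double* try_gui_helper(const char* name, Object* obj) {
        return gui_helper ? gui_helper(name, obj) : nullptr;
    }

    // Old shape: named temporary, nested null checks.
    double redirected_old(const char* name, Object* obj) {
        double* result = nullptr;
        if (gui_helper) {
            result = gui_helper(name, obj);
            if (result) {
                return *result;
            }
        }
        return -1.0;  // fall through to the non-redirected code path
    }

    // New shape: if-with-initializer keeps the result scoped to the branch.
    double redirected_new(const char* name, Object* obj) {
        if (auto* const result = try_gui_helper(name, obj)) {
            return *result;
        }
        return -1.0;
    }

    int main() {
        std::printf("%g %g\n",
                    redirected_old("xpanel", nullptr),
                    redirected_new("xpanel", nullptr));
    }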
TRY_GUI_REDIRECT_DOUBLE_SEND_STRREF(name, obj) \ + { \ + Object** ngh_result; \ + if (neuron::python::methods.gui_helper3) { \ + ngh_result = neuron::python::methods.gui_helper3(name, static_cast(obj), 1); \ + if (ngh_result) { \ + hoc_ret(); \ + hoc_pushx(neuron::python::methods.object_to_double(*ngh_result)); \ + return; \ + } \ + } \ } #endif diff --git a/src/nrnoc/hocprax.cpp b/src/nrnoc/hocprax.cpp index ff27c441a5..fcd6dee1b4 100644 --- a/src/nrnoc/hocprax.cpp +++ b/src/nrnoc/hocprax.cpp @@ -38,6 +38,7 @@ pval = pval_praxis(i, Vector) #include #include "hocdec.h" +#include "nrnpy.h" #include "parse.hpp" #include "scoplib.h" @@ -57,7 +58,6 @@ at return of previous prax call */ static long int nvar; -double (*nrnpy_praxis_efun)(Object* pycallable, Object* hvec); static Object* efun_py; static Object* efun_py_arg; static void* vec_py_save; @@ -113,7 +113,7 @@ void fit_praxis(void) { fmin = 0.; if (hoc_is_object_arg(1)) { - assert(nrnpy_praxis_efun); + assert(neuron::python::methods.praxis_efun); efun_py_ = *hoc_objgetarg(1); hoc_obj_ref(efun_py_); efun_py_arg_ = *vector_pobj(vector_arg(2)); @@ -259,7 +259,7 @@ static double efun(double* v, long int n) { for (i = 0; i < n; ++i) { px[i] = v[i]; } - err = nrnpy_praxis_efun(efun_py, efun_py_arg); + err = neuron::python::methods.praxis_efun(efun_py, efun_py_arg); for (i = 0; i < n; ++i) { v[i] = px[i]; } diff --git a/src/nrnoc/init.cpp b/src/nrnoc/init.cpp index b0deaa0e59..0b09f07af4 100644 --- a/src/nrnoc/init.cpp +++ b/src/nrnoc/init.cpp @@ -1,21 +1,27 @@ #include <../../nrnconf.h> #include #include "nrn_ansi.h" +#include "nrncore_write/io/nrncore_io.h" #include "oc_ansi.h" #include #include #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include "section.h" #include "parse.hpp" #include "nrniv_mf.h" #include "cabvars.h" #include "neuron.h" +#include "neuron/container/data_handle.hpp" #include "membdef.h" #include "multicore.h" #include "nrnmpi.h" +#include +#include /* change this to correspond to the ../nmodl/nocpout nmodl_version_ string*/ static char nmodl_version_[] = "7.7.0"; @@ -43,7 +49,7 @@ extern int nrn_noauto_dlopen_nrnmech; /* default 0 declared in hoc_init.cpp */ // error message hint with regard to mismatched arch void nrn_possible_mismatched_arch(const char* libname) { - if (strncmp(NRNHOSTCPU, "arm64", 5) == 0) { + if (neuron::config::system_processor == "arm64") { // what arch are we running on #if __arm64__ const char* we_are{"arm64"}; @@ -132,8 +138,8 @@ extern Symlist* nrn_load_dll_called_; extern int nrn_load_dll_recover_error(); extern void nrn_load_name_check(const char* name); static int memb_func_size_; -Memb_func* memb_func; -Memb_list* memb_list; +std::vector memb_func; +std::vector memb_list; short* memb_order_; Symbol** pointsym; Point_process** point_process; @@ -154,38 +160,37 @@ int* nrn_prop_dparam_size_; int* nrn_dparam_ptr_start_; int* nrn_dparam_ptr_end_; NrnWatchAllocateFunc_t* nrn_watch_allocate_; +std::unordered_map nrn_mech_inst_destruct; void hoc_reg_watch_allocate(int type, NrnWatchAllocateFunc_t waf) { nrn_watch_allocate_[type] = waf; } -// also for read -using bbcore_write_t = void (*)(double*, int*, int*, int*, double*, Datum*, Datum*, NrnThread*); bbcore_write_t* nrn_bbcore_write_; bbcore_write_t* nrn_bbcore_read_; -void hoc_reg_bbcore_write(int type, bbcore_write_t f) { - nrn_bbcore_write_[type] = f; +void hoc_reg_bbcore_write(int mechtype, bbcore_write_t f) { + nrn_bbcore_write_[mechtype] = f; } -void hoc_reg_bbcore_read(int type, bbcore_write_t f) { - nrn_bbcore_read_[type] = f; 
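Both gui-redirect.h and hocprax.cpp now reach the optional Python layer through one table of function pointers (neuron::python::methods) instead of individually declared nrnpy_* globals. An illustrative sketch of that pattern, assuming nothing about the real struct's members:

    #include <cstdio>

    struct Object;  // opaque host-side handle

    // One struct gathers every hook an optional Python layer may provide.
    struct PythonMethods {
        double (*object_to_double)(Object*) = nullptr;
        double (*praxis_efun)(Object* callable, Object* args) = nullptr;
    };

    // Single global instance; fields stay null until the Python module registers itself.
    PythonMethods methods;

    double call_efun_or_default(Object* callable, Object* args) {
        // Callers test one well-known table instead of many scattered globals.
        if (methods.praxis_efun) {
            return methods.praxis_efun(callable, args);
        }
        return 0.0;
    }

    int main() {
        std::printf("%g\n", call_efun_or_default(nullptr, nullptr));  // no Python layer registered
    }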
+void hoc_reg_bbcore_read(int mechtype, bbcore_write_t f) { + nrn_bbcore_read_[mechtype] = f; } const char** nrn_nmodl_text_; -void hoc_reg_nmodl_text(int type, const char* txt) { - nrn_nmodl_text_[type] = txt; +void hoc_reg_nmodl_text(int mechtype, const char* txt) { + nrn_nmodl_text_[mechtype] = txt; } const char** nrn_nmodl_filename_; -void hoc_reg_nmodl_filename(int type, const char* filename) { - nrn_nmodl_filename_[type] = filename; +void hoc_reg_nmodl_filename(int mechtype, const char* filename) { + nrn_nmodl_filename_[mechtype] = filename; } -void add_nrn_has_net_event(int type) { +void add_nrn_has_net_event(int mechtype) { ++nrn_has_net_event_cnt_; nrn_has_net_event_ = (int*) erealloc(nrn_has_net_event_, nrn_has_net_event_cnt_ * sizeof(int)); - nrn_has_net_event_[nrn_has_net_event_cnt_ - 1] = type; + nrn_has_net_event_[nrn_has_net_event_cnt_ - 1] = mechtype; } /* values are type numbers of mechanisms which have FOR_NETCONS statement */ @@ -205,9 +210,9 @@ void add_nrn_fornetcons(int type, int indx) { short* nrn_is_artificial_; short* nrn_artcell_qindex_; -void add_nrn_artcell(int type, int qi) { - nrn_is_artificial_[type] = 1; - nrn_artcell_qindex_[type] = qi; +void add_nrn_artcell(int mechtype, int qi) { + nrn_is_artificial_[mechtype] = 1; + nrn_artcell_qindex_[mechtype] = qi; } int nrn_is_artificial(int pnttype) { @@ -323,9 +328,9 @@ void hoc_last_init(void) { Fprintf(stderr, "%s\n", banner); IGNORE(fflush(stderr)); } - memb_func_size_ = 30; - memb_func = (Memb_func*) ecalloc(memb_func_size_, sizeof(Memb_func)); - memb_list = (Memb_list*) ecalloc(memb_func_size_, sizeof(Memb_list)); + memb_func_size_ = 30; // initial allocation size + memb_list.reserve(memb_func_size_); + memb_func.resize(memb_func_size_); // we directly resize because it is used below pointsym = (Symbol**) ecalloc(memb_func_size_, sizeof(Symbol*)); point_process = (Point_process**) ecalloc(memb_func_size_, sizeof(Point_process*)); pnt_map = static_cast(ecalloc(memb_func_size_, sizeof(char))); @@ -377,14 +382,15 @@ void hoc_last_init(void) { } SectionList_reg(); SectionRef_reg(); - register_mech(morph_mech, morph_alloc, (Pvmi) 0, (Pvmi) 0, (Pvmi) 0, (Pvmi) 0, -1, 0); + register_mech(morph_mech, morph_alloc, nullptr, nullptr, nullptr, nullptr, -1, 0); + neuron::mechanism::register_data_fields(MORPHOLOGY, neuron::mechanism::field{"diam"}); hoc_register_prop_size(MORPHOLOGY, 1, 0); for (m = mechanism; *m; m++) { (*m)(); } -#if !MAC && !defined(WIN32) +#if !defined(WIN32) modl_reg(); -#endif // not MAC and not WIN32 +#endif // not WIN32 hoc_register_limits(0, _hoc_parm_limits); hoc_register_units(0, _hoc_parm_units); #if defined(WIN32) || defined(NRNMECH_DLL_STYLE) @@ -449,26 +455,157 @@ void initnrn(void) { static int pointtype = 1; /* starts at 1 since 0 means not point in pnt_map*/ int n_memb_func; + +void reallocate_mech_data(int mechtype); +void initialize_memb_func(int mechtype, + nrn_cur_t cur, + nrn_jacob_t jacob, + Pvmp alloc, + nrn_state_t stat, + nrn_init_t initialize, + int vectorized); +void check_mech_version(const char** m); +int count_variables_in_mechanism(const char** m2, int modltypemax); +void register_mech_vars(const char** var_buffers, + int modltypemax, + Symbol* mech_symbol, + int mechtype, + int nrnpointerindex); + /* if vectorized then thread_data_size added to it */ void nrn_register_mech_common(const char** m, Pvmp alloc, - Pvmi cur, - Pvmi jacob, - Pvmi stat, - Pvmi initialize, + nrn_cur_t cur, + nrn_jacob_t jacob, + nrn_state_t stat, + nrn_init_t initialize, int nrnpointerindex, /* if 
-1 then there are none */ int vectorized) { - static int type = 2; /* 0 unused, 1 for cable section */ - int j, k, modltype, pindx, modltypemax; - Symbol* s; + // initialize at first entry, it will be incremented at exit of the function + static int mechtype = 2; /* 0 unused, 1 for cable section */ + int modltype; + int modltypemax; + Symbol* mech_symbol; const char** m2; nrn_load_name_check(m[1]); - if (type >= memb_func_size_) { + reallocate_mech_data(mechtype); + + initialize_memb_func(mechtype, cur, jacob, alloc, stat, initialize, vectorized); + + check_mech_version(m); + + mech_symbol = hoc_install(m[1], MECHANISM, 0.0, &hoc_symlist); + mech_symbol->subtype = mechtype; + memb_func[mechtype].sym = mech_symbol; + m2 = m + 2; + if (nrnpointerindex == -1) { + modltypemax = STATE; + } else { + modltypemax = NRNPOINTER; + } + int nvars = count_variables_in_mechanism(m2, modltypemax); + mech_symbol->s_varn = nvars; + mech_symbol->u.ppsym = (Symbol**) emalloc((unsigned) (nvars * sizeof(Symbol*))); + + register_mech_vars(m2, modltypemax, mech_symbol, mechtype, nrnpointerindex); + ++mechtype; + n_memb_func = mechtype; + // n_memb_func has changed, so any existing NrnThread do not know about the + // new mechanism + v_structure_change = 1; +} + +void register_mech_vars(const char** var_buffers, + int modltypemax, + Symbol* mech_symbol, + int mechtype, + int nrnpointerindex) { + /* this is set up for the possiblility of overloading range variables. + We are currently not allowing this. Hence the #if. + If never overloaded then no reason for list of symbols for each mechanism. + */ + /* the indexing is confusing because k refers to index in the range indx list + and j refers to index in mechanism list which has 0 elements to separate + nrnocCONST, DEPENDENT, and STATE */ + /* variable pointers added on at end, if they exist */ + /* allowing range variable arrays. 
Must extract dimension info from name[%d]*/ + /* pindx refers to index into the p-array */ + int pindx = 0; + int modltype; + int j, k; + for (j = 0, k = 0, modltype = nrnocCONST; modltype <= modltypemax; modltype++, j++) { + for (; var_buffers[j]; j++, k++) { + Symbol* var_symbol; + std::string varname(var_buffers[j]); // copy out the varname to allow modifying it + int index = 1; + unsigned nsub = 0; + auto subscript = varname.find('['); + if (subscript != varname.npos) { +#if EXTRACELLULAR + if (varname[subscript + 1] == 'N') { + index = nlayer; + } else +#endif // EXTRACELLULAR + { + index = std::stoi(varname.substr(subscript + 1)); + } + nsub = 1; + varname.erase(subscript); + } + if ((var_symbol = hoc_lookup(varname.c_str()))) { + IGNORE(fprintf(stderr, CHKmes, varname.c_str())); + } else { + var_symbol = hoc_install(varname.c_str(), RANGEVAR, 0.0, &hoc_symlist); + var_symbol->subtype = modltype; + var_symbol->u.rng.type = mechtype; + var_symbol->cpublic = 1; + if (modltype == NRNPOINTER) { /* not in p array */ + var_symbol->u.rng.index = nrnpointerindex; + } else { + var_symbol->u.rng.index = pindx; + } + if (nsub) { + var_symbol->arayinfo = (Arrayinfo*) emalloc(sizeof(Arrayinfo) + + nsub * sizeof(int)); + var_symbol->arayinfo->a_varn = nullptr; + var_symbol->arayinfo->refcount = 1; + var_symbol->arayinfo->nsub = nsub; + var_symbol->arayinfo->sub[0] = index; + } + if (modltype == NRNPOINTER) { + if (nrn_dparam_ptr_end_[mechtype] == 0) { + nrn_dparam_ptr_start_[mechtype] = nrnpointerindex; + } + nrnpointerindex += index; + nrn_dparam_ptr_end_[mechtype] = nrnpointerindex; + } else { + pindx += index; + } + } + mech_symbol->u.ppsym[k] = var_symbol; + } + } +} + +int count_variables_in_mechanism(const char** m2, int modltypemax) { + int j; + int modltype; + int nvars; + // count the number of variables registered in this mechanism + for (j = 0, nvars = 0, modltype = nrnocCONST; modltype <= modltypemax; modltype++) { + // while we have not encountered a 0 (sentinel for variable type) + while (m2[j++]) { + nvars++; + } + } + return nvars; +} + +void reallocate_mech_data(int mechtype) { + if (mechtype >= memb_func_size_) { memb_func_size_ += 20; - memb_func = (Memb_func*) erealloc(memb_func, memb_func_size_ * sizeof(Memb_func)); - memb_list = (Memb_list*) erealloc(memb_list, memb_func_size_ * sizeof(Memb_list)); pointsym = (Symbol**) erealloc(pointsym, memb_func_size_ * sizeof(Symbol*)); point_process = (Point_process**) erealloc(point_process, memb_func_size_ * sizeof(Point_process*)); @@ -502,7 +639,7 @@ void nrn_register_mech_common(const char** m, nrn_watch_allocate_ = (NrnWatchAllocateFunc_t*) erealloc(nrn_watch_allocate_, memb_func_size_ * sizeof(NrnWatchAllocateFunc_t)); - for (j = memb_func_size_ - 20; j < memb_func_size_; ++j) { + for (int j = memb_func_size_ - 20; j < memb_func_size_; ++j) { pnt_map[j] = 0; point_process[j] = (Point_process*) 0; pointsym[j] = (Symbol*) 0; @@ -521,41 +658,52 @@ void nrn_register_mech_common(const char** m, } nrn_mk_prop_pools(memb_func_size_); } +} + +void initialize_memb_func(int mechtype, + nrn_cur_t cur, + nrn_jacob_t jacob, + Pvmp alloc, + nrn_state_t stat, + nrn_init_t initialize, + int vectorized) { + assert(mechtype >= memb_list.size()); + memb_list.resize(mechtype + 1); + memb_func.resize(mechtype + 1); + nrn_prop_param_size_[mechtype] = 0; /* fill in later */ + nrn_prop_dparam_size_[mechtype] = 0; /* fill in later */ + nrn_dparam_ptr_start_[mechtype] = 0; /* fill in later */ + nrn_dparam_ptr_end_[mechtype] = 0; /* fill in later */ + 
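register_mech_vars parses an optional [N] array suffix out of each variable name with std::string instead of strcpy/sscanf on a fixed buffer. A standalone sketch of the same parse; the EXTRACELLULAR [N]/nlayer special case is omitted and ParsedVar is a made-up holder type:

    #include <cstdio>
    #include <string>

    // Split "cai" -> {"cai", 1} and "gk[3]" -> {"gk", 3}.
    struct ParsedVar {
        std::string name;
        int array_dim;
    };

    ParsedVar parse_range_var(std::string varname) {
        int dim = 1;
        if (auto subscript = varname.find('['); subscript != std::string::npos) {
            dim = std::stoi(varname.substr(subscript + 1));  // digits after '['
            varname.erase(subscript);                        // drop "[N]" from the name
        }
        return {std::move(varname), dim};
    }

    int main() {
        for (const char* raw: {"cai", "gk[3]"}) {
            auto const v = parse_range_var(raw);
            std::printf("%s dim=%d\n", v.name.c_str(), v.array_dim);
        }
    }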
memb_func[mechtype].current = cur; + memb_func[mechtype].jacob = jacob; + memb_func[mechtype].alloc = alloc; + memb_func[mechtype].state = stat; + memb_func[mechtype].set_initialize(initialize); + memb_func[mechtype].destructor = nullptr; + memb_func[mechtype].vectorized = vectorized ? 1 : 0; + memb_func[mechtype].thread_size_ = vectorized ? (vectorized - 1) : 0; + memb_func[mechtype].thread_mem_init_ = nullptr; + memb_func[mechtype].thread_cleanup_ = nullptr; + memb_func[mechtype].thread_table_check_ = nullptr; + memb_func[mechtype].is_point = 0; + memb_func[mechtype].hoc_mech = nullptr; + memb_func[mechtype].setdata_ = nullptr; + memb_func[mechtype].dparam_semantics = nullptr; + memb_order_[mechtype] = mechtype; + memb_func[mechtype].ode_count = nullptr; + memb_func[mechtype].ode_map = nullptr; + memb_func[mechtype].ode_spec = nullptr; + memb_func[mechtype].ode_matsol = nullptr; + memb_func[mechtype].ode_synonym = nullptr; + memb_func[mechtype].singchan_ = nullptr; +} - nrn_prop_param_size_[type] = 0; /* fill in later */ - nrn_prop_dparam_size_[type] = 0; /* fill in later */ - nrn_dparam_ptr_start_[type] = 0; /* fill in later */ - nrn_dparam_ptr_end_[type] = 0; /* fill in later */ - memb_func[type].current = cur; - memb_func[type].jacob = jacob; - memb_func[type].alloc = alloc; - memb_func[type].state = stat; - memb_func[type].set_initialize(initialize); - memb_func[type].destructor = nullptr; - memb_func[type].vectorized = vectorized ? 1 : 0; - memb_func[type].thread_size_ = vectorized ? (vectorized - 1) : 0; - memb_func[type].thread_mem_init_ = nullptr; - memb_func[type].thread_cleanup_ = nullptr; - memb_func[type].thread_table_check_ = nullptr; - memb_func[type]._update_ion_pointers = nullptr; - memb_func[type].is_point = 0; - memb_func[type].hoc_mech = nullptr; - memb_func[type].setdata_ = nullptr; - memb_func[type].dparam_semantics = (int*) 0; - memb_list[type].nodecount = 0; - memb_list[type]._thread = (Datum*) 0; - memb_order_[type] = type; - memb_func[type].ode_count = nullptr; - memb_func[type].ode_map = nullptr; - memb_func[type].ode_spec = nullptr; - memb_func[type].ode_matsol = nullptr; - memb_func[type].ode_synonym = nullptr; - memb_func[type].singchan_ = nullptr; +void check_mech_version(const char** m) { /* as of 5.2 nmodl translates so that the version string - is the first string in m. This allows the neuron application - to determine if nmodl c files are compatible with this version - Note that internal mechanisms have a version of "0" and are - by nature consistent. + is the first string in m. This allows the neuron application + to determine if nmodl c files are compatible with this version + Note that internal mechanisms have a version of "0" and are + by nature consistent. */ /*printf("%s %s\n", m[0], m[1]);*/ @@ -582,200 +730,249 @@ It's version %s \"c\" code is incompatible with this neuron version.\n", nrn_exit(1); } } - - s = hoc_install(m[1], MECHANISM, 0.0, &hoc_symlist); - s->subtype = type; - memb_func[type].sym = s; - /* printf("%s type=%d\n", s->name, type);*/ - m2 = m + 2; - if (nrnpointerindex == -1) { - modltypemax = STATE; - } else { - modltypemax = NRNPOINTER; - } - for (k = 0, j = 0, modltype = nrnocCONST; modltype <= modltypemax; modltype++, j++) { - /*EMPTY*/ - for (; m2[j]; j++, k++) { - ; - } - } - s->s_varn = k; - s->u.ppsym = (Symbol**) emalloc((unsigned) (j * sizeof(Symbol*))); - /* this is set up for the possiblility of overloading range variables. - We are currently not allowing this. Hence the #if. 
- If never overloaded then no reason for list of symbols for each mechanism. - */ - /* the indexing is confusing because k refers to index in the range indx list - and j refers to index in mechanism list which has 0 elements to separate - nrnocCONST, DEPENDENT, and STATE */ - /* variable pointers added on at end, if they exist */ - /* allowing range variable arrays. Must extract dimension info from name[%d]*/ - /* pindx refers to index into the p-array */ - pindx = 0; - for (j = 0, k = 0, modltype = nrnocCONST; modltype <= modltypemax; modltype++, j++) { - for (; m2[j]; j++, k++) { - Symbol* s2; - char buf[200], *cp; - int indx; - unsigned nsub = 0; - strcpy(buf, m2[j]); /* not allowed to change constant string */ - indx = 1; - cp = strchr(buf, '['); - if (cp) { -#if EXTRACELLULAR - if (cp[1] == 'N') { - indx = nlayer; - } else -#endif // EXTRACELLULAR - { - sscanf(cp + 1, "%d", &indx); - } - nsub = 1; - *cp = '\0'; - } - /*SUPPRESS 624*/ - if ((s2 = hoc_lookup(buf))) { -#if 0 - if (s2->subtype != RANGEVAR) { - IGNORE(fprintf(stderr, CHKmes, - buf)); - } -#else // not 0 - IGNORE(fprintf(stderr, CHKmes, buf)); -#endif // not 0 - } else { - s2 = hoc_install(buf, RANGEVAR, 0.0, &hoc_symlist); - s2->subtype = modltype; - s2->u.rng.type = type; - s2->cpublic = 1; - if (modltype == NRNPOINTER) { /* not in p array */ - s2->u.rng.index = nrnpointerindex; - } else { - s2->u.rng.index = pindx; - } - if (nsub) { - s2->arayinfo = (Arrayinfo*) emalloc(sizeof(Arrayinfo) + nsub * sizeof(int)); - s2->arayinfo->a_varn = (unsigned*) 0; - s2->arayinfo->refcount = 1; - s2->arayinfo->nsub = nsub; - s2->arayinfo->sub[0] = indx; - } - if (modltype == NRNPOINTER) { - if (nrn_dparam_ptr_end_[type] == 0) { - nrn_dparam_ptr_start_[type] = nrnpointerindex; - } - nrnpointerindex += indx; - nrn_dparam_ptr_end_[type] = nrnpointerindex; - } else { - pindx += indx; - } - } - s->u.ppsym[k] = s2; - } - } - ++type; - n_memb_func = type; } void register_mech(const char** m, Pvmp alloc, - Pvmi cur, - Pvmi jacob, - Pvmi stat, - Pvmi initialize, + nrn_cur_t cur, + nrn_jacob_t jacob, + nrn_state_t stat, + nrn_init_t initialize, int nrnpointerindex, /* if -1 then there are none */ int vectorized) { - int type = n_memb_func; + int mechtype = n_memb_func; nrn_register_mech_common(m, alloc, cur, jacob, stat, initialize, nrnpointerindex, vectorized); if (nrnpy_reg_mech_p_) { - (*nrnpy_reg_mech_p_)(type); + (*nrnpy_reg_mech_p_)(mechtype); } } -void nrn_writes_conc(int type, int unused) { +void nrn_writes_conc(int mechtype, int unused) { static int lastion = EXTRACELL + 1; int i; for (i = n_memb_func - 2; i >= lastion; --i) { memb_order_[i + 1] = memb_order_[i]; } - memb_order_[lastion] = type; + memb_order_[lastion] = mechtype; #if 0 - printf("%s reordered from %d to %d\n", memb_func[type].sym->name, type, lastion); + printf("%s reordered from %d to %d\n", memb_func[mechtype].sym->name, mechtype, lastion); #endif // 0 - if (nrn_is_ion(type)) { + if (nrn_is_ion(mechtype)) { ++lastion; } } -void hoc_register_prop_size(int type, int psize, int dpsize) { - nrn_prop_param_size_[type] = psize; - nrn_prop_dparam_size_[type] = dpsize; - if (memb_func[type].dparam_semantics) { - free(memb_func[type].dparam_semantics); - memb_func[type].dparam_semantics = (int*) 0; - } - if (dpsize) { - memb_func[type].dparam_semantics = (int*) ecalloc(dpsize, sizeof(int)); +namespace { +/** + * @brief Translate a dparam semantic string to integer form. + * + * This logic used to live inside hoc_register_dparam_semantics. 
+ */ + +// name to int map for the negative types +// xx_ion and #xx_ion will get values of type and type+1000 respectively +static std::unordered_map name_to_negint = {{"area", -1}, + {"iontype", -2}, + {"cvodeieq", -3}, + {"netsend", -4}, + {"pointer", -5}, + {"pntproc", -6}, + {"bbcorepointer", -7}, + {"watch", -8}, + {"diam", -9}, + {"fornetcon", -10}, + {"random", -11}}; + +int dparam_semantics_to_int(std::string_view name) { + if (auto got = name_to_negint.find(std::string{name}); got != name_to_negint.end()) { + return got->second; + } else { + bool const i{name[0] == '#'}; + Symbol* s = hoc_lookup(std::string{name.substr(i)}.c_str()); + if (s && s->type == MECHANISM) { + return s->subtype + i * 1000; + } + throw std::runtime_error("unknown dparam semantics: " + std::string{name}); } } -void hoc_register_dparam_semantics(int type, int ix, const char* name) { - /* only interested in area, iontype, cvode_ieq, - netsend, pointer, pntproc, bbcorepointer, watch, diam, - fornetcon, - xx_ion and #xx_ion which will get - a semantics value of -1, -2, -3, - -4, -5, -6, -7, -8, -9, -10 - type, and type+1000 respectively - */ - if (strcmp(name, "area") == 0) { - memb_func[type].dparam_semantics[ix] = -1; - } else if (strcmp(name, "iontype") == 0) { - memb_func[type].dparam_semantics[ix] = -2; - } else if (strcmp(name, "cvodeieq") == 0) { - memb_func[type].dparam_semantics[ix] = -3; - } else if (strcmp(name, "netsend") == 0) { - memb_func[type].dparam_semantics[ix] = -4; - } else if (strcmp(name, "pointer") == 0) { - memb_func[type].dparam_semantics[ix] = -5; - } else if (strcmp(name, "pntproc") == 0) { - memb_func[type].dparam_semantics[ix] = -6; - } else if (strcmp(name, "bbcorepointer") == 0) { - memb_func[type].dparam_semantics[ix] = -7; - } else if (strcmp(name, "watch") == 0) { - memb_func[type].dparam_semantics[ix] = -8; - } else if (strcmp(name, "diam") == 0) { - memb_func[type].dparam_semantics[ix] = -9; - } else if (strcmp(name, "fornetcon") == 0) { - memb_func[type].dparam_semantics[ix] = -10; - } else { - int i = 0; - if (name[0] == '#') { - i = 1; + +std::vector indices_of_type( + const char* semantic_type, + std::vector> const& dparam_info) { + std::vector indices{}; + int inttype = dparam_semantics_to_int(std::string{semantic_type}); + for (auto i = 0; i < dparam_info.size(); ++i) { + if (dparam_semantics_to_int(dparam_info[i].second) == inttype) { + indices.push_back(i); } - Symbol* s = hoc_lookup(name + i); - if (s && s->type == MECHANISM) { - memb_func[type].dparam_semantics[ix] = s->subtype + i * 1000; + } + return indices; +} + +static std::unordered_map> mech_random_indices{}; + +void update_mech_ppsym_for_modlrandom( + int mechtype, + std::vector> const& dparam_info) { + std::vector indices = indices_of_type("random", dparam_info); + mech_random_indices[mechtype] = indices; + if (indices.empty()) { + return; + } + Symbol* mechsym = memb_func[mechtype].sym; + int is_point = memb_func[mechtype].is_point; + + int k = mechsym->s_varn; + mechsym->s_varn += int(indices.size()); + mechsym->u.ppsym = (Symbol**) erealloc(mechsym->u.ppsym, mechsym->s_varn * sizeof(Symbol*)); + + + for (auto i: indices) { + auto& p = dparam_info[i]; + Symbol* ransym{}; + if (is_point) { + ransym = hoc_install(p.first, RANGEOBJ, 0.0, &(nrn_pnt_template_[mechtype]->symtable)); } else { - fprintf(stderr, - "mechanism %s : unknown semantics for %s\n", - memb_func[type].sym->name, - name); - assert(0); + std::string s{p.first}; + s += "_"; + s += mechsym->name; + ransym = hoc_install(s.c_str(), RANGEOBJ, 
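dparam_semantics_to_int replaces the old strcmp ladder with an unordered_map of the fixed negative codes plus a fallback lookup for ion names, where a leading '#' adds 1000. A reduced sketch of that lookup strategy; the ion branch below uses a hard-coded table in place of hoc_lookup, so the specific type numbers are invented:

    #include <cstdio>
    #include <stdexcept>
    #include <string>
    #include <unordered_map>

    // Known semantics get fixed negative codes; "#xx_ion" gets the ion code + 1000.
    int semantics_to_int(const std::string& name) {
        static const std::unordered_map<std::string, int> known{
            {"area", -1}, {"iontype", -2}, {"cvodeieq", -3}, {"pointer", -5}};
        if (auto it = known.find(name); it != known.end()) {
            return it->second;
        }
        // Fallback: treat the rest as ion mechanism names (stand-in for a symbol lookup).
        static const std::unordered_map<std::string, int> ion_types{{"na_ion", 3}, {"k_ion", 4}};
        bool const writes_conc = !name.empty() && name.front() == '#';
        if (auto it = ion_types.find(writes_conc ? name.substr(1) : name); it != ion_types.end()) {
            return it->second + (writes_conc ? 1000 : 0);
        }
        throw std::runtime_error("unknown dparam semantics: " + name);
    }

    int main() {
        std::printf("%d %d %d\n",
                    semantics_to_int("area"),
                    semantics_to_int("na_ion"),
                    semantics_to_int("#k_ion"));
    }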
0.0, &hoc_symlist); } + ransym->subtype = NMODLRANDOM; + ransym->u.rng.type = mechtype; + ransym->cpublic = 1; + ransym->u.rng.index = i; + mechsym->u.ppsym[k++] = ransym; } -#if 0 - printf("dparam semantics %s ix=%d %s %d\n", memb_func[type].sym->name, - ix, name, memb_func[type].dparam_semantics[ix]); -#endif // 0 } -void hoc_register_cvode(int i, nrn_ode_count_t cnt, nrn_ode_map_t map, Pvmi spec, Pvmi matsol) { +} // namespace + +namespace neuron::mechanism::detail { +void register_data_fields(int mechtype, + std::vector> const& param_info, + std::vector> const& dparam_info) { + nrn_prop_param_size_[mechtype] = param_info.size(); + nrn_prop_dparam_size_[mechtype] = dparam_info.size(); + delete[] std::exchange(memb_func[mechtype].dparam_semantics, nullptr); + if (!dparam_info.empty()) { + memb_func[mechtype].dparam_semantics = new int[dparam_info.size()]; + for (auto i = 0; i < dparam_info.size(); ++i) { + // dparam_info[i].first is the name of the variable, currently unused... + memb_func[mechtype].dparam_semantics[i] = dparam_semantics_to_int( + dparam_info[i].second); + } + } + // Translate param_info into the type we want to use internally now we're fully inside NEURON + // library code (wheels...) + std::vector param_info_new{}; + std::transform(param_info.begin(), + param_info.end(), + std::back_inserter(param_info_new), + [](auto const& old) -> container::Mechanism::Variable { + return {old.first, old.second}; + }); + // Create a per-mechanism data structure as part of the top-level + // neuron::model() structure. + auto& model = neuron::model(); + model.delete_mechanism(mechtype); // e.g. extracellular can call hoc_register_prop_size + // multiple times + auto& mech_data = model.add_mechanism(mechtype, + memb_func[mechtype].sym->name, // the mechanism name + std::move(param_info_new)); // names and array dimensions + // of double-valued + // per-instance variables + memb_list[mechtype].set_storage_pointer(&mech_data); + update_mech_ppsym_for_modlrandom(mechtype, dparam_info); +} +} // namespace neuron::mechanism::detail +namespace neuron::mechanism { +template <> +int const* get_array_dims(int mech_type) { + if (mech_type < 0) { + return nullptr; + } + return neuron::model() + .mechanism_data(mech_type) + .get_array_dims(); +} +template <> +double* const* get_data_ptrs(int mech_type) { + if (mech_type < 0) { + return nullptr; + } + return neuron::model() + .mechanism_data(mech_type) + .get_data_ptrs(); +} +template <> +int get_field_count(int mech_type) { + if (mech_type < 0) { + return -1; + } + return neuron::model() + .mechanism_data(mech_type) + .get_tag() + .num_variables(); +} +} // namespace neuron::mechanism + +/** + * @brief Support mechanism FUNCTION/PROCEDURE python syntax seg.mech.f() + * + * Python (density) mechanism registration uses nrn_mechs2func_map to + * create a per mechanism map of f members that can be called directly + * without prior call to setmech. + */ +void hoc_register_npy_direct(int mechtype, NPyDirectMechFunc* f) { + auto& fmap = nrn_mech2funcs_map[mechtype] = {}; + for (int i = 0; f[i].name; ++i) { + fmap[f[i].name] = &f[i]; + } +} +std::unordered_map nrn_mech2funcs_map; + +/** + * @brief Legacy way of registering mechanism data/pdata size. + * + * Superseded by neuron::mechanism::register_data_fields. + */ +void hoc_register_prop_size(int mechtype, int psize, int dpsize) { + assert(nrn_prop_param_size_[mechtype] == psize); + assert(nrn_prop_dparam_size_[mechtype] == dpsize); +} + +/** + * @brief Legacy way of registering pdata semantics. 
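register_data_fields converts the public {name, array-dimension} pairs into the container library's own variable descriptors with std::transform and a back_inserter before handing them to the model. A sketch of just that conversion step, with a made-up Variable type standing in for container::Mechanism::Variable:

    #include <algorithm>
    #include <cstdio>
    #include <iterator>
    #include <string>
    #include <utility>
    #include <vector>

    // Illustrative internal descriptor type.
    struct Variable {
        std::string name;
        int array_dim;
    };

    std::vector<Variable> to_variables(const std::vector<std::pair<const char*, int>>& param_info) {
        std::vector<Variable> out;
        out.reserve(param_info.size());
        std::transform(param_info.begin(),
                       param_info.end(),
                       std::back_inserter(out),
                       [](auto const& old) -> Variable {
                           return {old.first, old.second};
                       });
        return out;
    }

    int main() {
        for (auto const& v: to_variables({{"gkbar", 1}, {"state", 3}})) {
            std::printf("%s[%d]\n", v.name.c_str(), v.array_dim);
        }
    }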
+ * + * Superseded by neuron::mechanism::register_data_fields. + */ +void hoc_register_dparam_semantics(int mechtype, int ix, const char* name) { + assert(memb_func[mechtype].dparam_semantics[ix] == dparam_semantics_to_int(name)); +} + +int nrn_dparam_semantics_to_int(const char* name) { + return dparam_semantics_to_int(name); +} + +/** + * @brief dparam indices with random semantics for mechtype + */ +std::vector& nrn_mech_random_indices(int type) { + return mech_random_indices[type]; +} + +void hoc_register_cvode(int i, + nrn_ode_count_t cnt, + nrn_ode_map_t map, + nrn_ode_spec_t spec, + nrn_ode_matsol_t matsol) { memb_func[i].ode_count = cnt; memb_func[i].ode_map = map; memb_func[i].ode_spec = spec; memb_func[i].ode_matsol = matsol; } -void hoc_register_synonym(int i, void (*syn)(int, double**, Datum**)) { +void hoc_register_synonym(int i, nrn_ode_synonym_t syn) { memb_func[i].ode_synonym = syn; } @@ -794,21 +991,21 @@ int point_reg_helper(Symbol* s2) { return pointtype++; } -extern void class2oc(const char*, - void* (*cons)(Object*), - void (*destruct)(void*), - Member_func*, - int (*checkpoint)(void**), - Member_ret_obj_func*, - Member_ret_str_func*); +extern void class2oc_base(const char*, + void* (*cons)(Object*), + void (*destruct)(void*), + Member_func*, + int (*checkpoint)(void**), + Member_ret_obj_func*, + Member_ret_str_func*); int point_register_mech(const char** m, Pvmp alloc, - Pvmi cur, - Pvmi jacob, - Pvmi stat, - Pvmi initialize, + nrn_cur_t cur, + nrn_jacob_t jacob, + nrn_state_t stat, + nrn_init_t initialize, int nrnpointerindex, int vectorized, @@ -818,7 +1015,7 @@ int point_register_mech(const char** m, Symlist* sl; Symbol *s, *s2; nrn_load_name_check(m[1]); - class2oc(m[1], constructor, destructor, fmember, nullptr, nullptr, nullptr); + class2oc_base(m[1], constructor, destructor, fmember, nullptr, nullptr, nullptr); s = hoc_lookup(m[1]); sl = hoc_symlist; hoc_symlist = s->u.ctemplate->symtable; @@ -844,7 +1041,6 @@ double* makevector(int nrows) int _ninits; -#if 1 void _modl_set_dt(double newdt) { dt = newdt; nrn_threads->_dt = newdt; @@ -855,11 +1051,6 @@ void _modl_set_dt_thread(double newdt, NrnThread* nt) { double _modl_get_dt_thread(NrnThread* nt) { return nt->_dt; } -#endif // 1 - -int nrn_pointing(double* pd) { - return pd ? 
1 : 0; -} int state_discon_flag_ = 0; void state_discontinuity(int i, double* pd, double d) { @@ -869,14 +1060,14 @@ void state_discontinuity(int i, double* pd, double d) { } } -void hoc_register_limits(int type, HocParmLimits* limits) { +void hoc_register_limits(int mechtype, HocParmLimits* limits) { int i; Symbol* sym; for (i = 0; limits[i].name; ++i) { sym = (Symbol*) 0; - if (type && memb_func[type].is_point) { + if (mechtype && memb_func[mechtype].is_point) { Symbol* t; - t = hoc_lookup(memb_func[type].sym->name); + t = hoc_lookup(memb_func[mechtype].sym->name); sym = hoc_table_lookup(limits[i].name, t->u.ctemplate->symtable); } if (!sym) { @@ -886,14 +1077,14 @@ void hoc_register_limits(int type, HocParmLimits* limits) { } } -void hoc_register_units(int type, HocParmUnits* units) { +void hoc_register_units(int mechtype, HocParmUnits* units) { int i; Symbol* sym; for (i = 0; units[i].name; ++i) { sym = (Symbol*) 0; - if (type && memb_func[type].is_point) { + if (mechtype && memb_func[mechtype].is_point) { Symbol* t; - t = hoc_lookup(memb_func[type].sym->name); + t = hoc_lookup(memb_func[mechtype].sym->name); sym = hoc_table_lookup(units[i].name, t->u.ctemplate->symtable); } if (!sym) { @@ -952,16 +1143,14 @@ void _cvode_abstol(Symbol** s, double* tol, int i) { } } -extern Node** node_construct(int); - -void hoc_register_tolerance(int type, HocStateTolerance* tol, Symbol*** stol) { +void hoc_register_tolerance(int mechtype, HocStateTolerance* tol, Symbol*** stol) { int i; Symbol* sym; - /*printf("register tolerance for %s\n", memb_func[type].sym->name);*/ + /*printf("register tolerance for %s\n", memb_func[mechtype].sym->name);*/ for (i = 0; tol[i].name; ++i) { - if (memb_func[type].is_point) { + if (memb_func[mechtype].is_point) { Symbol* t; - t = hoc_lookup(memb_func[type].sym->name); + t = hoc_lookup(memb_func[mechtype].sym->name); sym = hoc_table_lookup(tol[i].name, t->u.ctemplate->symtable); } else { sym = hoc_lookup(tol[i].name); @@ -969,55 +1158,55 @@ void hoc_register_tolerance(int type, HocStateTolerance* tol, Symbol*** stol) { hoc_symbol_tolerance(sym, tol[i].tolerance); } - if (memb_func[type].ode_count) { - Symbol **psym, *msym, *vsym; - double** pv; - Node** pnode; - Prop* p; - int i, j, k, n, na, index = 0; - - n = (*memb_func[type].ode_count)(type); - if (n > 0) { - psym = (Symbol**) ecalloc(n, sizeof(Symbol*)); - pv = (double**) ecalloc(2 * n, sizeof(double*)); - pnode = node_construct(1); - prop_alloc(&(pnode[0]->prop), MORPHOLOGY, pnode[0]); /* in case we need diam */ - p = prop_alloc(&(pnode[0]->prop), type, pnode[0]); /* this and any ions */ - (*memb_func[type].ode_map)(0, pv, pv + n, p->param, p->dparam, (double*) 0, type); - for (i = 0; i < n; ++i) { - for (p = pnode[0]->prop; p; p = p->next) { - if (pv[i] >= p->param && pv[i] < (p->param + p->param_size)) { - index = pv[i] - p->param; - break; + if (memb_func[mechtype].ode_count) { + if (auto const n = memb_func[mechtype].ode_count(mechtype); n > 0) { + auto* const psym = new Symbol* [n] {}; + Node node{}; // dummy node + node.sec_node_index_ = 0; + prop_alloc(&(node.prop), MORPHOLOGY, &node); /* in case we need diam */ + auto* p = prop_alloc(&(node.prop), mechtype, &node); /* this and any ions */ + // Fill `pv` with pointers to `2*n` parameters inside `p` + std::vector> pv(2 * n); + memb_func[mechtype].ode_map(p, 0, pv.data(), pv.data() + n, nullptr, mechtype); + // The first n elements of `pv` are "pv", the second n are "pvdot" + for (int i = 0; i < n; ++i) { + // `index` is the legacy index of `pv[i]` inside 
mechanism instance `p` + auto const [p, index] = [&h = pv[i]](Prop* p) { + for (; p; p = p->next) { + int legacy_index{}; + auto const num_params = p->param_num_vars(); + for (auto i_param = 0; i_param < num_params; ++i_param) { + auto const array_dim = p->param_array_dimension(i_param); + for (auto j = 0; j < array_dim; ++j, ++legacy_index) { + if (h == p->param_handle(i_param, j)) { + return std::make_pair(p, legacy_index); + } + } + } } - } - - /* p is the prop and index is the index - into the p->param array */ - assert(p); + std::ostringstream oss; + oss << "could not find " << h << " starting from " << *p; + throw std::runtime_error(oss.str()); + }(node.prop); /* need to find symbol for this */ - msym = memb_func[p->_type].sym; - for (j = 0; j < msym->s_varn; ++j) { - vsym = msym->u.ppsym[j]; + auto* msym = memb_func[p->_type].sym; + for (int j = 0; j < msym->s_varn; ++j) { + auto* vsym = msym->u.ppsym[j]; if (vsym->type == RANGEVAR && vsym->u.rng.index == index) { psym[i] = vsym; /*printf("identified %s at index %d of %s\n", vsym->name, index, * msym->name);*/ if (ISARRAY(vsym)) { - na = vsym->arayinfo->sub[0]; - for (k = 1; k < na; ++k) { + int const na = vsym->arayinfo->sub[0]; + for (int k = 1; k < na; ++k) { psym[++i] = vsym; } } break; } } - assert(j < msym->s_varn); } - - node_destruct(pnode, 1); *stol = psym; - free(pv); } } } @@ -1027,12 +1216,10 @@ void _nrn_thread_reg(int i, int cons, void (*f)(Datum*)) { memb_func[i].thread_mem_init_ = f; } else if (cons == 0) { memb_func[i].thread_cleanup_ = f; - } else if (cons == 2) { - memb_func[i]._update_ion_pointers = f; } } -void _nrn_thread_table_reg(int i, void (*f)(double*, Datum*, Datum*, NrnThread*, int)) { +void _nrn_thread_table_reg(int i, nrn_thread_table_check_t f) { memb_func[i].thread_table_check_ = f; } @@ -1040,8 +1227,8 @@ void _nrn_setdata_reg(int i, void (*call)(Prop*)) { memb_func[i].setdata_ = call; } /* there is some question about the _extcall_thread variables, if any. 
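The new hoc_register_tolerance locates each state's owning Prop and legacy index with an immediately-invoked lambda whose pair result is unpacked by structured bindings. A stripped-down sketch of that idiom over a plain property chain; the Prop layout below is simplified and not the real one:

    #include <cstdio>
    #include <stdexcept>
    #include <utility>

    struct Prop {
        int type;
        double param[4];
        int param_size;
        Prop* next;
    };

    // Find which Prop in the chain owns `target` and its index within that Prop,
    // using an immediately-invoked lambda so both results can be const.
    std::pair<Prop*, int> locate(double* target, Prop* chain) {
        auto const [owner, index] = [target](Prop* p) {
            for (; p; p = p->next) {
                for (int i = 0; i < p->param_size; ++i) {
                    if (target == &p->param[i]) {
                        return std::make_pair(p, i);
                    }
                }
            }
            throw std::runtime_error("could not find target in property chain");
        }(chain);
        return {owner, index};
    }

    int main() {
        Prop b{2, {}, 4, nullptr};
        Prop a{1, {}, 4, &b};
        auto const [owner, index] = locate(&b.param[2], &a);
        std::printf("type=%d index=%d\n", owner->type, index);  // type=2 index=2
    }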
*/ -double nrn_call_mech_func(Symbol* s, int narg, Prop* p, int type) { - void (*call)(Prop*) = memb_func[type].setdata_; +double nrn_call_mech_func(Symbol* s, int narg, Prop* p, int mechtype) { + void (*call)(Prop*) = memb_func[mechtype].setdata_; if (call) { (*call)(p); } @@ -1049,9 +1236,15 @@ double nrn_call_mech_func(Symbol* s, int narg, Prop* p, int type) { } void nrnunit_use_legacy() { + hoc_warning("nrnunit_use_legacy() is deprecated as only modern units are supported.", + "If you want to still use legacy unit you can use a version of nrn < 9."); if (ifarg(1)) { int arg = (int) chkarg(1, 0, 1); - _nrnunit_use_legacy_ = arg; + if (arg == 1) { + hoc_execerror( + "'nrnunit_use_legacy(1)' have been called but legacy units are no more supported.", + nullptr); + } } - hoc_retpushx((double) _nrnunit_use_legacy_); + hoc_retpushx(0.); // This value means modern unit } diff --git a/src/nrnoc/ldifus.cpp b/src/nrnoc/ldifus.cpp index 85af5f2ae0..1609df4c9a 100644 --- a/src/nrnoc/ldifus.cpp +++ b/src/nrnoc/ldifus.cpp @@ -13,11 +13,11 @@ #define nt_t nrn_threads->_t #define nt_dt nrn_threads->_dt -typedef struct LongDifus { - int dchange; +struct LongDifus { + int dchange{}; int* mindex; /* index into memb_list[m] */ int* pindex; /* parent in this struct */ - double** state; + std::vector> state; double* a; double* b; double* d; @@ -27,13 +27,15 @@ typedef struct LongDifus { double* vol; /* volatile volume from COMPARTMENT */ double* dc; /* volatile diffusion constant * cross sectional area from LONGITUDINAL_DIFFUSION */ -} LongDifus; + LongDifus(std::size_t n) + : state(n) {} +}; -typedef struct LongDifusThreadData { +struct LongDifusThreadData { int nthread; LongDifus** ldifus; Memb_list** ml; -} LongDifusThreadData; +}; static int ldifusfunccnt; static ldifusfunc_t* ldifusfunc; @@ -46,23 +48,6 @@ void hoc_register_ldifus1(ldifusfunc_t f) { ++ldifusfunccnt; } -#if MAC -/* this avoids a missing _ptrgl12 in the mac library that was called by -the MrC compiled object -*/ -void mac_difusfunc(ldifusfunc2_t* f, - int m, - ldifusfunc3_t diffunc, - void** v, - int ai, - int sindex, - int dindex, - NrnThread* nt) { - (*f)(m, diffunc, v, ai, sindex, dindex, nt); -} -#endif - - extern "C" void nrn_tree_solve(double* a, double* d, double* b, double* rhs, int* pindex, int n) { /* treesolver @@ -98,9 +83,8 @@ extern "C" void nrn_tree_solve(double* a, double* d, double* b, double* rhs, int } -void long_difus_solve(int method, NrnThread* nt) { - ldifusfunc2_t* f = NULL; - int i; +void long_difus_solve(neuron::model_sorted_token const& sorted_token, int method, NrnThread& nt) { + ldifusfunc2_t* f{}; if (ldifusfunc) { switch (method) { case 0: /* normal staggered time step */ @@ -117,9 +101,8 @@ void long_difus_solve(int method, NrnThread* nt) { break; } assert(f); - - for (i = 0; i < ldifusfunccnt; ++i) { - (*ldifusfunc[i])(f, nt); + for (int i = 0; i < ldifusfunccnt; ++i) { + ldifusfunc[i](f, sorted_token, nt); } } } @@ -127,12 +110,8 @@ void long_difus_solve(int method, NrnThread* nt) { static void longdifusfree(LongDifus** ppld) { if (*ppld) { LongDifus* pld = *ppld; -#if 0 -printf("free longdifus structure_change=%d %d\n", pld->schange, structure_change_cnt); -#endif free(pld->mindex); free(pld->pindex); - free(pld->state); free(pld->a); free(pld->b); free(pld->d); @@ -141,11 +120,11 @@ printf("free longdifus structure_change=%d %d\n", pld->schange, structure_change free(pld->bf); free(pld->vol); free(pld->dc); - free(pld); } - *ppld = (LongDifus*) 0; + delete std::exchange(*ppld, nullptr); } +// sindex is 
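nrn_tree_solve factors and solves the tree-structured, almost-tridiagonal system in O(n): nodes are ordered so every parent index precedes its children, triangularization sweeps leaves-to-root and back-substitution sweeps root-to-leaves. A self-contained sketch of that scheme (my own reimplementation for illustration, not the NEURON source):

    #include <cstdio>
    #include <vector>

    // Solve d[i]*x[i] + b[i]*x[parent[i]] + sum over children c of (a[c]*x[c]) = rhs[i],
    // where parent[i] < i for every non-root node and parent[root] == -1.
    void tree_solve(std::vector<double> a,
                    std::vector<double> d,
                    std::vector<double> b,
                    std::vector<double>& rhs,
                    const std::vector<int>& parent) {
        int const n = static_cast<int>(rhs.size());
        // Triangularize: eliminate each child's coupling into its parent (leaves -> root).
        for (int i = n - 1; i > 0; --i) {
            int const p = parent[i];
            if (p >= 0) {
                double const f = a[i] / d[i];
                d[p] -= f * b[i];
                rhs[p] -= f * rhs[i];
            }
        }
        // Back-substitute (root -> leaves); the ordering guarantees rhs[parent] is final.
        for (int i = 0; i < n; ++i) {
            int const p = parent[i];
            if (p >= 0) {
                rhs[i] -= b[i] * rhs[p];
            }
            rhs[i] /= d[i];
        }
    }

    int main() {
        // A 3-node chain: node 0 is the root, node 1 hangs off 0, node 2 hangs off 1.
        std::vector<double> a{0, -1, -1}, d{3, 3, 3}, b{0, -1, -1}, rhs{1, 1, 1};
        std::vector<int> parent{-1, 0, 1};
        tree_solve(a, d, b, rhs, parent);
        std::printf("%g %g %g\n", rhs[0], rhs[1], rhs[2]);  // solution of the 3x3 system
    }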
a non-legacy field index static void longdifus_diamchange(LongDifus* pld, int m, int sindex, Memb_list* ml, NrnThread* _nt) { int i, n, mi, mpi, j, index, pindex, vnodecount; Node *nd, *pnd; @@ -162,9 +141,11 @@ static void longdifus_diamchange(LongDifus* pld, int m, int sindex, Memb_list* m /* Also child may butte end to end with parent or attach to middle */ mi = pld->mindex[i]; if (sindex < 0) { - pld->state[i] = ml->pdata[mi][-sindex - 1].get(); + pld->state[i] = static_cast>( + ml->pdata[mi][-sindex - 1].get()); } else { - pld->state[i] = ml->_data[mi] + sindex; + // if this is an array variable then take a handle to the zeroth entry of it + pld->state[i] = ml->data_handle(mi, {sindex, 0}); } nd = ml->nodelist[mi]; pindex = pld->pindex[i]; @@ -186,20 +167,12 @@ static void longdifus_diamchange(LongDifus* pld, int m, int sindex, Memb_list* m } static void longdifusalloc(LongDifus** ppld, int m, int sindex, Memb_list* ml, NrnThread* _nt) { - LongDifus* pld; - int i, n, mi, mpi, j, index, pindex, vnodecount; - int *map, *omap; - Node *nd, *pnd; - hoc_Item* qsec; - - vnodecount = _nt->end; - *ppld = pld = (LongDifus*) emalloc(sizeof(LongDifus)); - n = ml->nodecount; - - pld->dchange = 0; + auto const n = ml->nodecount; + assert(n > 0); + auto* const pld = new LongDifus{static_cast(n)}; + *ppld = pld; pld->mindex = (int*) ecalloc(n, sizeof(int)); pld->pindex = (int*) ecalloc(n, sizeof(int)); - pld->state = (double**) ecalloc(n, sizeof(double*)); pld->a = (double*) ecalloc(n, sizeof(double)); pld->b = (double*) ecalloc(n, sizeof(double)); pld->d = (double*) ecalloc(n, sizeof(double)); @@ -210,30 +183,22 @@ static void longdifusalloc(LongDifus** ppld, int m, int sindex, Memb_list* ml, N pld->dc = (double*) ecalloc(n, sizeof(double)); /* make a map from node_index to memb_list index. -1 means no exist*/ - map = (int*) ecalloc(vnodecount, sizeof(int)); - omap = (int*) ecalloc(n, sizeof(int)); - for (i = 0; i < vnodecount; ++i) { - map[i] = -1; - } - for (i = 0; i < n; ++i) { + auto const vnodecount = _nt->end; + std::vector map(vnodecount, -1), omap(n); + for (int i = 0; i < n; ++i) { map[ml->nodelist[i]->v_node_index] = i; } -#if 0 -for (i=0; i < vnodecount; ++i) { - printf("%d index=%d\n", i, map[i]); -} -#endif /* order the indices for efficient gaussian elimination */ /* But watch out for 0 area nodes. Use the parent of parent */ /* But if parent of parent does not have diffusion mechanism check the parent section */ /* And watch out for root. 
Use first node of root section */ - for (i = 0, j = 0; i < vnodecount; ++i) { + for (int i = 0, j = 0; i < vnodecount; ++i) { if (map[i] > -1) { pld->mindex[j] = map[i]; omap[map[i]] = j; /* from memb list index to order */ - pnd = _nt->_v_parent[i]; - pindex = map[pnd->v_node_index]; + Node* pnd = _nt->_v_parent[i]; + auto pindex = map[pnd->v_node_index]; if (pindex == -1) { /* maybe this was zero area node */ pnd = _nt->_v_parent[pnd->v_node_index]; if (pnd) { @@ -262,16 +227,6 @@ for (i=0; i < vnodecount; ++i) { } } longdifus_diamchange(pld, m, sindex, ml, _nt); -#if 0 - for (i=0; i < n; ++i) { -printf("i=%d pin=%d mi=%d :%s node %d state[(%i)]=%g\n", i, pld->pindex[i], - pld->mindex[i], secname(ml->nodelist[pld->mindex[i]]->sec), - ml->nodelist[pld->mindex[i]]->sec_node_index_ - , sindex, pld->state[i][0]); - } -#endif - free(map); - free(omap); } /* called at end of v_setup_vectors only for thread 0 */ @@ -284,7 +239,9 @@ static void overall_setup(int m, int ai, int sindex, int dindex, - NrnThread* _nt) { + neuron::model_sorted_token const&, + NrnThread& ntr) { + auto* const _nt = &ntr; int i; LongDifusThreadData** ppldtd = (LongDifusThreadData**) v; LongDifusThreadData* ldtd = *ppldtd; @@ -328,204 +285,153 @@ static Memb_list* v2ml(void** v, int tid) { return (*ppldtd)->ml[tid]; } -static void -stagger(int m, ldifusfunc3_t diffunc, void** v, int ai, int sindex, int dindex, NrnThread* _nt) { - LongDifus* pld; - int i, n, di; - double dc, vol, dfdi, dx; - double** data; - Datum** pdata; - Datum* thread; - Memb_list* ml; - - di = dindex + ai; - - pld = v2ld(v, _nt->id); - if (!pld) +static void stagger(int m, + ldifusfunc3_t diffunc, + void** v, + int ai, // array index + int sindex, // field index of {x} variable + int dindex, // field index of D{x} variable + neuron::model_sorted_token const& sorted_token, + NrnThread& ntr) { + auto* const _nt = &ntr; + LongDifus* const pld = v2ld(v, _nt->id); + if (!pld) { return; - ml = v2ml(v, _nt->id); - - n = ml->nodecount; - data = ml->_data; - pdata = ml->pdata; - thread = ml->_thread; + } + auto* const ml = v2ml(v, _nt->id); + int const n = ml->nodecount; + Datum** const pdata = ml->pdata; + Datum* const thread = ml->_thread; longdifus_diamchange(pld, m, sindex, ml, _nt); /*flux and volume coefficients (if dc is constant this is too often)*/ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; int mi = pld->mindex[i]; - pld->dc[i] = (*diffunc)(ai, data[mi], pdata[mi], pld->vol + i, &dfdi, thread, _nt); + double dfdi; + pld->dc[i] = diffunc(ai, ml, mi, pdata[mi], pld->vol + i, &dfdi, thread, _nt, sorted_token); pld->d[i] = 0.; -#if 0 - if (dfdi) { - pld->d[i] += fabs(dfdi)/pld->vol[i]/pld->state[i][ai]; - } -#endif if (pin > -1) { /* D * area between compartments */ - dc = (pld->dc[i] + pld->dc[pin]) / 2.; - + double const dc = (pld->dc[i] + pld->dc[pin]) / 2.; pld->a[i] = -pld->af[i] * dc / pld->vol[pin]; pld->b[i] = -pld->bf[i] * dc / pld->vol[i]; } } /* setup matrix */ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; int mi = pld->mindex[i]; pld->d[i] += 1. 
/ nt_dt; - pld->rhs[i] = pld->state[i][ai] / nt_dt; + pld->rhs[i] = *(pld->state[i].next_array_element(ai)) / nt_dt; if (pin > -1) { pld->d[i] -= pld->b[i]; pld->d[pin] -= pld->a[i]; } } -#if 0 -for (i=0; i < n; ++i) { double a,b; - if (pld->pindex[i] > -1) { - a = pld->a[i]; - b = pld->b[i]; - }else{ a=b=0.;} - printf("i=%d a=%g b=%g d=%g rhs=%g state=%g\n", - i, a, b, pld->d[i], pld->rhs[i], pld->state[i][ai]); -} -#endif /* we've set up the matrix; now solve it */ nrn_tree_solve(pld->a, pld->d, pld->b, pld->rhs, pld->pindex, n); /* update answer */ - for (i = 0; i < n; ++i) { - pld->state[i][ai] = pld->rhs[i]; + for (int i = 0; i < n; ++i) { + *(pld->state[i].next_array_element(ai)) = pld->rhs[i]; } } -static void -ode(int m, ldifusfunc3_t diffunc, void** v, int ai, int sindex, int dindex, NrnThread* _nt) { - LongDifus* pld; - int i, n, di; - double dc, vol, dfdi; - double** data; - Datum** pdata; - Datum* thread; - Memb_list* ml; - - di = dindex + ai; - - pld = v2ld(v, _nt->id); - if (!pld) +static void ode(int m, + ldifusfunc3_t diffunc, + void** v, + int ai, // array index + int sindex, // field index of {x} variable + int dindex, // field index of D{x} variable + neuron::model_sorted_token const& sorted_token, + NrnThread& ntr) { + auto* const _nt = &ntr; + LongDifus* const pld = v2ld(v, _nt->id); + if (!pld) { return; - ml = v2ml(v, _nt->id); - - n = ml->nodecount; - data = ml->_data; - pdata = ml->pdata; - thread = ml->_thread; - + } + auto* const ml = v2ml(v, _nt->id); + int const n = ml->nodecount; + Datum** const pdata = ml->pdata; + Datum* const thread = ml->_thread; longdifus_diamchange(pld, m, sindex, ml, _nt); /*flux and volume coefficients (if dc is constant this is too often)*/ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; int mi = pld->mindex[i]; - pld->dc[i] = (*diffunc)(ai, data[mi], pdata[mi], pld->vol + i, &dfdi, thread, _nt); + double dfdi; + pld->dc[i] = diffunc(ai, ml, mi, pdata[mi], pld->vol + i, &dfdi, thread, _nt, sorted_token); if (pin > -1) { /* D * area between compartments */ - dc = (pld->dc[i] + pld->dc[pin]) / 2.; - + double const dc = (pld->dc[i] + pld->dc[pin]) / 2.; pld->a[i] = pld->af[i] * dc / pld->vol[pin]; pld->b[i] = pld->bf[i] * dc / pld->vol[i]; } } /* add terms to diffeq */ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { double dif; int pin = pld->pindex[i]; int mi = pld->mindex[i]; -#if 0 - pld->d[i] = data[mi][di]; -#endif if (pin > -1) { - dif = (pld->state[pin][ai] - pld->state[i][ai]); - data[mi][di] += dif * pld->b[i]; - data[pld->mindex[pin]][di] -= dif * pld->a[i]; + dif = *(pld->state[pin].next_array_element(ai)) - + *(pld->state[i].next_array_element(ai)); + ml->data(mi, dindex, ai) += dif * pld->b[i]; + ml->data(pld->mindex[pin], dindex, ai) -= dif * pld->a[i]; } } -#if 0 - for (i=0; i < n; ++i) { - int mi = pld->mindex[i]; - printf("%d olddstate=%g new=%g\n", i, pld->d[i], data[mi][di]); - } -#endif } - -static void -matsol(int m, ldifusfunc3_t diffunc, void** v, int ai, int sindex, int dindex, NrnThread* _nt) { - LongDifus* pld; - int i, n, di; - double dc, vol, dfdi; - double** data; - Datum** pdata; - Datum* thread; - Memb_list* ml; - - di = dindex + ai; - - pld = v2ld(v, _nt->id); - if (!pld) +static void matsol(int m, + ldifusfunc3_t diffunc, + void** v, + int ai, // array index + int sindex, // field index of {x} variable + int dindex, // field index of D{x} variable + neuron::model_sorted_token const& sorted_token, + NrnThread& ntr) { + auto* const _nt = &ntr; + LongDifus* 
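As I read the coefficients assembled in stagger (d[i] = 1/dt - b[i] - the children's a, rhs[i] = state/dt), the system handed to nrn_tree_solve is the backward-Euler discretization of longitudinal diffusion along the tree:

\[
\frac{c_i^{n+1} - c_i^{n}}{\Delta t}
  \;=\; |b_i|\,\bigl(c_{p(i)}^{n+1} - c_i^{n+1}\bigr)
  \;+\; \sum_{j:\,p(j)=i} |a_j|\,\bigl(c_j^{n+1} - c_i^{n+1}\bigr),
\]

which, after collecting terms, is row i of the linear system

\[
\Bigl(\tfrac{1}{\Delta t} - b_i - \textstyle\sum_{j:\,p(j)=i} a_j\Bigr)\,c_i^{n+1}
  \;+\; b_i\,c_{p(i)}^{n+1}
  \;+\; \sum_{j:\,p(j)=i} a_j\,c_j^{n+1}
  \;=\; \frac{c_i^{n}}{\Delta t},
\]

with p(i) the parent compartment and a_i, b_i the (negative) averaged-D times area over volume couplings computed a few lines earlier; ode and matsol reuse the same couplings for the variable-step right-hand side and its Jacobian-like solve.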
const pld = v2ld(v, _nt->id); + if (!pld) { return; - ml = v2ml(v, _nt->id); - - n = ml->nodecount; - data = ml->_data; - pdata = ml->pdata; - thread = ml->_thread; + } + auto* const ml = v2ml(v, _nt->id); + int const n = ml->nodecount; + Datum** const pdata = ml->pdata; + Datum* const thread = ml->_thread; /*flux and volume coefficients (if dc is constant this is too often)*/ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; int mi = pld->mindex[i]; - pld->dc[i] = (*diffunc)(ai, data[mi], pdata[mi], pld->vol + i, &dfdi, thread, _nt); + double dfdi; + pld->dc[i] = diffunc(ai, ml, mi, pdata[mi], pld->vol + i, &dfdi, thread, _nt, sorted_token); pld->d[i] = 0.; if (dfdi) { - pld->d[i] += fabs(dfdi) / pld->vol[i] / pld->state[i][ai]; -#if 0 -printf("i=%d state=%g vol=%g dfdc=%g\n", i, pld->state[i][ai],pld->vol[i], pld->d[i]); -#endif + pld->d[i] += fabs(dfdi) / pld->vol[i] / *(pld->state[i].next_array_element(ai)); } if (pin > -1) { /* D * area between compartments */ - dc = (pld->dc[i] + pld->dc[pin]) / 2.; - + auto const dc = (pld->dc[i] + pld->dc[pin]) / 2.; pld->a[i] = -pld->af[i] * dc / pld->vol[pin]; pld->b[i] = -pld->bf[i] * dc / pld->vol[i]; } } /* setup matrix */ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; int mi = pld->mindex[i]; pld->d[i] += 1. / nt_dt; - pld->rhs[i] = data[mi][di] / nt_dt; + pld->rhs[i] = ml->data(mi, dindex, ai) / nt_dt; if (pin > -1) { pld->d[i] -= pld->b[i]; pld->d[pin] -= pld->a[i]; } } -#if 0 -for (i=0; i < n; ++i) { double a,b; - int mi = pld->mindex[i]; - if (pld->pindex[i] > -1) { - a = pld->a[i]; - b = pld->b[i]; - }else{ a=b=0.;} - printf("i=%d a=%g b=%g d=%g rhs=%g dstate=%g\n", - i, a, b, pld->d[i], pld->rhs[i], data[mi][di]); -} -#endif /* triang */ - for (i = n - 1; i > 0; --i) { + for (int i = n - 1; i > 0; --i) { int pin = pld->pindex[i]; if (pin > -1) { double p; @@ -535,7 +441,7 @@ for (i=0; i < n; ++i) { double a,b; } } /* bksub */ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int pin = pld->pindex[i]; if (pin > -1) { pld->rhs[i] -= pld->b[i] * pld->rhs[pin]; @@ -543,8 +449,8 @@ for (i=0; i < n; ++i) { double a,b; pld->rhs[i] /= pld->d[i]; } /* update answer */ - for (i = 0; i < n; ++i) { + for (int i = 0; i < n; ++i) { int mi = pld->mindex[i]; - data[mi][di] = pld->rhs[i]; + ml->data(mi, dindex, ai) = pld->rhs[i]; } } diff --git a/src/nrnoc/membfunc.cpp b/src/nrnoc/membfunc.cpp index 166c98b64d..b6315cb3f5 100644 --- a/src/nrnoc/membfunc.cpp +++ b/src/nrnoc/membfunc.cpp @@ -1,8 +1,96 @@ #include "membfunc.h" +#include "multicore.h" +#include "section.h" + #include -void Memb_func::invoke_initialize(NrnThread* nt, Memb_list* ml, int mech_type) const { +void Memb_func::invoke_initialize(neuron::model_sorted_token const& sorted_token, + NrnThread* nt, + Memb_list* ml, + int mech_type) const { assert(has_initialize()); - m_initialize(nt, ml, mech_type); + if (ml->type() != mech_type) { + throw std::runtime_error("Memb_func::invoke_initialize(nt[" + std::to_string(nt->id) + + "], ml, " + std::to_string(mech_type) + + "): type mismatch, ml->type()=" + std::to_string(ml->type())); + } + m_initialize(sorted_token, nt, ml, mech_type); +} +long& _nrn_mechanism_access_alloc_seq(Prop* prop) { + return prop->_alloc_seq; +} +double& _nrn_mechanism_access_a(Node* node) { + return node->a(); +} +double& _nrn_mechanism_access_b(Node* node) { + return node->b(); +} +double& _nrn_mechanism_access_d(Node* node) { + return node->d(); +} 
+neuron::container::generic_data_handle*& _nrn_mechanism_access_dparam(Prop* prop) { + return prop->dparam; +} +Extnode*& _nrn_mechanism_access_extnode(Node* node) { + return node->extnode; +} +double& _nrn_mechanism_access_param(Prop* prop, int field, int array_index) { + return prop->param(field, array_index); +} +double& _nrn_mechanism_access_rhs(Node* node) { + return node->rhs(); +} +double& _nrn_mechanism_access_voltage(Node* node) { + return node->v(); +} +neuron::container::data_handle _nrn_mechanism_get_area_handle(Node* node) { + if (node) { + return node->area_handle(); + } else { + return {}; + } +} +Section* _nrn_mechanism_get_child(Section* sec) { + return sec->child; +} +int _nrn_mechanism_get_nnode(Section* sec) { + return sec->nnode; +} +Node* _nrn_mechanism_get_node(Section* sec, int idx) { + return sec->pnode[idx]; +} +int _nrn_mechanism_get_num_vars(Prop* prop) { + return prop->param_num_vars(); +} +neuron::container::data_handle _nrn_mechanism_get_param_handle( + Prop* prop, + neuron::container::field_index field) { + return prop->param_handle(field); +} +NrnThread* _nrn_mechanism_get_thread(Node* node) { + return node->_nt; +} +Section* _nrn_mechanism_get_sibling(Section* sec) { + return sec->sibling; +} +int _nrn_mechanism_get_type(Prop* prop) { + return prop ? prop->_type : -1; +} +int _nrn_mechanism_get_v_node_index(Node* node) { + return node->v_node_index; +} +neuron::container::non_owning_identifier_without_container _nrn_get_prop_id(Prop* p) { + return p->id(); +} + +namespace neuron::mechanism::_get { +std::size_t _current_row(Prop* prop) { + return prop ? prop->current_row() : container::invalid_row; +} +std::vector const& _pdata_ptr_cache_data( + neuron::model_sorted_token const& cache_token, + int mech_type) { + return cache_token.mech_cache(mech_type).pdata_ptr_cache; } +} // namespace neuron::mechanism::_get diff --git a/src/nrnoc/membfunc.h b/src/nrnoc/membfunc.h index 61af266fcc..5cf6ca55bf 100644 --- a/src/nrnoc/membfunc.h +++ b/src/nrnoc/membfunc.h @@ -1,19 +1,52 @@ #pragma once extern void hoc_register_prop_size(int type, int psize, int dpsize); +#include "neuron/container/data_handle.hpp" #include "nrnoc_ml.h" +#include "oc_ansi.h" // neuron::model_sorted_token +#include "options.h" // EXTRACELLULAR + +#include +#include +#include +#include typedef struct NrnThread NrnThread; +struct Extnode; +struct Section; struct Symbol; typedef Datum* (*Pfrpdat)(); typedef void (*Pvmi)(struct NrnThread*, Memb_list*, int); typedef void (*Pvmp)(Prop*); typedef int (*nrn_ode_count_t)(int); -typedef void (*nrn_ode_map_t)(int, double**, double**, double*, Datum*, double*, int); -typedef void (*nrn_ode_synonym_t)(int, double**, Datum**); -/* eventually replace following with Pvmp */ -typedef void (*nrn_bamech_t)(Node*, double*, Datum*, Datum*, struct NrnThread*); +using nrn_bamech_t = void (*)(Node*, + Datum*, + Datum*, + NrnThread*, + Memb_list*, + std::size_t, + neuron::model_sorted_token const&); +using nrn_cur_t = void (*)(neuron::model_sorted_token const&, NrnThread*, Memb_list*, int); +using nrn_init_t = nrn_cur_t; +using nrn_jacob_t = nrn_cur_t; +using nrn_ode_map_t = void (*)(Prop*, + int /* ieq */, + neuron::container::data_handle* /* pv (std::span) */, + neuron::container::data_handle* /* pvdot (std::span) */, + double* /* atol */, + int /* type */); +using nrn_ode_matsol_t = nrn_cur_t; +using nrn_ode_spec_t = nrn_cur_t; +using nrn_ode_synonym_t = void (*)(neuron::model_sorted_token const&, NrnThread&, Memb_list&, int); +using nrn_state_t = nrn_cur_t; +using 
nrn_thread_table_check_t = void (*)(Memb_list*, + std::size_t, + Datum*, + Datum*, + NrnThread*, + int, + neuron::model_sorted_token const&); #define NULL_CUR (Pfri) 0 #define NULL_ALLOC (Pfri) 0 @@ -22,38 +55,56 @@ typedef void (*nrn_bamech_t)(Node*, double*, Datum*, Datum*, struct NrnThread*); struct Memb_func { Pvmp alloc; - Pvmi current; - Pvmi jacob; - Pvmi state; + nrn_cur_t current; + nrn_jacob_t jacob; + nrn_state_t state; bool has_initialize() const { return m_initialize; } - void invoke_initialize(NrnThread* nt, Memb_list* ml, int type) const; - void set_initialize(Pvmi init) { + void invoke_initialize(neuron::model_sorted_token const& sorted_token, + NrnThread* nt, + Memb_list* ml, + int type) const; + void set_initialize(nrn_init_t init) { m_initialize = init; } Pvmp destructor; /* only for point processes */ Symbol* sym; nrn_ode_count_t ode_count; nrn_ode_map_t ode_map; - Pvmi ode_spec; - Pvmi ode_matsol; + nrn_ode_spec_t ode_spec; + nrn_ode_matsol_t ode_matsol; nrn_ode_synonym_t ode_synonym; Pvmi singchan_; /* managed by kschan for variable step methods */ int vectorized; int thread_size_; /* how many Datum needed in Memb_list if vectorized */ void (*thread_mem_init_)(Datum*); /* after Memb_list._thread is allocated */ void (*thread_cleanup_)(Datum*); /* before Memb_list._thread is freed */ - void (*thread_table_check_)(double*, Datum*, Datum*, NrnThread*, int); - void (*_update_ion_pointers)(Datum*); + nrn_thread_table_check_t thread_table_check_; int is_point; void* hoc_mech; void (*setdata_)(struct Prop*); int* dparam_semantics; // for nrncore writing. private: - Pvmi m_initialize{}; + nrn_init_t m_initialize{}; }; +/* Direct call Python wrappers to density mechanism functions */ +struct NPyDirectMechFunc { + const char* name; + double (*func)(Prop*); +}; +/* Above struct in translated mod files are elements of a {nullptr, nullptr} + terminated list. The translator could easily create an unordered map instead, + and that would be nicer. + However there is some question (as with std::string) that an unordered_map + is ABI compatible across codes compiled by different toolchains. + So we will build our per mechanism map in NEURON world from the null + terminated list in NMODL world. 
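A hedged sketch of how the NEURON-side map could be built from that sentinel-terminated list; the exact key and value types of NPyDirectMechFuncs are an assumption here (function name keyed to its entry):

```cpp
// Hypothetical helper, not the actual hoc_register_npy_direct body.
NPyDirectMechFuncs build_direct_func_map_sketch(NPyDirectMechFunc* list) {
    NPyDirectMechFuncs funcs{};
    for (NPyDirectMechFunc* f = list; f->name; ++f) {  // stop at the {nullptr, nullptr} sentinel
        funcs[f->name] = f;
    }
    return funcs;
}
```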
+*/ +using NPyDirectMechFuncs = std::unordered_map; +extern void hoc_register_npy_direct(int type, NPyDirectMechFunc*); +extern std::unordered_map nrn_mech2funcs_map; #define IMEMFAST -2 #define VINDEX -1 @@ -62,11 +113,55 @@ struct Memb_func { #define CAP 3 #if EXTRACELLULAR #define EXTRACELL 5 +extern int nrn_nlayer_extracellular; +namespace neuron::extracellular { +// these are the fields in the standard NEURON mechanism data structures +static constexpr auto xraxial_index = 0; // array of size nlayer +static constexpr auto xg_index = 1; // array of size nlayer +static constexpr auto xc_index = 2; // array of size nlayer +static constexpr auto e_extracellular_index = 3; // scalar +static constexpr auto i_membrane_index = 4; // scalar, might not exist +static constexpr auto sav_g_index = 5; // scalar, might not exist +static constexpr auto sav_rhs_index = 6; // scalar, might not exist + +// these are the indices into the Extnode param array +inline std::size_t xraxial_index_ext(std::size_t ilayer) { + return ilayer; +} +inline std::size_t xg_index_ext(std::size_t ilayer) { + return nrn_nlayer_extracellular + ilayer; +} +inline std::size_t xc_index_ext(std::size_t ilayer) { + return 2 * nrn_nlayer_extracellular + ilayer; +} +inline std::size_t e_extracellular_index_ext() { + return 3 * nrn_nlayer_extracellular; +} +#if I_MEMBRANE +inline std::size_t sav_rhs_index_ext() { + return 3 * nrn_nlayer_extracellular + 3; +} +#endif +inline std::size_t vext_pseudoindex() { +#if I_MEMBRANE + return 3 * nrn_nlayer_extracellular + 4; +#else + return 3 * nrn_nlayer_extracellular + 1; +#endif +} +} // namespace neuron::extracellular #endif -#define nrnocCONST 1 -#define DEP 2 -#define STATE 3 /*See init.cpp and cabvars.h for order of nrnocCONST, DEP, and STATE */ +#define nrnocCONST 1 // PARAMETER +#define DEP 2 // ASSIGNED +#define STATE 3 /* STATE: See init.cpp and cabvars.h for order of nrnocCONST, DEP, and STATE */ +#define NRNPOINTER \ + 4 /* added on to list of mechanism variables.These are \ +pointers which connect variables from other mechanisms via the _ppval array. \ +*/ + +#define _AMBIGUOUS 5 // for Ions +#define NMODLRANDOM 6 // RANDOM variable in NEURON block #define BEFORE_INITIAL 0 #define AFTER_INITIAL 1 @@ -81,18 +176,145 @@ typedef struct BAMech { } BAMech; extern BAMech** bamech_; -extern Memb_func* memb_func; +extern std::vector memb_func; extern int n_memb_func; extern int* nrn_prop_param_size_; extern int* nrn_prop_dparam_size_; +extern int nrn_dparam_semantics_to_int(const char*); +extern std::vector& nrn_mech_random_indices(int type); -extern Memb_list* memb_list; +extern std::vector memb_list; /* for finitialize, order is same up through extracellular, then ions, then mechanisms that write concentrations, then all others. */ extern short* memb_order_; -#define NRNPOINTER \ - 4 /* added on to list of mechanism variables.These are \ -pointers which connect variables from other mechanisms via the _ppval array. 
\ -*/ -#define _AMBIGUOUS 5 +namespace neuron::mechanism { +template +struct field { + using type = T; + field(std::string name_) + : name{std::move(name_)} {} + field(std::string name_, int array_size_) + : name{std::move(name_)} + , array_size{array_size_} {} + field(std::string name_, std::string semantics_) + : name{std::move(name_)} + , semantics{std::move(semantics_)} {} + int array_size{1}; + std::string name{}, semantics{}; +}; +namespace detail { +void register_data_fields(int mech_type, + std::vector> const& param_info, + std::vector> const& dparam_size); +} // namespace detail +/** + * @brief Type- and array-aware version of hoc_register_prop_size. + * + * hoc_register_prop_size did not propagate enough information to know which parts of the "data" + * size were ranges corresponding to a single array variable. This also aims to be ready for + * supporting multiple variable data types in MOD files. + */ +// This is declared static on purpose. When we are using binary Python wheels and a user runs +// nrnivmodl on their local machine then we link together a libnrniv.so from the binary wheel +// with object files that were produced from .mod files on the user's machine. If the files +// compiled on the user's machine have the same number/type of variables as a built in mechanism +// that is embedded in libnrniv.so then there will be two calls to this function with the same +// template arguments. If this function was not static, those would have external linkage and a +// call from one of the user's .mod files might use a definition of this function from +// libnrniv.so. This can cause problems due to ABI mismatches, and the ::detail version above +// essentially exists to ensure that *it* is the interface between libnrniv.so and code compiled +// on the user's machine. Yes, this is horrible. See #1963 and #2234 for more information. +template +static void register_data_fields(int mech_type, Fields const&... fields) { + // Use of const char* aims to avoid wheel ABI issues with std::string + std::vector> param_info{}; + std::vector> dparam_info{}; + auto const process = [&](auto const& field) { + using field_t = std::decay_t; + using data_t = typename field_t::type; + if constexpr (std::is_same_v) { + assert(field.semantics.empty()); + param_info.emplace_back(field.name.c_str(), field.array_size); + } else { + static_assert(std::is_same_v /* TODO */ || std::is_pointer_v, + "only pointers, doubles and ints are supported"); + assert(field.array_size == 1); // only scalar dparam data is supported + dparam_info.emplace_back(field.name.c_str(), field.semantics.c_str()); + } + }; + // fold expression with the comma operator is neater, but hits AppleClang expression depth + // limits for large sizeof...(Fields); the old initializer_list trick avoids that. + static_cast(std::initializer_list{(static_cast(process(fields)), 0)...}); + // beware, the next call crosses from translated mechanism code into the main NEURON library + detail::register_data_fields(mech_type, param_info, dparam_info); +} + +/** + * @brief Get the number of fields (some of which may be arrays) of the given type. + * + * If the given mechanism type is negative, -1 will be returned. + */ +template +[[nodiscard]] int get_field_count(int mech_type); + +/** + * @brief Pointer to a range of pointers to the start of contiguous storage ranges. + * + * If the given mechanism type is negative, nullptr will be returned. 
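As an illustration of register_data_fields above, a translated mechanism with one scalar variable, one array variable and one dparam entry might register its layout roughly as follows; the variable names and the "pointer" semantics string are invented for the example:

```cpp
// Illustrative only; mech_type is whatever nrn_get_mechtype() returned for this mechanism.
void register_example_mechanism_fields(int mech_type) {
    using neuron::mechanism::field;
    neuron::mechanism::register_data_fields(mech_type,
                                            field<double>{"gnabar"},      // scalar double variable
                                            field<double>{"m", 2},        // double array variable of size 2
                                            field<double*>{"donotuse", "pointer"});  // dparam entry with semantics
}
```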
+ */ +template +[[nodiscard]] T* const* get_data_ptrs(int mech_type); + +/** + * @brief Get the array dimensions for fields of the given type. + * + * This forms part of the API used by translated MOD file code to access the + * mechanism data managed by NEURON. It serves to help hide the implementation + * of the mechanism data storage from translated MOD file code and reduce ABI + * compatibility issues arising from Python wheel support. + * + * If the given mechanism type is negative, nullptr will be returned. + */ +template +[[nodiscard]] int const* get_array_dims(int mech_type); +namespace _get { +[[nodiscard]] std::size_t _current_row(Prop*); +[[nodiscard]] std::vector const& _pdata_ptr_cache_data( + neuron::model_sorted_token const& cache_token, + int mech_type); +} // namespace _get +} // namespace neuron::mechanism + +// See https://github.com/neuronsimulator/nrn/issues/2234 for context of how this might be done +// better in future... +[[nodiscard]] long& _nrn_mechanism_access_alloc_seq(Prop*); +[[nodiscard]] double& _nrn_mechanism_access_a(Node*); +[[nodiscard]] double& _nrn_mechanism_access_b(Node*); +[[nodiscard]] double& _nrn_mechanism_access_d(Node*); +[[nodiscard]] neuron::container::generic_data_handle*& _nrn_mechanism_access_dparam(Prop*); +[[nodiscard]] Extnode*& _nrn_mechanism_access_extnode(Node*); +[[nodiscard]] double& _nrn_mechanism_access_param(Prop*, int field, int array_index = 0); +[[nodiscard]] double& _nrn_mechanism_access_rhs(Node*); +[[nodiscard]] double& _nrn_mechanism_access_voltage(Node*); +[[nodiscard]] neuron::container::data_handle _nrn_mechanism_get_area_handle(Node*); +[[nodiscard]] Section* _nrn_mechanism_get_child(Section*); +[[nodiscard]] int _nrn_mechanism_get_nnode(Section*); +[[nodiscard]] Node* _nrn_mechanism_get_node(Section*, int); +[[nodiscard]] int _nrn_mechanism_get_num_vars(Prop*); +[[nodiscard]] neuron::container::data_handle _nrn_mechanism_get_param_handle( + Prop* prop, + neuron::container::field_index field); +[[nodiscard]] inline neuron::container::data_handle +_nrn_mechanism_get_param_handle(Prop* prop, int field, int array_index = 0) { + return _nrn_mechanism_get_param_handle(prop, + neuron::container::field_index{field, array_index}); +} +[[nodiscard]] Section* _nrn_mechanism_get_sibling(Section*); +[[nodiscard]] NrnThread* _nrn_mechanism_get_thread(Node*); +[[nodiscard]] int _nrn_mechanism_get_type(Prop*); +[[nodiscard]] int _nrn_mechanism_get_v_node_index(Node*); + +// Rarely (e.g. NEURON {RANDOM123 ranvar}) instances of a mod file +// need to deallocate owning objects at end of their life. 
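A hedged sketch of addressing one element through get_data_ptrs/get_array_dims; the per-column indexing (instance * array_dim + array_index) matches the layout described for Memb_list::legacy_index in memblist.cpp below:

```cpp
#include <cstddef>
// Hypothetical helper, not part of the API: read one double of field `field`,
// array element `array_index`, for mechanism instance `instance`.
double read_element_sketch(int mech_type, int field, std::size_t instance, int array_index) {
    double* const* cols = neuron::mechanism::get_data_ptrs<double>(mech_type);
    int const* dims = neuron::mechanism::get_array_dims<double>(mech_type);
    // each column holds (number of instances) * dims[field] contiguous doubles
    return cols[field][instance * dims[field] + array_index];
}
```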
+extern std::unordered_map nrn_mech_inst_destruct; diff --git a/src/nrnoc/memblist.cpp b/src/nrnoc/memblist.cpp new file mode 100644 index 0000000000..6e83477441 --- /dev/null +++ b/src/nrnoc/memblist.cpp @@ -0,0 +1,105 @@ +#include "neuron/container/generic_data_handle.hpp" +#include "neuron/container/mechanism_data.hpp" +#include "neuron/model_data.hpp" +#include "nrnoc_ml.h" + +#include +#include // std::distance, std::next +#include // std::accumulate + +Memb_list::Memb_list(int type) + : m_storage{&neuron::model().mechanism_data(type)} { + assert(type == m_storage->type()); +} + +[[nodiscard]] std::vector Memb_list::data() { + using Tag = neuron::container::Mechanism::field::FloatingPoint; + assert(m_storage); + assert(m_storage_offset != neuron::container::invalid_row); + auto const num_fields = m_storage->get_tag().num_variables(); + std::vector ret(num_fields, nullptr); + for (auto i = 0; i < num_fields; ++i) { + ret[i] = &m_storage->get_field_instance(m_storage_offset, i); + } + return ret; +} + +neuron::container::data_handle Memb_list::data_handle( + std::size_t instance, + neuron::container::field_index field) const { + assert(m_storage); + assert(m_storage_offset != neuron::container::invalid_row); + auto const offset = m_storage_offset + instance; + using Tag = neuron::container::Mechanism::field::FloatingPoint; + return m_storage->get_field_instance_handle(m_storage->get_identifier(offset), + field.field, + field.array_index); +} + +[[nodiscard]] double& Memb_list::data(std::size_t instance, int variable, int array_index) { + assert(m_storage); + assert(m_storage_offset != neuron::container::invalid_row); + return m_storage->get_field_instance( + m_storage_offset + instance, variable, array_index); +} + +[[nodiscard]] double const& Memb_list::data(std::size_t instance, + int variable, + int array_index) const { + assert(m_storage); + assert(m_storage_offset != neuron::container::invalid_row); + return m_storage->get_field_instance( + m_storage_offset + instance, variable, array_index); +} + + +[[nodiscard]] std::ptrdiff_t Memb_list::legacy_index(double const* ptr) const { + assert(m_storage_offset != neuron::container::invalid_row); + // For a mechanism with (in order) range variables: a, b[2], c the mechanism data are + // ______________________ + // instance 0 | a b[0] b[1] c | + // instance 1 | a' b'[0] b'[1] c' | + // instance 2 | a'' b''[0] b''[1] c'' | + // + // the old layout arranged this as: + // [a b[0], b[1], c, a', b'[0], b'[1], c', a'', b''[0], b''[1], c''] + // whereas the new layout has three different storage vectors: + // [a, a', a''] + // [b[0], b[1], b'[0], b'[1], b''[0], b''[1]] + // [c, c', c''] + // this method, given a pointer into one of the new layout vectors, + // calculates the (hypothetical) index into the old (single) vector + using Tag = neuron::container::Mechanism::field::FloatingPoint; + auto const size = m_storage->size(); // number of instances; 3 in the example above + auto const num_fields = m_storage->get_tag().num_variables(); // ex: 3 (a, b, c) + auto const* const array_dims = m_storage->get_array_dims(); // ex: [1, 2, 1] + auto const sum_of_array_dims = std::accumulate(array_dims, array_dims + num_fields, 0); + int sum_of_array_dims_of_previous_fields{}; + for (auto field = 0; field < num_fields; ++field) { // a, b or c in the example above + auto const array_dim = array_dims[field]; + assert(array_dim > 0); + auto const* const vec_data = &m_storage->get_field_instance(0, field); + auto const index = std::distance(vec_data, ptr); + // 
storage vectors are size * array_dim long + if (index >= 0 && index < size * array_dim) { + auto const instance_offset = index / array_dim; + auto const array_index = index % array_dim; + assert(ptr == &m_storage->get_field_instance(instance_offset, field, array_index)); + return ((instance_offset - m_storage_offset) * sum_of_array_dims) + + sum_of_array_dims_of_previous_fields + array_index; + } + sum_of_array_dims_of_previous_fields += array_dim; + } + assert(sum_of_array_dims_of_previous_fields == sum_of_array_dims); + // ptr doesn't live in this mechanism data, cannot compute a legacy index + return -1; +} + +[[nodiscard]] double* Memb_list::dptr_field(std::size_t instance, int variable) { + return pdata[instance][variable].get(); +} + +[[nodiscard]] int Memb_list::type() const { + assert(m_storage); + return m_storage->type(); +} diff --git a/src/nrnoc/multicore.cpp b/src/nrnoc/multicore.cpp index 4253d8bd3d..ac2dbdd70e 100644 --- a/src/nrnoc/multicore.cpp +++ b/src/nrnoc/multicore.cpp @@ -43,6 +43,7 @@ the handling of v_structure_change as long as possible. #include #include #include +#include #include #include @@ -59,7 +60,6 @@ void (*nrn_mk_transfer_thread_data_)(); static int busywait_; static int busywait_main_; extern void nrn_thread_error(const char*); -extern void nrn_old_thread_save(); extern double nrn_timeus(); extern void (*nrn_multisplit_setup_)(); extern int v_structure_change; @@ -87,7 +87,6 @@ bool interpreter_locked{false}; std::unique_ptr interpreter_lock; enum struct worker_flag { execute_job, exit, wait }; -using worker_job_t = void* (*) (NrnThread*); // With C++17 and alignment-aware allocators we could do something like // alignas(std::hardware_destructive_interference_size) here and then use a @@ -95,9 +94,34 @@ using worker_job_t = void* (*) (NrnThread*); // that std::hardware_destructive_interference_size is not very well supported. struct worker_conf_t { /* for nrn_solve etc.*/ - worker_job_t job{}; + std::variant> + job{}; std::size_t thread_id{}; worker_flag flag{worker_flag::wait}; + friend bool operator==(worker_conf_t const& lhs, worker_conf_t const& rhs) { + return lhs.flag == rhs.flag && lhs.thread_id == rhs.thread_id && lhs.job == rhs.job; + } +}; + +struct worker_kernel { + worker_kernel(std::size_t thread_id) + : m_thread_id{thread_id} {} + void operator()(std::monostate const&) const { + throw std::runtime_error("worker_kernel"); + } + void operator()(worker_job_t job) const { + job(nrn_threads + m_thread_id); + } + void operator()( + std::pair const& pair) const { + auto const& [job, token_ptr] = pair; + job(*token_ptr, nrn_threads[m_thread_id]); + } + + private: + std::size_t m_thread_id{}; }; void worker_main(worker_conf_t* my_wc_ptr, @@ -120,13 +144,12 @@ void worker_main(worker_conf_t* my_wc_ptr, return; } assert(wc.flag == worker_flag::execute_job); - (*wc.job)(nrn_threads + wc.thread_id); + std::visit(worker_kernel{wc.thread_id}, wc.job); wc.flag = worker_flag::wait; - wc.job = nullptr; + wc.job = std::monostate{}; cond.notify_one(); } else { - worker_job_t job{}; - NrnThread* job_arg{}; + worker_conf_t conf{}; { // Wait until we have a job to execute or we have been told to // shut down. @@ -140,11 +163,10 @@ void worker_main(worker_conf_t* my_wc_ptr, assert(wc.flag == worker_flag::execute_job); // Save the workload + argument to local variables before // releasing the mutex. 
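The job member of worker_conf_t above is a std::variant over no-job, a plain NrnThread* job, and a pair of a token-taking job plus its token, with worker_kernel providing std::visit one call operator per alternative. A hedged sketch of the caller side, using the nrn_multithread_job overloads declared later in this diff and the new token-taking signature of setup_tree_matrix from nrn_ansi.h:

```cpp
// Sketch only: how the two kinds of job could reach the workers.
void dispatch_jobs_sketch() {
    auto const token = nrn_ensure_model_data_are_sorted();
    // token-taking alternative: void (*)(neuron::model_sorted_token const&, NrnThread&)
    nrn_multithread_job(token, setup_tree_matrix);
    // plain alternative: void* (*)(NrnThread*); a capture-less lambda converts to it
    nrn_multithread_job(+[](NrnThread*) -> void* { return nullptr; });
}
```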
- job = wc.job; - job_arg = nrn_threads + wc.thread_id; + conf = wc; } // Execute the workload without keeping the mutex - (*job)(job_arg); + std::visit(worker_kernel{conf.thread_id}, conf.job); // Signal that the work is completed and this thread is becoming // idle { @@ -152,9 +174,9 @@ void worker_main(worker_conf_t* my_wc_ptr, // Make sure we don't accidentally overwrite an exit signal from // the coordinating thread. if (wc.flag == worker_flag::execute_job) { - assert(wc.job == job); + assert(wc == conf); wc.flag = worker_flag::wait; - wc.job = nullptr; + wc.job = std::monostate{}; } } // Notify the coordinating thread. @@ -175,8 +197,7 @@ struct worker_threads_t { // worker_threads[0] does not appear to be used m_worker_threads.emplace_back(); for (std::size_t i = 1; i < nrn_nthread; ++i) { - m_wc[i].flag = worker_flag::wait; - m_wc[i].job = nullptr; + new (m_wc + i) worker_conf_t{}; m_wc[i].thread_id = i; m_worker_threads.emplace_back(worker_main, &(m_wc[i]), &(m_cond[i]), &(m_mut[i])); } @@ -208,7 +229,7 @@ struct worker_threads_t { free(std::exchange(m_wc, nullptr)); } - void assign_job(std::size_t worker, void* (*job)(NrnThread*) ) { + void assign_job(std::size_t worker, worker_job_t job) { assert(worker > 0); auto& cond = m_cond[worker]; auto& wc = m_wc[worker]; @@ -216,7 +237,7 @@ struct worker_threads_t { std::unique_lock lock{m_mut[worker]}; // Wait until the worker is idle. cond.wait(lock, [&wc] { return wc.flag == worker_flag::wait; }); - assert(!wc.job); + assert(std::holds_alternative(wc.job)); assert(wc.thread_id == worker); wc.job = job; wc.flag = worker_flag::execute_job; @@ -225,6 +246,25 @@ struct worker_threads_t { cond.notify_one(); } + void assign_job(std::size_t worker, + neuron::model_sorted_token const& cache_token, + worker_job_with_token_t job) { + assert(worker > 0); + auto& cond = m_cond[worker]; + auto& wc = m_wc[worker]; + { + std::unique_lock lock{m_mut[worker]}; + // Wait until the worker is idle. + cond.wait(lock, [&wc] { return wc.flag == worker_flag::wait; }); + assert(std::holds_alternative(wc.job)); + assert(wc.thread_id == worker); + wc.job = std::make_pair(job, &cache_token); + wc.flag = worker_flag::execute_job; + } + // Notify the worker that it has new work to do. + cond.notify_one(); + } + // Wait until all worker threads are waiting void wait() const { for (std::size_t i = 1; i < nrn_nthread; ++i) { @@ -265,6 +305,14 @@ void nrn_threads_create(int n, bool parallel) { NrnThread* nt; if (nrn_nthread != n) { worker_threads.reset(); + // If the number of threads changes then the node storage data is + // implicitly no longer sorted, as "sorted" includes being partitioned + // by NrnThread. Similarly for the mechanism data then "sorted" includes + // being partitioned by thread. + // TODO: consider if we can be smarter about how/when we call + // mark_as_unsorted() for different containers. 
+ neuron::model().node_data().mark_as_unsorted(); + neuron::model().apply_to_mechanisms([](auto& mech_data) { mech_data.mark_as_unsorted(); }); nrn_threads_free(); for (i = 0; i < nrn_nthread; ++i) { nt = nrn_threads + i; @@ -291,12 +339,7 @@ void nrn_threads_create(int n, bool parallel) { for (j = 0; j < BEFORE_AFTER_SIZE; ++j) { nt->tbl[j] = (NrnThreadBAList*) 0; } - nt->_actual_rhs = 0; - nt->_actual_d = 0; - nt->_actual_a = 0; - nt->_actual_b = 0; - nt->_actual_v = 0; - nt->_actual_area = 0; + nt->_sp13_rhs = nullptr; nt->_v_parent_index = 0; nt->_v_node = 0; nt->_v_parent = 0; @@ -306,7 +349,7 @@ void nrn_threads_create(int n, bool parallel) { nt->_sp13mat = 0; nt->_ctime = 0.0; nt->_vcv = 0; - nt->_nrn_fast_imem = 0; + nt->_node_data_offset = 0; } } v_structure_change = 1; @@ -331,83 +374,13 @@ void nrn_threads_create(int n, bool parallel) { #endif } -/* -Avoid invalidating pointers to i_membrane_ unless the number of compartments -in a thread has changed. -*/ -static int fast_imem_nthread_ = 0; -static int* fast_imem_size_ = NULL; -static _nrn_Fast_Imem* fast_imem_; -static std::vector imem_defer_free_; - -void nrn_imem_defer_free(double* pd) { - if (pd) { - imem_defer_free_.push_back(pd); - } else { - for (const auto& pd: imem_defer_free_) { - free(pd); - } - imem_defer_free_.clear(); - } -} - -static void fast_imem_free() { - int i; - for (i = 0; i < nrn_nthread; ++i) { - nrn_threads[i]._nrn_fast_imem = NULL; - } - for (i = 0; i < fast_imem_nthread_; ++i) { - if (fast_imem_size_[i] > 0) { - nrn_imem_defer_free(fast_imem_[i]._nrn_sav_rhs); - free(fast_imem_[i]._nrn_sav_d); - } - } - if (fast_imem_nthread_) { - free(fast_imem_size_); - free(fast_imem_); - fast_imem_nthread_ = 0; - fast_imem_size_ = NULL; - fast_imem_ = NULL; - } -} - -static void fast_imem_alloc() { - int i; - if (fast_imem_nthread_ != nrn_nthread) { - fast_imem_free(); - fast_imem_nthread_ = nrn_nthread; - fast_imem_size_ = static_cast(ecalloc(nrn_nthread, sizeof(int))); - fast_imem_ = (_nrn_Fast_Imem*) ecalloc(nrn_nthread, sizeof(_nrn_Fast_Imem)); - } - for (i = 0; i < nrn_nthread; ++i) { - NrnThread* nt = nrn_threads + i; - int n = nt->end; - _nrn_Fast_Imem* fi = fast_imem_ + i; - if (n != fast_imem_size_[i]) { - if (fast_imem_size_[i] > 0) { - nrn_imem_defer_free(fi->_nrn_sav_rhs); - free(fi->_nrn_sav_d); - } - if (n > 0) { - CACHELINE_CALLOC(fi->_nrn_sav_rhs, double, n); - CACHELINE_CALLOC(fi->_nrn_sav_d, double, n); - } - fast_imem_size_[i] = n; - } - } -} - void nrn_fast_imem_alloc() { - if (nrn_use_fast_imem) { - int i; - fast_imem_alloc(); - for (i = 0; i < nrn_nthread; ++i) { - nrn_threads[i]._nrn_fast_imem = fast_imem_ + i; - } - } else { - fast_imem_free(); - nrn_imem_defer_free(nullptr); - } + // Make sure that storage for the fast_imem calculation exists/is destroyed according to + // nrn_use_fast_imem + neuron::model() + .node_data() + .set_field_status(nrn_use_fast_imem); } void nrn_threads_free() { @@ -420,19 +393,17 @@ void nrn_threads_free() { tml2 = tml->next; free((char*) ml->nodelist); free((char*) ml->nodeindices); - if (memb_func[tml->index].hoc_mech) { - free((char*) ml->prop); - } else { - free((char*) ml->_data); + delete[] ml->prop; + if (!memb_func[tml->index].hoc_mech) { free((char*) ml->pdata); } if (ml->_thread) { if (memb_func[tml->index].thread_cleanup_) { (*memb_func[tml->index].thread_cleanup_)(ml->_thread); } - free((char*) ml->_thread); + delete[] ml->_thread; } - free((char*) ml); + delete ml; free((char*) tml); } if (nt->_ml_list) { @@ -452,22 +423,6 @@ void nrn_threads_free() 
{ hoc_l_freelist(&nt->roots); nt->ncell = 0; } - if (nt->_actual_rhs) { - free((char*) nt->_actual_rhs); - nt->_actual_rhs = 0; - } - if (nt->_actual_d) { - free((char*) nt->_actual_d); - nt->_actual_d = 0; - } - if (nt->_actual_a) { - free((char*) nt->_actual_a); - nt->_actual_a = 0; - } - if (nt->_actual_b) { - free((char*) nt->_actual_b); - nt->_actual_b = 0; - } if (nt->_v_parent_index) { free((char*) nt->_v_parent_index); nt->_v_parent_index = 0; @@ -490,11 +445,6 @@ void nrn_threads_free() { spDestroy(nt->_sp13mat); nt->_sp13mat = 0; } - nt->_nrn_fast_imem = NULL; - /* following freed by nrn_recalc_node_ptrs */ - nrn_old_thread_save(); - nt->_actual_v = 0; - nt->_actual_area = 0; nt->end = 0; nt->ncell = 0; nt->_vcv = 0; @@ -543,22 +493,20 @@ printf("thread_memblist_setup %lx v_node_count=%d ncell=%d end=%d\n", (long)nth, tml->next = (NrnThreadMembList*) 0; *ptml = tml; ptml = &tml->next; - CACHELINE_ALLOC(tml->ml, Memb_list, 1); + tml->ml = new Memb_list{i}; if (i == EXTRACELL) { _nt->_ecell_memb_list = tml->ml; } mlmap[i] = tml->ml; CACHELINE_ALLOC(tml->ml->nodelist, Node*, mlcnt[i]); CACHELINE_ALLOC(tml->ml->nodeindices, int, mlcnt[i]); - if (memb_func[i].hoc_mech) { - tml->ml->prop = (Prop**) emalloc(mlcnt[i] * sizeof(Prop*)); - } else { - CACHELINE_ALLOC(tml->ml->_data, double*, mlcnt[i]); + tml->ml->prop = new Prop*[mlcnt[i]]; // used for ode_map + if (!memb_func[i].hoc_mech) { CACHELINE_ALLOC(tml->ml->pdata, Datum*, mlcnt[i]); } tml->ml->_thread = (Datum*) 0; if (memb_func[i].thread_size_) { - tml->ml->_thread = (Datum*) ecalloc(memb_func[i].thread_size_, sizeof(Datum)); + tml->ml->_thread = new Datum[memb_func[i].thread_size_]{}; if (memb_func[tml->index].thread_mem_init_) { (*memb_func[tml->index].thread_mem_init_)(tml->ml->_thread); } @@ -580,10 +528,9 @@ printf("thread_memblist_setup %lx v_node_count=%d ncell=%d end=%d\n", (long)nth, Memb_list* ml = mlmap[p->_type]; ml->nodelist[ml->nodecount] = nd; ml->nodeindices[ml->nodecount] = nd->v_node_index; - if (memb_func[p->_type].hoc_mech) { - ml->prop[ml->nodecount] = p; - } else { - ml->_data[ml->nodecount] = p->param; + ml->prop[ml->nodecount] = p; + if (!memb_func[p->_type].hoc_mech) { + // ml->_data[ml->nodecount] = p->param; ml->pdata[ml->nodecount] = p->dparam; } ++ml->nodecount; @@ -678,6 +625,14 @@ void nrn_thread_memblist_setup() { for (it = 0; it < nrn_nthread; ++it) { thread_memblist_setup(nrn_threads + it, mlcnt, vmap); } + // Right now the sorting method updates the storage offsets inside the + // Memb_list* structures owned by NrnThreads. This is a bit of a design + // failure, as those offsets should have a lifetime linked to the sorted + // status of the underlying storage, i.e. they should be part of a cache + // structure. In any case, because we have just created new Memb_list then + // their offsets are empty, so we need to trigger a re-sort before they are + // used. 
+ neuron::model().apply_to_mechanisms([](auto& mech_data) { mech_data.mark_as_unsorted(); }); nrn_fast_imem_alloc(); free((char*) vmap); free((char*) mlcnt); @@ -724,7 +679,7 @@ void reorder_secorder() { for (isec = order - _nt->ncell; isec < order; ++isec) { sec = secorder[isec]; /* to make it easy to fill in PreSyn.nt_*/ - sec->prop->dparam[9] = _nt; + sec->prop->dparam[9] = {neuron::container::do_not_search, _nt}; for (j = 0; j < sec->nnode; ++j) { nd = sec->pnode[j]; nd->_nt = _nt; @@ -738,10 +693,6 @@ void reorder_secorder() { } } _nt->end = inode; - CACHELINE_CALLOC(_nt->_actual_rhs, double, inode); - CACHELINE_CALLOC(_nt->_actual_d, double, inode); - CACHELINE_CALLOC(_nt->_actual_a, double, inode); - CACHELINE_CALLOC(_nt->_actual_b, double, inode); CACHELINE_CALLOC(_nt->_v_node, Node*, inode); CACHELINE_CALLOC(_nt->_v_parent, Node*, inode); CACHELINE_CALLOC(_nt->_v_parent_index, int, inode); @@ -775,7 +726,7 @@ void reorder_secorder() { for (isec = order - _nt->ncell; isec < order; ++isec) { sec = secorder[isec]; /* to make it easy to fill in PreSyn.nt_*/ - sec->prop->dparam[9] = _nt; + sec->prop->dparam[9] = {neuron::container::do_not_search, _nt}; for (j = 0; j < sec->nnode; ++j) { nd = sec->pnode[j]; nd->_nt = _nt; @@ -817,14 +768,6 @@ void reorder_secorder() { /* classical order abandoned */ (*nrn_multisplit_setup_)(); } - /* make the Nodes point to the proper d, rhs */ - FOR_THREADS(_nt) { - for (j = 0; j < _nt->end; ++j) { - Node* nd = _nt->_v_node[j]; - nd->_d = _nt->_actual_d + j; - nd->_rhs = _nt->_actual_rhs + j; - } - } /* because the d,rhs changed, if multisplit is used we need to update the reduced tree gather/scatter pointers */ @@ -860,11 +803,11 @@ void nrn_mk_table_check() { } } -void nrn_thread_table_check() { +void nrn_thread_table_check(neuron::model_sorted_token const& sorted_token) { for (auto [id, tml]: table_check_) { Memb_list* ml = tml->ml; - (*memb_func[tml->index].thread_table_check_)( - ml->_data[0], ml->pdata[0], ml->_thread, nrn_threads + id, tml->index); + memb_func[tml->index].thread_table_check_( + ml, 0, ml->pdata[0], ml->_thread, nrn_threads + id, tml->index, sorted_token); } } @@ -887,7 +830,7 @@ void nrn_hoc_unlock() { #endif } -void nrn_multithread_job(void* (*job)(NrnThread*) ) { +void nrn_multithread_job(worker_job_t job) { #if NRN_ENABLE_THREADS if (worker_threads) { nrn_inthread_ = 1; @@ -906,6 +849,25 @@ void nrn_multithread_job(void* (*job)(NrnThread*) ) { (*job)(nrn_threads); } +void nrn_multithread_job(neuron::model_sorted_token const& cache_token, + worker_job_with_token_t job) { +#if NRN_ENABLE_THREADS + if (worker_threads) { + nrn_inthread_ = 1; + for (std::size_t i = 1; i < nrn_nthread; ++i) { + worker_threads->assign_job(i, cache_token, job); + } + job(cache_token, nrn_threads[0]); + worker_threads->wait(); + nrn_inthread_ = 0; + return; + } +#endif + for (std::size_t i = 1; i < nrn_nthread; ++i) { + job(cache_token, nrn_threads[i]); + } + job(cache_token, nrn_threads[0]); +} void nrn_onethread_job(int i, void* (*job)(NrnThread*) ) { assert(i >= 0 && i < nrn_nthread); @@ -953,6 +915,25 @@ void nrn_thread_partition(int it, Object* sl) { v_structure_change = 1; } +Object** nrn_get_thread_partition(int it) { + assert(it >= 0 && it < nrn_nthread); + NrnThread* nt = nrn_threads + it; + if (!nt->roots) { + v_setup_vectors(); + } + // nt->roots is a hoc_List of Section*. Create a new SectionList and copy + // those Section* into it and ref them. 
+ hoc_List* sl = hoc_l_newlist(); + Object** po = hoc_temp_objvar(hoc_lookup("SectionList"), sl); + hoc_Item* qsec; + ITERATE(qsec, nt->roots) { + Section* sec = hocSEC(qsec); + section_ref(sec); + hoc_l_lappendsec(sl, sec); + } + return po; +} + int nrn_user_partition() { int i, it, b, n; hoc_Item* qsec; @@ -1065,3 +1046,54 @@ int nrn_how_many_processors() { std::size_t nof_worker_threads() { return worker_threads.get() ? worker_threads->num_workers() : 0; } + +// Need to be able to use these methods while the model is frozen, so avoid calling the +// zero-parameter get(). +double* NrnThread::node_a_storage() { + return &neuron::model().node_data().get( + _node_data_offset); +} + +double* NrnThread::node_area_storage() { + return &neuron::model().node_data().get( + _node_data_offset); +} + +double* NrnThread::node_b_storage() { + return &neuron::model().node_data().get( + _node_data_offset); +} + +double* NrnThread::node_d_storage() { + return &neuron::model().node_data().get( + _node_data_offset); +} + +double* NrnThread::node_rhs_storage() { + return &neuron::model().node_data().get(_node_data_offset); +} + +double* NrnThread::node_sav_d_storage() { + auto& node_data = neuron::model().node_data(); + using Tag = neuron::container::Node::field::FastIMemSavD; + if (node_data.field_active()) { + return &node_data.get(_node_data_offset); + } else { + return nullptr; + } +} + +double* NrnThread::node_sav_rhs_storage() { + auto& node_data = neuron::model().node_data(); + using Tag = neuron::container::Node::field::FastIMemSavRHS; + if (node_data.field_active()) { + return &node_data.get(_node_data_offset); + } else { + return nullptr; + } +} + +double* NrnThread::node_voltage_storage() { + return &neuron::model().node_data().get( + _node_data_offset); +} diff --git a/src/nrnoc/multicore.h b/src/nrnoc/multicore.h index 71af7163a0..398e9f43c9 100644 --- a/src/nrnoc/multicore.h +++ b/src/nrnoc/multicore.h @@ -42,11 +42,9 @@ typedef struct NrnThreadBAList { struct NrnThreadBAList* next; } NrnThreadBAList; -typedef struct _nrn_Fast_Imem { - double* _nrn_sav_rhs; - double* _nrn_sav_d; -} _nrn_Fast_Imem; - +struct hoc_Item; +using hoc_List = hoc_Item; +struct Object; /** * \class NrnThread @@ -69,20 +67,34 @@ struct NrnThread { int _stop_stepping; /* delivered an all thread HocEvent */ int _ecell_child_cnt; /* see _ecell_children below */ - double* _actual_rhs; - double* _actual_d; - double* _actual_a; - double* _actual_b; - double* _actual_v; - double* _actual_area; + /** @brief Offset in the global node data where this NrnThread's values start. + */ + std::size_t _node_data_offset{}; + + [[nodiscard]] double* node_a_storage(); + [[nodiscard]] double* node_area_storage(); + [[nodiscard]] double* node_b_storage(); + [[nodiscard]] double* node_d_storage(); + [[nodiscard]] double* node_rhs_storage(); + [[nodiscard]] double* node_sav_d_storage(); + [[nodiscard]] double* node_sav_rhs_storage(); + [[nodiscard]] double* node_voltage_storage(); + [[nodiscard]] double& actual_d(std::size_t row) { + return node_d_storage()[row]; + } + [[nodiscard]] double& actual_rhs(std::size_t row) { + return node_rhs_storage()[row]; + } + int* _v_parent_index; Node** _v_node; Node** _v_parent; + double* _sp13_rhs; /* rhs matrix for sparse13 solver. 
updates to and from this vector + need to be transfered to actual_rhs */ char* _sp13mat; /* handle to general sparse matrix */ - Memb_list* _ecell_memb_list; /* normally nil */ + Memb_list* _ecell_memb_list; /* normally nullptr */ Node** _ecell_children; /* nodes with no extcell but parent has it */ - _nrn_Fast_Imem* _nrn_fast_imem; - void* _vcv; /* replaces old cvode_instance and nrn_cvode_ */ + void* _vcv; /* replaces old cvode_instance and nrn_cvode_ */ #if 1 double _ctime; /* computation time in seconds (using nrnmpi_wtime) */ @@ -98,15 +110,17 @@ extern int nrn_nthread; extern NrnThread* nrn_threads; void nrn_threads_create(int n, bool parallel); extern void nrn_thread_error(const char*); -extern void nrn_multithread_job(void* (*) (NrnThread*) ); +using worker_job_t = void* (*) (NrnThread*); +using worker_job_with_token_t = void (*)(neuron::model_sorted_token const&, NrnThread&); +void nrn_multithread_job(worker_job_t); +void nrn_multithread_job(neuron::model_sorted_token const&, worker_job_with_token_t); extern void nrn_onethread_job(int, void* (*) (NrnThread*) ); extern void nrn_wait_for_threads(); -extern void nrn_thread_table_check(); +void nrn_thread_table_check(neuron::model_sorted_token const&); extern void nrn_threads_free(); extern int nrn_user_partition(); extern void reorder_secorder(); extern void nrn_thread_memblist_setup(); -extern void nrn_imem_defer_free(double*); extern std::size_t nof_worker_threads(); #define FOR_THREADS(nt) for (nt = nrn_threads; nt < nrn_threads + nrn_nthread; ++nt) diff --git a/src/nrnoc/multisplit.h b/src/nrnoc/multisplit.h index 8bfc68eaa3..a3c491dc98 100644 --- a/src/nrnoc/multisplit.h +++ b/src/nrnoc/multisplit.h @@ -1,8 +1,4 @@ -#ifndef multisplit_h -#define multisplit_h - -#include - +#pragma once #if 0 /* comment */ in the classical order, knowing a node means you know the classical parent with @@ -27,17 +23,5 @@ two parents. A center node may be reversed or not with respect to its classical parent. One of the present parents may or may not be its classical parent. #endif /* end comment */ - - -#if 1 || PARANEURON -extern double* nrn_classicalNodeA(Node* n); -extern double* nrn_classicalNodeB(Node* n); -#define ClassicalNODEA(n) (*nrn_classicalNodeA(n)) -#define ClassicalNODEB(n) (*nrn_classicalNodeB(n)) -#else -#define ClassicalNODEA(n) NODEA(n) -#define ClassicalNODEB(n) NODEB(n) -#endif - - -#endif /* multisplit_h */ +double* nrn_classicalNodeA(Node* n); +double* nrn_classicalNodeB(Node* n); diff --git a/src/nrnoc/netstim.mod b/src/nrnoc/netstim.mod index 5db68ca1f1..86bb7562fe 100755 --- a/src/nrnoc/netstim.mod +++ b/src/nrnoc/netstim.mod @@ -1,26 +1,12 @@ : $Id: netstim.mod 2212 2008-09-08 14:32:26Z hines $ : comments at end -: the Random idiom has been extended to support CoreNEURON. - -: For backward compatibility, noiseFromRandom(hocRandom) can still be used -: as well as the default low-quality scop_exprand generator. -: However, CoreNEURON will not accept usage of the low-quality generator, -: and, if noiseFromRandom is used to specify the random stream, that stream -: must be using the Random123 generator. - -: The recommended idiom for specfication of the random stream is to use -: noiseFromRandom123(id1, id2[, id3]) - -: If any instance uses noiseFromRandom123, then no instance can use noiseFromRandom -: and vice versa. 
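The node_*_storage() accessors on NrnThread above replace the removed _actual_* arrays with offsets into the global SoA node storage. A hedged sketch of typical use, assuming nt points at a thread whose node data are currently sorted:

```cpp
// Sketch only; mirrors code that previously wrote nt->_actual_rhs[i] and nt->_actual_d[i].
void zero_rhs_and_d_sketch(NrnThread* nt) {
    double* const rhs = nt->node_rhs_storage();
    double* const d = nt->node_d_storage();
    for (int i = 0; i < nt->end; ++i) {
        rhs[i] = 0.0;
        d[i] = 0.0;
    }
    // the fast_imem fields are optional; the accessor returns nullptr when they are inactive
    if (double* const sav_rhs = nt->node_sav_rhs_storage()) {
        for (int i = 0; i < nt->end; ++i) {
            sav_rhs[i] = 0.0;
        }
    }
}
```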
- NEURON { ARTIFICIAL_CELL NetStim + THREADSAFE RANGE interval, number, start RANGE noise - THREADSAFE : only true if every instance has its own distinct Random - BBCOREPOINTER donotuse + RANDOM ranvar } PARAMETER { @@ -34,41 +20,10 @@ ASSIGNED { event (ms) on ispike - donotuse -} - -VERBATIM -#if !NRNBBCORE -/** If we're running in NEURON, specify the noise style for all instances. - * 1 means noiseFromRandom was called when _ran_compat was previously 0. - * 2 means noiseFromRandom123 was called when _ran_compat was previously 0. - */ -static int _ran_compat; -#endif -ENDVERBATIM - -:backward compatibility -PROCEDURE seed(x) { -VERBATIM -#if !NRNBBCORE -ENDVERBATIM - set_seed(x) -VERBATIM -#endif -ENDVERBATIM } INITIAL { - VERBATIM -#if NRNBBCORE - if(_p_donotuse) { -#else - if(_p_donotuse && _ran_compat == 2) { -#endif - /* only this style initializes the stream on finitialize */ - nrnran123_setseq(reinterpret_cast(_p_donotuse), 0, 0); - } - ENDVERBATIM + seed(0) on = 0 : off ispike = 0 if (noise < 0) { @@ -110,180 +65,9 @@ FUNCTION invl(mean (ms)) (ms) { } FUNCTION erand() { -VERBATIM - if (_p_donotuse) { - /* - :Supports separate independent but reproducible streams for - : each instance. However, the corresponding hoc Random - : distribution MUST be set to Random.negexp(1) - */ -#if !NRNBBCORE - if (_ran_compat == 2) { - _lerand = nrnran123_negexp(reinterpret_cast(_p_donotuse)); - } else { - _lerand = nrn_random_pick(reinterpret_cast(_p_donotuse)); - } -#else - _lerand = nrnran123_negexp(reinterpret_cast(_p_donotuse)); -#endif - return _lerand; - } else { -#if NRNBBCORE - assert(0); -#else - /* - : the old standby. Cannot use if reproducible parallel sim - : independent of nhost or which host this instance is on - : is desired, since each instance on this cpu draws from - : the same stream - */ -#endif - } -#if !NRNBBCORE -ENDVERBATIM - erand = exprand(1) -VERBATIM -#endif -ENDVERBATIM -} - -PROCEDURE noiseFromRandom() { -VERBATIM -#if !NRNBBCORE - { - if (_ran_compat == 2) { - fprintf(stderr, "NetStim.noiseFromRandom123 was previously called\n"); - assert(0); - } - _ran_compat = 1; - auto& randstate = reinterpret_cast(_p_donotuse); - if (ifarg(1)) { - randstate = nrn_random_arg(1); - } else { - randstate = nullptr; - } - } -#endif -ENDVERBATIM -} - -PROCEDURE noiseFromRandom123() { -VERBATIM -#if !NRNBBCORE - if (_ran_compat == 1) { - fprintf(stderr, "NetStim.noiseFromRandom was previously called\n"); - assert(0); - } - _ran_compat = 2; - auto& r123state = reinterpret_cast(_p_donotuse); - if (r123state) { - nrnran123_deletestream(r123state); - r123state = nullptr; - } - if (ifarg(3)) { - r123state = nrnran123_newstream3(static_cast(*getarg(1)), static_cast(*getarg(2)), static_cast(*getarg(3))); - } else if (ifarg(2)) { - r123state = nrnran123_newstream(static_cast(*getarg(1)), static_cast(*getarg(2))); - } -#endif -ENDVERBATIM -} - -DESTRUCTOR { -VERBATIM - if (!noise) { return; } - if (_p_donotuse) { -#if NRNBBCORE - { /* but note that mod2c does not translate DESTRUCTOR */ -#else - if (_ran_compat == 2) { -#endif - auto& r123state = reinterpret_cast(_p_donotuse); - nrnran123_deletestream(r123state); - r123state = nullptr; - } - } -ENDVERBATIM -} - -VERBATIM -static void bbcore_write(double* x, int* d, int* xx, int *offset, _threadargsproto_) { - if (!noise) { return; } - /* error if using the legacy scop_exprand */ - if (!_p_donotuse) { - fprintf(stderr, "NetStim: cannot use the legacy scop_negexp generator for the random stream.\n"); - assert(0); - } - if (d) { - char which; - 
uint32_t* di = reinterpret_cast(d) + *offset; -#if !NRNBBCORE - if (_ran_compat == 1) { - auto* rand = reinterpret_cast(_p_donotuse); - /* error if not using Random123 generator */ - if (!nrn_random_isran123(rand, di, di+1, di+2)) { - fprintf(stderr, "NetStim: Random123 generator is required\n"); - assert(0); - } - nrn_random123_getseq(rand, di+3, &which); - di[4] = which; - } else { -#else - { -#endif - auto& r123state = reinterpret_cast(_p_donotuse); - nrnran123_getids3(r123state, di, di+1, di+2); - nrnran123_getseq(r123state, di+3, &which); - di[4] = which; -#if NRNBBCORE - /* CoreNEURON does not call DESTRUCTOR so... */ - nrnran123_deletestream(r123state); - r123state = nullptr; -#endif - } - /*printf("Netstim bbcore_write %d %d %d\n", di[0], di[1], di[3]);*/ - } - *offset += 5; + erand = random_negexp(ranvar) } -static void bbcore_read(double* x, int* d, int* xx, int* offset, _threadargsproto_) { - if (!noise) { return; } - /* Generally, CoreNEURON, in the context of psolve, begins with an empty - * model, so this call takes place in the context of a freshly created - * instance and _p_donotuse is not NULL. - * However, this function is also now called from NEURON at the end of - * coreneuron psolve in order to transfer back the nrnran123 sequence state. - * That allows continuation with a subsequent psolve within NEURON or - * properly transfer back to CoreNEURON if we continue the psolve there. - * So now, extra logic is needed for this call to work in a NEURON context. - */ - uint32_t* di = reinterpret_cast(d) + *offset; -#if NRNBBCORE - auto& r123state = reinterpret_cast(_p_donotuse); - assert(!r123state); - r123state = nrnran123_newstream3(di[0], di[1], di[2]); - nrnran123_setseq(r123state, di[3], di[4]); -#else - uint32_t id1, id2, id3; - assert(_p_donotuse); - if (_ran_compat == 1) { /* Hoc Random.Random123 */ - auto* pv = reinterpret_cast(_p_donotuse); - int b = nrn_random_isran123(pv, &id1, &id2, &id3); - assert(b); - nrn_random123_setseq(pv, di[3], (char)di[4]); - } else { - assert(_ran_compat == 2); - auto* r123state = reinterpret_cast(_p_donotuse); - nrnran123_getids3(r123state, &id1, &id2, &id3); - nrnran123_setseq(r123state, di[3], di[4]); - } - /* Random123 on NEURON side has same ids as on CoreNEURON side */ - assert(di[0] == id1 && di[1] == id2 && di[2] == id3); -#endif - *offset += 5; -} -ENDVERBATIM - PROCEDURE next_invl() { if (number > 0) { event = invl(interval) @@ -323,34 +107,28 @@ NET_RECEIVE (w) { } } -FUNCTION bbsavestate() { - bbsavestate = 0 - : limited to noiseFromRandom123 -VERBATIM -#if !NRNBBCORE - if (_ran_compat == 2) { - auto r123state = reinterpret_cast(_p_donotuse); - if (!r123state) { return 0.0; } - double* xdir = hoc_pgetarg(1); - if (*xdir == -1.) { - *xdir = 2; - return 0.0; - } - double* xval = hoc_pgetarg(2); - if (*xdir == 0.) { - char which; - uint32_t seq; - nrnran123_getseq(r123state, &seq, &which); - xval[0] = seq; - xval[1] = which; - } - if (*xdir == 1) { - nrnran123_setseq(r123state, xval[0], xval[1]); - } - } -#endif -ENDVERBATIM -} +:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: +: Legacy API +: +: Difference: seed(x) merely sets ranvar sequence to ((uint32_t)x, 0) +: noiseFromRandom HOC Random object must use Random123 +: generator. The ids and sequence are merely copied +: into ranvar. +:::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::::: + +: the Random idiom has been extended to support CoreNEURON. 
+ +: For backward compatibility, noiseFromRandom(hocRandom) can still be used +: as well as the default low-quality scop_exprand generator. +: However, CoreNEURON will not accept usage of the low-quality generator, +: and, if noiseFromRandom is used to specify the random stream, that stream +: must be using the Random123 generator. + +: The recommended idiom for specfication of the random stream is to use +: noiseFromRandom123(id1, id2[, id3]) + +: If any instance uses noiseFromRandom123, then no instance can use noiseFromRandom +: and vice versa. COMMENT @@ -388,3 +166,39 @@ its sequence. ENDCOMMENT +PROCEDURE seed(x) { + random_setseq(ranvar, x) +} + +PROCEDURE noiseFromRandom() { +VERBATIM +#if !NRNBBCORE + { + if (ifarg(1)) { + Rand* r = nrn_random_arg(1); + uint32_t id[3]; + if (!nrn_random_isran123(r, &id[0], &id[1], &id[2])) { + hoc_execerr_ext("NetStim: Random.Random123 generator is required."); + } + nrnran123_setids(ranvar, id[0], id[1], id[2]); + char which; + nrn_random123_getseq(r, &id[0], &which); + nrnran123_setseq(ranvar, id[0], which); + } + } +#endif +ENDVERBATIM +} + +PROCEDURE noiseFromRandom123() { +VERBATIM +#if !NRNBBCORE + if (ifarg(3)) { + nrnran123_setids(ranvar, static_cast(*getarg(1)), static_cast(*getarg(2)), static_cast(*getarg(3))); + } else if (ifarg(2)) { + nrnran123_setids(ranvar, static_cast(*getarg(1)), static_cast(*getarg(2)), 0); + } + nrnran123_setseq(ranvar, 0, 0); +#endif +ENDVERBATIM +} diff --git a/src/nrnoc/neuron.h b/src/nrnoc/neuron.h index 8002db3e96..e42274b308 100755 --- a/src/nrnoc/neuron.h +++ b/src/nrnoc/neuron.h @@ -41,3 +41,4 @@ extern void nrnallsectionmenu(), nrnallpointmenu(), nrnsecmenu(); extern void nrnglobalmechmenu(), nrnmechmenu(), nrnpointmenu(); extern void this_section(), this_node(), parent_section(), parent_node(); extern void parent_connection(), section_orientation(); +extern void print_local_memory_usage(); diff --git a/src/nrnoc/nonvintblock.h b/src/nrnoc/nonvintblock.h index 4cc603253d..66f3ed39c1 100644 --- a/src/nrnoc/nonvintblock.h +++ b/src/nrnoc/nonvintblock.h @@ -43,11 +43,11 @@ nonvintblock_extern int ( /* called at end of nrnoc/treeset.cpp:rhs and nrncvode/cvtrset.cpp:rhs */ #define nrn_nonvint_block_current(size, rhs, tid) nonvint_block(2, size, rhs, 0, tid) /*if any ionic membrane currents are generated, they subtract from - NrnThread._actual_rhs*/ + NrnThread.node_rhs_storage()*/ /* called at end of nrnoc/treeset.cpp:lhs and nrncvode/cvtrset.cpp:lhs */ #define nrn_nonvint_block_conductance(size, d, tid) nonvint_block(3, size, d, 0, tid) -/*if any ionic membrane currents are generated, di/dv adds to _actual_d */ +/*if any ionic membrane currents are generated, di/dv adds to the vector of diagonal values */ /* called at end of nrnoc/fadvance.cpp:nonvint */ #define nrn_nonvint_block_fixed_step_solve(tid) nonvint_block(4, 0, 0, 0, tid) diff --git a/src/nrnoc/nrn_ansi.h b/src/nrnoc/nrn_ansi.h index 7b0c9bbeb6..4435982870 100644 --- a/src/nrnoc/nrn_ansi.h +++ b/src/nrnoc/nrn_ansi.h @@ -1,6 +1,9 @@ #pragma once #include "hocdec.h" #include "membfunc.h" // nrn_bamech_t +#include "neuron/container/data_handle.hpp" + +#include struct Extnode; struct hoc_Item; struct HocParmLimits; @@ -44,7 +47,9 @@ extern void _nrn_watch_allocate(Datum*, Point_process*, double nrflag); extern void hoc_reg_ba(int, nrn_bamech_t, int); -extern int nrn_pointing(double*); +[[nodiscard]] inline int nrn_pointing(double* p) { + return static_cast(p); +} extern void nrn_pushsec(Section*); extern void nrn_popsec(void); @@ -62,7 +67,6 @@ extern 
void cable_prop_assign(Symbol* sym, double* pd, int op); extern void nrn_parent_info(Section* s); extern void nrn_relocate_old_points(Section* oldsec, Node* oldnode, Section* sec, Node* node); extern int nrn_at_beginning(Section* sec); -extern void nrn_node_destruct1(Node*); extern void mech_insert1(Section*, int); extern void extcell_2d_alloc(Section* sec); extern int nrn_is_ion(int); @@ -80,25 +84,29 @@ extern void section_order(void); extern Section* nrn_sec_pop(void); extern Node* node_ptr(Section* sec, double x, double* parea); extern double* nrn_vext_pd(Symbol* s, int indx, Node* nd); -extern double* nrnpy_dprop(Symbol* s, int indx, Section* sec, short inode, int* err); +neuron::container::data_handle nrnpy_dprop(Symbol* s, + int indx, + Section* sec, + short inode, + int* err); extern void nrn_disconnect(Section*); extern void mech_uninsert1(Section* sec, Symbol* s); extern Object* nrn_sec2cell(Section*); extern int nrn_sec2cell_equals(Section*, Object*); -extern double* dprop(Symbol* s, int indx, Section* sec, short inode); +neuron::container::data_handle dprop(Symbol* s, int indx, Section* sec, short inode); extern void nrn_initcode(); extern int segment_limits(double*); extern "C" void nrn_random_play(); extern void fixed_play_continuous(NrnThread*); -extern void* setup_tree_matrix(NrnThread*); +extern void setup_tree_matrix(neuron::model_sorted_token const& sorted_token, NrnThread& nt); extern void nrn_solve(NrnThread*); extern void second_order_cur(NrnThread*); -extern void nrn_update_voltage(NrnThread*); -extern void* nrn_fixed_step_lastpart(NrnThread*); +void nrn_update_voltage(neuron::model_sorted_token const& sorted_token, NrnThread& nt); +extern void nrn_fixed_step_lastpart(neuron::model_sorted_token const& sorted_token, NrnThread& nt); extern void hoc_register_dparam_size(int, int); extern void setup_topology(void); extern int nrn_errno_check(int); -extern void long_difus_solve(int method, NrnThread* nt); +void long_difus_solve(neuron::model_sorted_token const&, int method, NrnThread& nt); extern void nrn_fihexec(int); extern int special_pnt_call(Object*, Symbol*, int); extern void ob_sec_access_push(hoc_Item*); @@ -112,20 +120,18 @@ extern void nrn_sec_ref(Section**, Section*); extern void hoc_level_pushsec(Section*); extern double nrn_ra(Section*); extern int node_index_exact(Section*, double); -void nrn_cachevec(int); -void nrn_ba(NrnThread*, int); -extern void nrniv_recalc_ptrs(void); -extern void nrn_recalc_ptrvector(void); -extern void nrn_recalc_ptrs(double* (*r)(double*) ); +void nrn_ba(neuron::model_sorted_token const&, NrnThread&, int); extern void nrn_rhs_ext(NrnThread*); extern void nrn_setup_ext(NrnThread*); -extern void nrn_cap_jacob(NrnThread*, Memb_list*); +void nrn_cap_jacob(neuron::model_sorted_token const&, NrnThread*, Memb_list*); +void nrn_div_capacity(neuron::model_sorted_token const&, NrnThread*, Memb_list*); +void nrn_mul_capacity(neuron::model_sorted_token const&, NrnThread*, Memb_list*); extern void clear_point_process_struct(Prop* p); extern void ext_con_coef(void); extern void nrn_multisplit_ptr_update(void); -extern void nrn_cache_prop_realloc(); extern void nrn_use_daspk(int); extern void nrn_update_ps2nt(void); +neuron::model_sorted_token nrn_ensure_model_data_are_sorted(); extern void activstim_rhs(void); extern void activclamp_rhs(void); extern void activclamp_lhs(void); @@ -135,14 +141,19 @@ extern void stim_prepare(void); extern void clamp_prepare(void); extern void synapse_prepare(void); +void nrn_fixed_step(neuron::model_sorted_token 
const&); +void nrn_fixed_step_group(neuron::model_sorted_token const&, int n); +void nrn_lhs(neuron::model_sorted_token const&, NrnThread&); +void nrn_rhs(neuron::model_sorted_token const&, NrnThread&); extern void v_setup_vectors(void); extern void section_ref(Section*); extern void section_unref(Section*); extern const char* secname(Section*); extern const char* nrn_sec2pysecname(Section*); -void nrn_rangeconst(Section*, Symbol*, double* value, int op); +void nrn_rangeconst(Section*, Symbol*, neuron::container::data_handle value, int op); extern int nrn_exists(Symbol*, Node*); -double* nrn_rangepointer(Section*, Symbol*, double x); +neuron::container::data_handle nrn_rangepointer(Section*, Symbol*, double x); +neuron::container::data_handle nrnpy_rangepointer(Section*, Symbol*, double, int*, int); extern double* cable_prop_eval_pointer(Symbol*); // section on stack will be popped extern char* hoc_section_pathname(Section*); extern double nrn_arc_position(Section*, Node*); @@ -159,8 +170,8 @@ extern int is_point_process(Object*); extern int nrn_vartype(Symbol*); // nrnocCONST, DEP, STATE extern void recalc_diam(void); extern Prop* nrn_mechanism_check(int type, Section* sec, int inode); -extern int nrn_use_fast_imem; -extern void nrn_fast_imem_alloc(); +extern bool nrn_use_fast_imem; +void nrn_fast_imem_alloc(); extern void nrn_calc_fast_imem(NrnThread*); extern Section* nrn_secarg(int iarg); extern void nrn_seg_or_x_arg(int iarg, Section** psec, double* px); @@ -194,3 +205,37 @@ char* nrn_version(int); * @param i Key index, must be less than nrn_num_config_keys(). */ [[nodiscard]] char* nrn_get_config_val(std::size_t i); + +/** + In mechanism libraries, cannot use + auto const token = nrn_ensure_model_data_are_sorted(); + because the return type is incomplete (from include/neuron/model_data.hpp). + And we do not want to fix by installing more *.hpp files in the + include/neuron directory because of potential ABI incompatibility (anything + with std::string anywhere in it). + The work around is to provide an extra layer of indirection via unique_ptr + so the opaque token has a definite size (one pointer) and declaration. + + The "trick" is just that you have to make sure the parts of the opaque + token that need the definition of the non-opaque token are defined in + the right place. That's why the constructor and destructor are defined + in fadvance.cpp + + Instead, use + auto const token = nrn_ensure_model_data_are_sorted_opaque(); + This file is already included in all translated mod files. 
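A hedged sketch of the intended pattern from a translated mod file, where only the opaque wrapper is declared; use_sorted_data stands in for any callee taking the real token type:

```cpp
// Sketch only; the conversion operator on opaque_model_sorted_token supplies the
// neuron::model_sorted_token const& expected by the callee.
void use_sorted_data(neuron::model_sorted_token const&, NrnThread&);

void mechanism_entry_point_sketch(NrnThread& nt) {
    auto const token = nrn_ensure_model_data_are_sorted_opaque();
    use_sorted_data(token, nt);
}
```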
+**/ +namespace neuron { +struct model_sorted_token; +struct opaque_model_sorted_token { + opaque_model_sorted_token(model_sorted_token&&); + ~opaque_model_sorted_token(); + operator model_sorted_token const &() const { + return *m_ptr; + } + + private: + std::unique_ptr m_ptr; +}; +} // namespace neuron +neuron::opaque_model_sorted_token nrn_ensure_model_data_are_sorted_opaque(); diff --git a/src/nrnoc/nrncvode.h b/src/nrnoc/nrncvode.h index ec77bca31b..7a684bfaba 100644 --- a/src/nrnoc/nrncvode.h +++ b/src/nrnoc/nrncvode.h @@ -1,4 +1,5 @@ #pragma once +#include "oc_ansi.h" // neuron::model_sorted_token struct Memb_list; struct NrnThread; void cvode_fadvance(double); @@ -11,15 +12,15 @@ void free_event_queues(); extern void init_net_events(); extern void nrn_record_init(); extern void nrn_play_init(); -extern void fixed_record_continuous(NrnThread* nt); +void fixed_record_continuous(neuron::model_sorted_token const&, NrnThread& nt); extern void fixed_play_continuous(NrnThread* nt); extern void nrn_solver_prepare(); extern "C" void nrn_random_play(); extern void nrn_daspk_init_step(double, double, int); extern void nrndae_init(); -extern void nrndae_update(); +extern void nrndae_update(NrnThread*); extern void nrn_update_2d(NrnThread*); -extern void nrn_capacity_current(NrnThread* _nt, Memb_list* ml); +void nrn_capacity_current(neuron::model_sorted_token const&, NrnThread* _nt, Memb_list* ml); extern void nrn_spike_exchange_init(); void nrn_spike_exchange(NrnThread* nt); extern bool nrn_use_bin_queue_; diff --git a/src/nrnoc/nrndae_c.h b/src/nrnoc/nrndae_c.h index 1a8fb8be91..48e573c7ee 100644 --- a/src/nrnoc/nrndae_c.h +++ b/src/nrnoc/nrndae_c.h @@ -1,16 +1,18 @@ #pragma once +#include "neuron/container/data_handle.hpp" #include extern void nrndae_alloc(void); extern int nrndae_extra_eqn_count(void); extern void nrndae_init(void); -extern void nrndae_rhs(void); /* relative to c*dy/dt = -g*y + b */ +extern void nrndae_rhs(NrnThread*); /* relative to c*dy/dt = -g*y + b */ extern void nrndae_lhs(void); -void nrndae_dkmap(double**, double**); +void nrndae_dkmap(std::vector>&, + std::vector>&); extern void nrndae_dkres(double*, double*, double*); extern void nrndae_dkpsol(double); -extern void nrndae_update(void); +extern void nrndae_update(NrnThread*); void nrn_matrix_node_free(); extern int nrndae_list_is_empty(void); diff --git a/src/nrnoc/nrniv_mf.h b/src/nrnoc/nrniv_mf.h index fb6211a900..aa4869f52d 100644 --- a/src/nrnoc/nrniv_mf.h +++ b/src/nrnoc/nrniv_mf.h @@ -4,17 +4,28 @@ #include "hocdec.h" #include "membfunc.h" +struct Memb_list; struct NrnThread; struct Point_process; -typedef double (*ldifusfunc3_t)(int, double*, Datum*, double*, double*, Datum*, NrnThread*); -typedef void ldifusfunc2_t(int, ldifusfunc3_t, void**, int, int, int, NrnThread*); -typedef void (*ldifusfunc_t)(ldifusfunc2_t, NrnThread*); +using ldifusfunc3_t = double (*)(int, + Memb_list*, + std::size_t, + Datum*, + double*, + double*, + Datum*, + NrnThread*, + neuron::model_sorted_token const&); +using ldifusfunc2_t = + void(int, ldifusfunc3_t, void**, int, int, int, neuron::model_sorted_token const&, NrnThread&); +using ldifusfunc_t = void (*)(ldifusfunc2_t, neuron::model_sorted_token const&, NrnThread&); typedef void (*pnt_receive_t)(Point_process*, double*, double); typedef void (*pnt_receive_init_t)(Point_process*, double*, double); extern Prop* need_memb_cl(Symbol*, int*, int*); extern Prop* prop_alloc(Prop**, int, Node*); +void prop_update_ion_variables(Prop*, Node*); [[deprecated("non-void* overloads are 
preferred")]] void artcell_net_send(void* v, double* weight, @@ -32,32 +43,30 @@ void nrn_net_send(Datum* v, double* weight, Point_process* pnt, double td, doubl extern double nrn_ion_charge(Symbol*); extern Point_process* ob2pntproc(Object*); extern Point_process* ob2pntproc_0(Object*); - -extern void register_mech(const char**, Pvmp, Pvmi, Pvmi, Pvmi, Pvmi, int, int); -extern int point_register_mech(const char**, - Pvmp, - Pvmi, - Pvmi, - Pvmi, - Pvmi, - int, - int, - void* (*) (Object*), - void (*)(void*), - Member_func*); +void register_mech(const char**, Pvmp, nrn_cur_t, nrn_jacob_t, nrn_state_t, nrn_init_t, int, int); +int point_register_mech(const char**, + Pvmp, + nrn_cur_t, + nrn_jacob_t, + nrn_state_t, + nrn_init_t, + int, + int, + void* (*) (Object*), + void (*)(void*), + Member_func*); extern int nrn_get_mechtype(const char*); extern void nrn_writes_conc(int, int); extern void add_nrn_has_net_event(int); -extern void hoc_register_cvode(int, nrn_ode_count_t, nrn_ode_map_t, Pvmi, Pvmi); -extern void hoc_register_synonym(int, void (*)(int, double**, Datum**)); +void hoc_register_cvode(int, nrn_ode_count_t, nrn_ode_map_t, nrn_ode_spec_t, nrn_ode_matsol_t); +void hoc_register_synonym(int, nrn_ode_synonym_t); extern void register_destructor(Pvmp); extern void ion_reg(const char*, double); extern void nrn_promote(Prop*, int, int); extern void add_nrn_artcell(int, int); extern void hoc_register_ldifus1(ldifusfunc_t); extern void nrn_check_conc_write(Prop*, Prop*, int); -extern void nrn_wrote_conc(Symbol*, double*, int); -extern void nrn_update_ion_pointer(Symbol*, Datum*, int, int); +void nrn_wrote_conc(Symbol*, double& erev, double ci, double co, int); extern Prop* need_memb(Symbol*); @@ -67,6 +76,7 @@ extern double has_loc_point(void*); extern double get_loc_point_process(void*); extern double loc_point_process(int, void*); extern Prop* nrn_point_prop_; +neuron::container::data_handle point_process_pointer(Point_process*, Symbol*, int); void steer_point_process(void* v); bool at_time(NrnThread*, double); @@ -74,9 +84,6 @@ bool at_time(NrnThread*, double); void artcell_net_move(Datum*, Point_process*, double); extern int ifarg(int); - -extern void nrn_complain(double*); - extern void set_seed(double); extern int nrn_matrix_cnt_; // defined in treeset.cpp extern int diam_changed; // defined in cabcode.cpp diff --git a/src/nrnoc/nrnoc_ml.h b/src/nrnoc/nrnoc_ml.h index 63c27b517e..6cf6d4397a 100644 --- a/src/nrnoc/nrnoc_ml.h +++ b/src/nrnoc/nrnoc_ml.h @@ -1,23 +1,205 @@ #pragma once -#include "hocdec.h" -#include "options.h" // for CACHEVEC +#include // std::ptrdiff_t, std::size_t +#include // std::numeric_limits +#include // std::vector struct Node; struct Prop; +namespace neuron::container { +struct generic_data_handle; +} +using Datum = neuron::container::generic_data_handle; + +// Only include a forward declaration to help avoid translated MOD file code relying on its layout +namespace neuron::container::Mechanism { +struct storage; +} + +/** + * @brief A view into a set of mechanism instances. + * + * This is a view to a set of mechanism instances that are contiguous in the + * underlying storage. This is inherently something that only makes sense if + * the underlying data are sorted. In this case, Memb_list essentially + * contains a pointer to the underlying storage struct and a single offset into + * it. 
This covers use-cases like Memb_list inside NrnThread -- the data are + * partitioned by NrnThread in nrn_sort_mech_data so all the instances of a + * particular mechanism in a particular thread are next to each other in the + * storage. + * + * Because this type is passed from NEURON library code into code generated from MOD files, it is + * prone to ABI issues -- particularly when dealing with Python wheels. + */ struct Memb_list { - Node** nodelist; -#if CACHEVEC != 0 + /** + * @brief Construct a null Memb_list that does not refer to any thread/type. + */ + Memb_list() = default; + + /** + * @brief Construct a Memb_list that knows its type + underlying storage. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + Memb_list(int type); + + Node** nodelist{}; /* nodeindices contains all nodes this extension is responsible for, * ordered according to the matrix. This allows to access the matrix * directly via the nrn_actual_* arrays instead of accessing it in the * order of insertion and via the node-structure, making it more * cache-efficient */ - int* nodeindices; -#endif /* CACHEVEC */ - double** _data; - Datum** pdata; - Prop** prop; - Datum* _thread; /* thread specific data (when static is no good) */ - int nodecount; + int* nodeindices{}; + Datum** pdata{}; + Prop** prop{}; + Datum* _thread{}; /* thread specific data (when static is no good) */ + int nodecount{}; + /** + * @brief Get a vector of double* representing the model data. + * + * Calling .data() on the return value yields a double** that is similar to + * the old _data member, with the key difference that its indices are + * transposed. Now, the first index corresponds to the variable and the + * second index corresponds to the instance of the mechanism. This method + * is useful for interfacing with CoreNEURON but should be deprecated and + * removed along with the translation layer between NEURON and CoreNEURON. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + [[nodiscard]] std::vector data(); + + template + [[nodiscard]] double& fpfield(std::size_t instance) { + return data(instance, variable); + } + + template + [[nodiscard]] double* dptr_field(std::size_t instance) { + return dptr_field(instance, variable); + } + + [[nodiscard]] neuron::container::data_handle data_handle( + std::size_t instance, + neuron::container::field_index field) const; + + /** + * @brief Get the `variable`-th floating point value in `instance` of the mechanism. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + [[nodiscard]] __attribute__((pure)) double& data(std::size_t instance, + int variable, + int array_index = 0); + + /** + * @brief Get the `variable`-th pointer-to-double in `instance` of the mechanism. + * + * Defined in .cpp to hide the full definition of Datum from translated MOD file code. + */ + [[nodiscard]] __attribute__((pure)) double* dptr_field(std::size_t instance, int variable); + + /** + * @brief Get the `variable`-th floating point value in `instance` of the mechanism. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + [[nodiscard]] __attribute__((pure)) double const& data(std::size_t instance, + int variable, + int array_index = 0) const; + + /** + * @brief Calculate a legacy index of the given pointer in this mechanism data. 
+ * + * This used to be defined as ptr - ml->_data[0] if ptr belonged to the + * given mechanism, i.e. an offset from the zeroth element of the zeroth + * mechanism. This is useful when interfacing with CoreNEURON and for + * parameter exchange with other MPI ranks. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + [[nodiscard]] std::ptrdiff_t legacy_index(double const* ptr) const; + + /** + * @brief Calculate a legacy index from a data handle. + */ + [[nodiscard]] std::ptrdiff_t legacy_index( + neuron::container::data_handle const& dh) const { + return legacy_index(static_cast(dh)); + } + + /** + * @brief Get the offset of this Memb_list into global storage for this type. + * + * In the simplest case then this Memb_list represents all instances of a + * particular type in a particular thread, which are contiguous because the + * data have been sorted by a call like + * auto const cache_token = nrn_ensure_model_data_are_sorted() + * and this offset has been set to be + * cache_token.thread_cache(thread_id).mechanism_offset.at(mechanism_type) + * so that the first argument to data(i, j) is an offset inside this thread, + * not the global structure. + */ + [[nodiscard]] std::size_t get_storage_offset() const { + assert(m_storage_offset != neuron::container::invalid_row); + return m_storage_offset; + } + + /** + * @brief Set the offset of this Memb_list into global storage for this type. + * + * See the documentation for @ref get_storage_offset. + * + * @todo At the moment this is set as part of sorting/permuting data, but it + * is not automatically invalidated when the cache / sorted status is + * reset. Consider if these offsets can be more explicitly tied to the + * lifetime of the cache data. + */ + void set_storage_offset(std::size_t offset) { + m_storage_offset = offset; + } + + /** + * @brief Set the pointer to the underlying data container. + * + * This is a quasi-private method that you should think twice before + * calling. Normally m_storage would automatically be set by the constructor + * taking an integer mechanism type. + */ + void set_storage_pointer(neuron::container::Mechanism::storage* storage) { + m_storage = storage; + } + + /** + * @brief Get the mechanism type. + * + * Defined in .cpp to hide neuron::container::Mechanism::storage layout from translated MOD file + * code. + */ + [[nodiscard]] int type() const; + + [[nodiscard]] int _type() const { + return type(); + } + + private: + /** + * @brief Pointer to the global mechanism data structure for this mech type. + */ + neuron::container::Mechanism::storage* m_storage{}; + + /** + * @brief Offset of this thread+mechanism into the global mechanism data. + * + * This is locally a piece of "cache" information like node_data_offset -- + * the question is whether Memb_list itself is also considered a + * transient/cache type, in which case this is fine, or if it's considered + * permanent...in which case this value should probably not live here. + */ + std::size_t m_storage_offset{neuron::container::invalid_row}; }; diff --git a/src/nrnoc/nrnversionmacros.h b/src/nrnoc/nrnversionmacros.h index 0a2ee16935..d5b6473462 100644 --- a/src/nrnoc/nrnversionmacros.h +++ b/src/nrnoc/nrnversionmacros.h @@ -18,3 +18,13 @@ #if NRN_VERSION_GTEQ(8, 2, 0) #define NRN_VERSION_GTEQ_8_2_0 #endif + +// Around NEURON 9.0.0 there are significant changes to data structures, which in some cases require +// that VERBATIM code is adapted. 
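A minimal sketch of how VERBATIM code might guard on the macro defined just below; _p, _ml and _iml follow the naming convention mentioned in that comment, and the exact access expression is illustrative rather than prescriptive:

    /* inside a VERBATIM block of a MOD file */
    #ifdef NRN_MECHANISM_DATA_IS_SOA
        double first_param = _ml->data(_iml, 0); /* SoA: Memb_list* plus instance index */
    #else
        double first_param = _p[0];              /* legacy AoS: per-instance double* */
    #endif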
+#if NRN_VERSION_LT(9, 0, 0) +#error "Changes from NEURON 9+ may have been erroneously backported to an older branch" +#else +// Used to test if floating point mechanism data are accessed via a double* (_p) or via a Memb_list* +// and std::size_t pair (_ml and _iml) +#define NRN_MECHANISM_DATA_IS_SOA // _ml and _iml are used +#endif diff --git a/src/nrnoc/options.h b/src/nrnoc/options.h index abbfe2f963..b0f9d84dfe 100644 --- a/src/nrnoc/options.h +++ b/src/nrnoc/options.h @@ -32,8 +32,4 @@ in one section is set but no others. But only the first time through treeset. #define KEEP_NSEG_PARM 1 /* Use old segment parameters to define */ /* the new segment information */ -#if !defined(CACHEVEC) -#define CACHEVEC 1 /* define to 0 doubles in nodes instead of vectors*/ -#endif - #define MULTICORE 1 /* not optional */ diff --git a/src/nrnoc/passive0.cpp b/src/nrnoc/passive0.cpp index c7e2e68d62..b71f106666 100644 --- a/src/nrnoc/passive0.cpp +++ b/src/nrnoc/passive0.cpp @@ -8,53 +8,44 @@ #define nparm 2 static const char* mechanism[] = {"0", "fastpas", "g_fastpas", "e_fastpas", 0, 0, 0}; static void pas_alloc(Prop* p); -static void pas_cur(NrnThread* nt, Memb_list* ml, int type); -static void pas_jacob(NrnThread* nt, Memb_list* ml, int type); +static void pas_cur(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type); +static void pas_jacob(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type); extern "C" void passive0_reg_(void) { int mechtype; - register_mech(mechanism, pas_alloc, pas_cur, pas_jacob, (Pvmi) 0, (Pvmi) 0, -1, 1); + register_mech(mechanism, pas_alloc, pas_cur, pas_jacob, nullptr, nullptr, -1, 1); mechtype = nrn_get_mechtype(mechanism[1]); + using neuron::mechanism::field; + neuron::mechanism::register_data_fields(mechtype, + field{"g_fastpas"}, + field{"e_fastpas"}); hoc_register_prop_size(mechtype, nparm, 0); } -#define g vdata[i][0] -#define e vdata[i][1] -#define v NODEV(vnode[i]) +static constexpr auto g_index = 0; +static constexpr auto e_index = 1; -static void pas_cur(NrnThread* nt, Memb_list* ml, int type) { +static void pas_cur(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { int count = ml->nodecount; Node** vnode = ml->nodelist; - double** vdata = ml->_data; - Datum** vpdata = ml->pdata; - int i; - for (i = 0; i < count; ++i) { - NODERHS(vnode[i]) -= g * (v - e); + for (int i = 0; i < count; ++i) { + NODERHS(vnode[i]) -= ml->data(i, g_index) * (NODEV(vnode[i]) - ml->data(i, e_index)); } } -static void pas_jacob(NrnThread* nt, Memb_list* ml, int type) { - int count = ml->nodecount; - Node** vnode = ml->nodelist; - double** vdata = ml->_data; - Datum** vpdata = ml->pdata; - int i; - for (i = 0; i < count; ++i) { - NODED(vnode[i]) += g; +static void pas_jacob(neuron::model_sorted_token const&, NrnThread* nt, Memb_list* ml, int type) { + auto const count = ml->nodecount; + auto* const ni = ml->nodeindices; + auto* const vec_d = nt->node_d_storage(); + for (int i = 0; i < count; ++i) { + vec_d[ni[i]] += ml->data(i, g_index); } } /* the rest can be constructed automatically from the above info*/ static void pas_alloc(Prop* p) { - double* pd; - pd = nrn_prop_data_alloc(p->_type, nparm, p); - p->param_size = nparm; -#if defined(__MWERKS__) - pd[0] = 5.e-4; /*DEF_g;*/ -#else - pd[0] = DEF_g; -#endif - pd[1] = DEF_e; - p->param = pd; + assert(p->param_size() == nparm); + p->param(0) = DEF_g; + p->param(1) = DEF_e; } diff --git a/src/nrnoc/point.cpp b/src/nrnoc/point.cpp index 9fff02b829..4159358d6b 100644 --- 
a/src/nrnoc/point.cpp +++ b/src/nrnoc/point.cpp @@ -31,16 +31,8 @@ void (*nrnpy_o2loc_p_)(Object*, Section**, double*); void (*nrnpy_o2loc2_p_)(Object*, Section**, double*); void* create_point_process(int pointtype, Object* ho) { - Point_process* pp; - pp = (Point_process*) emalloc(sizeof(Point_process)); - pp->node = 0; - pp->sec = 0; - pp->prop = 0; + auto* const pp = new Point_process{}; pp->ob = ho; - pp->presyn_ = 0; - pp->nvi_ = 0; - pp->_vnt = 0; - if (nrn_is_artificial_[pointsym[pointtype]->subtype]) { create_artcell_prop(pp, pointsym[pointtype]->subtype); return pp; @@ -48,12 +40,11 @@ void* create_point_process(int pointtype, Object* ho) { if (ho && ho->ctemplate->steer && ifarg(1)) { loc_point_process(pointtype, (void*) pp); } - return (void*) pp; + return pp; } Object* nrn_new_pointprocess(Symbol* sym) { void* v; - extern Object* hoc_new_object(Symbol*, void*); extern Object* hoc_new_opoint(int); Object* ob; extern Symlist* hoc_built_in_symlist; @@ -76,60 +67,63 @@ Object* nrn_new_pointprocess(Symbol* sym) { void destroy_point_process(void* v) { // might be NULL if error handling because of error in construction if (v) { - Point_process* pp = (Point_process*) v; + auto* const pp = static_cast(v); free_one_point(pp); - free(pp); + delete pp; } } void nrn_loc_point_process(int pointtype, Point_process* pnt, Section* sec, Node* node) { extern Prop* prop_alloc_disallow(Prop * *pp, short type, Node* nd); - extern Prop* prop_alloc(Prop**, int, Node*); extern Section* nrn_pnt_sec_for_need_; - Prop* p; - double x; - assert(!nrn_is_artificial_[pointsym[pointtype]->subtype]); - x = nrn_arc_position(sec, node); - /* the problem the next fragment overcomes is that POINTER's become - invalid when a point process is moved (dparam and param were - allocated but then param was freed and replaced by old param) The - error that I saw, then, was when a dparam pointed to a param -- - useful to give default value to a POINTER and then the param was - immediately freed. This was the tip of the iceberg since in general - when one moves a point process, some pointers are valid and - some invalid and this can only be known by the model in its - CONSTRUCTOR. Therefore, instead of copying the old param to - the new param (and therefore invalidating other pointers as in - menus) we flag the allocation routine for the model to - 1) not allocate param and dparam, 2) don't fill param but - do the work for dparam (fill pointers for ions), - 3) execute the constructor normally. - */ if (pnt->prop) { - nrn_point_prop_ = pnt->prop; - } else { - nrn_point_prop_ = (Prop*) 0; - } - nrn_pnt_sec_for_need_ = sec; - if (x == 0. || x == 1.) { - p = prop_alloc_disallow(&(node->prop), pointsym[pointtype]->subtype, node); + // Make the old Node forget about pnt->prop + if (auto* const old_node = pnt->node; old_node) { + auto* const p = pnt->prop; + if (!nrn_is_artificial_[p->_type]) { + auto* p1 = old_node->prop; + if (p1 == p) { + old_node->prop = p1->next; + } else { + for (; p1; p1 = p1->next) { + if (p1->next == p) { + p1->next = p->next; + break; + } + } + } + } + v_structure_change = 1; // needed? 
+ } + // Tell the new Node about pnt->prop + pnt->prop->next = node->prop; + node->prop = pnt->prop; + // Call the nrn_alloc function from the MOD file with nrn_point_prop_ set: this will skip + // resetting parameter values and calling the CONSTRUCTOR block, but it *will* update ion + // variables to point to the ion mechanism instance in the new Node + prop_update_ion_variables(pnt->prop, node); } else { - p = prop_alloc(&(node->prop), pointsym[pointtype]->subtype, node); - } - nrn_pnt_sec_for_need_ = (Section*) 0; - - nrn_point_prop_ = (Prop*) 0; - if (pnt->prop) { - pnt->prop->param = nullptr; - pnt->prop->dparam = nullptr; - free_one_point(pnt); + // Allocate a new Prop for this Point_process + Prop* p; + auto const x = nrn_arc_position(sec, node); + nrn_point_prop_ = pnt->prop; + nrn_pnt_sec_for_need_ = sec; + // Both branches of this tell `node` about the new Prop `p` + if (x == 0. || x == 1.) { + p = prop_alloc_disallow(&(node->prop), pointsym[pointtype]->subtype, node); + } else { + p = prop_alloc(&(node->prop), pointsym[pointtype]->subtype, node); + } + nrn_pnt_sec_for_need_ = nullptr; + nrn_point_prop_ = nullptr; + pnt->prop = p; + pnt->prop->dparam[1] = {neuron::container::do_not_search, pnt}; } + // Update pnt->sec with sec, unreffing the old value and reffing the new one nrn_sec_ref(&pnt->sec, sec); - pnt->node = node; - pnt->prop = p; - pnt->prop->dparam[0] = &NODEAREA(node); - pnt->prop->dparam[1] = pnt; + pnt->node = node; // tell pnt which node it belongs to now + pnt->prop->dparam[0] = node->area_handle(); if (pnt->ob) { if (pnt->ob->observers) { hoc_obj_notify(pnt->ob); @@ -242,12 +236,14 @@ double has_loc_point(void* v) { return (pnt->sec != 0); } -double* point_process_pointer(Point_process* pnt, Symbol* sym, int index) { +neuron::container::data_handle point_process_pointer(Point_process* pnt, + Symbol* sym, + int index) { if (!pnt->prop) { if (nrn_inpython_ == 1) { /* python will handle the error */ hoc_warning("point process not located in a section", nullptr); nrn_inpython_ = 2; - return nullptr; + return {}; } else { hoc_execerror("point process not located in a section", nullptr); } @@ -257,31 +253,43 @@ double* point_process_pointer(Point_process* pnt, Symbol* sym, int index) { if (cppp_semaphore) { ++cppp_semaphore; cppp_datum = &datum; // we will store a value in `datum` later - return &ppp_dummy; + return neuron::container::data_handle{neuron::container::do_not_search, + &ppp_dummy}; } else { - return datum.get(); + // In case _p_somevar is being used as an opaque void* in a VERBATIM + // block then then the Datum will hold a literal void* and will not + // be convertible to double*. If instead somevar is used in the MOD + // file and it was set from the interpreter (mech._ref_pv = + // seg._ref_v), then the Datum will hold either data_handle + // or a literal double*. If we attempt to access a POINTER or + // BBCOREPOINTER variable that is being used as an opaque void* from + // the interpreter -- as is done in test_datareturn.py, for example + // -- then we will reach this code when the datum holds a literal + // void*. We don't know what type that pointer really refers to, so + // in the first instance let's return nullptr in that case, not + // void*-cast-to-double*. 
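A sketch of the conversion pattern used in the branch below, with the template parameters written out explicitly (an assumption, inferred from this function's data_handle<double> return type):

    if (datum.holds<double*>()) {
        return static_cast<neuron::container::data_handle<double>>(datum);
    } else {
        return {};
    }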
+ if (datum.holds()) { + return static_cast>(datum); + } else { + return {}; + } } } else { if (pnt->prop->ob) { - return pnt->prop->ob->u.dataspace[sym->u.rng.index].pval + index; + return neuron::container::data_handle{ + pnt->prop->ob->u.dataspace[sym->u.rng.index].pval + index}; } else { - return &(pnt->prop->param[sym->u.rng.index + index]); + return pnt->prop->param_handle_legacy(sym->u.rng.index + index); } } } -/* put the right double pointer on the stack */ +// put the right data handle on the stack void steer_point_process(void* v) { - Symbol* sym; - int index; - Point_process* pnt = (Point_process*) v; - sym = hoc_spop(); - if (ISARRAY(sym)) { - index = hoc_araypt(sym, SYMBOL); - } else { - index = 0; - } - hoc_pushpx(point_process_pointer(pnt, sym, index)); + auto* const pnt = static_cast(v); + auto* const sym = hoc_spop(); + auto const index = ISARRAY(sym) ? hoc_araypt(sym, SYMBOL) : 0; + hoc_push(point_process_pointer(pnt, sym, index)); } void nrn_cppp(void) { @@ -294,7 +302,7 @@ void connect_point_process_pointer(void) { hoc_execerror("not a point process pointer", (char*) 0); } cppp_semaphore = 0; - *cppp_datum = hoc_pxpop(); + *cppp_datum = hoc_pop_handle(); hoc_nopop(); } @@ -316,18 +324,17 @@ static void free_one_point(Point_process* pnt) { } } } - { v_structure_change = 1; } - if (p->param) { - if (memb_func[p->_type].destructor) { - memb_func[p->_type].destructor(p); - } - notify_freed_val_array(p->param, p->param_size); - nrn_prop_data_free(p->_type, p->param); + v_structure_change = 1; + if (memb_func[p->_type].destructor) { + memb_func[p->_type].destructor(p); + } + if (auto got = nrn_mech_inst_destruct.find(p->_type); got != nrn_mech_inst_destruct.end()) { + (got->second)(p); } if (p->dparam) { nrn_prop_datum_free(p->_type, p->dparam); } - free(p); + delete p; pnt->prop = (Prop*) 0; pnt->node = (Node*) 0; if (pnt->sec) { @@ -353,14 +360,10 @@ void clear_point_process_struct(Prop* p) { if (p->ob) { hoc_obj_unref(p->ob); } - if (p->param) { - notify_freed_val_array(p->param, p->param_size); - nrn_prop_data_free(p->_type, p->param); - } if (p->dparam) { nrn_prop_datum_free(p->_type, p->dparam); } - free(p); + delete p; } } diff --git a/src/nrnoc/psection.cpp b/src/nrnoc/psection.cpp index 6077cc77aa..ef5565b2ee 100644 --- a/src/nrnoc/psection.cpp +++ b/src/nrnoc/psection.cpp @@ -61,7 +61,7 @@ static void pnode(Prop* p1) { if (p1->ob) { printf(" %s=%g", s->name, p1->ob->u.dataspace[s->u.rng.index].pval[0]); } else { - Printf(" %s=%g", s->name, p1->param[s->u.rng.index]); + Printf(" %s=%g", s->name, p1->param_legacy(s->u.rng.index)); } } } diff --git a/src/nrnoc/seclist.cpp b/src/nrnoc/seclist.cpp index 63ca983960..2568a5b9cd 100644 --- a/src/nrnoc/seclist.cpp +++ b/src/nrnoc/seclist.cpp @@ -1,8 +1,8 @@ #include <../../nrnconf.h> #define HOC_L_LIST 1 -#include #include "section.h" #include "neuron.h" +#include "nrnpy.h" #include "parse.hpp" #include "hocparse.h" #include "code.h" @@ -145,9 +145,6 @@ static double allroots(void* v) { } static double seclist_remove(void* v) { -#if USE_PYTHON - extern Symbol* nrnpy_pyobj_sym_; -#endif Section *sec, *s; Item *q, *q1; List* sl; @@ -277,7 +274,7 @@ extern int hoc_returning; static void check(Object* ob) { if (!ob) { - hoc_execerror("nil object is not a SectionList", (char*) 0); + hoc_execerror("nullptr object is not a SectionList", nullptr); } if (ob->ctemplate->constructor != constructor) { hoc_execerror(ob->ctemplate->sym->name, " is not a SectionList"); diff --git a/src/nrnoc/secref.cpp b/src/nrnoc/secref.cpp index 
e741128180..a0ec032b91 100644 --- a/src/nrnoc/secref.cpp +++ b/src/nrnoc/secref.cpp @@ -20,7 +20,6 @@ access s1.sec // soma becomes the default section #include "parse.hpp" #include "hoc_membf.h" #include "oc_ansi.h" -#include extern int hoc_return_type_code; @@ -156,7 +155,7 @@ static double s_rename(void* v) { if (size == 0) { pitm[index] = qsec; - sec->prop->dparam[0] = sym; + sec->prop->dparam[0] = {neuron::container::do_not_search, sym}; sec->prop->dparam[5] = index; sec->prop->dparam[6] = static_cast(nullptr); OPSECITM(sym)[0] = qsec; diff --git a/src/nrnoc/section.h b/src/nrnoc/section.h index 74b8ab3d7d..0f496d1f46 100644 --- a/src/nrnoc/section.h +++ b/src/nrnoc/section.h @@ -22,11 +22,14 @@ d is assumed to be non-zero. d and rhs is calculated from the property list. */ - - +#include "hoclist.h" +#include "membfunc.h" +#include "neuron/container/mechanism_data.hpp" +#include "neuron/container/node_data.hpp" +#include "neuron/model_data.hpp" #include "nrnredef.h" #include "options.h" -#include "hoclist.h" +#include "section_fwd.hpp" /*#define DEBUGSOLVE 1*/ #define xpop hoc_xpop @@ -35,39 +38,37 @@ #define execerror hoc_execerror #include "hocdec.h" -typedef struct Section { - int refcount; /* may be in more than one list */ - short nnode; /* Number of nodes for ith section */ - struct Section* parentsec; /* parent section of node 0 */ - struct Section* child; /* root of the list of children - connected to this parent kept in - order of increasing x */ - struct Section* sibling; /* used as list of sections that have same parent */ - - - /* the parentnode is only valid when tree_changed = 0 */ - struct Node* parentnode; /* parent node */ - struct Node** pnode; /* Pointer to pointer vector of node structures */ - int order; /* index of this in secorder vector */ - short recalc_area_; /* NODEAREA, NODERINV, diam, L need recalculation */ - short volatile_mark; /* for searching */ - void* volatile_ptr; /* e.g. ShapeSection* */ -#if DIAMLIST - short npt3d; /* number of 3-d points */ - short pt3d_bsize; /* amount of allocated space for 3-d points */ - struct Pt3d* pt3d; /* list of 3d points with diameter */ - struct Pt3d* logical_connection; /* nil for legacy, otherwise specifies logical connection - position (for translation) */ -#endif - struct Prop* prop; /* eg. length, etc. */ -} Section; +#include #if DIAMLIST -typedef struct Pt3d { - float x, y, z, d; /* 3d point, microns */ +struct Pt3d { + float x, y, z, d; // 3d point, microns double arc; -} Pt3d; +}; +#endif +struct Section { + int refcount{}; // may be in more than one list + short nnode{}; // Number of nodes for ith section + Section* parentsec{}; // parent section of node 0 + Section* child{}; // root of the list of children connected to this parent kept in order of + // increasing x + Section* sibling{}; // used as list of sections that have same parent + + Node* parentnode{}; // parent node; only valid when tree_changed = 0 + Node** pnode{}; // Pointer to pointer vector of node structures + int order{}; // index of this in secorder vector + short recalc_area_{}; // NODEAREA, NODERINV, diam, L need recalculation + short volatile_mark{}; // for searching + void* volatile_ptr{}; // e.g. ShapeSection* +#if DIAMLIST + short npt3d{}; // number of 3-d points + short pt3d_bsize{}; // amount of allocated space for 3-d points + Pt3d* pt3d{}; // list of 3d points with diameter + Pt3d* logical_connection{}; // nullptr for legacy, otherwise specifies logical connection + // position (for translation) #endif + Prop* prop{}; // eg. 
length, etc. +}; typedef float NodeCoef; typedef double NodeVal; @@ -96,72 +97,112 @@ typedef struct Info3Val { /* storage to help build matrix efficiently */ /* if any double is added after area then think about changing the notify_free_val parameter in node_free in solve.cpp */ - -#define NODED(n) (*((n)->_d)) -#define NODERHS(n) (*((n)->_rhs)) - -#undef NODEV /* sparc-sun-solaris2.9 */ - -#if CACHEVEC == 0 -#define NODEA(n) ((n)->_a) -#define NODEB(n) ((n)->_b) -#define NODEV(n) ((n)->_v) -#define NODEAREA(n) ((n)->_area) -#else /* CACHEVEC */ -#define NODEV(n) (*((n)->_v)) -#define NODEAREA(n) ((n)->_area) +#define NODEAREA(n) ((n)->area()) #define NODERINV(n) ((n)->_rinv) -#define VEC_A(i) (_nt->_actual_a[(i)]) -#define VEC_B(i) (_nt->_actual_b[(i)]) -#define VEC_D(i) (_nt->_actual_d[(i)]) -#define VEC_RHS(i) (_nt->_actual_rhs[(i)]) -#define VEC_V(i) (_nt->_actual_v[(i)]) -#define VEC_AREA(i) (_nt->_actual_area[(i)]) -#define NODEA(n) (VEC_A((n)->v_node_index)) -#define NODEB(n) (VEC_B((n)->v_node_index)) -#endif /* CACHEVEC */ - -extern int use_sparse13; -extern int use_cachevec; -extern int secondorder; -extern int cvode_active_; - -typedef struct Node { -#if CACHEVEC == 0 - double _v; /* membrane potential */ - double _area; /* area in um^2 but see treesetup.cpp */ - double _a; /* effect of node in parent equation */ - double _b; /* effect of parent in node equation */ -#else /* CACHEVEC */ - double* _v; /* membrane potential */ - double _area; /* area in um^2 but see treesetup.cpp */ - double _rinv; /* conductance uS from node to parent */ - double _v_temp; /* vile necessity til actual_v allocated */ -#endif /* CACHEVEC */ - double* _d; /* diagonal element in node equation */ - double* _rhs; /* right hand side in node equation */ + +struct Extnode; +struct Node { + // Eventually the old Node class should become an alias for + // neuron::container::handle::Node, but as an intermediate measure we can + // add one of those as a member and forward some access/modifications to it. 
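A brief sketch (illustrative only) of what the forwarding accessors below and the NODE* macros from section_fwd.hpp look like at a call site:

    void sketch_node_access(Node* nd, double g) {
        nd->v() = -65.0;              // forwards into the SoA node storage via _node_handle
        NODED(nd) += g;               // macro now expands to _nrn_mechanism_access_d(nd)
        auto rhs = nd->rhs_handle();  // handle that remains usable if the storage is permuted
    }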
+ neuron::container::Node::owning_handle _node_handle{neuron::model().node_data()}; + + [[nodiscard]] auto id() { + return _node_handle.id(); + } + [[nodiscard]] auto& a() { + return _node_handle.a(); + } + [[nodiscard]] auto const& a() const { + return _node_handle.a(); + } + [[nodiscard]] auto& area() { + return _node_handle.area_hack(); + } + [[nodiscard]] auto const& area() const { + return _node_handle.area_hack(); + } + [[nodiscard]] auto area_handle() { + return _node_handle.area_handle(); + } + [[nodiscard]] auto& b() { + return _node_handle.b(); + } + [[nodiscard]] auto const& b() const { + return _node_handle.b(); + } + [[nodiscard]] auto& d() { + return _node_handle.d(); + } + [[nodiscard]] auto const& d() const { + return _node_handle.d(); + } + [[nodiscard]] auto& v() { + return _node_handle.v_hack(); + } + [[nodiscard]] auto const& v() const { + return _node_handle.v_hack(); + } + [[nodiscard]] auto& v_hack() { + return _node_handle.v_hack(); + } + [[nodiscard]] auto const& v_hack() const { + return _node_handle.v_hack(); + } + [[nodiscard]] auto v_handle() { + return _node_handle.v_handle(); + } + [[nodiscard]] auto& rhs() { + return _node_handle.rhs(); + } + [[nodiscard]] auto const& rhs() const { + return _node_handle.rhs(); + } + [[nodiscard]] auto rhs_handle() { + return _node_handle.rhs_handle(); + } + [[nodiscard]] auto& sav_d() { + return _node_handle.sav_d(); + } + [[nodiscard]] auto const& sav_d() const { + return _node_handle.sav_d(); + } + [[nodiscard]] auto& sav_rhs() { + return _node_handle.sav_rhs(); + } + [[nodiscard]] auto const& sav_rhs() const { + return _node_handle.sav_rhs(); + } + [[nodiscard]] auto sav_rhs_handle() { + return _node_handle.sav_rhs_handle(); + } + [[nodiscard]] auto non_owning_handle() { + return _node_handle.non_owning_handle(); + } + double _rinv{}; /* conductance uS from node to parent */ double* _a_matelm; double* _b_matelm; + double* _d_matelm; int eqn_index_; /* sparse13 matrix row/col index */ /* if no extnodes then = v_node_index +1*/ /* each extnode adds nlayer more equations after this */ - struct Prop* prop; /* Points to beginning of property list */ + Prop* prop{}; /* Points to beginning of property list */ Section* child; /* section connected to this node */ /* 0 means no other section connected */ Section* sec; /* section this node is in */ - /* #if PARANEURON */ + /* #if NRNMPI */ struct Node* _classical_parent; /* needed for multisplit */ struct NrnThread* _nt; /* #endif */ #if EXTRACELLULAR - struct Extnode* extnode; + Extnode* extnode{}; #endif #if EXTRAEQN - struct Eqnblock* eqnblock; /* hook to other equations which + Eqnblock* eqnblock{}; /* hook to other equations which need to be solved at the same time as the membrane potential. eg. fast changeing ionic concentrations */ -#endif /*MOREEQN*/ +#endif #if DEBUGSOLVE double savd; @@ -169,85 +210,202 @@ typedef struct Node { #endif /*DEBUGSOLVE*/ int v_node_index; /* only used to calculate parent_node_indices*/ int sec_node_index_; /* to calculate segment index from *Node */ -} Node; - -#if EXTRACELLULAR -/* pruned to only work with sparse13 */ -extern int nrn_nlayer_extracellular; -#define nlayer (nrn_nlayer_extracellular) /* first (0) layer is extracellular next to membrane */ -typedef struct Extnode { - double* param; /* points to extracellular parameter vector */ - /* v is membrane potential. so v internal = Node.v + Node.vext[0] */ - /* However, the Node equation is for v internal. */ - /* This is reconciled during update. 
*/ - - /* Following all have allocated size of nlayer */ - double* v; /* v external. */ - double* _a; - double* _b; - double** _d; - double** _rhs; /* d, rhs, a, and b are analogous to those in node */ - double** _a_matelm; - double** _b_matelm; - double** _x12; /* effect of v[layer] on eqn layer-1 (or internal)*/ - double** _x21; /* effect of v[layer-1 or internal] on eqn layer*/ -} Extnode; -#endif + Node() = default; + Node(Node const&) = delete; + Node(Node&&) = default; + Node& operator=(Node const&) = delete; + Node& operator=(Node&&) = default; + ~Node(); + friend std::ostream& operator<<(std::ostream& os, Node const& n) { + return os << n._node_handle; + } +}; #if !INCLUDEHOCH #include "hocdec.h" /* Prop needs Datum and Datum needs Symbol */ #endif #define PROP_PY_INDEX 10 +struct Prop { + // Working assumption is that we can safely equate "Prop" with "instance + // of a mechanism" apart from a few special cases like CABLESECTION + Prop(short type) + : _type{type} { + if (type != CABLESECTION) { + m_mech_handle = neuron::container::Mechanism::owning_handle{ + neuron::model().mechanism_data(type)}; + } + } + Prop* next; /* linked list of properties */ + short _type; /* type of membrane, e.g. passive, HH, etc. */ + int dparam_size; /* for notifying hoc_free_val_array */ + // double* param; /* vector of doubles for this property */ + Datum* dparam; /* usually vector of pointers to doubles + of other properties but maybe other things as well + for example one cable section property is a + symbol */ + long _alloc_seq; /* for cache efficiency */ + Object* ob; /* nullptr if normal property, otherwise the object containing the data*/ + + /** @brief Get the identifier of this instance. + */ + [[nodiscard]] auto id() const { + assert(m_mech_handle); + return m_mech_handle->id_hack(); + } + + /** + * @brief Check if the given handle refers to data owned by this Prop. + */ + [[nodiscard]] bool owns(neuron::container::data_handle const& handle) const { + assert(m_mech_handle); + auto const num_fpfields = m_mech_handle->num_fpfields(); + auto* const raw_ptr = static_cast(handle); + for (auto i = 0; i < num_fpfields; ++i) { + for (auto j = 0; j < m_mech_handle->fpfield_dimension(i); ++j) { + if (raw_ptr == &m_mech_handle->fpfield(i, j)) { + return true; + } + } + } + return false; + } + + /** + * @brief Return a reference to the i-th floating point data field associated with this Prop. + * + * Note that there is a subtlety with the numbering scheme in case of array variables. + * If we have 3 array variables (a, b, c) with dimensions x, y, z: + * a[x] b[y] c[z] + * then, for example, the second element of b (assume y >= 2) is obtained with param(1, 1). + * In AoS NEURON these values were all stored contiguously, and the values were obtained using + * a single index; taking the same example, the second element of b used to be found at index + * x + 1 in the param array. In all of the above, scalar variables are treated the same and + * simply have dimension 1. In SoA NEURON then a[1] is stored immediately after a[0] in memory, + * but for a given mechanism instance b[0] is **not** stored immediately after a[x-1]. + * + * It is possible, but a little inefficient, to calculate the new pair of indices from an old + * index. For that, see the param_legacy and param_handle_legacy functions. 
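A short illustration of the numbering described above, keeping the hypothetical fields a[x] b[y] c[z] (x here stands for the array dimension of a, not a real variable):

    double& b1        = p->param(1, 1);          // field 1 is b, array index 1
    double& b1_legacy = p->param_legacy(x + 1);  // the same element via the old flat index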
+ */ + [[nodiscard]] double& param(int field_index, int array_index = 0) { + assert(m_mech_handle); + return m_mech_handle->fpfield(field_index, array_index); + } + + /** + * @brief Return a reference to the i-th double value associated with this Prop. + * + * See the discussion above about numbering schemes. + */ + [[nodiscard]] double const& param(int field_index, int array_index = 0) const { + assert(m_mech_handle); + return m_mech_handle->fpfield(field_index, array_index); + } + + /** + * @brief Return a handle to the i-th double value associated with this Prop. + * + * See the discussion above about numbering schemes. + */ + [[nodiscard]] auto param_handle(int field, int array_index = 0) { + assert(m_mech_handle); + return m_mech_handle->fpfield_handle(field, array_index); + } + + [[nodiscard]] auto param_handle(neuron::container::field_index ind) { + return param_handle(ind.field, ind.array_index); + } + + private: + /** + * @brief Translate a legacy (flat) index into a (variable, array offset) pair. + * @todo Reimplement this using the new helpers. + */ + [[nodiscard]] std::pair translate_legacy_index(int legacy_index) const { + assert(m_mech_handle); + int total{}; + auto const num_fields = m_mech_handle->num_fpfields(); + for (auto field = 0; field < num_fields; ++field) { + auto const array_dim = m_mech_handle->fpfield_dimension(field); + if (legacy_index < total + array_dim) { + auto const array_index = legacy_index - total; + return {field, array_index}; + } + total += array_dim; + } + throw std::runtime_error("could not translate legacy index " + + std::to_string(legacy_index)); + } + + public: + [[nodiscard]] double& param_legacy(int legacy_index) { + auto const [array_dim, array_index] = translate_legacy_index(legacy_index); + return param(array_dim, array_index); + } + + [[nodiscard]] double const& param_legacy(int legacy_index) const { + auto const [array_dim, array_index] = translate_legacy_index(legacy_index); + return param(array_dim, array_index); + } + + [[nodiscard]] auto param_handle_legacy(int legacy_index) { + auto const [array_dim, array_index] = translate_legacy_index(legacy_index); + return param_handle(array_dim, array_index); + } + + /** + * @brief Return how many double values are assocated with this Prop. + * + * In case of array variables, this is the sum over array dimensions. + * i.e. if a mechanism has a[2] b[2] then param_size()=4 and param_num_vars()=2. + */ + [[nodiscard]] int param_size() const { + assert(m_mech_handle); + return m_mech_handle->fpfields_size(); + } + + /** + * @brief Return how many (possibly-array) variables are associated with this Prop. + * + * In case of array variables, this ignores array dimensions. + * i.e. if a mechanism has a[2] b[2] then param_size()=4 and param_num_vars()=2. + */ + [[nodiscard]] int param_num_vars() const { + assert(m_mech_handle); + return m_mech_handle->num_fpfields(); + } + + /** + * @brief Return the array dimension of the given value. + */ + [[nodiscard]] int param_array_dimension(int field) const { + assert(m_mech_handle); + return m_mech_handle->fpfield_dimension(field); + } + + [[nodiscard]] std::size_t current_row() const { + assert(m_mech_handle); + return m_mech_handle->current_row(); + } + + friend std::ostream& operator<<(std::ostream& os, Prop const& p) { + if (p.m_mech_handle) { + return os << *p.m_mech_handle; + } else { + return os << "Prop{nullopt}"; + } + } + + private: + // This is a handle that owns a row of the ~global mechanism data for + // `_type`. 
Usage of `param` and `param_size` should be replaced with + // indirection through this. + std::optional m_mech_handle; +}; -typedef struct Prop { - struct Prop* next; /* linked list of properties */ - short _type; /* type of membrane, e.g. passive, HH, etc. */ - short unused1; /* gcc and borland need pairs of shorts to align the same.*/ - int param_size; /* for notifying hoc_free_val_array */ - double* param; /* vector of doubles for this property */ - Datum* dparam; /* usually vector of pointers to doubles - of other properties but maybe other things as well - for example one cable section property is a - symbol */ - long _alloc_seq; /* for cache efficiency */ - Object* ob; /* nil if normal property, otherwise the object containing the data*/ -} Prop; - -extern double* nrn_prop_data_alloc(int type, int count, Prop* p); -extern Datum* nrn_prop_datum_alloc(int type, int count, Prop* p); -extern void nrn_prop_data_free(int type, double* pd); extern void nrn_prop_datum_free(int type, Datum* ppd); -extern double nrn_ghk(double, double, double, double); - -/* a point process is computed just like regular mechanisms. Ie it appears -in the property list whose type specifies which allocation, current, and -state functions to call. This means some nodes have more properties than -other nodes even in the same section. The Point_process structure allows -the interface to hoc variable names. -Each variable symbol u.rng->type refers to the point process mechanism. -The variable is treated as a vector -variable whose first index specifies "which one" of that mechanisms insertion -points we are talking about. Finally the variable u.rng->index tells us -where in the p-array to look. The number of point_process vectors is the -number of different point process types. This is different from the -mechanism type which enumerates all mechanisms including the point_processes. -It is the responsibility of create_point_process to set up the vectors and -fill in the symbol information. However only after the process is given -a location can the variables be set or accessed. This is because the -allocation function may have to connect to some ionic parameters and the -process exists primarily as a property of a node. -*/ -typedef struct Point_process { - Section* sec; /* section and node location for the point mechanism*/ - Node* node; - Prop* prop; /* pointer to the actual property linked to the - node property list */ - Object* ob; /* object that owns this process */ - void* presyn_; /* non-threshold presynapse for NetCon */ - void* nvi_; /* NrnVarIntegrator (for local step method) */ - void* _vnt; /* NrnThread* (for NET_RECEIVE and multicore) */ -} Point_process; +extern void nrn_delete_mechanism_prop_datum(int type); +extern int nrn_mechanism_prop_datum_count(int type); #if EXTRAEQN /*Blocks of equations can hang off each node of the current conservation diff --git a/src/nrnoc/section_fwd.hpp b/src/nrnoc/section_fwd.hpp new file mode 100644 index 0000000000..fe1a955f6c --- /dev/null +++ b/src/nrnoc/section_fwd.hpp @@ -0,0 +1,86 @@ +#pragma once +#include "multicore.h" +#include "neuron/container/generic_data_handle.hpp" +#include "nrnredef.h" +/** + * @file section_fwd.hpp + * @brief Forward declarations of Section, Node etc. to be included in translated MOD files. + */ +struct Node; +struct Prop; +struct Section; +using Datum = neuron::container::generic_data_handle; + +// Forward declaration of Prop means Prop.id() not directly accessible +// in mod files. 
This helps work around that problem for purposes of validity +// checking. Perhaps someday, Prop will disappear entirely as it conceptually +// is just a mechanism row and is bloated with redundant type and size info. +// It does not exist in CoreNEURON. +neuron::container::non_owning_identifier_without_container _nrn_get_prop_id(Prop*); + +extern int cvode_active_; +extern int secondorder; +extern int use_sparse13; + +double nrn_ghk(double, double, double, double); +Datum* nrn_prop_datum_alloc(int type, int count, Prop* p); + +#if EXTRACELLULAR +/* pruned to only work with sparse13 */ +extern int nrn_nlayer_extracellular; +#define nlayer (nrn_nlayer_extracellular) /* first (0) layer is extracellular next to membrane */ +struct Extnode { + std::vector> param{}; + // double* param; /* points to extracellular parameter vector */ + /* v is membrane potential. so v internal = Node.v + Node.vext[0] */ + /* However, the Node equation is for v internal. */ + /* This is reconciled during update. */ + + /* Following all have allocated size of nlayer */ + double* v; /* v external. */ + double* _a; + double* _b; + double** _d; + double** _rhs; /* d, rhs, a, and b are analogous to those in node */ + double** _a_matelm; + double** _b_matelm; + double** _x12; /* effect of v[layer] on eqn layer-1 (or internal)*/ + double** _x21; /* effect of v[layer-1 or internal] on eqn layer*/ +}; +#endif + +#define NODEA(n) _nrn_mechanism_access_a(n) +#define NODEB(n) _nrn_mechanism_access_b(n) +#define NODED(n) _nrn_mechanism_access_d(n) +#define NODERHS(n) _nrn_mechanism_access_rhs(n) +#ifdef NODEV +// Defined on macOS ("no device") at least +#undef NODEV +#endif +#define NODEV(n) _nrn_mechanism_access_voltage(n) + +/** + * A point process is computed just like regular mechanisms. Ie it appears in the property list + * whose type specifies which allocation, current, and state functions to call. This means some + * nodes have more properties than other nodes even in the same section. The Point_process structure + * allows the interface to hoc variable names. Each variable symbol u.rng->type refers to the point + * process mechanism. The variable is treated as a vector variable whose first index specifies + * "which one" of that mechanisms insertion points we are talking about. Finally the variable + * u.rng->index tells us where in the p-array to look. The number of point_process vectors is the + * number of different point process types. This is different from the mechanism type which + * enumerates all mechanisms including the point_processes. It is the responsibility of + * create_point_process to set up the vectors and fill in the symbol information. However only after + * the process is given a location can the variables be set or accessed. This is because the + * allocation function may have to connect to some ionic parameters and the process exists primarily + * as a property of a node. 
+ */ +struct Point_process { + Section* sec{}; /* section and node location for the point mechanism*/ + Node* node{}; + Prop* prop{}; /* pointer to the actual property linked to the + node property list */ + Object* ob{}; /* object that owns this process */ + void* presyn_{}; /* non-threshold presynapse for NetCon */ + void* nvi_{}; /* NrnVarIntegrator (for local step method) */ + void* _vnt{}; /* NrnThread* (for NET_RECEIVE and multicore) */ +}; diff --git a/src/nrnoc/solve.cpp b/src/nrnoc/solve.cpp index 8b586d8305..08b104c1b5 100644 --- a/src/nrnoc/solve.cpp +++ b/src/nrnoc/solve.cpp @@ -58,6 +58,7 @@ node.v + extnode.v[0] #include "ocnotify.h" #include "section.h" #include "spmatrix.h" +#include "treeset.h" #include #include @@ -68,7 +69,7 @@ node.v + extnode.v[0] static void node_free(); static void triang(NrnThread*), bksub(NrnThread*); -#if PARANEURON +#if NRNMPI void (*nrnmpi_splitcell_compute_)(); #endif @@ -344,7 +345,7 @@ void nrn_solve(NrnThread* _nt) { printf("\nnrn_solve enter %lx\n", (long)_nt); nrn_print_matrix(_nt); #endif -#if PARANEURON +#if NRNMPI if (nrn_multisplit_solve_) { nrn_thread_error("nrn_multisplit_solve"); (*nrn_multisplit_solve_)(); @@ -365,6 +366,7 @@ void nrn_solve(NrnThread* _nt) { if (use_sparse13) { int e; nrn_thread_error("solve use_sparse13"); + update_sp13_mat_based_on_actual_d(_nt); e = spFactor(_nt->_sp13mat); if (e != spOKAY) { switch (e) { @@ -376,10 +378,13 @@ void nrn_solve(NrnThread* _nt) { hoc_execerror("spFactor error:", "Singular"); } } - spSolve(_nt->_sp13mat, _nt->_actual_rhs, _nt->_actual_rhs); + update_sp13_rhs_based_on_actual_rhs(_nt); + spSolve(_nt->_sp13mat, _nt->_sp13_rhs, _nt->_sp13_rhs); + update_actual_d_based_on_sp13_mat(_nt); + update_actual_rhs_based_on_sp13_rhs(_nt); } else { triang(_nt); -#if PARANEURON +#if NRNMPI if (nrnmpi_splitcell_compute_) { nrn_thread_error("nrnmpi_splitcell_compute"); (*nrnmpi_splitcell_compute_)(); @@ -395,60 +400,37 @@ void nrn_solve(NrnThread* _nt) { } /* triangularization of the matrix equations */ -void triang(NrnThread* _nt) { +static void triang(NrnThread* _nt) { Node *nd, *pnd; - double p; int i, i2, i3; i2 = _nt->ncell; i3 = _nt->end; -#if CACHEVEC - if (use_cachevec) { - for (i = i3 - 1; i >= i2; --i) { - p = VEC_A(i) / VEC_D(i); - VEC_D(_nt->_v_parent_index[i]) -= p * VEC_B(i); - VEC_RHS(_nt->_v_parent_index[i]) -= p * VEC_RHS(i); - } - } else -#endif /* CACHEVEC */ - { - for (i = i3 - 1; i >= i2; --i) { - nd = _nt->_v_node[i]; - pnd = _nt->_v_parent[i]; - p = NODEA(nd) / NODED(nd); - NODED(pnd) -= p * NODEB(nd); - NODERHS(pnd) -= p * NODERHS(nd); - } + auto* const vec_a = _nt->node_a_storage(); + auto* const vec_b = _nt->node_b_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); + for (i = i3 - 1; i >= i2; --i) { + auto const p = vec_a[i] / vec_d[i]; + auto const pi = _nt->_v_parent_index[i]; + vec_d[pi] -= p * vec_b[i]; + vec_rhs[pi] -= p * vec_rhs[i]; } } /* back substitution to finish solving the matrix equations */ void bksub(NrnThread* _nt) { - Node *nd, *cnd; - int i, i1, i2, i3; - i1 = 0; - i2 = i1 + _nt->ncell; - i3 = _nt->end; -#if CACHEVEC - if (use_cachevec) { - for (i = i1; i < i2; ++i) { - VEC_RHS(i) /= VEC_D(i); - } - for (i = i2; i < i3; ++i) { - VEC_RHS(i) -= VEC_B(i) * VEC_RHS(_nt->_v_parent_index[i]); - VEC_RHS(i) /= VEC_D(i); - } - } else -#endif /* CACHEVEC */ - { - for (i = i1; i < i2; ++i) { - NODERHS(_nt->_v_node[i]) /= NODED(_nt->_v_node[i]); - } - for (i = i2; i < i3; ++i) { - cnd = _nt->_v_node[i]; - nd = 
_nt->_v_parent[i]; - NODERHS(cnd) -= NODEB(cnd) * NODERHS(nd); - NODERHS(cnd) /= NODED(cnd); - } + auto const i1 = 0; + auto const i2 = i1 + _nt->ncell; + auto const i3 = _nt->end; + auto* const vec_b = _nt->node_b_storage(); + auto* const vec_d = _nt->node_d_storage(); + auto* const vec_rhs = _nt->node_rhs_storage(); + for (int i = i1; i < i2; ++i) { + vec_rhs[i] /= vec_d[i]; + } + for (int i = i2; i < i3; ++i) { + vec_rhs[i] -= vec_b[i] * vec_rhs[_nt->_v_parent_index[i]]; + vec_rhs[i] /= vec_d[i]; } } @@ -467,33 +449,15 @@ short nrn_value_mark(Section* sec) { return sec->volatile_mark; } -/* allocate space for sections (but no nodes) */ -/* returns pointer to Section */ -Section* sec_alloc(void) { - Section* sec; - - /* changed from emalloc to allocation from a SectionPool in order - to allow safe checking of whether a void* is a possible Section* - without the possibility of invalid memory read errors. - Note that freeing sections must be done - with nrn_section_free(Section*) - */ - sec = nrn_section_alloc(); - sec->refcount = 0; - sec->nnode = 0; - sec->parentsec = sec->sibling = sec->child = (Section*) 0; - sec->parentnode = (Node*) 0; - sec->pnode = (Node**) 0; -#if DIAMLIST - sec->npt3d = 0; - sec->pt3d_bsize = 0; - sec->pt3d = (Pt3d*) 0; - sec->logical_connection = (Pt3d*) 0; -#endif - sec->prop = (Prop*) 0; - sec->recalc_area_ = 0; - - return sec; +/** + * @brief Allocate a new Section object. + * + * Changed from emalloc to allocation from a SectionPool in order to allow safe checking of whether + * a void* is a possible Section* without the possibility of invalid memory read errors. Note that + * freeing sections must be done with nrn_section_free(Section*). + */ +Section* sec_alloc() { + return nrn_section_alloc(); } /* free a node vector for one section */ @@ -511,7 +475,7 @@ static void node_free(Section* sec) { sec->pnode = (Node**) 0; sec->nnode = 0; } -void nrn_node_destruct1(Node*); + static void section_unlink(Section* sec); /* free everything about sections */ void sec_free(hoc_Item* secitem) { @@ -537,7 +501,7 @@ void sec_free(hoc_Item* secitem) { prop_free(&(sec->prop)); node_free(sec); if (!sec->parentsec && sec->parentnode) { - nrn_node_destruct1(sec->parentnode); + delete sec->parentnode; } #if DIAMLIST if (sec->pt3d) { @@ -566,6 +530,7 @@ printf("section_unref: freed\n"); nrn_section_free(sec); } } + void section_ref(Section* sec) { /*printf("section_ref %lx %d\n", (long)sec,sec->refcount+1);*/ ++sec->refcount; @@ -594,104 +559,46 @@ static void section_unlink(Section* sec) /* other sections no longer reference t nrn_disconnect(sec); } -Node** node_construct(int n) { - Node *nd, **pnode; - int i; - - pnode = (Node**) ecalloc((unsigned) n, sizeof(Node*)); - for (i = n - 1; i >= 0; i--) { - nd = (Node*) ecalloc(1, sizeof(Node)); -#if CACHEVEC - nd->_v = &nd->_v_temp; - nd->_area = 100.; - nd->_rinv = 0.; -#endif - nd->sec_node_index_ = i; - pnode[i] = nd; - nd->prop = (Prop*) 0; - NODEV(nd) = DEF_vrest; +Node::~Node() { + prop_free(&prop); #if EXTRACELLULAR - nd->extnode = (Extnode*) 0; -#endif -#if EXTRAEQN - nd->eqnblock = (Eqnblock*) 0; -#endif - } - return pnode; -} - -Node* nrn_node_construct1(void) { - Node* nd; - Node** ndp; - ndp = node_construct(1); - nd = ndp[0]; - free(ndp); - return nd; -} - -void nrn_node_destruct1(Node* nd) { - if (!nd) { - return; - } - prop_free(&(nd->prop)); -#if CACHEVEC - notify_freed_val_array(&NODEV(nd), 1); - notify_freed_val_array(&NODEAREA(nd), 2); -#else - notify_freed_val_array(&NODEV(nd), 2); -#endif -#if EXTRACELLULAR - 
if (nd->extnode) { - notify_freed_val_array(nd->extnode->v, nlayer); + if (extnode) { + notify_freed_val_array(extnode->v, nlayer); } #endif #if EXTRAEQN - { - Eqnblock *e, *e1; - for (e = nd->eqnblock; e; e = e1) { - e1 = e->eqnblock_next; - free((char*) e); - } + for (Eqnblock* e = eqnblock; e;) { + free(std::exchange(e, e->eqnblock_next)); } #endif #if EXTRACELLULAR - if (nd->extnode) { - extnode_free_elements(nd->extnode); - free((char*) nd->extnode); + if (extnode) { + extnode_free_elements(extnode); + delete extnode; } #endif - free(nd); } +// this is delete[]...apart from the order? void node_destruct(Node** pnode, int n) { - int i; - Node* nd; - - for (i = n - 1; i >= 0; i--) { - if (pnode[i]) { - nrn_node_destruct1(pnode[i]); - } + for (int i = n - 1; i >= 0; --i) { + delete pnode[i]; } - free((char*) pnode); + delete[] pnode; } #if KEEP_NSEG_PARM extern int keep_nseg_parm_; +// TODO this logic should just be in the copy constructor...probably some of it +// already does the right thing by default static Node* node_clone(Node* nd1) { - Node* nd2; - Prop *p1, *p2; - extern Prop* prop_alloc(Prop**, int, Node*); - int i, imax; - nd2 = (Node*) ecalloc(1, sizeof(Node)); -#if CACHEVEC - nd2->_v = &nd2->_v_temp; -#endif - NODEV(nd2) = NODEV(nd1); - for (p1 = nd1->prop; p1; p1 = p1->next) { + Node* nd2 = new Node{}; + nd2->v() = nd1->v(); + for (Prop* p1 = nd1->prop; p1; p1 = p1->next) { if (!memb_func[p1->_type].is_point) { - p2 = prop_alloc(&(nd2->prop), p1->_type, nd2); + Prop* p2 = prop_alloc(&(nd2->prop), p1->_type, nd2); if (p2->ob) { Symbol *s, *ps; double *px, *py; @@ -702,23 +609,25 @@ static Node* node_clone(Node* nd1) { ps = s->u.ppsym[j]; px = p2->ob->u.dataspace[ps->u.rng.index].pval; py = p1->ob->u.dataspace[ps->u.rng.index].pval; - imax = hoc_total_array_data(ps, 0); - for (i = 0; i < imax; ++i) { + std::size_t imax{hoc_total_array_data(ps, 0)}; + for (std::size_t i = 0; i < imax; ++i) { px[i] = py[i]; } } } else { - for (i = 0; i < p1->param_size; ++i) { - p2->param[i] = p1->param[i]; + for (int i = 0; i < p1->param_num_vars(); ++i) { + for (auto j = 0; j < p1->param_array_dimension(i); ++j) { + p2->param(i, j) = p1->param(i, j); + } } } } } /* in case the user defined an explicit ion_style, make sure the new node has the same style for all ions. 
*/ - for (p1 = nd1->prop; p1; p1 = p1->next) { + for (Prop* p1 = nd1->prop; p1; p1 = p1->next) { if (nrn_is_ion(p1->_type)) { - p2 = nd2->prop; + Prop* p2 = nd2->prop; while (p2 && p2->_type != p1->_type) { p2 = p2->next; } @@ -746,7 +655,7 @@ static void node_realloc(Section* sec, short nseg) { double x; pn1 = sec->pnode; n1 = sec->nnode; - pn2 = (Node**) ecalloc((unsigned) nseg, sizeof(Node*)); + pn2 = new Node* [nseg] {}; n2 = nseg; sec->pnode = pn2; sec->nnode = n2; @@ -829,8 +738,12 @@ void node_alloc(Section* sec, short nseg) { if (nseg == 0) { return; } - sec->pnode = node_construct(nseg); + sec->pnode = new Node* [nseg] {}; sec->nnode = nseg; + for (i = 0; i < nseg; ++i) { + sec->pnode[i] = new Node{}; + sec->pnode[i]->sec_node_index_ = i; + } } for (i = 0; i < nseg; ++i) { sec->pnode[i]->sec = sec; diff --git a/src/nrnoc/treeset.cpp b/src/nrnoc/treeset.cpp index c802f12cf0..0849d1cdb5 100644 --- a/src/nrnoc/treeset.cpp +++ b/src/nrnoc/treeset.cpp @@ -6,23 +6,26 @@ #include "multisplit.h" #include "nrn_ansi.h" #include "neuron.h" +#include "neuron/cache/mechanism_range.hpp" +#include "neuron/cache/model_data.hpp" +#include "neuron/container/soa_container.hpp" #include "nonvintblock.h" #include "nrndae_c.h" #include "nrniv_mf.h" #include "nrnmpi.h" #include "ocnotify.h" +#include "partrans.h" #include "section.h" #include "spmatrix.h" -#include "treeset.h" #include "utils/profile/profiler_interface.h" #include "multicore.h" #include -#if HAVE_STDLIB_H #include -#endif #include #include + +#include #include extern spREAL* spGetElement(char*, int, int); @@ -40,28 +43,11 @@ extern int* nrn_prop_dparam_size_; extern int* nrn_dparam_ptr_start_; extern int* nrn_dparam_ptr_end_; extern void nrn_define_shape(); -extern void nrn_partrans_update_ptrs(); -#if 1 || PARANEURON +#if 1 || NRNMPI void (*nrn_multisplit_setup_)(); #endif -#if CACHEVEC - - -/* a, b, d and rhs are, from now on, all stored in extra arrays, to improve - * cache efficiency in nrn_lhs() and nrn_rhs(). Formerly, three levels of - * indirection were necessary for accessing these elements, leading to lots - * of L2 misses. 2006-07-05/Hubert Eichner */ -/* these are now thread instance arrays */ -static void nrn_recalc_node_ptrs(); -#define UPDATE_VEC_AREA(nd) \ - if (nd->_nt && nd->_nt->_actual_area) { \ - nd->_nt->_actual_area[(nd)->v_node_index] = NODEAREA(nd); \ - } -#endif /* CACHEVEC */ -int use_cachevec; - /* Do not use unless necessary (loops in tree structure) since overhead (for gaussian elimination) is @@ -78,7 +64,6 @@ set to 1. This means that the mechanism vectors need to be re-determined. int v_structure_change; int structure_change_cnt; int diam_change_cnt; -int nrn_node_ptr_change_cnt_; extern int section_count; extern Section** secorder; @@ -344,13 +329,50 @@ vm += dvi-dvx */ +/* + * Update actual_rhs based on _sp13_rhs used for sparse13 solver + */ +void update_actual_rhs_based_on_sp13_rhs(NrnThread* nt) { + for (int i = 0; i < nt->end; i++) { + nt->actual_rhs(i) = nt->_sp13_rhs[nt->_v_node[i]->eqn_index_]; + } +} + +/* + * Update _sp13_rhs used for sparse13 solver based on changes on actual_rhs + */ +void update_sp13_rhs_based_on_actual_rhs(NrnThread* nt) { + for (int i = 0; i < nt->end; i++) { + nt->_sp13_rhs[nt->_v_node[i]->eqn_index_] = nt->actual_rhs(i); + } +} + +/* + * Update the SoA storage for node matrix diagonals from the sparse13 matrix. 
+ */ +void update_actual_d_based_on_sp13_mat(NrnThread* nt) { + for (int i = 0; i < nt->end; ++i) { + nt->actual_d(i) = *nt->_v_node[i]->_d_matelm; + } +} + +/* + * Update the SoA storage for node matrix diagonals from the sparse13 matrix. + */ +void update_sp13_mat_based_on_actual_d(NrnThread* nt) { + for (int i = 0; i < nt->end; ++i) { + *nt->_v_node[i]->_d_matelm = nt->actual_d(i); + } +} + /* calculate right hand side of cm*dvm/dt = -i(vm) + is(vi) + ai_j*(vi_j - vi) cx*dvx/dt - cm*dvm/dt = -gx*(vx - ex) + i(vm) + ax_j*(vx_j - vx) This is a common operation for fixed step, cvode, and daspk methods */ -void nrn_rhs(NrnThread* _nt) { +void nrn_rhs(neuron::model_sorted_token const& cache_token, NrnThread& nt) { + auto* const _nt = &nt; int i, i1, i2, i3; double w; int measure = 0; @@ -367,45 +389,40 @@ void nrn_rhs(NrnThread* _nt) { nrn_thread_error("need recalc_diam()"); recalc_diam(); } + auto* const vec_rhs = nt.node_rhs_storage(); if (use_sparse13) { int i, neqn; nrn_thread_error("nrn_rhs use_sparse13"); neqn = spGetSize(_nt->_sp13mat, 0); for (i = 1; i <= neqn; ++i) { - _nt->_actual_rhs[i] = 0.; + _nt->_sp13_rhs[i] = 0.; + } + for (i = i1; i < i3; ++i) { + NODERHS(_nt->_v_node[i]) = 0.; } } else { -#if CACHEVEC - if (use_cachevec) { - for (i = i1; i < i3; ++i) { - VEC_RHS(i) = 0.; - } - } else -#endif /* CACHEVEC */ - { - for (i = i1; i < i3; ++i) { - NODERHS(_nt->_v_node[i]) = 0.; - } + for (i = i1; i < i3; ++i) { + vec_rhs[i] = 0.; } } - if (_nt->_nrn_fast_imem) { + auto const vec_sav_rhs = _nt->node_sav_rhs_storage(); + if (vec_sav_rhs) { for (i = i1; i < i3; ++i) { - _nt->_nrn_fast_imem->_nrn_sav_rhs[i] = 0.; + vec_sav_rhs[i] = 0.; } } - nrn_ba(_nt, BEFORE_BREAKPOINT); + nrn_ba(cache_token, nt, BEFORE_BREAKPOINT); /* note that CAP has no current */ for (tml = _nt->tml; tml; tml = tml->next) - if (memb_func[tml->index].current) { - Pvmi s = memb_func[tml->index].current; + if (auto const current = memb_func[tml->index].current; current) { std::string mechname("cur-"); mechname += memb_func[tml->index].sym->name; if (measure) { w = nrnmpi_wtime(); } nrn::Instrumentor::phase_begin(mechname.c_str()); - (*s)(_nt, tml->ml, tml->index); + current(cache_token, _nt, tml->ml, tml->index); nrn::Instrumentor::phase_end(mechname.c_str()); if (measure) { nrn_mech_wtime_[tml->index] += nrnmpi_wtime() - w; @@ -418,20 +435,12 @@ void nrn_rhs(NrnThread* _nt) { } activsynapse_rhs(); - if (_nt->_nrn_fast_imem) { + if (vec_sav_rhs) { /* _nrn_save_rhs has only the contribution of electrode current so here we transform so it only has membrane current contribution */ - double* p = _nt->_nrn_fast_imem->_nrn_sav_rhs; - if (use_cachevec) { - for (i = i1; i < i3; ++i) { - p[i] -= VEC_RHS(i); - } - } else { - for (i = i1; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - p[i] -= NODERHS(nd); - } + for (i = i1; i < i3; ++i) { + vec_sav_rhs[i] -= vec_rhs[i]; } } #if EXTRACELLULAR @@ -445,7 +454,7 @@ void nrn_rhs(NrnThread* _nt) { if (use_sparse13) { /* must be after nrn_rhs_ext so that whatever is put in nd->_rhs does not get added to nde->rhs */ - nrndae_rhs(); + nrndae_rhs(_nt); } activstim_rhs(); @@ -454,25 +463,16 @@ void nrn_rhs(NrnThread* _nt) { The extracellular mechanism contribution is already done. 
rhs += ai_j*(vi_j - vi) */ -#if CACHEVEC - if (use_cachevec) { - for (i = i2; i < i3; ++i) { - double dv = VEC_V(_nt->_v_parent_index[i]) - VEC_V(i); - /* our connection coefficients are negative so */ - VEC_RHS(i) -= VEC_B(i) * dv; - VEC_RHS(_nt->_v_parent_index[i]) += VEC_A(i) * dv; - } - } else -#endif /* CACHEVEC */ - { - for (i = i2; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - Node* pnd = _nt->_v_parent[i]; - double dv = NODEV(pnd) - NODEV(nd); - /* our connection coefficients are negative so */ - NODERHS(nd) -= NODEB(nd) * dv; - NODERHS(pnd) += NODEA(nd) * dv; - } + auto* const vec_a = nt.node_a_storage(); + auto* const vec_b = nt.node_b_storage(); + auto* const vec_v = nt.node_voltage_storage(); + auto* const parent_i = nt._v_parent_index; + for (i = i2; i < i3; ++i) { + auto const pi = parent_i[i]; + auto const dv = vec_v[pi] - vec_v[i]; + // our connection coefficients are negative so + vec_rhs[i] -= vec_b[i] * dv; + vec_rhs[pi] += vec_a[i] * dv; } } @@ -484,7 +484,8 @@ hand side after solving. This is a common operation for fixed step, cvode, and daspk methods */ -void nrn_lhs(NrnThread* _nt) { +void nrn_lhs(neuron::model_sorted_token const& sorted_token, NrnThread& nt) { + auto* const _nt = &nt; int i, i1, i2, i3; NrnThreadMembList* tml; @@ -497,38 +498,30 @@ void nrn_lhs(NrnThread* _nt) { } if (use_sparse13) { - int i, neqn; - neqn = spGetSize(_nt->_sp13mat, 0); + // Zero the sparse13 matrix spClear(_nt->_sp13mat); - } else { -#if CACHEVEC - if (use_cachevec) { - for (i = i1; i < i3; ++i) { - VEC_D(i) = 0.; - } - } else -#endif /* CACHEVEC */ - { - for (i = i1; i < i3; ++i) { - NODED(_nt->_v_node[i]) = 0.; - } - } } - if (_nt->_nrn_fast_imem) { + // Make sure the SoA node diagonals are also zeroed (is this needed?) + auto* const vec_d = _nt->node_d_storage(); + for (int i = i1; i < i3; ++i) { + vec_d[i] = 0.; + } + + auto const vec_sav_d = _nt->node_sav_d_storage(); + if (vec_sav_d) { for (i = i1; i < i3; ++i) { - _nt->_nrn_fast_imem->_nrn_sav_d[i] = 0.; + vec_sav_d[i] = 0.; } } /* note that CAP has no jacob */ for (tml = _nt->tml; tml; tml = tml->next) - if (memb_func[tml->index].jacob) { - Pvmi s = memb_func[tml->index].jacob; + if (auto const jacob = memb_func[tml->index].jacob; jacob) { std::string mechname("cur-"); mechname += memb_func[tml->index].sym->name; nrn::Instrumentor::phase_begin(mechname.c_str()); - (*s)(_nt, tml->ml, tml->index); + jacob(sorted_token, _nt, tml->ml, tml->index); nrn::Instrumentor::phase_end(mechname.c_str()); if (errno) { if (nrn_errno_check(tml->index)) { @@ -542,31 +535,17 @@ void nrn_lhs(NrnThread* _nt) { /* note, the first is CAP */ if (_nt->tml) { assert(_nt->tml->index == CAP); - nrn_cap_jacob(_nt, _nt->tml->ml); + nrn_cap_jacob(sorted_token, _nt, _nt->tml->ml); } activsynapse_lhs(); - - if (_nt->_nrn_fast_imem) { + if (vec_sav_d) { /* _nrn_save_d has only the contribution of electrode current so here we transform so it only has membrane current contribution */ - double* p = _nt->_nrn_fast_imem->_nrn_sav_d; - if (use_sparse13) { - for (i = i1; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - p[i] += NODED(nd); - } - } else if (use_cachevec) { - for (i = i1; i < i3; ++i) { - p[i] += VEC_D(i); - } - } else { - for (i = i1; i < i3; ++i) { - Node* nd = _nt->_v_node[i]; - p[i] += NODED(nd); - } + for (i = i1; i < i3; ++i) { + vec_sav_d[i] += vec_d[i]; } } #if EXTRACELLULAR @@ -576,7 +555,9 @@ void nrn_lhs(NrnThread* _nt) { if (use_sparse13) { /* must be after nrn_setup_ext so that whatever is put in nd->_d does not get added to nde->d */ + 
update_sp13_mat_based_on_actual_d(_nt); nrndae_lhs(); + update_actual_d_based_on_sp13_mat(_nt); // because nrndae_lhs writes to sp13_mat } activclamp_lhs(); @@ -585,43 +566,41 @@ void nrn_lhs(NrnThread* _nt) { /* now add the axial currents */ - if (use_sparse13) { - for (i = i2; i < i3; ++i) { + update_sp13_mat_based_on_actual_d(_nt); // just because of activclamp_lhs + for (i = i2; i < i3; ++i) { // note i2 Node* nd = _nt->_v_node[i]; - *(nd->_a_matelm) += NODEA(nd); - *(nd->_b_matelm) += NODEB(nd); /* b may have value from lincir */ - NODED(nd) -= NODEB(nd); - } - for (i = i2; i < i3; ++i) { - NODED(_nt->_v_parent[i]) -= NODEA(_nt->_v_node[i]); + auto const parent_i = _nt->_v_parent_index[i]; + auto* const parent_nd = _nt->_v_node[parent_i]; + auto const nd_a = NODEA(nd); + auto const nd_b = NODEB(nd); + // Update entries in sp13_mat + *nd->_a_matelm += nd_a; + *nd->_b_matelm += nd_b; /* b may have value from lincir */ + *nd->_d_matelm -= nd_b; + // used to update NODED (sparse13 matrix) using NODEA and NODEB ("SoA") + *parent_nd->_d_matelm -= nd_a; + // Also update the Node's d value in the SoA storage (is this needed?) + vec_d[i] -= nd_b; + vec_d[parent_i] -= nd_a; } } else { -#if CACHEVEC - if (use_cachevec) { - for (i = i2; i < i3; ++i) { - VEC_D(i) -= VEC_B(i); - VEC_D(_nt->_v_parent_index[i]) -= VEC_A(i); - } - } else -#endif /* CACHEVEC */ - { - for (i = i2; i < i3; ++i) { - NODED(_nt->_v_node[i]) -= NODEB(_nt->_v_node[i]); - NODED(_nt->_v_parent[i]) -= NODEA(_nt->_v_node[i]); - } + auto* const vec_a = _nt->node_a_storage(); + auto* const vec_b = _nt->node_b_storage(); + for (i = i2; i < i3; ++i) { + vec_d[i] -= vec_b[i]; + vec_d[_nt->_v_parent_index[i]] -= vec_a[i]; } } } /* for the fixed step method */ -void* setup_tree_matrix(NrnThread* _nt) { - nrn::Instrumentor::phase p_setup_tree_matrix("setup-tree-matrix"); - nrn_rhs(_nt); - nrn_lhs(_nt); - nrn_nonvint_block_current(_nt->end, _nt->_actual_rhs, _nt->id); - nrn_nonvint_block_conductance(_nt->end, _nt->_actual_d, _nt->id); - return nullptr; +void setup_tree_matrix(neuron::model_sorted_token const& cache_token, NrnThread& nt) { + nrn::Instrumentor::phase _{"setup-tree-matrix"}; + nrn_rhs(cache_token, nt); + nrn_lhs(cache_token, nt); + nrn_nonvint_block_current(nt.end, nt.node_rhs_storage(), nt.id); + nrn_nonvint_block_conductance(nt.end, nt.node_d_storage(), nt.id); } /* membrane mechanisms needed by other mechanisms (such as Eion by HH) @@ -671,7 +650,7 @@ Prop* need_memb(Symbol* sym) { or if the ion mechanism itself is inserted. Any earlier insertions of the latter or locating this kind of POINT_PROCESS in this section will mean that we no longer get to this arm - of the if statement because m above is not nil. + of the if statement because m above is not nullptr. 
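prop_free above, like the Eqnblock loop in Node::~Node, tears down a singly linked list with std::exchange, which advances the cursor before the current element is freed. A stand-alone illustration of the idiom follows; the ListNode type and free_list helper are hypothetical and only demonstrate the pattern.

```cpp
#include <utility>  // std::exchange

struct ListNode {
    ListNode* next{};
    int payload{};
};

// Free a whole list: std::exchange(p, p->next) returns the old head and
// advances p to the next element before delete runs, so we never read
// through a pointer that has already been freed.
void free_list(ListNode*& head) {
    ListNode* p = std::exchange(head, nullptr);
    while (p) {
        delete std::exchange(p, p->next);
    }
}

int main() {
    auto* head = new ListNode{new ListNode{new ListNode{nullptr, 3}, 2}, 1};
    free_list(head);  // head is nullptr afterwards, all three nodes freed
}
```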
*/ Section* sec = nrn_pnt_sec_for_need_; Prop** cpl = current_prop_list; @@ -693,25 +672,32 @@ Prop* prop_alloc(Prop** pp, int type, Node* nd) { /* returning *Prop because allocation may */ /* cause other properties to be linked ahead */ /* some models need the node (to find area) */ - if (nd) { - nrn_alloc_node_ = nd; - } + nrn_alloc_node_ = nd; // this might be null v_structure_change = 1; current_prop_list = pp; - auto* p = (Prop*) emalloc(sizeof(Prop)); - p->_type = type; + auto* p = new Prop{static_cast(type)}; p->next = *pp; p->ob = nullptr; p->_alloc_seq = -1; *pp = p; assert(memb_func[type].alloc); p->dparam = nullptr; - p->param = nullptr; - p->param_size = 0; (memb_func[type].alloc)(p); return p; } +void prop_update_ion_variables(Prop* prop, Node* node) { + nrn_alloc_node_ = node; + nrn_point_prop_ = prop; + current_prop_list = &node->prop; + auto const type = prop->_type; + assert(memb_func[type].alloc); + memb_func[type].alloc(prop); + current_prop_list = nullptr; + nrn_point_prop_ = nullptr; + nrn_alloc_node_ = nullptr; +} + Prop* prop_alloc_disallow(Prop** pp, short type, Node* nd) { disallow_needmemb = 1; auto* p = prop_alloc(pp, type, nd); @@ -719,15 +705,12 @@ Prop* prop_alloc_disallow(Prop** pp, short type, Node* nd) { return p; } -void prop_free(Prop** pp) /* free an entire property list */ -{ - Prop *p, *pn; - p = *pp; - *pp = (Prop*) 0; +// free an entire property list +void prop_free(Prop** pp) { + Prop* p = *pp; + *pp = nullptr; while (p) { - pn = p->next; - single_prop_free(p); - p = pn; + single_prop_free(std::exchange(p, p->next)); } } @@ -738,9 +721,8 @@ void single_prop_free(Prop* p) { clear_point_process_struct(p); return; } - if (p->param) { - notify_freed_val_array(p->param, p->param_size); - nrn_prop_data_free(p->_type, p->param); + if (auto got = nrn_mech_inst_destruct.find(p->_type); got != nrn_mech_inst_destruct.end()) { + (got->second)(p); } if (p->dparam) { if (p->_type == CABLESECTION) { @@ -751,7 +733,7 @@ void single_prop_free(Prop* p) { if (p->ob) { hoc_obj_unref(p->ob); } - free((char*) p); + delete p; } @@ -768,7 +750,7 @@ static double diam_from_list(Section* sec, int inode, Prop* p, double rparent); int recalc_diam_count_, nrn_area_ri_nocount_, nrn_area_ri_count_; void nrn_area_ri(Section* sec) { int j; - double ra, dx, diam, rright, rleft; + double ra, dx, rright, rleft; Prop* p; Node* nd; if (nrn_area_ri_nocount_ == 0) { @@ -804,13 +786,12 @@ void nrn_area_ri(Section* sec) { /* area for right circular cylinders. Ri as right half of parent + left half of this */ - diam = p->param[0]; + auto& diam = p->param(0); if (diam <= 0.) { - p->param[0] = 1e-6; + diam = 1e-6; hoc_execerror(secname(sec), "diameter diam = 0. Setting to 1e-6"); } - NODEAREA(nd) = PI * diam * dx; /* um^2 */ - UPDATE_VEC_AREA(nd); + nd->area() = PI * diam * dx; // um^2 rleft = 1e-2 * ra * (dx / 2) / (PI * diam * diam / 4.); /*left half segment Megohms*/ NODERINV(nd) = 1. / (rleft + rright); /*uS*/ rright = rleft; @@ -818,8 +799,7 @@ void nrn_area_ri(Section* sec) { } /* last segment has 0 length. area is 1e2 in dimensionless units */ - NODEAREA(sec->pnode[j]) = 1.e2; - UPDATE_VEC_AREA(sec->pnode[j]); + sec->pnode[j]->area() = 1.e2; NODERINV(sec->pnode[j]) = 1. 
/ rright; sec->recalc_area_ = 0; diam_changed = 1; @@ -870,19 +850,26 @@ void connection_coef(void) /* setup a and b */ section connects straight to the point*/ /* for the near future we always have a last node at x=1 with no properties */ + + // To match legacy behaviour, make sure that the SoA storage for "a" and "b" is zeroed before + // the initialisation code below is run. + for (auto tid = 0; tid < nrn_nthread; ++tid) { + auto& nt = nrn_threads[tid]; + std::fill_n(nt.node_a_storage(), nt.end, 0.0); + std::fill_n(nt.node_b_storage(), nt.end, 0.0); + } // ForAllSections(sec) ITERATE(qsec, section_list) { Section* sec = hocSEC(qsec); -#if 1 /* unnecessary because they are unused, but help when looking at fmatrix */ + // Unnecessary because they are unused, but help when looking at fmatrix. if (!sec->parentsec) { - if (nrn_classicalNodeA(sec->parentnode)) { - ClassicalNODEA(sec->parentnode) = 0.0; + if (auto* const ptr = nrn_classicalNodeA(sec->parentnode)) { + *ptr = 0.0; } - if (nrn_classicalNodeB(sec->parentnode)) { - ClassicalNODEB(sec->parentnode) = 0.0; + if (auto* const ptr = nrn_classicalNodeB(sec->parentnode)) { + *ptr = 0.0; } } -#endif /* convert to siemens/cm^2 for all nodes except last and microsiemens for last. This means that a*V = mamps/cm2 and a*v in last node = nanoamps. Note that last node @@ -894,11 +881,11 @@ void connection_coef(void) /* setup a and b */ nd = sec->pnode[0]; area = NODEAREA(sec->parentnode); /* dparam[4] is rall_branch */ - ClassicalNODEA(nd) = -1.e2 * sec->prop->dparam[4].get() * NODERINV(nd) / area; + *nrn_classicalNodeA(nd) = -1.e2 * sec->prop->dparam[4].get() * NODERINV(nd) / area; for (j = 1; j < sec->nnode; j++) { nd = sec->pnode[j]; area = NODEAREA(sec->pnode[j - 1]); - ClassicalNODEA(nd) = -1.e2 * NODERINV(nd) / area; + *nrn_classicalNodeA(nd) = -1.e2 * NODERINV(nd) / area; } } /* now the effect of parent on node equation. */ @@ -907,7 +894,7 @@ void connection_coef(void) /* setup a and b */ Section* sec = hocSEC(qsec); for (j = 0; j < sec->nnode; j++) { nd = sec->pnode[j]; - ClassicalNODEB(nd) = -1.e2 * NODERINV(nd) / NODEAREA(nd); + *nrn_classicalNodeB(nd) = -1.e2 * NODERINV(nd) / NODEAREA(nd); } } #if EXTRACELLULAR @@ -1597,18 +1584,16 @@ static double diam_from_list(Section* sec, int inode, Prop* p, double rparent) /* answer for inode is here */ NODERINV(sec->pnode[inode]) = 1. / (rparent + rleft); diam *= .5 / ds; - if (fabs(diam - p->param[0]) > 1e-9 || diam < 1e-5) { - p->param[0] = diam; /* microns */ + if (fabs(diam - p->param(0)) > 1e-9 || diam < 1e-5) { + p->param(0) = diam; /* microns */ } - NODEAREA(sec->pnode[inode]) = area * .5 * PI; /* microns^2 */ - UPDATE_VEC_AREA(sec->pnode[inode]); + sec->pnode[inode]->area() = area * .5 * PI; /* microns^2 */ #if NTS_SPINE /* if last point has a spine then increment spine count for last node */ if (inode == sec->nnode - 2 && sec->pt3d[npt - 1].d < 0.)
{ nspine += 1; } - NODEAREA(sec->pnode[inode]) += nspine * spinearea; - UPDATE_VEC_AREA(sec->pnode[inode]); + sec->pnode[inode]->area() = sec->pnode[inode]->area() + nspine * spinearea; #endif return ri; } @@ -1640,13 +1625,10 @@ void v_setup_vectors(void) { if (memb_list[i].nodecount) { memb_list[i].nodecount = 0; free(memb_list[i].nodelist); -#if CACHEVEC - free((void*) memb_list[i].nodeindices); -#endif /* CACHEVEC */ - if (memb_func[i].hoc_mech) { - free(memb_list[i].prop); - } else { - free(memb_list[i]._data); + free(memb_list[i].nodeindices); + delete[] memb_list[i].prop; + if (!memb_func[i].hoc_mech) { + // free(memb_list[i]._data); free(memb_list[i].pdata); } } @@ -1667,14 +1649,10 @@ void v_setup_vectors(void) { if (nrn_is_artificial_[i] && memb_func[i].has_initialize()) { if (memb_list[i].nodecount) { memb_list[i].nodelist = (Node**) emalloc(memb_list[i].nodecount * sizeof(Node*)); -#if CACHEVEC memb_list[i].nodeindices = (int*) emalloc(memb_list[i].nodecount * sizeof(int)); -#endif /* CACHEVEC */ - if (memb_func[i].hoc_mech) { - memb_list[i].prop = (Prop**) emalloc(memb_list[i].nodecount * sizeof(Prop*)); - } else { - memb_list[i]._data = (double**) emalloc(memb_list[i].nodecount * - sizeof(double*)); + // Prop used by ode_map even when hoc_mech is false + memb_list[i].prop = new Prop*[memb_list[i].nodecount]; + if (!memb_func[i].hoc_mech) { memb_list[i].pdata = (Datum**) emalloc(memb_list[i].nodecount * sizeof(Datum*)); } memb_list[i].nodecount = 0; /* counted again below */ @@ -1707,7 +1685,6 @@ void v_setup_vectors(void) { reorder_secorder(); #endif -#if CACHEVEC FOR_THREADS(_nt) { for (inode = 0; inode < _nt->end; inode++) { if (_nt->_v_parent[inode] != NULL) { @@ -1716,28 +1693,22 @@ void v_setup_vectors(void) { } } -#endif /* CACHEVEC */ - nrn_thread_memblist_setup(); /* fill in artificial cell info */ for (i = 0; i < n_memb_func; ++i) { if (nrn_is_artificial_[i] && memb_func[i].has_initialize()) { hoc_Item* q; - hoc_List* list; - int j, nti; cTemplate* tmp = nrn_pnt_template_[i]; memb_list[i].nodecount = tmp->count; - nti = 0; - j = 0; - list = tmp->olist; + int nti{}, j{}; + hoc_List* list = tmp->olist; + std::vector thread_counts(nrn_nthread); ITERATE(q, list) { Object* obj = OBJ(q); auto* pnt = static_cast(obj->u.this_pointer); p = pnt->prop; memb_list[i].nodelist[j] = nullptr; - memb_list[i]._data[j] = p->param; - memb_list[i].pdata[j] = p->dparam; /* for now, round robin all the artificial cells */ /* but put the non-threadsafe ones in thread 0 */ /* @@ -1752,15 +1723,44 @@ void v_setup_vectors(void) { pnt->_vnt = nrn_threads + nti; nti = (nti + 1) % nrn_nthread; } + auto const tid = static_cast(pnt->_vnt)->id; + ++thread_counts[tid]; + // pnt->_i_instance = j; ++j; } + assert(j == memb_list[i].nodecount); + // The following is a transition measure while data are SOA-backed + // using the new neuron::container::soa scheme but pdata are not. + // data get permuted so that artificial cells are blocked according + // to the NrnThread they are assigned to, but without this change + // then the pdata order encoded in the global non-thread-specific + // memb_list[i] structure was different, with threads interleaved. + // This was a problem when we wanted to e.g. run the initialisation + // kernels in finitialize using that global structure, as the i-th + // rows of data and pdata did not refer to the same mechanism + // instance. 
The temporary solution here is to manually organise + // pdata to match the data order, with all the instances associated + // with thread 0 followed by all the instances associated with + // thread 1, and so on. See CellGroup::mk_tml_with_art for another + // side of this story and why it is useful to have artificial cell + // data blocked by thread. + std::vector thread_offsets(nrn_nthread); + for (auto j = 1; j < nrn_nthread; ++j) { + thread_offsets[j] = std::exchange(thread_counts[j - 1], 0) + thread_offsets[j - 1]; + } + thread_counts[nrn_nthread - 1] = 0; + ITERATE(q, list) { + auto* const pnt = static_cast(OBJ(q)->u.this_pointer); + auto const tid = static_cast(pnt->_vnt)->id; + memb_list[i].pdata[thread_offsets[tid] + thread_counts[tid]++] = pnt->prop->dparam; + } } } - nrn_recalc_node_ptrs(); + neuron::model().node_data().mark_as_unsorted(); v_structure_change = 0; nrn_update_ps2nt(); ++structure_change_cnt; - long_difus_solve(3, nrn_threads); + long_difus_solve(nrn_ensure_model_data_are_sorted(), 3, *nrn_threads); // !!! nrn_nonvint_block_setup(); diam_changed = 1; } @@ -1783,7 +1783,7 @@ static FILE* fnd; void node_data_scaffolding(void) { int i; Pd(n_memb_func); - /* P "Mechanism names (first two are nil) beginning with memb_func[2]\n");*/ + /* P "Mechanism names (first two are nullptr) beginning with memb_func[2]\n");*/ for (i = 2; i < n_memb_func; ++i) { P "%s", memb_func[i].sym->name); Pn; @@ -1867,55 +1867,12 @@ void node_data(void) { #endif -void nrn_complain(double* pp) { - /* print location for this param on the standard error */ - Node* nd; - hoc_Item* qsec; - int j; - Prop* p; - // ForAllSections(sec) - ITERATE(qsec, section_list) { - Section* sec = hocSEC(qsec); - for (j = 0; j < sec->nnode; ++j) { - nd = sec->pnode[j]; - for (p = nd->prop; p; p = p->next) { - if (p->param == pp) { - fprintf(stderr, - "Error at section location %s(%g)\n", - secname(sec), - nrn_arc_position(sec, nd)); - return; - } - } - } - } - fprintf(stderr, "Don't know the location of params at %p\n", pp); -} - void nrn_matrix_node_free() { NrnThread* nt; FOR_THREADS(nt) { - if (nt->_actual_rhs) { - free(nt->_actual_rhs); - nt->_actual_rhs = (double*) 0; + if (nt->_sp13_rhs) { + free(std::exchange(nt->_sp13_rhs, nullptr)); } - if (nt->_actual_d) { - free(nt->_actual_d); - nt->_actual_d = (double*) 0; - } -#if CACHEVEC - if (nt->_actual_a) { - free(nt->_actual_a); - nt->_actual_a = (double*) 0; - } - if (nt->_actual_b) { - free(nt->_actual_b); - nt->_actual_b = (double*) 0; - } - /* because actual_v and actual_area have pointers to them from many - places, defer the freeing until nrn_recalc_node_ptrs is called - */ -#endif /* CACHEVEC */ if (nt->_sp13mat) { spDestroy(nt->_sp13mat); nt->_sp13mat = (char*) 0; @@ -1977,9 +1934,6 @@ int nrn_method_consistent(void) { use_sparse13 = 1; consist = 1; } - if (use_sparse13 != 0) { - nrn_cachevec(0); - } return consist; } @@ -2013,23 +1967,9 @@ static void nrn_matrix_node_alloc(void) { v_setup_vectors(); return; } else { - if (nt->_actual_rhs != (double*) 0) { - return; - } + // used to return here if the cache-efficient structures for a/b/... 
were non-null } } -/*printf("nrn_matrix_node_alloc does its work\n");*/ -#if CACHEVEC - FOR_THREADS(nt) { - nt->_actual_a = (double*) ecalloc(nt->end, sizeof(double)); - nt->_actual_b = (double*) ecalloc(nt->end, sizeof(double)); - } - nrn_recalc_node_ptrs(); -#endif /* CACHEVEC */ - -#if 0 -printf("nrn_matrix_node_alloc use_sparse13=%d cvode_active_=%d nrn_use_daspk_=%d\n", use_sparse13, cvode_active_, nrn_use_daspk_); -#endif ++nrn_matrix_cnt_; if (use_sparse13) { int in, err, extn, neqn, j; @@ -2041,7 +1981,7 @@ printf("nrn_matrix_node_alloc use_sparse13=%d cvode_active_=%d nrn_use_daspk_=%d } /*printf(" %d extracellular nodes\n", extn);*/ neqn += extn; - nt->_actual_rhs = (double*) ecalloc(neqn + 1, sizeof(double)); + nt->_sp13_rhs = (double*) ecalloc(neqn + 1, sizeof(double)); nt->_sp13mat = spCreate(neqn, 0, &err); if (err != spOKAY) { hoc_execerror("Couldn't create sparse matrix", (char*) 0); @@ -2060,13 +2000,13 @@ printf("nrn_matrix_node_alloc use_sparse13=%d cvode_active_=%d nrn_use_daspk_=%d nde = nd->extnode; pnd = nt->_v_parent[in]; i = nd->eqn_index_; - nd->_rhs = nt->_actual_rhs + i; - nd->_d = spGetElement(nt->_sp13mat, i, i); + nt->_sp13_rhs[i] = nt->actual_rhs(in); + nd->_d_matelm = spGetElement(nt->_sp13mat, i, i); if (nde) { for (ie = 0; ie < nlayer; ++ie) { k = i + ie + 1; nde->_d[ie] = spGetElement(nt->_sp13mat, k, k); - nde->_rhs[ie] = nt->_actual_rhs + k; + nde->_rhs[ie] = nt->_sp13_rhs + k; nde->_x21[ie] = spGetElement(nt->_sp13mat, k, k - 1); nde->_x12[ie] = spGetElement(nt->_sp13mat, k - 1, k); } @@ -2083,8 +2023,8 @@ printf("nrn_matrix_node_alloc use_sparse13=%d cvode_active_=%d nrn_use_daspk_=%d nde->_b_matelm[ie] = spGetElement(nt->_sp13mat, k, kp); } } else { /* not needed if index starts at 1 */ - nd->_a_matelm = (double*) 0; - nd->_b_matelm = (double*) 0; + nd->_a_matelm = nullptr; + nd->_b_matelm = nullptr; } } nrndae_alloc(); @@ -2092,198 +2032,340 @@ printf("nrn_matrix_node_alloc use_sparse13=%d cvode_active_=%d nrn_use_daspk_=%d FOR_THREADS(nt) { assert(nrndae_extra_eqn_count() == 0); assert(!nt->_ecell_memb_list || nt->_ecell_memb_list->nodecount == 0); - nt->_actual_d = (double*) ecalloc(nt->end, sizeof(double)); - nt->_actual_rhs = (double*) ecalloc(nt->end, sizeof(double)); - for (i = 0; i < nt->end; ++i) { - Node* nd = nt->_v_node[i]; - nd->_d = nt->_actual_d + i; - nd->_rhs = nt->_actual_rhs + i; - } } } } -void nrn_cachevec(int b) { - if (use_sparse13) { - use_cachevec = 0; - } else { - if (b && use_cachevec == 0) { - tree_changed = 1; +/** @brief Sort the underlying storage for a particular mechanism. + * + * After model building is complete the storage vectors backing all Mechanism + * instances can be permuted to ensure that preconditions are met for the + * computations performed while time-stepping. + * + * This method ensures that the Mechanism data is ready for this compute phase. + * It is guaranteed to remain "ready" until the returned tokens are destroyed. + */ +static void nrn_sort_mech_data( + neuron::container::Mechanism::storage::frozen_token_type& sorted_token, + neuron::cache::Model& cache, + neuron::container::Mechanism::storage& mech_data) { + // Do the actual sorting here. For now the algorithm is just to ensure that + // the mechanism instances are partitioned by NrnThread. 
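nrn_sort_mech_data does not move any rows while it walks the model; it only fills mech_data_permutation so that the entry at an instance's current row holds its destination row, and the permutation is applied once at the end. A small self-contained sketch of applying such a reverse permutation follows, using a plain std::vector in place of the soa container; the helper name and data are illustrative only.

```cpp
#include <cassert>
#include <cstddef>
#include <string>
#include <vector>

// Apply a reverse permutation: perm[old_index] says where the element
// currently at old_index must end up, the same convention used by
// mech_data_permutation above.
template <typename T>
std::vector<T> apply_reverse_permutation(const std::vector<T>& data,
                                         const std::vector<std::size_t>& perm) {
    assert(data.size() == perm.size());
    std::vector<T> out(data.size());
    for (std::size_t old_index = 0; old_index < data.size(); ++old_index) {
        out[perm[old_index]] = data[old_index];
    }
    return out;
}

int main() {
    // rows as they happen to sit in storage, interleaved across two threads
    std::vector<std::string> rows{"t1.a", "t0.a", "t1.b", "t0.b"};
    // desired order: all of thread 0 first, then all of thread 1
    std::vector<std::size_t> perm{2, 0, 3, 1};
    auto sorted = apply_reverse_permutation(rows, perm);
    // sorted == {"t0.a", "t0.b", "t1.a", "t1.b"}
    (void) sorted;
}
```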
+ auto const type = mech_data.type(); + // Some special types are not "really" mechanisms and don't need to be + // sorted + if (type != MORPHOLOGY) { + std::size_t const mech_data_size{mech_data.size()}; + std::vector pdata_fields_to_cache{}; + neuron::cache::indices_to_cache(type, + [mech_data_size, + &pdata_fields_to_cache, + &pdata_hack = cache.mechanism.at(type).pdata_hack]( + auto field) { + if (field >= pdata_hack.size()) { + // we get called with the largest field first + pdata_hack.resize(field + 1); + } + pdata_hack.at(field).reserve(mech_data_size); + pdata_fields_to_cache.push_back(field); + }); + std::size_t global_i{}, trivial_counter{}; + std::vector mech_data_permutation(mech_data_size, + std::numeric_limits::max()); + NrnThread* nt{}; + FOR_THREADS(nt) { + // the Memb_list for this mechanism in this thread, this might be + // null if there are no entries, or if it's an artificial cell type(?) + auto* const ml = nt->_ml_list[type]; + if (ml) { + // Tell the Memb_list what global offset its values start at + ml->set_storage_offset(global_i); + } + // Record where in the global storage this NrnThread's instances of + // the mechanism start + cache.thread.at(nt->id).mechanism_offset.at(type) = global_i; + // Count how many times we see this mechanism in this NrnThread + auto nt_mech_count = 0; + // Loop through the Nodes in this NrnThread + for (auto i = 0; i < nt->end; ++i) { + auto* const nd = nt->_v_node[i]; + // See if this Node has a mechanism of this type + for (Prop* p = nd->prop; p; p = p->next) { + if (p->_type != type) { + continue; + } + // this condition comes from thread_memblist_setup(...) + if (!memb_func[type].current && !memb_func[type].state && + !memb_func[type].has_initialize()) { + continue; + } + // OK, p is an instance of the mechanism we're currently + // considering. + auto const current_global_row = p->id().current_row(); + trivial_counter += (current_global_row == global_i); + mech_data_permutation.at(current_global_row) = global_i++; + for (auto const field: pdata_fields_to_cache) { + cache.mechanism.at(type).pdata_hack.at(field).push_back(p->dparam + field); + } + // Checks + assert(ml->nodelist[nt_mech_count] == nd); + assert(ml->nodeindices[nt_mech_count] == nd->v_node_index); + ++nt_mech_count; + } + } + assert(!ml || ml->nodecount == nt_mech_count); + // Look for any artificial cells attached to this NrnThread + if (nrn_is_artificial_[type]) { + cTemplate* tmp = nrn_pnt_template_[type]; + hoc_Item* q; + ITERATE(q, tmp->olist) { + Object* obj = OBJ(q); + auto* pnt = static_cast(obj->u.this_pointer); + assert(pnt->prop->_type == type); + if (nt == pnt->_vnt) { + auto const current_global_row = pnt->prop->id().current_row(); + trivial_counter += (current_global_row == global_i); + mech_data_permutation.at(current_global_row) = global_i++; + for (auto const field: pdata_fields_to_cache) { + cache.mechanism.at(type).pdata_hack.at(field).push_back( + pnt->prop->dparam + field); + } + } + } + } } - use_cachevec = b; - } -} - -#if CACHEVEC -/* -Pointers that need to be updated are: -All Point process area pointers. -All mechanism POINTER variables that point to v. -All Graph addvar pointers that plot v. -All Vector record and play pointers that deal with v. -All PreSyn threshold detectors that watch v. 
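The comment block being deleted just above lists every pointer category that the old nrn_recalc_node_ptrs machinery had to patch up whenever voltage or area storage was reallocated. That bookkeeping can go away because consumers now hold handles that resolve to the current storage location at the point of use. The toy sketch below shows one way a handle can stay valid across a permutation of the underlying storage; the types are hypothetical and only illustrate the idea, the real neuron::container::data_handle is implemented differently.

```cpp
#include <cstddef>
#include <memory>
#include <utility>
#include <vector>

// Hypothetical illustration: each logical row owns a shared index slot, and
// handles refer to the slot rather than to the raw element address.
struct Storage {
    std::vector<double> values;
    std::vector<std::shared_ptr<std::size_t>> row_of;  // one slot per logical row

    std::size_t add(double v) {
        values.push_back(v);
        row_of.push_back(std::make_shared<std::size_t>(values.size() - 1));
        return values.size() - 1;
    }
    void swap_rows(std::size_t i, std::size_t j) {
        std::swap(values[i], values[j]);
        std::swap(row_of[i], row_of[j]);
        *row_of[i] = i;  // keep the slots in sync with the new layout
        *row_of[j] = j;
    }
};

struct Handle {
    Storage* store{};
    std::shared_ptr<std::size_t> row;
    double& operator*() const { return store->values[*row]; }
};

int main() {
    Storage s;
    s.add(1.0);
    s.add(2.0);
    Handle h{&s, s.row_of[1]};  // refers to the value 2.0
    s.swap_rows(0, 1);          // permute the storage
    double still_two = *h;      // handle still resolves to 2.0
    (void) still_two;
}
```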
-*/ - -static int n_recalc_ptr_callback; -static void (*recalc_ptr_callback[20])(); -static int recalc_cnt_; -static double **recalc_ptr_new_vp_, **recalc_ptr_old_vp_; -static int n_old_thread_; -static int* old_actual_v_size_; -static double** old_actual_v_; -static double** old_actual_area_; - -/* defer freeing a few things which may have pointers to them -until ready to update those pointers */ -void nrn_old_thread_save(void) { - int i; - int n = nrn_nthread; - if (old_actual_v_) { - return; - } /* one is already outstanding */ - n_old_thread_ = n; - old_actual_v_size_ = (int*) ecalloc(n, sizeof(int)); - old_actual_v_ = (double**) ecalloc(n, sizeof(double*)); - old_actual_area_ = (double**) ecalloc(n, sizeof(double*)); - for (i = 0; i < n; ++i) { - NrnThread* nt = nrn_threads + i; - old_actual_v_size_[i] = nt->end; - old_actual_v_[i] = nt->_actual_v; - old_actual_area_[i] = nt->_actual_area; - } -} - -static double* (*recalc_ptr_)(double*); - -double* nrn_recalc_ptr(double* old) { - if (recalc_ptr_) { - return (*recalc_ptr_)(old); - } - if (!recalc_ptr_old_vp_) { - return old; - } - if (old && *old >= 0 && *old <= recalc_cnt_) { - int k = (int) (*old); - if (old == recalc_ptr_old_vp_[k]) { - return recalc_ptr_new_vp_[k]; + if (global_i != mech_data_size) { + // This means that we did not "positively" find all the instances of + // this mechanism by traversing the model structure. This can happen + // if HOC (or probably Python) scripts create instances and then do + // not attach them anywhere, or do not explicitly destroy + // interpreter variables that are preventing reference counts from + // reaching zero. In this case we can figure out which the missing + // entries are and permute them to the end of the vector. + auto missing_elements = mech_data_size - global_i; + // There are `missing_elements` integers from the range [0 .. + // mech_data_size-1] whose values in `mech_data_permutation` are + // still std::numeric_limits::max(). 
+ for (auto global_row = 0ul; global_row < mech_data_size; ++global_row) { + if (mech_data_permutation[global_row] == std::numeric_limits::max()) { + trivial_counter += (global_row == global_i); + mech_data_permutation[global_row] = global_i++; + --missing_elements; + if (missing_elements == 0) { + break; + } + } + } + if (global_i != mech_data_size) { + std::ostringstream oss; + oss << "(global_i = " << global_i << ") != (mech_data_size = " << mech_data_size + << ") for " << mech_data.name(); + throw std::runtime_error(oss.str()); + } } - } - return old; -} - -void nrn_register_recalc_ptr_callback(Pfrv f) { - if (n_recalc_ptr_callback >= 20) { - Printf("More than 20 recalc_ptr_callback functions\n"); - exit(1); - } - recalc_ptr_callback[n_recalc_ptr_callback++] = f; -} - -void nrn_recalc_ptrs(double* (*r)(double*) ) { - int i; - - recalc_ptr_ = r; - - /* update pointers managed by c++ */ - nrniv_recalc_ptrs(); - - /* user callbacks to update pointers */ - for (i = 0; i < n_recalc_ptr_callback; ++i) { - (*recalc_ptr_callback[i])(); - } - recalc_ptr_ = nullptr; -} - -void nrn_recalc_node_ptrs(void) { - int i, ii, j, k; - NrnThread* nt; - if (use_cachevec == 0) { - return; - } - /*printf("nrn_recalc_node_ptrs\n");*/ - recalc_cnt_ = 0; - FOR_THREADS(nt) { - recalc_cnt_ += nt->end; - } - recalc_ptr_new_vp_ = (double**) ecalloc(recalc_cnt_, sizeof(double*)); - recalc_ptr_old_vp_ = (double**) ecalloc(recalc_cnt_, sizeof(double*)); - /* first update the pointers without messing with the old NODEV,NODEAREA */ - /* to prepare for the update, copy all the v and area values into the */ - /* new arrays are replace the old values by index value. */ - /* a pointer dereference value of i allows us to easily check */ - /* if the pointer points to what v_node[i]->_v points to. */ - ii = 0; + assert(trivial_counter <= mech_data_size); + if (trivial_counter < mech_data_size) { + // The `mech_data_permutation` vector is not a unit transformation + mech_data.apply_reverse_permutation(std::move(mech_data_permutation), sorted_token); + } + } + // Make sure that everything ends up flagged as sorted, even morphologies, + // mechanism types with no instances, and cases where the permutation + // vector calculated was found to be trivial. + mech_data.mark_as_sorted(sorted_token); +} + +void nrn_fill_mech_data_caches(neuron::cache::Model& cache, + neuron::container::Mechanism::storage& mech_data) { + // Generate some temporary "flattened" vectors from pdata + // For example, when a mechanism uses an ion then one of its pdata fields holds Datum + // (=generic_data_handle) objects that wrap data_handles to ion (RANGE) variables. + // Dereferencing those fields to access the relevant double values can be indirect and + // expensive, so here we can generate a std::vector that can be used directly in + // hot loops. This is partitioned for the threads in the same way as the other data. + // Note that this needs to come after *all* of the mechanism types' data have been permuted, not + // just the type that we are filling the cache for. + // TODO could identify the case that the pointers are all monotonically increasing and optimise + // further? 
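The cache-filling pass described above exists because resolving a Datum (generic_data_handle) inside a hot loop is comparatively expensive; the handles are therefore dereferenced once, up front, into flat vectors of raw double*. A self-contained sketch of that flattening step follows, with a stand-in handle type; the real code performs the equivalent two-step transform from pdata_hack into pdata and then pdata_ptr_cache.

```cpp
#include <algorithm>
#include <functional>
#include <iterator>
#include <vector>

// Stand-in for a generic handle that can produce a raw double* on demand.
using FakeHandle = std::function<double*()>;

int main() {
    std::vector<double> ion_data{1.0, 2.0, 3.0};
    // One handle per mechanism instance for a single pdata field.
    std::vector<FakeHandle> handles;
    for (auto& x: ion_data) {
        handles.push_back([&x]() { return &x; });
    }
    // Flatten once, outside the hot loop, as the cache-filling pass does.
    std::vector<double*> flat;
    flat.reserve(handles.size());
    std::transform(handles.begin(), handles.end(), std::back_inserter(flat),
                   [](const FakeHandle& h) { return h(); });
    // The hot loop then touches flat[i] directly instead of resolving handles.
    double sum{};
    for (double* p: flat) {
        sum += *p;
    }
    (void) sum;
}
```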
+ auto const type = mech_data.type(); + // Some special types are not "really" mechanisms and don't need to be + // sorted + if (type != MORPHOLOGY) { + auto& mech_cache = cache.mechanism.at(type); + // Transform the vector in pdata_hack into vector in pdata + std::transform(mech_cache.pdata_hack.begin(), + mech_cache.pdata_hack.end(), + std::back_inserter(mech_cache.pdata), + [](std::vector& pdata_hack) { + std::vector tmp{}; + std::transform(pdata_hack.begin(), + pdata_hack.end(), + std::back_inserter(tmp), + [](Datum* datum) { return datum->get(); }); + pdata_hack.clear(); + pdata_hack.shrink_to_fit(); + return tmp; + }); + mech_cache.pdata_hack.clear(); + // Create a flat list of pointers we can use inside generated code + std::transform(mech_cache.pdata.begin(), + mech_cache.pdata.end(), + std::back_inserter(mech_cache.pdata_ptr_cache), + [](auto const& pdata) { return pdata.empty() ? nullptr : pdata.data(); }); + } +} + +/** @brief Sort the underlying storage for Nodes. + * + * After model building is complete the storage vectors backing all Node + * objects can be permuted to ensure that preconditions are met for the + * computations performed while time-stepping. + * + * This method ensures that the Node data is ready for this compute phase. + */ +static void nrn_sort_node_data(neuron::container::Node::storage::frozen_token_type& sorted_token, + neuron::cache::Model& cache) { + // Make sure the voltage storage follows the order encoded in _v_node. + // Generate the permutation vector to update the underlying storage for + // Nodes. This must come after nrn_multisplit_setup_, which can change the + // Node order. + auto& node_data = neuron::model().node_data(); + std::size_t const node_data_size{node_data.size()}; + std::size_t global_i{}; + std::vector node_data_permutation(node_data_size, + std::numeric_limits::max()); + // Process threads one at a time -- this means that the data for each + // NrnThread will be contiguous. 
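Blocking each NrnThread's rows together, as the Node loop below does and as the pdata reshuffle for artificial cells does earlier, is in effect a counting sort on the thread id: count the rows owned by each thread, prefix-sum the counts into offsets, then hand out destination rows thread by thread. A stand-alone sketch with illustrative names:

```cpp
#include <cstddef>
#include <numeric>
#include <vector>

// Group rows into contiguous per-thread blocks. Returns new_index[i], the
// destination row of element i, the same shape of answer the permutation
// vectors above are built to hold.
std::vector<std::size_t> block_by_thread(const std::vector<int>& tid, int nthread) {
    std::vector<std::size_t> counts(nthread, 0);
    for (int t: tid) {
        ++counts[t];
    }
    std::vector<std::size_t> offsets(nthread, 0);
    std::partial_sum(counts.begin(), counts.end() - 1, offsets.begin() + 1);
    std::vector<std::size_t> cursor = offsets;  // next free row per thread
    std::vector<std::size_t> new_index(tid.size());
    for (std::size_t i = 0; i < tid.size(); ++i) {
        new_index[i] = cursor[tid[i]]++;
    }
    return new_index;
}

int main() {
    // rows currently interleaved across two threads (round-robin assignment)
    std::vector<int> tid{0, 1, 0, 1, 0};
    auto perm = block_by_thread(tid, 2);
    // perm == {0, 3, 1, 4, 2}: thread 0 occupies rows 0-2, thread 1 rows 3-4
    (void) perm;
}
```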
+ NrnThread* nt{}; FOR_THREADS(nt) { - nt->_actual_v = (double*) ecalloc(nt->end, sizeof(double)); - nt->_actual_area = (double*) ecalloc(nt->end, sizeof(double)); - } - FOR_THREADS(nt) for (i = 0; i < nt->end; ++i) { - Node* nd = nt->_v_node[i]; - nt->_actual_v[i] = *nd->_v; - recalc_ptr_new_vp_[ii] = nt->_actual_v + i; - recalc_ptr_old_vp_[ii] = nd->_v; - nt->_actual_area[i] = nd->_area; - *nd->_v = (double) ii; - ++ii; - } - /* update POINT_PROCESS pointers to NODEAREA */ - /* and relevant POINTER pointers to NODEV */ - FOR_THREADS(nt) for (i = 0; i < nt->end; ++i) { - Node* nd = nt->_v_node[i]; - Prop* p; - Datum* d; - int dpend; - for (p = nd->prop; p; p = p->next) { - if (memb_func[p->_type].is_point && !nrn_is_artificial_[p->_type]) { - p->dparam[0] = nt->_actual_area + i; - } - dpend = nrn_dparam_ptr_end_[p->_type]; - for (j = nrn_dparam_ptr_start_[p->_type]; j < dpend; ++j) { - if (double* pval = p->dparam[j].get(); - pval && *pval >= 0.0 && *pval <= recalc_cnt_) { - /* possible pointer to v */ - k = (int) (*pval); - if (pval == recalc_ptr_old_vp_[k]) { - p->dparam[j] = recalc_ptr_new_vp_[k]; - } + // What offset in the global node data structure do the values for this thread + // start at + nt->_node_data_offset = global_i; + cache.thread.at(nt - nrn_threads).node_data_offset = global_i; + for (int i = 0; i < nt->end; ++i, ++global_i) { + Node* nd = nt->_v_node[i]; + auto const current_node_row = nd->_node_handle.current_row(); + assert(current_node_row < node_data_size); + assert(global_i < node_data_size); + node_data_permutation.at(current_node_row) = global_i; + } + } + if (global_i != node_data_size) { + // This means that we did not "positively" find all the Nodes by traversing the NrnThread + // objects. In this case we can figure out which the missing entries are and permute them to + // the end of the global vectors. + auto missing_elements = node_data_size - global_i; + std::cout << "permuting " << missing_elements << " 'lost' Nodes to the end\n"; + // There are `missing_elements` integers from the range [0 .. node_data_size-1] whose values + // in `node_data_permutation` are still std::numeric_limits::max(). 
+ for (auto global_row = 0ul; global_row < node_data_size; ++global_row) { + if (node_data_permutation[global_row] == std::numeric_limits::max()) { + node_data_permutation[global_row] = global_i++; + --missing_elements; + if (missing_elements == 0) { + break; } } } - } - - nrn_recalc_ptrs(nullptr); - - /* now that all the pointers are updated we update the NODEV */ - ii = 0; - FOR_THREADS(nt) for (i = 0; i < nt->end; ++i) { - Node* nd = nt->_v_node[i]; - nd->_v = recalc_ptr_new_vp_[ii]; - ++ii; - } - free(recalc_ptr_old_vp_); - free(recalc_ptr_new_vp_); - recalc_ptr_old_vp_ = (double**) 0; - recalc_ptr_new_vp_ = (double**) 0; - /* and free the old thread arrays if new ones were allocated */ - for (i = 0; i < n_old_thread_; ++i) { - if (old_actual_v_[i]) - hoc_free_val_array(old_actual_v_[i], old_actual_v_size_[i]); - if (old_actual_area_[i]) - free(old_actual_area_[i]); - } - free(old_actual_v_size_); - free(old_actual_v_); - free(old_actual_area_); - old_actual_v_size_ = 0; - old_actual_v_ = 0; - old_actual_area_ = 0; - n_old_thread_ = 0; - - nrn_node_ptr_change_cnt_++; - nrn_cache_prop_realloc(); - nrn_recalc_ptrvector(); - nrn_partrans_update_ptrs(); - nrn_imem_defer_free(nullptr); + if (global_i != node_data_size) { + std::ostringstream oss; + oss << "(global_i = " << global_i << ") != (node_data_size = " << node_data_size << ')'; + throw std::runtime_error(oss.str()); + } + } + // Apply the permutation, using `sorted_token` to authorise this operation + // despite the container being frozen. + node_data.apply_reverse_permutation(std::move(node_data_permutation), sorted_token); +} + +/** + * @brief Ensure neuron::container::* data are sorted. + * + * Set all of the containers to be in read-only mode, until the returned token + * is destroyed. This method can be called from multi-threaded regions. + */ +neuron::model_sorted_token nrn_ensure_model_data_are_sorted() { + // Rather than a more complicated lower-level solution, just serialise all + // calls to this method. The more complicated lower-level solution would + // presumably involve a more fully-fledged std::shared_mutex-type model, + // where the soa containers can be locked by many readers (clients that do + // not do anything that invalidates pointers/caches) or a single writer + // (who *is* authorised to perform those operations), with a deadlock + // avoiding algorithm used to acquire those two types of lock for all the + // different soa containers at once. + static std::mutex s_mut{}; + std::unique_lock _{s_mut}; + // Two scenarii: + // - model is already sorted, in which case we just assemble the return + // value but don't mutate anything or do any real work + // - something is not already sorted, and by extension the cache is not + // valid. + // In both cases, we want to start by acquiring tokens from all of the + // data containers in the model. Once we have locked the whole model in + // this way, we can trigger permuting the model (by loaning out the tokens + // one by one) to mark it as sorted. + auto& model = neuron::model(); + auto& node_data = model.node_data(); + // Get tokens for the whole model + auto node_token = node_data.issue_frozen_token(); + auto already_sorted = node_data.is_sorted(); + // How big does an array have to be to be indexed by mechanism type? 
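The token scheme described above treats every outstanding frozen token as a read lock, while a caller holding the only token is additionally allowed to permute the container in place. Below is a toy version of that idea with hypothetical types; the real soa containers and their frozen_token_type are considerably richer than this.

```cpp
#include <algorithm>
#include <cassert>
#include <memory>
#include <vector>

// Toy illustration: any outstanding token freezes the container (no growth or
// reallocation), but the sole token holder may still ask for an in-place permutation.
class ToyContainer {
    std::vector<double> data_;
    std::shared_ptr<int> token_anchor_ = std::make_shared<int>(0);

  public:
    using frozen_token_type = std::shared_ptr<int>;
    frozen_token_type issue_frozen_token() { return token_anchor_; }
    bool frozen() const { return token_anchor_.use_count() > 1; }

    void push_back(double v) {
        assert(!frozen() && "cannot grow a frozen container");
        data_.push_back(v);
    }
    // Permutation is allowed while frozen, but only for the sole token holder.
    void reverse(const frozen_token_type& tok) {
        assert(tok == token_anchor_ && token_anchor_.use_count() == 2);
        std::reverse(data_.begin(), data_.end());
    }
};

int main() {
    ToyContainer c;
    c.push_back(1.0);
    c.push_back(2.0);
    {
        auto token = c.issue_frozen_token();  // "read lock" the container
        c.reverse(token);                     // sole holder may reorder in place
        // c.push_back(3.0);                  // would assert: container is frozen
    }
    c.push_back(3.0);  // fine again once the token has been destroyed
}
```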
+ auto const mech_storage_size = model.mechanism_storage_size(); + std::vector mech_tokens{}; + mech_tokens.reserve(mech_storage_size); + model.apply_to_mechanisms([&already_sorted, &mech_tokens](auto& mech_data) { + mech_tokens.push_back(mech_data.issue_frozen_token()); + already_sorted = already_sorted && mech_data.is_sorted(); + }); + // Now the whole model is marked frozen/read-only, but it may or may not be + // marked sorted (if it is, the cache should be valid, otherwise it should + // not be). + if (already_sorted) { + // If everything was already sorted, the cache should already be valid. + assert(neuron::cache::model); + // There isn't any more work to be done, really. + } else { + // Some part of the model data is not already marked sorted. In this + // case we expect that the cache is *not* valid, because whatever + // caused something to not be sorted should also have invalidated the + // cache. + assert(!neuron::cache::model); + // Build a new cache (*not* in situ, so it doesn't get invalidated + // under our feet while we're in the middle of the job) and populate it + // by calling the various methods that sort the model data. + neuron::cache::Model cache{}; + cache.thread.resize(nrn_nthread); + for (auto& thread_cache: cache.thread) { + thread_cache.mechanism_offset.resize(mech_storage_size); + } + cache.mechanism.resize(mech_storage_size); + // The cache is initialised enough to be populated by the various data + // sorting algorithms. The small "problem" here is that all of the + // model data structures are already marked as frozen via the tokens + // that we acquired above. The way around this is to transfer those + // tokens back to the relevant containers, so they can check that the + // only active token is the one that has been provided back to them. In + // other words, any token is a "read lock" but a non-const token that + // refers to a container with token reference count of exactly one has + // an elevated "write lock" status. + nrn_sort_node_data(node_token, cache); + assert(node_data.is_sorted()); + // TODO: maybe we should separate out cache population from sorting. + std::size_t n{}; // eww + model.apply_to_mechanisms([&cache, &n, &mech_tokens](auto& mech_data) { + // TODO do we need to pass `node_token` to `nrn_sort_mech_data`? + nrn_sort_mech_data(mech_tokens[n], cache, mech_data); + assert(mech_data.is_sorted()); + ++n; + }); + // Now that all the mechanism data is sorted we can fill in pdata caches + model.apply_to_mechanisms( + [&cache](auto& mech_data) { nrn_fill_mech_data_caches(cache, mech_data); }); + // Move our working cache into the global storage. + neuron::cache::model = std::move(cache); + } + // Move our tokens into the return value and be done with it. 
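One detail worth noting in the branch above is that the new cache is populated in a local object and only moved into neuron::cache::model at the very end, so no reader ever observes a partially built cache. A small stand-alone sketch of that build-aside-then-commit pattern, using toy types that are not part of the patch:

```cpp
#include <cassert>
#include <optional>
#include <vector>

// Toy stand-ins: ToyCache plays the role of neuron::cache::Model and
// global_cache the role of the globally visible neuron::cache::model.
struct ToyCache {
    std::vector<int> per_thread_offsets;
};

std::optional<ToyCache> global_cache;

void rebuild_cache(int nthread) {
    ToyCache cache{};  // local, invisible to readers while it is being filled
    cache.per_thread_offsets.resize(nthread);
    for (int t = 0; t < nthread; ++t) {
        cache.per_thread_offsets[t] = 100 * t;  // pretend work
    }
    global_cache = std::move(cache);  // single committing step
}

int main() {
    assert(!global_cache.has_value());
    rebuild_cache(4);
    assert(global_cache.has_value());
    assert(global_cache->per_thread_offsets[3] == 300);
}
```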
+ neuron::model_sorted_token ret{*neuron::cache::model, std::move(node_token)}; + ret.mech_data_tokens = std::move(mech_tokens); + return ret; } - -#endif /* CACHEVEC */ diff --git a/src/nrnoc/treeset.h b/src/nrnoc/treeset.h index 01a7d2bd47..2cd37061f5 100644 --- a/src/nrnoc/treeset.h +++ b/src/nrnoc/treeset.h @@ -1,3 +1,6 @@ #pragma once -double* nrn_recalc_ptr(double* old); -void nrn_register_recalc_ptr_callback(void (*f)()); +struct NrnThread; +void update_actual_d_based_on_sp13_mat(NrnThread* nt); +void update_actual_rhs_based_on_sp13_rhs(NrnThread* nt); +void update_sp13_mat_based_on_actual_d(NrnThread* nt); +void update_sp13_rhs_based_on_actual_rhs(NrnThread* nt); diff --git a/src/nrnpython/CMakeLists.txt b/src/nrnpython/CMakeLists.txt index c67688a3f6..79a24c67fb 100644 --- a/src/nrnpython/CMakeLists.txt +++ b/src/nrnpython/CMakeLists.txt @@ -4,25 +4,6 @@ add_compile_options(${NRN_COMPILE_FLAGS}) add_compile_definitions(${NRN_COMPILE_DEFS}) add_link_options(${NRN_LINK_FLAGS}) -# ============================================================================= -# List of python executables to build nrnpython against -# ============================================================================= -if(NRN_ENABLE_PYTHON_DYNAMIC) - if("${NRN_PYTHON_DYNAMIC}" STREQUAL "") - set(NRN_PYTHON_EXE_LIST - ${PYTHON_EXECUTABLE} - CACHE INTERNAL "" FORCE) - else() - set(NRN_PYTHON_EXE_LIST - ${NRN_PYTHON_DYNAMIC} - CACHE INTERNAL "" FORCE) - endif() -else() - set(NRN_PYTHON_EXE_LIST - ${PYTHON_EXECUTABLE} - CACHE INTERNAL "" FORCE) -endif() - # ============================================================================= # rxdmath libraries (always build) # ============================================================================= @@ -32,7 +13,6 @@ install(TARGETS rxdmath DESTINATION ${NRN_INSTALL_SHARE_LIB_DIR}) # ============================================================================= # nrnpython libraries (one lib per python) # ============================================================================= - set(nrnpython_lib_list) # user has selected dynamic python support (could be multiple versions) @@ -44,42 +24,35 @@ if(NRN_ENABLE_PYTHON_DYNAMIC) ../nrnoc ../ivoc ../nrniv - ../ivos ../gnu - ../mesch ../nrnmpi ${PROJECT_BINARY_DIR}/src/nrnpython ${PROJECT_BINARY_DIR}/src/ivos - ${PROJECT_BINARY_DIR}/src/oc - ${IV_INCLUDE_DIR}) - - if(LINK_AGAINST_PYTHON) - list(LENGTH NRN_PYTHON_EXE_LIST _num_pythons) - math(EXPR num_pythons "${_num_pythons} - 1") - foreach(val RANGE ${num_pythons}) - list(GET NRN_PYTHON_VER_LIST ${val} pyver) - list(GET NRN_PYTHON_INCLUDE_LIST ${val} pyinc) - list(GET NRN_PYTHON_LIB_LIST ${val} pylib) - add_library(nrnpython${pyver} SHARED ${NRN_NRNPYTHON_SRC_FILES}) - target_include_directories(nrnpython${pyver} BEFORE PUBLIC ${pyinc} ${INCLUDE_DIRS}) - target_link_libraries(nrnpython${pyver} nrniv_lib ${pylib} ${Readline_LIBRARY}) - add_dependencies(nrnpython${pyver} nrniv_lib) - list(APPEND nrnpython_lib_list nrnpython${pyver}) - install(TARGETS nrnpython${pyver} DESTINATION ${NRN_INSTALL_SHARE_LIB_DIR}) - endforeach() + ${PROJECT_BINARY_DIR}/src/oc) + if(NRN_ENABLE_INTERVIEWS) + list(APPEND INCLUDE_DIRS ${IV_INCLUDE_DIR}) else() - # build python3 library and install it - if(NRNPYTHON_INCLUDE3) - add_library(nrnpython3 SHARED ${NRN_NRNPYTHON_SRC_FILES}) - add_dependencies(nrnpython3 nrniv_lib) - target_include_directories(nrnpython3 PRIVATE "${NRN_OC_GENERATED_SOURCES}") - list(APPEND nrnpython_lib_list nrnpython3) - target_include_directories(nrnpython3 BEFORE 
PUBLIC ${NRNPYTHON_INCLUDE3} ${INCLUDE_DIRS}) - install(TARGETS nrnpython3 DESTINATION ${NRN_INSTALL_SHARE_LIB_DIR}) - endif() + list(APPEND INCLUDE_DIRS ../ivos) endif() + foreach(val RANGE ${NRN_PYTHON_ITERATION_LIMIT}) + list(GET NRN_PYTHON_VERSIONS ${val} pyver) + list(GET NRN_PYTHON_INCLUDES ${val} pyinc) + list(GET NRN_PYTHON_LIBRARIES ${val} pylib) + add_library(nrnpython${pyver} SHARED ${NRN_NRNPYTHON_SRC_FILES}) + target_include_directories(nrnpython${pyver} BEFORE PUBLIC ${pyinc} ${INCLUDE_DIRS}) + target_link_libraries(nrnpython${pyver} nrniv_lib ${Readline_LIBRARY}) + if(NRN_LINK_AGAINST_PYTHON) + target_link_libraries(nrnpython${pyver} ${pylib}) + endif() + add_dependencies(nrnpython${pyver} nrniv_lib) + list(APPEND nrnpython_lib_list nrnpython${pyver}) + install(TARGETS nrnpython${pyver} DESTINATION ${NRN_INSTALL_SHARE_LIB_DIR}) + endforeach() endif() +configure_file(_config_params.py.in "${PROJECT_BINARY_DIR}/lib/python/neuron/_config_params.py" + @ONLY) + # Install package files that were created in build (e.g. .py.in) install( DIRECTORY ${PROJECT_BINARY_DIR}/share/lib/python @@ -119,9 +92,8 @@ if(NRN_ENABLE_MODULE_INSTALL) set(binary_dir_filename ${CMAKE_CURRENT_BINARY_DIR}/inithoc.cpp) set(source_dir_filename ${CMAKE_CURRENT_SOURCE_DIR}/inithoc.cpp) - set(inithoc_hdeps - ${CMAKE_CURRENT_SOURCE_DIR}/../oc/nrnmpi.h ${CMAKE_CURRENT_BINARY_DIR}/../oc/nrnmpiuse.h - ${CMAKE_CURRENT_BINARY_DIR}/nrnpython_config.h) + set(inithoc_hdeps ${CMAKE_CURRENT_SOURCE_DIR}/../oc/nrnmpi.h + ${CMAKE_CURRENT_BINARY_DIR}/../oc/nrnmpiuse.h) add_custom_command( OUTPUT ${binary_dir_filename} @@ -187,20 +159,23 @@ if(NRN_ENABLE_MODULE_INSTALL) set(NRN_SETUP_PY_BUILD_EXT_OPTIONS "build_ext") if(NRN_ENABLE_MUSIC) # NRN_SETUP_PY_INC_DIRS is crafted for the MUSIC build, to be extended to other if needed - set(NRN_SETUP_PY_INC_DIRS "${MUSIC_INCDIR};${MPI_INCLUDE_PATH}") + set(NRN_SETUP_PY_INC_DIRS "${MUSIC_INCDIR};${MPI_C_INCLUDE_DIRS}") string(REPLACE ";" ":" NRN_SETUP_PY_INC_DIRS "${NRN_SETUP_PY_INC_DIRS}") list(APPEND NRN_SETUP_PY_BUILD_EXT_OPTIONS "--include-dirs=${NRN_SETUP_PY_INC_DIRS}") endif() + set(defines ${NRN_COMPILE_DEFS}) if(MINGW) - list(APPEND NRN_SETUP_PY_BUILD_EXT_OPTIONS "--define=MS_WIN64") + list(APPEND defines "MS_WIN64") endif() + list(JOIN defines , defines_str) + list(APPEND NRN_SETUP_PY_BUILD_EXT_OPTIONS "--define=${defines_str}") # force rebuild of cython generated files for PYTHON_DYNAMIC if(NRN_ENABLE_PYTHON_DYNAMIC) list(APPEND NRN_SETUP_PY_BUILD_EXT_OPTIONS "--force") endif() # Build python module - foreach(pyexe ${NRN_PYTHON_EXE_LIST}) + foreach(pyexe ${NRN_PYTHON_EXECUTABLES}) add_custom_command( TARGET hoc_module POST_BUILD @@ -215,4 +190,8 @@ if(NRN_ENABLE_MODULE_INSTALL) add_dependencies(hoc_module nrniv_lib rxdmath ${nrnpython_lib_list}) install(DIRECTORY ${NRN_PYTHON_BUILD_LIB} DESTINATION lib) +else() + # Make sure this is included in the wheels + install(FILES "${PROJECT_BINARY_DIR}/lib/python/neuron/_config_params.py" + DESTINATION lib/python/neuron) endif() diff --git a/src/nrnpython/_config_params.py.in b/src/nrnpython/_config_params.py.in new file mode 100644 index 0000000000..c6eeb6f371 --- /dev/null +++ b/src/nrnpython/_config_params.py.in @@ -0,0 +1,2 @@ +# Needed in pure Python so that neuron/__init__.py can give a good error... 
+supported_python_versions = [@NRN_DYNAMIC_PYTHON_LIST_OF_VERSION_STRINGS@] diff --git a/src/nrnpython/grids.cpp b/src/nrnpython/grids.cpp index 06f97d5d44..23040d396d 100644 --- a/src/nrnpython/grids.cpp +++ b/src/nrnpython/grids.cpp @@ -246,7 +246,6 @@ ICS_Grid_node::ICS_Grid_node(PyHocObject* my_states, double atolscale, double* ics_alphas) { int k; - ics_num_segs = 0; _num_nodes = num_nodes; diffusable = is_diffusable; this->atolscale = atolscale; @@ -271,7 +270,6 @@ ICS_Grid_node::ICS_Grid_node(PyHocObject* my_states, ics_surface_nodes_per_seg = NULL; ics_surface_nodes_per_seg_start_indices = NULL; - ics_concentration_seg_ptrs = NULL; ics_scale_factors = NULL; ics_current_seg_ptrs = NULL; @@ -605,14 +603,11 @@ extern "C" void ics_set_grid_concentrations(int grid_list_index, g->ics_surface_nodes_per_seg = nodes_per_seg; g->ics_surface_nodes_per_seg_start_indices = nodes_per_seg_start_indices; - - g->ics_concentration_seg_ptrs = (double**) malloc(n * sizeof(double*)); + g->ics_concentration_seg_handles.reserve(n); for (i = 0; i < n; i++) { - g->ics_concentration_seg_ptrs[i] = static_cast( - ((PyHocObject*) PyList_GET_ITEM(neuron_pointers, i))->u.px_); + g->ics_concentration_seg_handles.push_back( + reinterpret_cast(PyList_GET_ITEM(neuron_pointers, i))->u.px_); } - - g->ics_num_segs = n; } extern "C" void ics_set_grid_currents(int grid_list_index, @@ -663,10 +658,10 @@ extern "C" void set_grid_concentrations(int grid_list_index, } /* free the old concentration list */ - free(g->concentration_list); + delete[] g->concentration_list; /* allocate space for the new list */ - g->concentration_list = (Concentration_Pair*) malloc(sizeof(Concentration_Pair) * n); + g->concentration_list = new Concentration_Pair[n]; g->num_concentrations = n; /* populate the list */ @@ -674,8 +669,8 @@ extern "C" void set_grid_concentrations(int grid_list_index, for (i = 0; i < n; i++) { /* printf("set_grid_concentrations %ld\n", i); */ g->concentration_list[i].source = PyInt_AS_LONG(PyList_GET_ITEM(grid_indices, i)); - g->concentration_list[i].destination = static_cast( - ((PyHocObject*) PyList_GET_ITEM(neuron_pointers, i))->u.px_); + g->concentration_list[i].destination = + reinterpret_cast(PyList_GET_ITEM(neuron_pointers, i))->u.px_; } } @@ -707,10 +702,10 @@ extern "C" void set_grid_currents(int grid_list_index, } /* free the old current list */ - free(g->current_list); + delete[] g->current_list; /* allocate space for the new list */ - g->current_list = (Current_Triple*) malloc(sizeof(Current_Triple) * n); + g->current_list = new Current_Triple[n]; g->num_currents = n; /* populate the list */ @@ -718,8 +713,8 @@ extern "C" void set_grid_currents(int grid_list_index, for (i = 0; i < n; i++) { g->current_list[i].destination = PyInt_AS_LONG(PyList_GET_ITEM(grid_indices, i)); g->current_list[i].scale_factor = PyFloat_AS_DOUBLE(PyList_GET_ITEM(scale_factors, i)); - g->current_list[i].source = static_cast( - ((PyHocObject*) PyList_GET_ITEM(neuron_pointers, i))->u.px_); + g->current_list[i].source = + reinterpret_cast(PyList_GET_ITEM(neuron_pointers, i))->u.px_; /* printf("set_grid_currents %ld out of %ld, %ld, %ld\n", i, n, * PyList_Size(neuron_pointers), PyList_Size(scale_factors)); */ } /* @@ -880,7 +875,7 @@ void ECS_Grid_node::do_grid_currents(double* output, double dt, int grid_id) { /*TODO: Handle multiple grids with one pass*/ /*Maybe TODO: Should check #currents << #voxels and not the other way round*/ double* val; - // MEM_ZERO(output,sizeof(double)*grid->size_x*grid->size_y*grid->size_z); + // 
memset(output, 0, sizeof(double)*grid->size_x*grid->size_y*grid->size_z); /* currents, via explicit Euler */ n = num_all_currents; m = num_currents; @@ -929,7 +924,7 @@ void ECS_Grid_node::do_grid_currents(double* output, double dt, int grid_id) { /*Remove the contribution from membrane currents*/ for (i = 0; i < induced_current_count; i++) output[induced_currents_index[i]] -= dt * (induced_currents[i] * induced_currents_scale[i]); - MEM_ZERO(induced_currents, induced_current_count * sizeof(double)); + memset(induced_currents, 0, induced_current_count * sizeof(double)); } double* ECS_Grid_node::set_rxd_currents(int current_count, @@ -946,7 +941,7 @@ double* ECS_Grid_node::set_rxd_currents(int current_count, induced_currents_index = current_indices; for (i = 0; i < current_count; i++) { for (j = 0; j < num_all_currents; j++) { - if (static_cast(ptrs[i]->u.px_) == current_list[j].source) { + if (ptrs[i]->u.px_ == current_list[j].source) { volume_fraction = (VARIABLE_ECS_VOLUME == VOLUME_FRACTION ? alpha[current_list[j].destination] : alpha[0]); @@ -1174,7 +1169,7 @@ void ECS_Grid_node::initialize_multicompartment_reaction() { proc_induced_current_count[nrnmpi_numprocs - 1]; all_scales = (double*) malloc(induced_current_count * sizeof(double)); - all_indices = (int*) malloc(induced_current_count * sizeof(double)); + all_indices = (int*) malloc(induced_current_count * sizeof(int)); memcpy(&all_scales[proc_induced_current_offset[nrnmpi_myid]], induced_currents_scale, sizeof(double) * proc_induced_current_count[nrnmpi_myid]); @@ -1239,7 +1234,7 @@ void ECS_Grid_node::do_multicompartment_reactions(double* result) { for (i = 0; i < total_reaction_states; i++) result[all_reaction_indices[i]] += all_reaction_states[i]; } - MEM_ZERO(all_reaction_states, total_reaction_states * sizeof(int)); + memset(all_reaction_states, 0, total_reaction_states * sizeof(int)); } // TODO: Implement this @@ -1251,8 +1246,8 @@ ECS_Grid_node::~ECS_Grid_node() { free(states_x); free(states_y); free(states_cur); - free(concentration_list); - free(current_list); + delete[] concentration_list; + delete[] current_list; free(bc); free(current_dest); #if NRNMPI @@ -1645,13 +1640,13 @@ void ICS_Grid_node::apply_node_flux3D(double dt, double* ydot) { } void ICS_Grid_node::do_grid_currents(double* output, double dt, int) { - MEM_ZERO(states_cur, sizeof(double) * _num_nodes); + memset(states_cur, 0, sizeof(double) * _num_nodes); if (ics_current_seg_ptrs != NULL) { - ssize_t i, j, n; + ssize_t i, j; int seg_start_index, seg_stop_index; int state_index; double seg_cur; - n = ics_num_segs; + auto const n = ics_concentration_seg_handles.size(); for (i = 0; i < n; i++) { seg_start_index = ics_surface_nodes_per_seg_start_indices[i]; seg_stop_index = ics_surface_nodes_per_seg_start_indices[i + 1]; @@ -1726,23 +1721,17 @@ void ICS_Grid_node::variable_step_hybrid_connections(const double* cvode_states_ } void ICS_Grid_node::scatter_grid_concentrations() { - ssize_t i, j, n; - double total_seg_concentration; - double average_seg_concentration; - int seg_start_index, seg_stop_index; - - n = ics_num_segs; - - for (i = 0; i < n; i++) { - total_seg_concentration = 0.0; - seg_start_index = ics_surface_nodes_per_seg_start_indices[i]; - seg_stop_index = ics_surface_nodes_per_seg_start_indices[i + 1]; - for (j = seg_start_index; j < seg_stop_index; j++) { + auto const n = ics_concentration_seg_handles.size(); + for (auto i = 0ul; i < n; ++i) { + double total_seg_concentration{}; + auto const seg_start_index = 
ics_surface_nodes_per_seg_start_indices[i]; + auto const seg_stop_index = ics_surface_nodes_per_seg_start_indices[i + 1]; + for (auto j = seg_start_index; j < seg_stop_index; j++) { total_seg_concentration += states[ics_surface_nodes_per_seg[j]]; } - average_seg_concentration = total_seg_concentration / (seg_stop_index - seg_start_index); - - *ics_concentration_seg_ptrs[i] = average_seg_concentration; + auto const average_seg_concentration = total_seg_concentration / + (seg_stop_index - seg_start_index); + *ics_concentration_seg_handles[i] = average_seg_concentration; } } @@ -1753,8 +1742,8 @@ ICS_Grid_node::~ICS_Grid_node() { free(states_y); free(states_z); free(states_cur); - free(concentration_list); - free(current_list); + delete[] concentration_list; + delete[] current_list; free(current_dest); #if NRNMPI if (nrnmpi_use) { diff --git a/src/nrnpython/grids.h b/src/nrnpython/grids.h index 38a95ad6fd..1f5e9f1042 100644 --- a/src/nrnpython/grids.h +++ b/src/nrnpython/grids.h @@ -51,16 +51,17 @@ typedef struct Flux_pair { int grid_index; // Location in grid } Flux; -typedef struct { - double* destination; /* memory loc to transfer concentration to */ - long source; /* index in grid for source */ -} Concentration_Pair; +struct Concentration_Pair { + neuron::container::data_handle<double> destination; /* memory loc to transfer concentration to + */ + long source; /* index in grid for source */ +}; -typedef struct { - long destination; /* index in grid */ - double* source; /* memory loc of e.g. ica */ +struct Current_Triple { + long destination; /* index in grid */ + neuron::container::data_handle<double> source; /* memory loc of e.g. ica */ double scale_factor; -} Current_Triple; +}; typedef void (*ReactionRate)(double**, double**, @@ -146,10 +147,9 @@ class Grid_node { int64_t* ics_surface_nodes_per_seg; int64_t* ics_surface_nodes_per_seg_start_indices; - double** ics_concentration_seg_ptrs; + std::vector<neuron::container::data_handle<double>> ics_concentration_seg_handles; double** ics_current_seg_ptrs; double* ics_scale_factors; - int ics_num_segs; int insert(int grid_list_index); int node_flux_count; @@ -158,7 +158,7 @@ class Grid_node { PyObject** node_flux_src; - virtual ~Grid_node(){}; + virtual ~Grid_node() {} virtual void set_diffusion(double*, int) = 0; virtual void set_num_threads(const int n) = 0; virtual void do_grid_currents(double*, double, int) = 0; diff --git a/src/nrnpython/inithoc.cpp b/src/nrnpython/inithoc.cpp index a138e66fea..184ccafc00 100644 --- a/src/nrnpython/inithoc.cpp +++ b/src/nrnpython/inithoc.cpp @@ -1,8 +1,8 @@ +#include "../../nrnconf.h" #include "nrnmpiuse.h" #include #include #include "nrnmpi.h" -#include "nrnpython_config.h" #if defined(__MINGW32__) #define _hypot hypot #endif @@ -13,13 +6,6 @@ #include #include -#if defined(NRNPYTHON_DYNAMICLOAD) && NRNPYTHON_DYNAMICLOAD > 0 -// when compiled with different Python.h, force correct value -#undef NRNPYTHON_DYNAMICLOAD -#define NRNPYTHON_DYNAMICLOAD PY_MAJOR_VERSION -#endif - - extern int nrn_is_python_extension; extern int nrn_nobanner_; extern int ivocmain(int, const char**, const char**); @@ -28,17 +21,12 @@ extern int nrn_main_launch; // int nrn_global_argc; extern char** nrn_global_argv; - -extern void nrnpy_augment_path(); extern void (*p_nrnpython_finalize)(); extern PyObject* nrnpy_hoc(); #if NRNMPI_DYNAMICLOAD extern void nrnmpi_stubs(); -extern std::string nrnmpi_load(int is_python); -#endif -#if NRNPYTHON_DYNAMICLOAD -extern int nrnpy_site_problem; +extern std::string nrnmpi_load(); #endif #if NRN_ENABLE_THREADS @@ -260,14 +248,14 @@ extern "C"
PyObject* PyInit_hoc() { * In case of dynamic mpi build we load MPI unless NEURON_INIT_MPI is explicitly set to 0. * and there is no '-mpi' arg. * We call nrnmpi_load to load MPI library which returns: - * - nil if loading is successfull + * - nullptr if loading is successful * - error message in case of loading error */ if (env_mpi != NULL && strcmp(env_mpi, "0") == 0 && !have_opt("-mpi")) { libnrnmpi_is_loaded = 0; } if (libnrnmpi_is_loaded) { - pmes = nrnmpi_load(1); + pmes = nrnmpi_load(); if (!pmes.empty() && env_mpi == NULL) { // common case on MAC distribution is no NEURON_INIT_MPI and // no MPI installed (so nrnmpi_load fails) @@ -312,7 +300,8 @@ extern "C" PyObject* PyInit_hoc() { } #endif // NRNMPI - std::string buf{NRNHOSTCPU "/.libs/libnrnmech.so"}; + std::string buf{neuron::config::system_processor}; + buf += "/.libs/libnrnmech.so"; // printf("buf = |%s|\n", buf); FILE* f; if ((f = fopen(buf.c_str(), "r")) != 0) { @@ -375,10 +364,6 @@ extern "C" PyObject* PyInit_hoc() { nrn_main_launch = 2; ivocmain(argc, (const char**) argv, (const char**) env); -// nrnpy_augment_path(); -#if NRNPYTHON_DYNAMICLOAD - nrnpy_site_problem = 0; -#endif // NRNPYTHON_DYNAMICLOAD return nrnpy_hoc(); } diff --git a/src/nrnpython/nrn_pyhocobject.h b/src/nrnpython/nrn_pyhocobject.h index 0c1db3eb68..11fb4d525e 100644 --- a/src/nrnpython/nrn_pyhocobject.h +++ b/src/nrnpython/nrn_pyhocobject.h @@ -1,4 +1,5 @@ #pragma once +#include "neuron/container/data_handle.hpp" #include "nrnpython.h" struct Object; @@ -11,7 +12,7 @@ struct PyHocObject { char* s_; char** pstr_; Object* ho_; - double* px_; + neuron::container::data_handle<double> px_; PyHoc::IteratorState its_; } u; Symbol* sym_; // for functions and arrays diff --git a/src/nrnpython/nrnpy_hoc.cpp b/src/nrnpython/nrnpy_hoc.cpp index 6d847ce6db..19e8531a1b 100644 --- a/src/nrnpython/nrnpy_hoc.cpp +++ b/src/nrnpython/nrnpy_hoc.cpp @@ -1,9 +1,13 @@ #include "ivocvect.h" +#include "neuron/container/data_handle.hpp" #include "nrniv_mf.h" #include "nrn_pyhocobject.h" #include "nrnoc2iv.h" +#include "nrnpy.h" #include "nrnpy_utils.h" #include "nrnpython.h" +#include + #include "nrnwrap_dlfcn.h" #include "ocfile.h" #include "ocjump.h" @@ -17,29 +21,11 @@ #include #include -#if defined(NRNPYTHON_DYNAMICLOAD) && NRNPYTHON_DYNAMICLOAD > 0 -// when compiled with different Python.h, force correct value -#undef NRNPYTHON_DYNAMICLOAD -#define NRNPYTHON_DYNAMICLOAD PY_MAJOR_VERSION -#endif - extern PyTypeObject* psection_type; - -// copied from nrnpy_nrn -typedef struct { - PyObject_HEAD - Section* sec_; - char* name_; - PyObject* cell_; -} NPySecObj; - +extern std::vector py_exposed_classes; #include "parse.hpp" extern void (*nrnpy_sectionlist_helper_)(void*, Object*); -extern Object** (*nrnpy_gui_helper_)(const char*, Object*); -extern Object** (*nrnpy_gui_helper3_)(const char*, Object*, int); -extern char** (*nrnpy_gui_helper3_str_)(const char*, Object*, int); -extern double (*nrnpy_object_to_double_)(Object*); extern void* (*nrnpy_get_pyobj)(Object* obj); extern void (*nrnpy_restore_savestate)(int64_t, char*); extern void (*nrnpy_store_savestate)(char** save_data, uint64_t* save_data_size); @@ -75,13 +61,8 @@ extern PyObject* nrnpy_cas(PyObject*, PyObject*); extern PyObject* nrnpy_forall(PyObject*, PyObject*); extern PyObject* nrnpy_newsecobj(PyObject*, PyObject*, PyObject*); extern int section_object_seen; -extern Symbol* nrnpy_pyobj_sym_; extern Symbol* nrn_child_sym; extern int nrn_secref_nchild(Section*); -extern PyObject* nrnpy_hoc2pyobject(Object*); -PyObject*
nrnpy_ho2po(Object*); -Object* nrnpy_po2ho(PyObject*); -extern Object* nrnpy_pyobject_in_obj(PyObject*); static void pyobject_in_objptr(Object**, PyObject*); extern IvocVect* (*nrnpy_vec_from_python_p_)(void*); extern Object** (*nrnpy_vec_to_python_p_)(void*); @@ -89,12 +70,10 @@ extern Object** (*nrnpy_vec_as_numpy_helper_)(int, double*); extern Object* (*nrnpy_rvp_rxd_to_callable)(Object*); extern "C" int nrnpy_set_vec_as_numpy(PyObject* (*p)(int, double*) ); // called by ctypes. extern "C" int nrnpy_set_gui_callback(PyObject*); -extern double** nrnpy_setpointer_helper(PyObject*, PyObject*); extern Symbol* ivoc_alias_lookup(const char* name, Object* ob); class NetCon; extern int nrn_netcon_weight(NetCon*, double**); extern int nrn_matrix_dim(void*, int); -extern NPySecObj* newpysechelp(Section* sec); extern PyObject* pmech_types; // Python map for name to Mechanism extern PyObject* rangevars_; // Python map for name to Symbol @@ -106,6 +85,10 @@ static cTemplate* hoc_vec_template_; static cTemplate* hoc_list_template_; static cTemplate* hoc_sectionlist_template_; +static std::unordered_map sym_to_type_map; +static std::unordered_map type_to_sym_map; +static std::vector exposed_py_type_names; + // typestr returned by Vector.__array_interface__ // byteorder (first element) is modified at import time // to reflect the system byteorder @@ -145,8 +128,31 @@ static PyObject* get_mech_object_ = NULL; static PyObject* nrnpy_rvp_pyobj_callback = NULL; PyTypeObject* hocobject_type; + static PyObject* hocobj_call(PyHocObject* self, PyObject* args, PyObject* kwrds); +bool nrn_chk_data_handle(const neuron::container::data_handle& pd) { + if (pd) { + return true; + } + PyErr_SetString(PyExc_ValueError, "Invalid data_handle"); + return false; +} + +/** @brief if hoc_evalpointer calls hoc_execerror, return 1 + **/ +static int hoc_evalpointer_err() { + try { + hoc_evalpointer(); + } catch (std::exception const& e) { + std::ostringstream oss; + oss << "subscript out of range (array size or number of dimensions changed?)"; + PyErr_SetString(PyExc_IndexError, oss.str().c_str()); + return 1; + } + return 0; +} + static PyObject* nrnexec(PyObject* self, PyObject* args) { const char* cmd; if (!PyArg_ParseTuple(args, "s", &cmd)) { @@ -194,8 +200,11 @@ static void hocobj_dealloc(PyHocObject* self) { static PyObject* hocobj_new(PyTypeObject* subtype, PyObject* args, PyObject* kwds) { PyObject* subself; + PyObject* base; + PyHocObject* hbase = nullptr; + subself = subtype->tp_alloc(subtype, 0); - // printf("hocobj_new %s %p\n", subtype->tp_name, subself); + // printf("hocobj_new %s %p %p\n", subtype->tp_name, subtype, subself); if (subself == NULL) { return NULL; } @@ -207,37 +216,47 @@ static PyObject* hocobj_new(PyTypeObject* subtype, PyObject* args, PyObject* kwd self->nindex_ = 0; self->type_ = PyHoc::HocTopLevelInterpreter; self->iteritem_ = 0; - if (kwds && PyDict_Check(kwds)) { - PyObject* base = PyDict_GetItemString(kwds, "hocbase"); - if (base) { - int ok = 0; - if (PyObject_TypeCheck(base, hocobject_type)) { - PyHocObject* hbase = (PyHocObject*) base; - if (hbase->type_ == PyHoc::HocFunction && hbase->sym_->type == TEMPLATE) { - // printf("hocobj_new base %s\n", hbase->sym_->name); - // remove the hocbase keyword since hocobj_call only allows - // the "sec" keyword argument - PyDict_DelItemString(kwds, "hocbase"); - PyObject* r = hocobj_call(hbase, args, kwds); - if (!r) { - Py_DECREF(subself); - return NULL; - } - PyHocObject* rh = (PyHocObject*) r; - self->type_ = rh->type_; - self->ho_ = rh->ho_; - 
hoc_obj_ref(self->ho_); - Py_DECREF(r); - ok = 1; - } - } - if (!ok) { - Py_DECREF(subself); - PyErr_SetString(PyExc_TypeError, "HOC base class not valid"); - return NULL; - } + + // if subtype is a subclass of some NEURON class, then one of its + // tp_mro's is in sym_to_type_map + for (Py_ssize_t i = 0; i < PyTuple_Size(subtype->tp_mro); i++) { + PyObject* item = PyTuple_GetItem(subtype->tp_mro, i); + auto symbol_result = type_to_sym_map.find((PyTypeObject*) item); + if (symbol_result != type_to_sym_map.end()) { + hbase = (PyHocObject*) hocobj_new(hocobject_type, 0, 0); + hbase->type_ = PyHoc::HocFunction; + hbase->sym_ = symbol_result->second; + break; + } + } + + if (kwds && PyDict_Check(kwds) && (base = PyDict_GetItemString(kwds, "hocbase"))) { + if (PyObject_TypeCheck(base, hocobject_type)) { + hbase = (PyHocObject*) base; + } else { + PyErr_SetString(PyExc_TypeError, "HOC base class not valid"); + Py_DECREF(subself); + return NULL; + } + PyDict_DelItemString(kwds, "hocbase"); + } + + if (hbase and hbase->type_ == PyHoc::HocFunction && hbase->sym_->type == TEMPLATE) { + // printf("hocobj_new base %s\n", hbase->sym_->name); + // remove the hocbase keyword since hocobj_call only allows + // the "sec" keyword argument + PyObject* r = hocobj_call(hbase, args, kwds); + if (!r) { + Py_DECREF(subself); + return NULL; } + PyHocObject* rh = (PyHocObject*) r; + self->type_ = rh->type_; + self->ho_ = rh->ho_; + hoc_obj_ref(self->ho_); + Py_DECREF(r); } + return subself; } @@ -268,63 +287,58 @@ static void pyobject_in_objptr(Object** op, PyObject* po) { } static PyObject* hocobj_name(PyObject* pself, PyObject* args) { - PyHocObject* self = (PyHocObject*) pself; - char buf[512], *cp; - buf[0] = '\0'; - cp = buf; - auto cp_size = sizeof(buf); - PyObject* po; + auto* const self = reinterpret_cast(pself); + std::string cp; if (self->type_ == PyHoc::HocObject) { - std::snprintf(cp, cp_size, "%s", hoc_object_name(self->ho_)); + cp = hoc_object_name(self->ho_); } else if (self->type_ == PyHoc::HocFunction || self->type_ == PyHoc::HocArray) { - std::snprintf(cp, - cp_size, - "%s%s%s", - self->ho_ ? hoc_object_name(self->ho_) : "", - self->ho_ ? "." 
: "", - self->sym_->name); + if (self->ho_) { + cp.append(hoc_object_name(self->ho_)); + cp.append(1, '.'); + } + cp.append(self->sym_->name); if (self->type_ == PyHoc::HocArray) { for (int i = 0; i < self->nindex_; ++i) { - auto tmp = strlen(cp); - cp += tmp; - cp_size -= tmp; - std::snprintf(cp, cp_size, "[%d]", self->indices_[i]); - } - auto tmp = strlen(cp); - cp += tmp; - cp_size -= tmp; - std::snprintf(cp, cp_size, "[?]"); + cp.append(1, '['); + cp.append(std::to_string(self->indices_[i])); + cp.append(1, ']'); + } + cp.append("[?]"); } else { - auto tmp = strlen(cp); - cp += tmp; - cp_size -= tmp; - std::snprintf(cp, cp_size, "()"); + cp.append("()"); } } else if (self->type_ == PyHoc::HocRefNum) { - std::snprintf(cp, cp_size, "", self->u.x_); + cp.append("u.x_)); + cp.append(1, '>'); } else if (self->type_ == PyHoc::HocRefStr) { - std::snprintf(cp, cp_size, "", self->u.s_); + cp.append("u.s_); + cp.append("\">"); } else if (self->type_ == PyHoc::HocRefPStr) { - std::snprintf(cp, cp_size, "", *self->u.pstr_); + cp.append("u.pstr_); + cp.append("\">"); } else if (self->type_ == PyHoc::HocRefObj) { - std::snprintf(cp, cp_size, "", hoc_object_name(self->u.ho_)); + cp.append("u.ho_)); + cp.append("\">"); } else if (self->type_ == PyHoc::HocForallSectionIterator) { - std::snprintf(cp, cp_size, ""); + cp.append(""); } else if (self->type_ == PyHoc::HocSectionListIterator) { - std::snprintf(cp, cp_size, ""); + cp.append(""); } else if (self->type_ == PyHoc::HocScalarPtr) { - if (self->u.px_) { - std::snprintf(cp, cp_size, "", *self->u.px_); - } else { - std::snprintf(cp, cp_size, ""); - } + std::ostringstream oss; + oss << self->u.px_; + cp = std::move(oss).str(); } else if (self->type_ == PyHoc::HocArrayIncomplete) { - std::snprintf(cp, cp_size, "", self->sym_->name); + cp.append("sym_->name); + cp.append(1, '>'); } else { - std::snprintf(cp, cp_size, ""); + cp.append(""); } - po = Py_BuildValue("s", buf); - return po; + return Py_BuildValue("s", cp.c_str()); } static PyObject* hocobj_repr(PyObject* p) { @@ -337,7 +351,8 @@ static Inst* save_pc(Inst* newpc) { return savpc; } -static int hocobj_pushargs(PyObject* args, std::vector& s2free) { +// also called from nrnpy_nrn.cpp +int hocobj_pushargs(PyObject* args, std::vector& s2free) { int i, narg = PyTuple_Size(args); for (i = 0; i < narg; ++i) { PyObject* po = PyTuple_GetItem(args, i); @@ -384,7 +399,10 @@ static int hocobj_pushargs(PyObject* args, std::vector& s2free) { } else if (tp == PyHoc::HocRefObj) { hoc_pushobj(&pho->u.ho_); } else if (tp == PyHoc::HocScalarPtr) { - hoc_pushpx(pho->u.px_); + if (!pho->u.px_) { + hoc_execerr_ext("Invalid pointer (arg %d)", i); + } + hoc_push(pho->u.px_); } else if (tp == PyHoc::HocRefPStr) { hoc_pushstr(pho->u.pstr_); } else { @@ -406,7 +424,7 @@ static int hocobj_pushargs(PyObject* args, std::vector& s2free) { return narg; } -static void hocobj_pushargs_free_strings(std::vector& s2free) { +void hocobj_pushargs_free_strings(std::vector& s2free) { std::vector::iterator it = s2free.begin(); for (; it != s2free.end(); ++it) { if (*it) { @@ -518,11 +536,17 @@ PyObject* nrnpy_ho2po(Object* o) { po = hocobj_new(hocobject_type, 0, 0); ((PyHocObject*) po)->ho_ = o; ((PyHocObject*) po)->type_ = PyHoc::HocObject; + auto location = sym_to_type_map.find(o->ctemplate->sym); + if (location != sym_to_type_map.end()) { + Py_INCREF(location->second); + po->ob_type = location->second; + } hoc_obj_ref(o); } return po; } +// not static because it's used in nrnpy_nrn.cpp Object* nrnpy_po2ho(PyObject* po) { // po may be 
None, or encapsulate a hoc object (via the // PyHocObject, or be a native Python instance such as [1,2,3] @@ -550,7 +574,7 @@ Object* nrnpy_po2ho(PyObject* po) { return o; } -PyObject* nrnpy_hoc_pop() { +PyObject* nrnpy_hoc_pop(const char* mes) { PyObject* result = 0; Object* ho; Object** d; @@ -559,13 +583,13 @@ result = Py_BuildValue("s", *hoc_strpop()); break; case VAR: { - double* px = hoc_pxpop(); - if (px) { + // remove mes arg when test coverage development completed + // printf("VAR nrnpy_hoc_pop %s\n", mes); + auto const px = hoc_pop_handle<double>(); + if (nrn_chk_data_handle(px)) { // unfortunately, this is nonsense if NMODL POINTER is pointing // to something other than a double. result = Py_BuildValue("d", *px); - } else { - PyErr_SetString(PyExc_AttributeError, "POINTER is NULL"); } } break; case NUMBER: @@ -597,10 +621,8 @@ static int set_final_from_stk(PyObject* po) { } break; case VAR: { - double x; - double* px; - if (PyArg_Parse(po, "d", &x) == 1) { - px = hoc_pxpop(); + if (double x; PyArg_Parse(po, "d", &x) == 1) { + auto px = hoc_pop_handle<double>(); if (px) { // This is a future crash if NMODL POINTER is pointing // to something other than a double. @@ -668,7 +690,8 @@ static void* fcall(void* vself, void* vargs) { case 1: return nrnpy_hoc_int_pop(); default: - return (void*) nrnpy_hoc_pop(); + // No callable hoc function returns a data handle. + return nrnpy_hoc_pop("self->ho_ fcall"); } } if (self->sym_->type == BLTIN) { @@ -682,11 +705,21 @@ static void* fcall(void* vself, void* vargs) { PyHocObject* result = (PyHocObject*) hocobj_new(hocobject_type, 0, 0); result->ho_ = ho; result->type_ = PyHoc::HocObject; + // Note: I think the only reason we're not using ho2po here is because we don't have to + // hocref ho since it was created by hoc_newobj1... but it would be better if we did + // so we could avoid repetitive code + auto location = sym_to_type_map.find(ho->ctemplate->sym); + if (location != sym_to_type_map.end()) { + Py_INCREF(location->second); + ((PyObject*) result)->ob_type = location->second; + } + hocobj_pushargs_free_strings(strings_to_free); return result; } else { - HocTopContextSet Inst fc[4]; - // ugh. so a potential call of hoc_get_last_pointer_symbol will return nil. + HocTopContextSet + Inst fc[4]; + // ugh. so a potential call of hoc_get_last_pointer_symbol will return nullptr.
fc[0].in = STOP; fc[1].sym = self->sym_; fc[2].i = narg; @@ -694,11 +727,11 @@ static void* fcall(void* vself, void* vargs) { Inst* pcsav = save_pc(fc + 1); hoc_call(); hoc_pc = pcsav; - HocContextRestore + HocContextRestore; } hocobj_pushargs_free_strings(strings_to_free); - return (void*) nrnpy_hoc_pop(); + return nrnpy_hoc_pop("laststatement fcall"); } static PyObject* curargs_; @@ -879,18 +912,22 @@ static void eval_component(PyHocObject* po, int ix) { --po->nindex_; } -extern "C" PyObject* nrn_hocobj_ptr(double* pd) { +PyObject* nrn_hocobj_handle(neuron::container::data_handle d) { PyObject* result = hocobj_new(hocobject_type, 0, 0); - PyHocObject* po = (PyHocObject*) result; + auto* const po = reinterpret_cast(result); po->type_ = PyHoc::HocScalarPtr; - po->u.px_ = pd; + po->u.px_ = d; return result; } -int nrn_is_hocobj_ptr(PyObject* po, double*& pd) { +extern "C" PyObject* nrn_hocobj_ptr(double* pd) { + return nrn_hocobj_handle(neuron::container::data_handle{pd}); +} + +int nrn_is_hocobj_ptr(PyObject* po, neuron::container::data_handle& pd) { int ret = 0; if (PyObject_TypeCheck(po, hocobject_type)) { - PyHocObject* hpo = (PyHocObject*) po; + auto* const hpo = reinterpret_cast(po); if (hpo->type_ == PyHoc::HocScalarPtr) { pd = hpo->u.px_; ret = 1; @@ -970,6 +1007,13 @@ static PyObject* hocobj_getattr(PyObject* subself, PyObject* pyname) { } Symbol* sym = getsym(n, self->ho_, 0); + // Return well known types right away + auto location = sym_to_type_map.find(sym); + if (location != sym_to_type_map.end()) { + Py_INCREF(location->second); + return (PyObject*) location->second; + } + if (!sym) { if (self->type_ == PyHoc::HocObject && self->ho_->ctemplate->sym == nrnpy_pyobj_sym_) { PyObject* p = nrnpy_hoc2pyobject(self->ho_); @@ -1141,7 +1185,7 @@ static PyObject* hocobj_getattr(PyObject* subself, PyObject* pyname) { // an array int t = sym->type; if (t == VAR || t == STRING || t == OBJECTVAR || t == RANGEVAR || t == SECTION || - t == SECTIONREF || t == VARALIAS || t == OBJECTALIAS) { + t == SECTIONREF || t == VARALIAS || t == OBJECTALIAS || t == RANGEOBJ) { if (sym != nrn_child_sym && !ISARRAY(sym)) { hoc_push_object(po->ho_); nrn_inpython_ = 1; @@ -1161,9 +1205,10 @@ static PyObject* hocobj_getattr(PyObject* subself, PyObject* pyname) { return result; } else { if (isptr) { - return nrn_hocobj_ptr(hoc_pxpop()); + auto handle = hoc_pop_handle(); + return nrn_hocobj_handle(std::move(handle)); } else { - return nrnpy_hoc_pop(); + return nrnpy_hoc_pop("use-the-component-fork hocobj_getattr"); } } } else { @@ -1180,7 +1225,8 @@ static PyObject* hocobj_getattr(PyObject* subself, PyObject* pyname) { } } // top level interpreter fork - HocTopContextSet switch (sym->type) { + HocTopContextSet + switch (sym->type) { case VAR: // double* if (!ISARRAY(sym)) { if (sym->subtype == USERINT) { @@ -1289,7 +1335,8 @@ static PyObject* hocobj_getattr(PyObject* subself, PyObject* pyname) { } } } - HocContextRestore return result; + HocContextRestore + return result; } static PyObject* hocobj_baseattr(PyObject* subself, PyObject* args) { @@ -1358,7 +1405,6 @@ static int hocobj_setattro(PyObject* subself, PyObject* pyname, PyObject* value) PyObject* p = nrnpy_hoc2pyobject(self->ho_); return PyObject_GenericSetAttr(p, pyname, value); } else if (strncmp(n, "_ref_", 5) == 0) { - extern int nrn_pointer_assign(Prop*, Symbol*, PyObject*); Symbol* rvsym = getsym(n + 5, self->ho_, 0); if (rvsym && rvsym->type == RANGEVAR) { Prop* prop = ob2pntproc_0(self->ho_)->prop; @@ -1413,7 +1459,8 @@ static int 
hocobj_setattro(PyObject* subself, PyObject* pyname, PyObject* value) return -1; } } - HocTopContextSet switch (sym->type) { + HocTopContextSet + switch (sym->type) { case VAR: // double* if (ISARRAY(sym)) { PyErr_SetString(PyExc_TypeError, "Wrong number of subscripts"); @@ -1445,16 +1492,11 @@ static int hocobj_setattro(PyObject* subself, PyObject* pyname, PyObject* value) } } else { hoc_pushs(sym); - hoc_evalpointer(); - err = PyArg_Parse(value, "d", hoc_pxpop()) == 0; - if (!err && sym->subtype == DYNAMICUNITS) { - char mes[100]; - Sprintf(mes, - "Assignment to %s value of physical constant %s", - _nrnunit_use_legacy_ ? "legacy" : "modern", - sym->name); - err = PyErr_WarnEx(PyExc_Warning, mes, 1); + if (hoc_evalpointer_err()) { // not possible to raise error. + HocContextRestore; + return -1; } + err = PyArg_Parse(value, "d", hoc_pxpop()) == 0; } } break; @@ -1508,7 +1550,8 @@ static int hocobj_setattro(PyObject* subself, PyObject* pyname, PyObject* value) err = -1; break; } - HocContextRestore return err; + HocContextRestore + return err; } static Symbol* sym_vec_x; @@ -1800,7 +1843,16 @@ static PyObject* hocobj_getitem(PyObject* self, Py_ssize_t ix) { return NULL; } if (po->type_ == PyHoc::HocScalarPtr) { - result = Py_BuildValue("d", po->u.px_[ix]); + try { + auto const h = po->u.px_.next_array_element(ix); + if (nrn_chk_data_handle(h)) { + result = Py_BuildValue("d", *h); + } + } catch (std::exception const& e) { + // next_array_element throws if ix is invalid + PyErr_SetString(PyExc_IndexError, e.what()); + return nullptr; + } } else if (po->type_ == PyHoc::HocRefNum) { result = Py_BuildValue("d", po->u.x_); } else if (po->type_ == PyHoc::HocRefStr) { @@ -1815,6 +1867,9 @@ static PyObject* hocobj_getitem(PyObject* self, Py_ssize_t ix) { if (po->type_ == PyHoc::HocObject) { // might be in an iterator context if (po->ho_->ctemplate == hoc_vec_template_) { Vect* hv = (Vect*) po->ho_->u.this_pointer; + if (ix < 0) { + ix += vector_capacity(hv); + } if (ix < 0 || ix >= vector_capacity(hv)) { char e[200]; Sprintf(e, "%s", hoc_object_name(po->ho_)); @@ -1825,6 +1880,9 @@ static PyObject* hocobj_getitem(PyObject* self, Py_ssize_t ix) { } } else if (po->ho_->ctemplate == hoc_list_template_) { OcList* hl = (OcList*) po->ho_->u.this_pointer; + if (ix < 0) { + ix += hl->count(); + } if (ix < 0 || ix >= hl->count()) { char e[200]; Sprintf(e, "%s", hoc_object_name(po->ho_)); @@ -1882,14 +1940,19 @@ static PyObject* hocobj_getitem(PyObject* self, Py_ssize_t ix) { if (po->type_ == PyHoc::HocArrayIncomplete) { result = nrn_hocobj_ptr(hoc_pxpop()); } else { - result = nrnpy_hoc_pop(); + result = nrnpy_hoc_pop("po->ho_ hocobj_getitem"); } } } else { // must be a top level intermediate - HocTopContextSet switch (po->sym_->type) { + HocTopContextSet + switch (po->sym_->type) { case VAR: hocobj_pushtop(po, po->sym_, ix); - hoc_evalpointer(); + if (hoc_evalpointer_err()) { + --po->nindex_; + HocContextRestore; + return NULL; + } --po->nindex_; if (po->type_ == PyHoc::HocArrayIncomplete) { assert(!po->u.px_); @@ -1912,14 +1975,42 @@ static PyObject* hocobj_getitem(PyObject* self, Py_ssize_t ix) { --po->nindex_; break; } - HocContextRestore + HocContextRestore; } } return result; } +static PyObject* hocobj_slice_getitem(PyObject* self, PyObject* slice) { + // Non slice indexing still uses original function + if (!PySlice_Check(slice)) { + return hocobj_getitem(self, PyLong_AsLong(slice)); + } + auto* po = (PyHocObject*) self; + if (!po->ho_) { + PyErr_SetString(PyExc_TypeError, "Obj is NULL"); + return 
nullptr; + } + if (po->type_ != PyHoc::HocObject || po->ho_->ctemplate != hoc_vec_template_) { + PyErr_SetString(PyExc_TypeError, "sequence index must be integer, not 'slice'"); + return nullptr; + } + auto* v = (Vect*) po->ho_->u.this_pointer; + Py_ssize_t start = 0; + Py_ssize_t end = 0; + Py_ssize_t step = 0; + Py_ssize_t slicelen = 0; + Py_ssize_t len = vector_capacity(v); + PySlice_GetIndicesEx(slice, len, &start, &end, &step, &slicelen); + if (step == 0) { + PyErr_SetString(PyExc_ValueError, "slice step cannot be zero"); + return nullptr; + } + Object** obj = new_vect(v, slicelen, start, step); + return nrnpy_ho2po(*obj); +} + static int hocobj_setitem(PyObject* self, Py_ssize_t i, PyObject* arg) { - // printf("hocobj_setitem %d\n", i); int err = -1; PyHocObject* po = (PyHocObject*) self; if (po->type_ > PyHoc::HocArray) { @@ -1932,11 +2023,18 @@ static int hocobj_setitem(PyObject* self, Py_ssize_t i, PyObject* arg) { return -1; } if (po->type_ == PyHoc::HocScalarPtr) { - if (i != 0) { - PyErr_SetString(PyExc_IndexError, "index of pointer to hoc scalar must be 0"); + try { + auto const h = po->u.px_.next_array_element(i); + if (nrn_chk_data_handle(h)) { + PyArg_Parse(arg, "d", static_cast(h)); + } else { + return -1; + } + } catch (std::exception const& e) { + // next_array_element throws if ix is invalid + PyErr_SetString(PyExc_IndexError, e.what()); return -1; } - PyArg_Parse(arg, "d", static_cast(po->u.px_)); } else if (po->type_ == PyHoc::HocRefNum) { PyArg_Parse(arg, "d", &po->u.x_); } else if (po->type_ == PyHoc::HocRefStr) { @@ -1994,10 +2092,15 @@ static int hocobj_setitem(PyObject* self, Py_ssize_t i, PyObject* arg) { err = set_final_from_stk(arg); } } else { // must be a top level intermediate - HocTopContextSet switch (po->sym_->type) { + HocTopContextSet + switch (po->sym_->type) { case VAR: hocobj_pushtop(po, po->sym_, i); - hoc_evalpointer(); + if (hoc_evalpointer_err()) { + HocContextRestore; + --po->nindex_; + return -1; + } --po->nindex_; err = PyArg_Parse(arg, "d", hoc_pxpop()) != 1; break; @@ -2025,11 +2128,59 @@ static int hocobj_setitem(PyObject* self, Py_ssize_t i, PyObject* arg) { PyErr_SetString(PyExc_TypeError, "not assignable"); break; } - HocContextRestore + HocContextRestore; } return err; } +static int hocobj_slice_setitem(PyObject* self, PyObject* slice, PyObject* arg) { + // Non slice indexing still uses original function + if (!PySlice_Check(slice)) { + return hocobj_setitem(self, PyLong_AsLong(slice), arg); + } + auto* po = (PyHocObject*) self; + if (!po->ho_) { + PyErr_SetString(PyExc_TypeError, "Obj is NULL"); + return -1; + } + if (po->type_ != PyHoc::HocObject || po->ho_->ctemplate != hoc_vec_template_) { + PyErr_SetString(PyExc_TypeError, "sequence index must be integer, not 'slice'"); + return -1; + } + auto v = (Vect*) po->ho_->u.this_pointer; + Py_ssize_t start = 0; + Py_ssize_t end = 0; + Py_ssize_t step = 0; + Py_ssize_t slicelen = 0; + Py_ssize_t cap = vector_capacity(v); + PySlice_GetIndicesEx(slice, cap, &start, &end, &step, &slicelen); + // Slice index assignment requires a list of the same size as the slice + PyObject* iter = PyObject_GetIter(arg); + if (!iter) { + PyErr_SetString(PyExc_TypeError, "can only assign an iterable"); + return -1; + } + PyObject* val = nullptr; + for (Py_ssize_t i = 0; i < slicelen; ++i) { + val = PyIter_Next(iter); + if (!val) { + Py_DECREF(iter); + PyErr_SetString(PyExc_IndexError, "iterable object must have the same length as slice"); + return -1; + } + PyArg_Parse(val, "d", vector_vec(v) + (i * step + 
start)); + Py_DECREF(val); + } + val = PyIter_Next(iter); + Py_DECREF(iter); + if (val) { + Py_DECREF(val); + PyErr_SetString(PyExc_IndexError, "iterable object must have the same length as slice"); + return -1; + } + return 0; +} + static PyObject* mkref(PyObject* self, PyObject* args) { PyObject* pa; PyHocObject* result; @@ -2090,6 +2241,7 @@ static PyObject* setpointer(PyObject* self, PyObject* args) { if (href->type_ != PyHoc::HocScalarPtr) { goto done; } + neuron::container::generic_data_handle* gh{}; if (PyObject_TypeCheck(pp, hocobject_type)) { PyHocObject* hpp = (PyHocObject*) pp; if (hpp->type_ != PyHoc::HocObject) { @@ -2110,14 +2262,14 @@ static PyObject* setpointer(PyObject* self, PyObject* args) { PyErr_SetString(PyExc_TypeError, "Point_process not located in a section"); return NULL; } - ppd = &(prop->dparam[sym->u.rng.index].literal_value()); + gh = &(prop->dparam[sym->u.rng.index]); } else { - ppd = nrnpy_setpointer_helper(name, pp); - if (!ppd) { + gh = nrnpy_setpointer_helper(name, pp); + if (!gh) { goto done; } } - *ppd = href->u.px_; + *gh = neuron::container::generic_data_handle{href->u.px_}; result = Py_None; Py_INCREF(result); } @@ -2481,7 +2633,7 @@ static PyObject* gui_helper_3_helper_(const char* name, Object* obj, int handle_ } else if (hoc_is_pdouble_arg(iiarg)) { PyHocObject* ptr_nrn = (PyHocObject*) hocobj_new(hocobject_type, 0, 0); ptr_nrn->type_ = PyHoc::HocScalarPtr; - ptr_nrn->u.px_ = hoc_pgetarg(iiarg); + ptr_nrn->u.px_ = hoc_hgetarg(iiarg); PyObject* py_ptr = (PyObject*) ptr_nrn; Py_INCREF(py_ptr); PyTuple_SetItem(args, iarg + 3, py_ptr); @@ -2965,7 +3117,6 @@ static PyObject* py_hocobj_mul(PyObject* obj1, PyObject* obj2) { static PyObject* py_hocobj_div(PyObject* obj1, PyObject* obj2) { return py_hocobj_math("div", obj1, obj2); } -static PyMemberDef hocobj_members[] = {{NULL, 0, 0, 0, NULL}}; #include "nrnpy_hoc.h" @@ -3117,21 +3268,31 @@ static char* nrncore_arg(double tstop) { return NULL; } + +static PyType_Spec obj_spec_from_name(const char* name) { + return { + name, + sizeof(PyHocObject), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + nrnpy_HocObjectType_slots, + }; +} + PyObject* nrnpy_hoc() { PyObject* m; + PyObject* bases; + PyTypeObject* pto; + PyType_Spec spec; nrnpy_vec_from_python_p_ = nrnpy_vec_from_python; nrnpy_vec_to_python_p_ = nrnpy_vec_to_python; nrnpy_vec_as_numpy_helper_ = vec_as_numpy_helper; nrnpy_sectionlist_helper_ = sectionlist_helper_; - nrnpy_gui_helper_ = gui_helper_; - nrnpy_gui_helper3_ = gui_helper_3_; - nrnpy_gui_helper3_str_ = gui_helper_3_str_; nrnpy_get_pyobj = nrnpy_get_pyobj_; nrnpy_decref = nrnpy_decref_; nrnpy_nrncore_arg_p_ = nrncore_arg; nrnpy_nrncore_enable_value_p_ = nrncore_enable_value; nrnpy_nrncore_file_mode_value_p_ = nrncore_file_mode_value; - nrnpy_object_to_double_ = object_to_double_; nrnpy_rvp_rxd_to_callable = rvp_rxd_to_callable_; PyLockGIL lock; @@ -3145,13 +3306,27 @@ PyObject* nrnpy_hoc() { m = PyModule_Create(&hocmodule); assert(m); Symbol* s = NULL; - hocobject_type = (PyTypeObject*) PyType_FromSpec(&nrnpy_HocObjectType_spec); + spec = obj_spec_from_name("hoc.HocObject"); + hocobject_type = (PyTypeObject*) PyType_FromSpec(&spec); if (PyType_Ready(hocobject_type) < 0) goto fail; - Py_INCREF(hocobject_type); - // printf("AddObject HocObject\n"); PyModule_AddObject(m, "HocObject", (PyObject*) hocobject_type); + bases = PyTuple_Pack(1, hocobject_type); + Py_INCREF(bases); + for (auto name: py_exposed_classes) { + // TODO: obj_spec_from_name needs a hoc. 
prepended + exposed_py_type_names.push_back(std::string("hoc.") + name); + spec = obj_spec_from_name(exposed_py_type_names.back().c_str()); + pto = (PyTypeObject*) PyType_FromSpecWithBases(&spec, bases); + sym_to_type_map[hoc_lookup(name)] = pto; + type_to_sym_map[pto] = hoc_lookup(name); + if (PyType_Ready(pto) < 0) + goto fail; + PyModule_AddObject(m, name, (PyObject*) pto); + } + Py_DECREF(bases); + topmethdict = PyDict_New(); for (PyMethodDef* meth = toplevel_methods; meth->ml_name != NULL; meth++) { PyObject* descr; @@ -3200,3 +3375,10 @@ PyObject* nrnpy_hoc() { fail: return NULL; } + +void nrnpython_reg_real_nrnpy_hoc_cpp(neuron::python::impl_ptrs* ptrs) { + ptrs->gui_helper = gui_helper_; + ptrs->gui_helper3 = gui_helper_3_; + ptrs->gui_helper3_str = gui_helper_3_str_; + ptrs->object_to_double = object_to_double_; +} diff --git a/src/nrnpython/nrnpy_hoc.h b/src/nrnpython/nrnpy_hoc.h index 547bd3cf78..47a1de6d1f 100644 --- a/src/nrnpython/nrnpy_hoc.h +++ b/src/nrnpython/nrnpy_hoc.h @@ -16,7 +16,9 @@ static PyType_Slot nrnpy_HocObjectType_slots[] = { {Py_tp_doc, (void*) hocobj_docstring}, {Py_nb_bool, (void*) hocobj_nonzero}, {Py_sq_length, (void*) hocobj_len}, + {Py_mp_subscript, (void*) hocobj_slice_getitem}, {Py_sq_item, (void*) hocobj_getitem}, + {Py_mp_ass_subscript, (void*) hocobj_slice_setitem}, {Py_sq_ass_item, (void*) hocobj_setitem}, {Py_nb_add, (PyObject*) py_hocobj_add}, {Py_nb_subtract, (PyObject*) py_hocobj_sub}, @@ -27,13 +29,6 @@ static PyType_Slot nrnpy_HocObjectType_slots[] = { {Py_nb_true_divide, (PyObject*) py_hocobj_div}, {0, 0}, }; -static PyType_Spec nrnpy_HocObjectType_spec = { - "hoc.HocObject", - sizeof(PyHocObject), - 0, - Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, - nrnpy_HocObjectType_slots, -}; static struct PyModuleDef hocmodule = {PyModuleDef_HEAD_INIT, diff --git a/src/nrnpython/nrnpy_nrn.cpp b/src/nrnpython/nrnpy_nrn.cpp index 3150cbc9da..c875edc10e 100644 --- a/src/nrnpython/nrnpy_nrn.cpp +++ b/src/nrnpython/nrnpy_nrn.cpp @@ -4,6 +4,7 @@ #include #include "nrniv_mf.h" #include +#include "nrnpy.h" #include "nrnpy_utils.h" #ifndef M_PI #define M_PI (3.14159265358979323846) @@ -26,16 +27,11 @@ extern void nrn_area_ri(Section* sec); extern void sec_free(hoc_Item*); extern Symlist* hoc_built_in_symlist; extern Section* nrn_noerr_access(); -double* nrnpy_rangepointer(Section*, Symbol*, double, int*, int); extern PyObject* nrn_ptr_richcmp(void* self_ptr, void* other_ptr, int op); extern int has_membrane(char*, Section*); -typedef struct { - PyObject_HEAD - Section* sec_; - char* name_; - PyObject* cell_weakref_; -} NPySecObj; -NPySecObj* newpysechelp(Section* sec); +// used to be static in nrnpy_hoc.cpp +extern int hocobj_pushargs(PyObject*, std::vector&); +extern void hocobj_pushargs_free_strings(std::vector&); typedef struct { PyObject_HEAD @@ -59,13 +55,22 @@ typedef struct { PyObject_HEAD NPySegObj* pyseg_; Prop* prop_; + // Following cannot be initialized when NPyMechObj allocated by Python. See new_pymechobj + // wrapper. 
+ neuron::container::non_owning_identifier_without_container prop_id_; + int type_; +} NPyMechObj; + +typedef struct { + PyObject_HEAD + NPyMechObj* pymech_; } NPyMechOfSegIter; typedef struct { PyObject_HEAD - NPySegObj* pyseg_; - Prop* prop_; -} NPyMechObj; + NPyMechObj* pymech_; + NPyDirectMechFunc* f_; +} NPyMechFunc; typedef struct { PyObject_HEAD @@ -94,6 +99,7 @@ static PyTypeObject* pseg_of_sec_iter_type; static PyTypeObject* psegment_type; static PyTypeObject* pmech_of_seg_iter_generic_type; static PyTypeObject* pmech_generic_type; +static PyTypeObject* pmechfunc_generic_type; static PyTypeObject* pvar_of_mech_iter_generic_type; static PyTypeObject* range_type; @@ -113,15 +119,7 @@ extern void nrn_diam_change(Section*); extern void nrn_length_change(Section*, double); extern void mech_insert1(Section*, int); extern void mech_uninsert1(Section*, Symbol*); -extern "C" PyObject* nrn_hocobj_ptr(double*); -extern int nrn_is_hocobj_ptr(PyObject*, double*&); extern PyObject* nrnpy_forall(PyObject* self, PyObject* args); -extern Object* nrnpy_po2ho(PyObject*); -extern Object* nrnpy_pyobject_in_obj(PyObject*); -extern Symbol* nrnpy_pyobj_sym_; -extern int nrnpy_ho_eq_po(Object*, PyObject*); -extern PyObject* nrnpy_hoc2pyobject(Object*); -extern PyObject* nrnpy_ho2po(Object*); static void nrnpy_reg_mech(int); extern void (*nrnpy_reg_mech_p_)(int); static int ob_is_seg(Object*); @@ -147,6 +145,10 @@ void nrnpy_sec_referr() { PyErr_SetString(PyExc_ReferenceError, "can't access a deleted section"); } +void nrnpy_prop_referr() { + PyErr_SetString(PyExc_ReferenceError, "mechanism instance is invalid"); +} + static char* pysec_name(Section* sec) { static char buf[512]; if (sec->prop) { @@ -257,12 +259,46 @@ static void NPyRangeVar_dealloc(NPyRangeVar* self) { static void NPyMechObj_dealloc(NPyMechObj* self) { // printf("NPyMechObj_dealloc %p %s\n", self, self->ob_type->tp_name); Py_XDECREF(self->pyseg_); + // Must manually call destructor since it was manually constructed in new_pymechobj wrapper + self->prop_id_.~non_owning_identifier_without_container(); + ((PyObject*) self)->ob_type->tp_free((PyObject*) self); +} + +static NPyMechObj* new_pymechobj() { + NPyMechObj* m = PyObject_New(NPyMechObj, pmech_generic_type); + if (m) { + // Use "placement new" idiom since Python C allocation cannot call the initializer to start + // it as "null". So later `a = b` might segfault because copy constructor decrements the + // refcount of `a`s nonsense memory. 
+ new (&m->prop_id_) neuron::container::non_owning_identifier_without_container; + } + + return m; +} + +// Only call if p is valid +static NPyMechObj* new_pymechobj(NPySegObj* pyseg, Prop* p) { + NPyMechObj* m = new_pymechobj(); + if (!m) { + return NULL; + } + m->pyseg_ = pyseg; + Py_INCREF(m->pyseg_); + m->prop_ = p; + m->prop_id_ = p->id(); + m->type_ = p->_type; + return m; +} + +static void NPyMechFunc_dealloc(NPyMechFunc* self) { + // printf("NPyMechFunc_dealloc %p %s\n", self, self->ob_type->tp_name); + Py_XDECREF(self->pymech_); ((PyObject*) self)->ob_type->tp_free((PyObject*) self); } static void NPyMechOfSegIter_dealloc(NPyMechOfSegIter* self) { // printf("NPyMechOfSegIter_dealloc %p %s\n", self, self->ob_type->tp_name); - Py_XDECREF(self->pyseg_); + Py_XDECREF(self->pymech_); ((PyObject*) self)->ob_type->tp_free((PyObject*) self); } @@ -416,6 +452,7 @@ static PyObject* NPyMechObj_new(PyTypeObject* type, PyObject* args, PyObject* kw // printf("NPyMechObj_new %p %s\n", self, // ((PyObject*)self)->ob_type->tp_name); if (self != NULL) { + new (self) NPyMechObj; self->pyseg_ = pyseg; Py_INCREF(self->pyseg_); } @@ -1081,24 +1118,64 @@ static PyObject* pysec_same(NPySecObj* self, PyObject* args) { } static PyObject* NPyMechObj_name(NPyMechObj* self) { - CHECK_SEC_INVALID(self->pyseg_->pysec_->sec_); - PyObject* result = NULL; - if (self->prop_) { - result = PyString_FromString(memb_func[self->prop_->_type].sym->name); + std::string s = memb_func[self->type_].sym->name; + if (!self->prop_id_) { + Section* sec = self->pyseg_->pysec_->sec_; + if (!sec || !sec->prop) { + s = ""; // legacy message + } else { + s = ""; + } } + PyObject* result = PyString_FromString(s.c_str()); + return result; +} + +static PyObject* NPyMechFunc_name(NPyMechFunc* self) { + PyObject* result = NULL; + std::string s = memb_func[self->pymech_->type_].sym->name; + s += "."; + s += self->f_->name; + result = PyString_FromString(s.c_str()); + return result; +} + +static PyObject* NPyMechFunc_call(NPyMechFunc* self, PyObject* args) { + CHECK_PROP_INVALID(self->pymech_->prop_id_); + PyObject* result = NULL; + auto pyseg = self->pymech_->pyseg_; + auto& f = self->f_->func; + + // patterning after fcall + Symbol sym{}; // in case of error, need the name. 
+ sym.name = (char*) self->f_->name; + std::vector strings_to_free; + int narg = hocobj_pushargs(args, strings_to_free); + hoc_push_frame(&sym, narg); // get_argument uses the current frame + try { + double x = (f) (self->pymech_->prop_); + result = Py_BuildValue("d", x); + } catch (std::exception const& e) { + std::ostringstream oss; + oss << "mechanism.function call error: " << e.what(); + PyErr_SetString(PyExc_RuntimeError, oss.str().c_str()); + } + hoc_pop_frame(); + hocobj_pushargs_free_strings(strings_to_free); + return result; } static PyObject* NPyMechObj_is_ion(NPyMechObj* self) { - CHECK_SEC_INVALID(self->pyseg_->pysec_->sec_); - if (self->prop_ && nrn_is_ion(self->prop_->_type)) { + CHECK_PROP_INVALID(self->prop_id_); + if (nrn_is_ion(self->type_)) { Py_RETURN_TRUE; } Py_RETURN_FALSE; } static PyObject* NPyMechObj_segment(NPyMechObj* self) { - CHECK_SEC_INVALID(self->pyseg_->pysec_->sec_); + CHECK_PROP_INVALID(self->prop_id_); PyObject* result = NULL; if (self->pyseg_) { result = (PyObject*) (self->pyseg_); @@ -1107,13 +1184,24 @@ static PyObject* NPyMechObj_segment(NPyMechObj* self) { return result; } +static PyObject* NPyMechFunc_mech(NPyMechFunc* self) { + PyObject* result = NULL; + if (self->pymech_) { + CHECK_PROP_INVALID(self->pymech_->prop_id_); + result = (PyObject*) (self->pymech_); + Py_INCREF(result); + } + return result; +} + static PyObject* pymech_repr(PyObject* p) { NPyMechObj* pymech = (NPyMechObj*) p; - Section* sec = pymech->pyseg_->pysec_->sec_; - if (sec && sec->prop) { - return NPyMechObj_name(pymech); - } - return PyString_FromString(""); + return NPyMechObj_name(pymech); +} + +static PyObject* pymechfunc_repr(PyObject* p) { + NPyMechFunc* pyfunc = (NPyMechFunc*) p; + return NPyMechFunc_name(pyfunc); } static PyObject* NPyRangeVar_name(NPyRangeVar* self) { @@ -1476,7 +1564,7 @@ static PyObject* seg_volume(NPySegObj* self) { Node* nd = node_exact(sec, x); for (Prop* p = nd->prop; p; p = p->next) { if (p->_type == MORPHOLOGY) { - double diam = p->param[0]; + double diam = p->param(0); a = M_PI * diam * diam / 4 * length; break; } @@ -1511,6 +1599,7 @@ static Prop* mech_of_segment_prop(Prop* p) { break; } // printf("segment_iter %d %s\n", p->_type, memb_func[p->_type].sym->name); + // Only return density mechanisms (skip POINT_PROCESS) if (PyDict_GetItemString(pmech_types, memb_func[p->_type].sym->name)) { // printf("segment_iter found\n"); break; @@ -1529,11 +1618,17 @@ static PyObject* mech_of_segment_iter(NPySegObj* self) { // printf("mech_of_segment_iter\n"); Node* nd = node_exact(sec, self->x_); Prop* p = mech_of_segment_prop(nd->prop); - NPyMechOfSegIter* m = PyObject_New(NPyMechOfSegIter, pmech_of_seg_iter_generic_type); - m->pyseg_ = self; - Py_INCREF(m->pyseg_); - m->prop_ = p; - return (PyObject*) m; + NPyMechOfSegIter* mi = PyObject_New(NPyMechOfSegIter, pmech_of_seg_iter_generic_type); + if (!mi) { + return NULL; + } + NPyMechObj* m = new_pymechobj(self, p); + if (!m) { + Py_XDECREF(mi); + return NULL; + } + mi->pymech_ = m; + return (PyObject*) mi; } static Object* seg_from_sec_x(Section* sec, double x) { @@ -1588,7 +1683,10 @@ static void rv_noexist(Section* sec, const char* n, double x, int err) { static NPyRangeVar* rvnew(Symbol* sym, NPySecObj* sec, double x) { NPyRangeVar* r = PyObject_New(NPyRangeVar, range_type); - r->pymech_ = PyObject_New(NPyMechObj, pmech_generic_type); + if (!r) { + return NULL; + } + r->pymech_ = new_pymechobj(); r->pymech_->pyseg_ = PyObject_New(NPySegObj, psegment_type); r->pymech_->pyseg_->pysec_ = sec; 
Py_INCREF(sec); @@ -1744,18 +1842,22 @@ static int section_setattro(NPySecObj* self, PyObject* pyname, PyObject* value) static PyObject* mech_of_seg_next(NPyMechOfSegIter* self) { // printf("mech_of_seg_next\n"); - Prop* p = mech_of_segment_prop(self->prop_); - NPyMechObj* m = NULL; - if (p) { - m = PyObject_New(NPyMechObj, pmech_generic_type); + // The return on this iteration is self->pymech_. NULL means it's over. + NPyMechObj* m = self->pymech_; + if (!m) { + return NULL; } - if (m == NULL) { + if (!m->prop_id_) { + PyErr_SetString(PyExc_ReferenceError, + "mechanism instance became invalid in middle of the mechanism iterator"); return NULL; } - m->pyseg_ = self->pyseg_; - Py_INCREF(m->pyseg_); - m->prop_ = p; - self->prop_ = p->next; + Prop* pnext = mech_of_segment_prop(m->prop_->next); + NPyMechObj* mnext{}; + if (pnext) { + mnext = new_pymechobj(m->pyseg_, pnext); + } + self->pymech_ = mnext; return (PyObject*) m; } @@ -1825,21 +1927,19 @@ static PyObject* segment_getattro(NPySegObj* self, PyObject* pyname) { rv_noexist(sec, n, self->x_, 1); result = NULL; } else { - NPyMechObj* m = PyObject_New(NPyMechObj, pmech_generic_type); - if (m == NULL) { - result = NULL; - } else { - m->pyseg_ = self; - m->prop_ = p; - Py_INCREF(m->pyseg_); - result = (PyObject*) m; - } + result = (PyObject*) new_pymechobj(self, p); } } else if ((rv = PyDict_GetItemString(rangevars_, n)) != NULL) { sym = ((NPyRangeVar*) rv)->sym_; - if (ISARRAY(sym)) { + if (sym->type == RANGEOBJ) { + int mtype = sym->u.rng.type; + Node* nd = node_exact(sec, self->x_); + Prop* p = nrn_mechanism(mtype, nd); + Object* ob = nrn_nmodlrandom_wrap(p, sym); + result = nrnpy_ho2po(ob); + } else if (ISARRAY(sym)) { NPyRangeVar* r = PyObject_New(NPyRangeVar, range_type); - r->pymech_ = PyObject_New(NPyMechObj, pmech_generic_type); + r->pymech_ = new_pymechobj(); r->pymech_->pyseg_ = self; Py_INCREF(r->pymech_->pyseg_); r->sym_ = sym; @@ -1862,12 +1962,12 @@ static PyObject* segment_getattro(NPySegObj* self, PyObject* pyname) { } else if (strncmp(n, "_ref_", 5) == 0) { if (strcmp(n + 5, "v") == 0) { Node* nd = node_exact(sec, self->x_); - result = nrn_hocobj_ptr(&(NODEV(nd))); + result = nrn_hocobj_handle(nd->v_handle()); } else if ((sym = hoc_table_lookup(n + 5, hoc_built_in_symlist)) != 0 && sym->type == RANGEVAR) { if (ISARRAY(sym)) { NPyRangeVar* r = PyObject_New(NPyRangeVar, range_type); - r->pymech_ = PyObject_New(NPyMechObj, pmech_generic_type); + r->pymech_ = new_pymechobj(); r->pymech_->pyseg_ = self; Py_INCREF(self); r->sym_ = sym; @@ -1881,7 +1981,7 @@ static PyObject* segment_getattro(NPySegObj* self, PyObject* pyname) { rv_noexist(sec, n + 5, self->x_, err); result = NULL; } else { - result = nrn_hocobj_ptr(d); + result = nrn_hocobj_handle(d); } } } else { @@ -1914,11 +2014,13 @@ static PyObject* segment_getattro(NPySegObj* self, PyObject* pyname) { int nrn_pointer_assign(Prop* prop, Symbol* sym, PyObject* value) { int err = 0; if (sym->subtype == NRNPOINTER) { - double* pd; - double** ppd = &prop->dparam[sym->u.rng.index].literal_value(); - assert(ppd); - if (nrn_is_hocobj_ptr(value, pd)) { - *ppd = pd; + if (neuron::container::data_handle dh{}; nrn_is_hocobj_ptr(value, dh)) { + // The challenge is that we need to store a data handle here, + // because POINTER variables are set up before the data are + // permuted, but that handle then gets read as part of the + // translated mechanism code, inside the translated MOD files, where + // we might not otherwise like to pay the extra cost of indirection. 
+ prop->dparam[sym->u.rng.index] = std::move(dh); } else { PyErr_SetString(PyExc_ValueError, "must be a hoc pointer"); err = -1; @@ -2031,7 +2133,7 @@ static PyObject* mech_getattro(NPyMechObj* self, PyObject* pyname) { PyErr_SetString(PyExc_ReferenceError, "nrn.Mechanism can't access a deleted section"); return NULL; } - + CHECK_PROP_INVALID(self->prop_id_); Py_INCREF(pyname); Py2NRNString name(pyname); char* n = name.c_str(); @@ -2054,27 +2156,29 @@ static PyObject* mech_getattro(NPyMechObj* self, PyObject* pyname) { std::snprintf(buf, bufsz, "%s_%s", isptr ? n + 5 : n, mname); } Symbol* sym = np.find(buf); - if (sym) { + if (sym && sym->type == RANGEVAR) { // printf("mech_getattro sym %s\n", sym->name); if (ISARRAY(sym)) { NPyRangeVar* r = PyObject_New(NPyRangeVar, range_type); - r->pymech_ = PyObject_New(NPyMechObj, pmech_generic_type); - r->pymech_->pyseg_ = self->pyseg_; - Py_INCREF(self->pyseg_); + r->pymech_ = self; + Py_INCREF(self); r->sym_ = sym; r->isptr_ = isptr; r->attr_from_sec_ = 0; result = (PyObject*) r; } else { - double* px = np.prop_pval(sym, 0); + auto const px = np.prop_pval(sym, 0); if (!px) { rv_noexist(sec, sym->name, self->pyseg_->x_, 2); } else if (isptr) { - result = nrn_hocobj_ptr(px); + result = nrn_hocobj_handle(px); } else { result = Py_BuildValue("d", *px); } } + } else if (sym && sym->type == RANGEOBJ) { + Object* ob = nrn_nmodlrandom_wrap(self->prop_, sym); + result = nrnpy_ho2po(ob); } else if (strcmp(n, "__dict__") == 0) { result = PyDict_New(); for (Symbol* s = np.first_var(); np.more_var(); s = np.next_var()) { @@ -2084,8 +2188,28 @@ static PyObject* mech_getattro(NPyMechObj* self, PyObject* pyname) { int err = PyDict_SetItemString(result, buf, Py_None); assert(err == 0); } + // FUNCTION and PROCEDURE + for (auto& it: nrn_mech2funcs_map[self->prop_->_type]) { + int err = PyDict_SetItemString(result, it.first.c_str(), Py_None); + assert(err == 0); + } } else { - result = PyObject_GenericGetAttr((PyObject*) self, pyname); + bool found_func{false}; + if (self->prop_) { + auto& funcs = nrn_mech2funcs_map[self->prop_->_type]; + if (funcs.count(n)) { + found_func = true; + auto& f = funcs[n]; + NPyMechFunc* pymf = PyObject_New(NPyMechFunc, pmechfunc_generic_type); + pymf->pymech_ = self; + Py_INCREF(self); + pymf->f_ = f; + result = (PyObject*) pymf; + } + } + if (!found_func) { + result = PyObject_GenericGetAttr((PyObject*) self, pyname); + } } Py_DECREF(pyname); delete[] buf; @@ -2127,7 +2251,7 @@ static int mech_setattro(NPyMechObj* self, PyObject* pyname, PyObject* value) { err = nrn_pointer_assign(self->prop_, sym, value); } else { double x; - double* pd = np.prop_pval(sym, 0); + auto pd = np.prop_pval(sym, 0); if (pd) { if (PyArg_Parse(value, "d", &x) == 1) { *pd = x; @@ -2147,7 +2271,7 @@ static int mech_setattro(NPyMechObj* self, PyObject* pyname, PyObject* value) { return err; } -double** nrnpy_setpointer_helper(PyObject* pyname, PyObject* mech) { +neuron::container::generic_data_handle* nrnpy_setpointer_helper(PyObject* pyname, PyObject* mech) { if (PyObject_TypeCheck(mech, pmech_generic_type) == 0) { return nullptr; } @@ -2164,7 +2288,7 @@ double** nrnpy_setpointer_helper(PyObject* pyname, PyObject* mech) { if (!sym || sym->type != RANGEVAR || sym->subtype != NRNPOINTER) { return nullptr; } - return &m->prop_->dparam[np.prop_index(sym)].literal_value(); + return &(m->prop_->dparam[np.prop_index(sym)]); } static PyObject* NPySecObj_call(NPySecObj* self, PyObject* args) { @@ -2217,7 +2341,7 @@ static PyObject* rv_getitem(PyObject* self, Py_ssize_t 
ix) { return NULL; } if (r->isptr_) { - result = nrn_hocobj_ptr(d); + result = nrn_hocobj_handle(d); } else { result = Py_BuildValue("d", *d); } @@ -2236,7 +2360,7 @@ static int rv_setitem(PyObject* self, Py_ssize_t ix, PyObject* value) { return -1; } int err; - auto const d = nrnpy_rangepointer(sec, r->sym_, r->pymech_->pyseg_->x_, &err, 0 /* idx */); + auto const d = nrnpy_rangepointer(sec, r->sym_, r->pymech_->pyseg_->x_, &err, ix); if (!d) { rv_noexist(sec, r->sym_->name, r->pymech_->pyseg_->x_, err); return -1; @@ -2251,10 +2375,12 @@ static int rv_setitem(PyObject* self, Py_ssize_t ix, PyObject* value) { } hoc_pushx(double(ix)); hoc_push_ndim(1); - nrn_rangeconst(r->pymech_->pyseg_->pysec_->sec_, r->sym_, &x, 0); + nrn_rangeconst(r->pymech_->pyseg_->pysec_->sec_, + r->sym_, + neuron::container::data_handle{neuron::container::do_not_search, &x}, + 0); } else { - assert(ix == 0); // d += ix; - if (!PyArg_Parse(value, "d", d)) { + if (!PyArg_Parse(value, "d", static_cast(d))) { PyErr_SetString(PyExc_ValueError, "bad value"); return -1; } @@ -2455,6 +2581,14 @@ static PyMethodDef NPyMechObj_methods[] = { "Returns the segment of the Mechanism instance"}, {NULL}}; +static PyMethodDef NPyMechFunc_methods[] = { + {"name", (PyCFunction) NPyMechFunc_name, METH_NOARGS, "Mechanism function"}, + {"mech", + (PyCFunction) NPyMechFunc_mech, + METH_NOARGS, + "Returns the Mechanism for this instance"}, + {NULL}}; + static PyMethodDef NPyRangeVar_methods[] = { {"name", (PyCFunction) NPyRangeVar_name, METH_NOARGS, "Range variable name name"}, {"mech", @@ -2489,7 +2623,7 @@ static PyMethodDef nrnpy_methods[] = { static PyObject* nrnmodule_; static void rangevars_add(Symbol* sym) { - assert(sym && sym->type == RANGEVAR); + assert(sym && (sym->type == RANGEVAR || sym->type == RANGEOBJ)); NPyRangeVar* r = PyObject_New(NPyRangeVar, range_type); // printf("%s\n", sym->name); r->sym_ = sym; @@ -2555,21 +2689,27 @@ PyObject* nrnpy_nrn(void) { PyModule_AddObject(m, "Segment", (PyObject*) psegment_type); pmech_generic_type = (PyTypeObject*) PyType_FromSpec(&nrnpy_MechanismType_spec); + pmechfunc_generic_type = (PyTypeObject*) PyType_FromSpec(&nrnpy_MechFuncType_spec); pmech_of_seg_iter_generic_type = (PyTypeObject*) PyType_FromSpec(&nrnpy_MechOfSegIterType_spec); pvar_of_mech_iter_generic_type = (PyTypeObject*) PyType_FromSpec(&nrnpy_VarOfMechIterType_spec); pmech_generic_type->tp_new = PyType_GenericNew; + pmechfunc_generic_type->tp_new = PyType_GenericNew; pmech_of_seg_iter_generic_type->tp_new = PyType_GenericNew; pvar_of_mech_iter_generic_type->tp_new = PyType_GenericNew; if (PyType_Ready(pmech_generic_type) < 0) goto fail; + if (PyType_Ready(pmechfunc_generic_type) < 0) + goto fail; if (PyType_Ready(pmech_of_seg_iter_generic_type) < 0) goto fail; if (PyType_Ready(pvar_of_mech_iter_generic_type) < 0) goto fail; Py_INCREF(pmech_generic_type); + Py_INCREF(pmechfunc_generic_type); Py_INCREF(pmech_of_seg_iter_generic_type); Py_INCREF(pvar_of_mech_iter_generic_type); PyModule_AddObject(m, "Mechanism", (PyObject*) pmech_generic_type); + PyModule_AddObject(m, "MechFunc", (PyObject*) pmechfunc_generic_type); PyModule_AddObject(m, "MechOfSegIterator", (PyObject*) pmech_of_seg_iter_generic_type); PyModule_AddObject(m, "VarOfMechIterator", (PyObject*) pvar_of_mech_iter_generic_type); remake_pmech_types(); @@ -2610,27 +2750,23 @@ void remake_pmech_types() { void nrnpy_reg_mech(int type) { int i; char* s; - Memb_func* mf = memb_func + type; + Memb_func& mf = memb_func[type]; if (!nrnmodule_) { return; } - if 
(mf->is_point) { + if (mf.is_point) { if (nrn_is_artificial_[type] == 0) { Symlist* sl = nrn_pnt_template_[type]->symtable; Symbol* s = hoc_table_lookup("get_segment", sl); if (!s) { s = hoc_install("get_segment", OBFUNCTION, 0, &sl); s->cpublic = 1; -#if MAC - s->u.u_proc->defn.pfo = (Object * *(*) (...)) pp_get_segment; -#else s->u.u_proc->defn.pfo = (Object * *(*) ()) pp_get_segment; -#endif } } return; } - s = mf->sym->name; + s = mf.sym->name; // printf("nrnpy_reg_mech %s %d\n", s, type); if (PyDict_GetItemString(pmech_types, s)) { hoc_execerror(s, "mechanism already exists"); @@ -2638,8 +2774,8 @@ void nrnpy_reg_mech(int type) { Py_INCREF(pmech_generic_type); PyModule_AddObject(nrnmodule_, s, (PyObject*) pmech_generic_type); PyDict_SetItemString(pmech_types, s, Py_BuildValue("i", type)); - for (i = 0; i < mf->sym->s_varn; ++i) { - Symbol* sym = mf->sym->u.ppsym[i]; + for (i = 0; i < mf.sym->s_varn; ++i) { + Symbol* sym = mf.sym->u.ppsym[i]; rangevars_add(sym); } } diff --git a/src/nrnpython/nrnpy_nrn.h b/src/nrnpython/nrnpy_nrn.h index d7ea61ee80..1153eed2cc 100644 --- a/src/nrnpython/nrnpy_nrn.h +++ b/src/nrnpython/nrnpy_nrn.h @@ -115,6 +115,22 @@ static PyType_Spec nrnpy_MechanismType_spec = { nrnpy_MechanismType_slots, }; +static PyType_Slot nrnpy_MechFuncType_slots[] = { + {Py_tp_dealloc, (void*) NPyMechFunc_dealloc}, + {Py_tp_repr, (void*) pymechfunc_repr}, + {Py_tp_methods, (void*) NPyMechFunc_methods}, + {Py_tp_call, (void*) NPyMechFunc_call}, + {Py_tp_doc, (void*) "Mechanism Function"}, + {0, 0}, +}; +static PyType_Spec nrnpy_MechFuncType_spec = { + "nrn.MechFunc", + sizeof(NPyMechFunc), + 0, + Py_TPFLAGS_DEFAULT | Py_TPFLAGS_BASETYPE, + nrnpy_MechFuncType_slots, +}; + static PyType_Slot nrnpy_VarOfMechIterType_slots[] = { {Py_tp_dealloc, (void*) NPyVarOfMechIter_dealloc}, {Py_tp_iter, (void*) PyObject_SelfIter}, diff --git a/src/nrnpython/nrnpy_p2h.cpp b/src/nrnpython/nrnpy_p2h.cpp index 2f4146ccf9..74a05c649b 100644 --- a/src/nrnpython/nrnpy_p2h.cpp +++ b/src/nrnpython/nrnpy_p2h.cpp @@ -6,105 +6,36 @@ #include #include #include +#include "nrnpy.h" #include "nrnpy_utils.h" +#include "oc_ansi.h" #include "parse.hpp" -extern void hoc_nopop(); -extern void hoc_pop_defer(); -extern Object* hoc_new_object(Symbol*, void*); -extern int hoc_stack_type(); -extern char** hoc_strpop(); -extern Object** hoc_objpop(); -extern void hoc_tobj_unref(Object**); -extern int hoc_ipop(); -PyObject* nrnpy_hoc2pyobject(Object*); -PyObject* hocobj_call_arg(int); -Object* nrnpy_pyobject_in_obj(PyObject*); -int nrnpy_ho_eq_po(Object*, PyObject*); -char* nrnpyerr_str(); -extern void* (*nrnpy_save_thread)(); -extern void (*nrnpy_restore_thread)(void*); -static void* save_thread() { - return PyEval_SaveThread(); -} -static void restore_thread(void* g) { - PyEval_RestoreThread((PyThreadState*) g); -} -extern Symbol* nrnpy_pyobj_sym_; -extern void (*nrnpy_py2n_component)(Object*, Symbol*, int, int); -extern void (*nrnpy_hpoasgn)(Object*, int); -extern double (*nrnpy_praxis_efun)(Object*, Object*); -extern int (*nrnpy_hoccommand_exec)(Object*); -extern int (*nrnpy_hoccommand_exec_strret)(Object*, char*, int); -extern void (*nrnpy_cmdtool)(Object*, int, double, double, int); -extern double (*nrnpy_func_call)(Object*, int, int*); -extern Object* (*nrnpy_callable_with_args)(Object*, int); -extern double (*nrnpy_guigetval)(Object*); -extern void (*nrnpy_guisetval)(Object*, double); -extern int (*nrnpy_guigetstr)(Object*, char**); -extern char* (*nrnpy_po2pickle)(Object*, size_t* size); -extern Object* 
(*nrnpy_pickle2po)(char*, size_t size); -extern char* (*nrnpy_callpicklef)(char*, size_t size, int narg, size_t* retsize); -extern int (*nrnpy_pysame)(Object*, Object*); // contain same Python object -extern Object* (*nrnpympi_alltoall_type)(int, int); -extern Object* (*nrnpy_p_po2ho)(PyObject*); -extern PyObject* (*nrnpy_p_ho2po)(Object*); -typedef struct { - PyObject_HEAD - Section* sec_; - char* name_; - PyObject* cell_; -} NPySecObj; -extern NPySecObj* newpysechelp(Section* sec); -extern void (*nrnpy_call_python_with_section)(Object*, Section*); -extern "C" void nrnpython_reg_real(); -PyObject* nrnpy_ho2po(Object*); -void nrnpy_decref_defer(PyObject*); -PyObject* nrnpy_pyCallObject(PyObject*, PyObject*); - -Object* nrnpy_po2ho(PyObject*); -static void py2n_component(Object*, Symbol*, int, int); -static void hpoasgn(Object*, int); -static double praxis_efun(Object*, Object*); -static int hoccommand_exec(Object*); -static int hoccommand_exec_strret(Object*, char*, int); -static void grphcmdtool(Object*, int, double, double, int); -static double func_call(Object*, int, int*); -static Object* callable_with_args(Object*, int); -static double guigetval(Object*); -static void guisetval(Object*, double); -static int guigetstr(Object*, char**); -static char* po2pickle(Object*, size_t* size); -static Object* pickle2po(char*, size_t size); -static char* call_picklef(char*, size_t size, int narg, size_t* retsize); -static Object* py_alltoall_type(int, int); -static int pysame(Object*, Object*); +static void nrnpy_decref_defer(PyObject*); +static char* nrnpyerr_str(); +static PyObject* nrnpy_pyCallObject(PyObject*, PyObject*); static PyObject* main_module; static PyObject* main_namespace; static hoc_List* dlist; -#if NRNPYTHON_DYNAMICLOAD -extern int nrnpy_site_problem; -extern int* nrnpy_site_problem_p; -#endif -class Py2Nrn { - public: - Py2Nrn(); - virtual ~Py2Nrn(); - int type_; // 0 toplevel - PyObject* po_; +struct Py2Nrn final { + ~Py2Nrn() { + PyLockGIL lock{}; + Py_XDECREF(po_); + } + int type_{}; // 0 toplevel + PyObject* po_{}; }; static void* p_cons(Object*) { - Py2Nrn* p = new Py2Nrn(); - return p; + return new Py2Nrn{}; } static void p_destruct(void* v) { - delete (Py2Nrn*) v; + delete static_cast(v); } -Member_func p_members[] = {{0, 0}}; +Member_func p_members[] = {{nullptr, nullptr}}; static void call_python_with_section(Object* pyact, Section* sec) { PyObject* po = ((Py2Nrn*) pyact->u.this_pointer)->po_; @@ -128,7 +59,6 @@ static void call_python_with_section(Object* pyact, Section* sec) { } } -extern void* (*nrnpy_opaque_obj2pyobj_p_)(Object*); static void* opaque_obj2pyobj(Object* ho) { assert(ho && ho->ctemplate->sym == nrnpy_pyobj_sym_); PyObject* po = ((Py2Nrn*) ho->u.this_pointer)->po_; @@ -136,52 +66,6 @@ static void* opaque_obj2pyobj(Object* ho) { return po; } -extern "C" void nrnpython_reg_real() { - // printf("nrnpython_reg_real()\n"); - class2oc("PythonObject", p_cons, p_destruct, p_members, NULL, NULL, NULL); - Symbol* s = hoc_lookup("PythonObject"); - assert(s); - nrnpy_pyobj_sym_ = s; - nrnpy_py2n_component = py2n_component; - nrnpy_call_python_with_section = call_python_with_section; - nrnpy_hpoasgn = hpoasgn; - nrnpy_praxis_efun = praxis_efun; - nrnpy_hoccommand_exec = hoccommand_exec; - nrnpy_hoccommand_exec_strret = hoccommand_exec_strret; - nrnpy_cmdtool = grphcmdtool; - nrnpy_func_call = func_call; - nrnpy_callable_with_args = callable_with_args; - nrnpy_guigetval = guigetval; - nrnpy_guisetval = guisetval; - nrnpy_guigetstr = guigetstr; - nrnpy_po2pickle = 
po2pickle; - nrnpy_pickle2po = pickle2po; - nrnpy_callpicklef = call_picklef; - nrnpympi_alltoall_type = py_alltoall_type; - nrnpy_pysame = pysame; - nrnpy_save_thread = save_thread; - nrnpy_restore_thread = restore_thread; - nrnpy_opaque_obj2pyobj_p_ = opaque_obj2pyobj; - nrnpy_p_ho2po = nrnpy_ho2po; - nrnpy_p_po2ho = nrnpy_po2ho; - dlist = hoc_l_newlist(); -#if NRNPYTHON_DYNAMICLOAD - nrnpy_site_problem_p = &nrnpy_site_problem; -#endif -} - - -Py2Nrn::Py2Nrn() { - po_ = NULL; - type_ = 0; - // printf("Py2Nrn() %p\n", this); -} -Py2Nrn::~Py2Nrn() { - PyLockGIL lock; - Py_XDECREF(po_); - // printf("~Py2Nrn() %p\n", this); -} - int nrnpy_ho_eq_po(Object* ho, PyObject* po) { if (ho->ctemplate->sym == nrnpy_pyobj_sym_) { return ((Py2Nrn*) ho->u.this_pointer)->po_ == po; @@ -189,7 +73,8 @@ int nrnpy_ho_eq_po(Object* ho, PyObject* po) { return 0; } -int pysame(Object* o1, Object* o2) { +// contain same Python object +static int pysame(Object* o1, Object* o2) { if (o2->ctemplate->sym == nrnpy_pyobj_sym_) { return nrnpy_ho_eq_po(o1, ((Py2Nrn*) o2->u.this_pointer)->po_); } @@ -220,11 +105,12 @@ Object* nrnpy_pyobject_in_obj(PyObject* po) { return on; } -PyObject* nrnpy_pyCallObject(PyObject* callable, PyObject* args) { +static PyObject* nrnpy_pyCallObject(PyObject* callable, PyObject* args) { // When hoc calls a PythonObject method, then in case python // calls something back in hoc, the hoc interpreter must be // at the top level - HocTopContextSet PyObject* p = PyObject_CallObject(callable, args); + HocTopContextSet + PyObject* p = PyObject_CallObject(callable, args); #if 0 printf("PyObject_CallObject callable\n"); PyObject_Print(callable, stdout, 0); @@ -233,29 +119,29 @@ PyObject_Print(args, stdout, 0); printf("\nreturn %p\n", p); #endif HocContextRestore - // It would be nice to handle the error here, ending with a hoc_execerror - // for any Exception (note, that does not include SystemExit). However - // since many, but not all, of the callers need to clean up and - // release the GIL, errors get handled by the caller or higher up. - // The almost generic idiom is: - /** - if (!p) { - char* mes = nrnpyerr_str(); - if (mes) { - Fprintf(stderr, "%s\n", mes); - free(mes); - hoc_execerror("Call of Python Callable failed", NULL); - } - if (PyErr_Occurred()) { - PyErr_Print(); // Python process will exit with the error code specified by the - SystemExit instance. - } - } - **/ - return p; + // It would be nice to handle the error here, ending with a hoc_execerror + // for any Exception (note, that does not include SystemExit). However + // since many, but not all, of the callers need to clean up and + // release the GIL, errors get handled by the caller or higher up. + // The almost generic idiom is: + /** + if (!p) { + char* mes = nrnpyerr_str(); + if (mes) { + Fprintf(stderr, "%s\n", mes); + free(mes); + hoc_execerror("Call of Python Callable failed", NULL); + } + if (PyErr_Occurred()) { + PyErr_Print(); // Python process will exit with the error code specified by the + SystemExit instance. 
+ } + } + **/ + return p; } -void py2n_component(Object* ob, Symbol* sym, int nindex, int isfunc) { +static void py2n_component(Object* ob, Symbol* sym, int nindex, int isfunc) { #if 0 if (isfunc) { printf("py2n_component %s.%s(%d)\n", hoc_object_name(ob), sym->name, nindex); @@ -295,7 +181,13 @@ void py2n_component(Object* ob, Symbol* sym, int nindex, int isfunc) { if (isfunc) { args = PyTuple_New(nindex); for (i = 0; i < nindex; ++i) { - PyObject* arg = nrnpy_hoc_pop(); + PyObject* arg = nrnpy_hoc_pop("isfunc py2n_component"); + if (!arg) { + PyErr2NRNString e; + e.get_pyerr(); + Py_DECREF(args); + hoc_execerr_ext("arg %d error: %s", i, e.c_str()); + } // PyObject_Print(arg, stdout, 0); // printf(" %d arg %d\n", arg->ob_refcnt, i); if (PyTuple_SetItem(args, nindex - 1 - i, arg)) { @@ -334,7 +226,11 @@ void py2n_component(Object* ob, Symbol* sym, int nindex, int isfunc) { if (hoc_stack_type() == NUMBER) { arg = Py_BuildValue("l", (long) hoc_xpop()); } else { - arg = nrnpy_hoc_pop(); + // I don't think it is syntactically possible + // for this to be a VAR. It is possible for it to + // be an Object but the GetItem below will raise + // TypeError: list indices must be integers or slices, not hoc.HocObject + arg = nrnpy_hoc_pop("nindex py2n_component"); } result = PyObject_GetItem(tail, arg); if (!result) { @@ -433,7 +329,7 @@ static void hpoasgn(Object* o, int type) { } } -void nrnpy_decref_defer(PyObject* po) { +static void nrnpy_decref_defer(PyObject* po) { if (po) { #if 0 PyObject* ps = PyObject_Str(po); @@ -512,9 +408,10 @@ static int hoccommand_exec(Object* ho) { if (r == NULL) { char* mes = nrnpyerr_str(); if (mes) { - Fprintf(stderr, "%s\n", mes); + std::string tmp{"Python Callback failed [hoccommand_exec]:\n"}; + tmp.append(mes); free(mes); - hoc_execerror("Python Callback failed", 0); + hoc_execerror(tmp.c_str(), nullptr); } if (PyErr_Occurred()) { PyErr_Print(); @@ -581,7 +478,8 @@ static Object* callable_with_args(Object* ho, int narg) { hoc_execerror("PyTuple_New failed", 0); } for (int i = 0; i < narg; ++i) { - PyObject* item = nrnpy_hoc_pop(); + // not used with datahandle args. + PyObject* item = nrnpy_hoc_pop("callable_with_args"); if (item == NULL) { Py_XDECREF(args); hoc_execerror("nrnpy_hoc_pop failed", 0); @@ -613,7 +511,7 @@ static double func_call(Object* ho, int narg, int* err) { hoc_execerror("PyTuple_New failed", 0); } for (int i = 0; i < narg; ++i) { - PyObject* item = nrnpy_hoc_pop(); + PyObject* item = nrnpy_hoc_pop("func_call"); if (item == NULL) { Py_XDECREF(args); hoc_execerror("nrnpy_hoc_pop failed", 0); @@ -751,7 +649,7 @@ static char* pickle(PyObject* p, size_t* size) { return buf; } -static char* po2pickle(Object* ho, size_t* size) { +static char* po2pickle(Object* ho, std::size_t* size) { setpickle(); if (ho && ho->ctemplate->sym == nrnpy_pyobj_sym_) { PyObject* po = nrnpy_hoc2pyobject(ho); @@ -772,7 +670,7 @@ static PyObject* unpickle(char* s, size_t size) { return po; } -static Object* pickle2po(char* s, size_t size) { +static Object* pickle2po(char* s, std::size_t size) { setpickle(); PyObject* po = unpickle(s, size); Object* ho = nrnpy_pyobject_in_obj(po); @@ -783,7 +681,7 @@ static Object* pickle2po(char* s, size_t size) { /** Full python traceback error message returned as string. 
* Caller should free the return value if not NULL **/ -char* nrnpyerr_str() { +static char* nrnpyerr_str() { if (PyErr_Occurred() && PyErr_ExceptionMatches(PyExc_Exception)) { PyObject *ptype, *pvalue, *ptraceback; PyErr_Fetch(&ptype, &pvalue, &ptraceback); @@ -841,7 +739,7 @@ char* nrnpyerr_str() { return NULL; } -char* call_picklef(char* fname, size_t size, int narg, size_t* retsize) { +char* call_picklef(char* fname, std::size_t size, int narg, std::size_t* retsize) { // fname is a pickled callable, narg is the number of args on the // hoc stack with types double, char*, hoc Vector, and PythonObject // callable return must be pickleable. @@ -859,7 +757,7 @@ char* call_picklef(char* fname, size_t size, int narg, size_t* retsize) { args = PyTuple_New(narg); for (int i = 0; i < narg; ++i) { - PyObject* arg = nrnpy_hoc_pop(); + PyObject* arg = nrnpy_hoc_pop("call_picklef"); if (PyTuple_SetItem(args, narg - 1 - i, arg)) { assert(0); } @@ -992,7 +890,7 @@ static PyObject* py_broadcast(PyObject* psrc, int root) { // type 1-alltoall, 2-allgather, 3-gather, 4-broadcast, 5-scatter // size for 3, 4, 5 refer to rootrank. -Object* py_alltoall_type(int size, int type) { +static Object* py_alltoall_type(int size, int type) { int np = nrnmpi_numprocs; // of subworld communicator PyObject* psrc = NULL; PyObject* pdest = NULL; @@ -1215,3 +1113,51 @@ Object* py_alltoall_type(int size, int type) { return NULL; #endif } + +static PyThreadState* save_thread() { + return PyEval_SaveThread(); +} +static void restore_thread(PyThreadState* g) { + PyEval_RestoreThread(g); +} + +void nrnpython_reg_real_nrnpython_cpp(neuron::python::impl_ptrs* ptrs); +void nrnpython_reg_real_nrnpy_hoc_cpp(neuron::python::impl_ptrs* ptrs); + +/** + * @brief Populate NEURON state with information from a specific Python. 
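// Illustrative sketch (not part of the patch): nrnpython_reg_real below replaces the long
// list of extern function-pointer globals deleted above with a single struct of pointers,
// neuron::python::impl_ptrs, that the core library hands in and later calls through. The
// general shape of that pattern, using stand-in names (impl_ptrs_sketch, methods,
// reg_from_python_tu are illustrations, not the real NEURON declarations):
struct impl_ptrs_sketch {
    int (*interpreter_start)(int) = nullptr;
    void (*hoc_nrnpython)() = nullptr;
};
impl_ptrs_sketch methods;  // owned by the core (non-Python) library
static void hoc_nrnpython_impl() { /* Python-side implementation lives here */ }
extern "C" void reg_from_python_tu(impl_ptrs_sketch* ptrs) {
    // each Python-aware translation unit fills in the members it implements,
    // exactly as nrnpython_reg_real and its *_cpp helpers do in this patch
    ptrs->hoc_nrnpython = hoc_nrnpython_impl;
}
// core side, guarding against a missing Python implementation:
//   if (methods.hoc_nrnpython) { methods.hoc_nrnpython(); }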
+ * @param ptrs Logically a return value; avoidi + */ +extern "C" void nrnpython_reg_real(neuron::python::impl_ptrs* ptrs) { + assert(ptrs); + class2oc("PythonObject", p_cons, p_destruct, p_members, nullptr, nullptr, nullptr); + nrnpy_pyobj_sym_ = hoc_lookup("PythonObject"); + assert(nrnpy_pyobj_sym_); + ptrs->callable_with_args = callable_with_args; + ptrs->call_func = func_call; + ptrs->call_picklef = call_picklef; + ptrs->call_python_with_section = call_python_with_section; + ptrs->cmdtool = grphcmdtool; + ptrs->guigetstr = guigetstr; + ptrs->guigetval = guigetval; + ptrs->guisetval = guisetval; + ptrs->hoccommand_exec = hoccommand_exec; + ptrs->hoccommand_exec_strret = hoccommand_exec_strret; + ptrs->ho2po = nrnpy_ho2po; + ptrs->hpoasgn = hpoasgn; + ptrs->mpi_alltoall_type = py_alltoall_type; + ptrs->opaque_obj2pyobj = opaque_obj2pyobj; + ptrs->pickle2po = pickle2po; + ptrs->po2ho = nrnpy_po2ho; + ptrs->po2pickle = po2pickle; + ptrs->praxis_efun = praxis_efun; + ptrs->pysame = pysame; + ptrs->py2n_component = py2n_component; + ptrs->restore_thread = restore_thread; + ptrs->save_thread = save_thread; + // call a function in nrnpython.cpp to register the functions defined there + nrnpython_reg_real_nrnpython_cpp(ptrs); + // call a function in nrnpy_hoc.cpp to register the functions defined there + nrnpython_reg_real_nrnpy_hoc_cpp(ptrs); + dlist = hoc_l_newlist(); +} diff --git a/src/nrnpython/nrnpy_reg.h b/src/nrnpython/nrnpy_reg.h deleted file mode 100644 index 979b10da74..0000000000 --- a/src/nrnpython/nrnpy_reg.h +++ /dev/null @@ -1,4 +0,0 @@ -extern PyObject* nrnpy_hoc(); -extern PyObject* nrnpy_nrn(); - -static PyObject* (*nrnpy_reg_[])() = {nrnpy_hoc, nrnpy_nrn, 0}; diff --git a/src/nrnpython/nrnpy_utils.h b/src/nrnpython/nrnpy_utils.h index 863ffc8dc5..3e62b4a94e 100644 --- a/src/nrnpython/nrnpy_utils.h +++ b/src/nrnpython/nrnpy_utils.h @@ -102,6 +102,65 @@ class Py2NRNString { bool disable_release_; }; +/** @brief For when hoc_execerror must handle the Python error. + * Idiom: PyErr2NRNString e; + * -- clean up any python objects -- + * hoc_execerr_ext("hoc message : %s", e.c_str()); + * e will be automatically deleted even though execerror does not return. + */ +class PyErr2NRNString { + public: + PyErr2NRNString() { + str_ = NULL; + } + + ~PyErr2NRNString() { + if (str_) { + free(str_); + } + } + + inline char* c_str() const { + return str_; + } + + inline char* get_pyerr() { + PyObject* ptype = NULL; + PyObject* pvalue = NULL; + PyObject* ptraceback = NULL; + if (PyErr_Occurred()) { + PyErr_Fetch(&ptype, &pvalue, &ptraceback); + if (pvalue) { + PyObject* pstr = PyObject_Str(pvalue); + if (pstr) { + const char* err_msg = PyUnicode_AsUTF8(pstr); + if (err_msg) { + str_ = strdup(err_msg); + } else { + str_ = strdup("get_pyerr failed at PyUnicode_AsUTF8"); + } + Py_XDECREF(pstr); + } else { + str_ = strdup("get_pyerr failed at PyObject_Str"); + } + } else { + str_ = strdup("get_pyerr failed at PyErr_Fetch"); + } + } + PyErr_Clear(); // in case could not turn pvalue into c_str. 
+ Py_XDECREF(ptype); + Py_XDECREF(pvalue); + Py_XDECREF(ptraceback); + return str_; + } + + private: + PyErr2NRNString(const PyErr2NRNString&); + PyErr2NRNString& operator=(const PyErr2NRNString&); + + char* str_; +}; + struct PyLockGIL { PyLockGIL() @@ -127,4 +186,13 @@ extern void nrnpy_sec_referr(); } \ } +extern void nrnpy_prop_referr(); +#define CHECK_PROP_INVALID(propid) \ + { \ + if (!propid) { \ + nrnpy_prop_referr(); \ + return NULL; \ + } \ + } + #endif /* end of include guard: nrnpy_utils_h */ diff --git a/src/nrnpython/nrnpython.cpp b/src/nrnpython/nrnpython.cpp index 43f5268766..2097630f63 100644 --- a/src/nrnpython/nrnpython.cpp +++ b/src/nrnpython/nrnpython.cpp @@ -1,23 +1,24 @@ #include #include +#include "oc_ansi.h" #include #include #if HAVE_IV #include #endif #include -#include #include -#include #include // bool isDirExist(const std::string& path); #include -extern "C" void nrnpython_real(); -extern "C" int nrnpython_start(int); -extern int hoc_get_line(); +#include "nrnpy.h" + +#include +#include +#include extern HocStr* hoc_cbufstr; extern int nrnpy_nositeflag; -extern char* nrnpy_pyhome; +extern std::string nrnpy_pyexe; extern char* hoc_ctp; extern FILE* hoc_fin; extern const char* hoc_promptstr; @@ -28,60 +29,98 @@ extern const char* path_prefix_to_libnrniv(); static char* nrnpython_getline(FILE*, FILE*, const char*); extern int nrn_global_argc; extern char** nrn_global_argv; -void nrnpy_augment_path(); int nrnpy_pyrun(const char*); extern int (*p_nrnpy_pyrun)(const char*); -extern int nrn_global_argc; -extern char** nrn_global_argv; -#if NRNPYTHON_DYNAMICLOAD -int nrnpy_site_problem; -#endif - -extern "C" { -extern void rl_stuff_char(int); -} // extern "C" -void nrnpy_augment_path() { - static int augmented = 0; - if (!augmented && strlen(neuronhome_forward()) > 0) { - augmented = 1; - int err = PyRun_SimpleString("import sys"); - assert(err == 0); +static std::string python_sys_path_to_append() { + std::string path{neuronhome_forward()}; + if (path.empty()) { + return {}; + } #if defined(__linux__) || defined(DARWIN) - // If /where/installed/lib/python/neuron exists, then append to sys.path - std::string lib = std::string(path_prefix_to_libnrniv()); + // If /where/installed/lib/python/neuron exists, then append to sys.path + path = path_prefix_to_libnrniv(); #else // not defined(__linux__) || defined(DARWIN) - std::string lib = std::string(neuronhome_forward()) + std::string("/lib/"); + path += "/lib/"; #endif // not defined(__linux__) || defined(DARWIN) - if (isDirExist(lib + std::string("python/neuron"))) { - std::string cmd = std::string("sys.path.append('") + lib + "python')"; - err = PyRun_SimpleString(cmd.c_str()); - assert(err == 0); - } - err = PyRun_SimpleString("sys.path.insert(0, '')"); - assert(err == 0); + path += "python"; + if (isDirExist(path + "/neuron")) { + return path; } + return {}; } -/** @brief Execute a Python script. - * @return 0 on failure, 1 on success. +namespace { +struct PythonConfigWrapper { + PythonConfigWrapper() { + PyConfig_InitPythonConfig(&config); + } + ~PythonConfigWrapper() { + PyConfig_Clear(&config); + } + operator PyConfig*() { + return &config; + } + PyConfig* operator->() { + return &config; + } + PyConfig config; +}; +struct PyMem_RawFree_Deleter { + void operator()(wchar_t* ptr) const { + PyMem_RawFree(ptr); + } +}; +PyObject* basic_sys_path{}; + +/** + * @brief Reset sys.path to be basic_sys_path and prepend something. + * @param new_first Path to decode and prepend to sys.path. 
*/ -int nrnpy_pyrun(const char* fname) { -#ifdef MINGW - // perhaps this should be the generic implementation - auto const sz = strlen(fname) + 40; - char* cmd = new char[sz]; - std::snprintf(cmd, sz, "exec(open(\"%s\").read(), globals())", fname); - int err = PyRun_SimpleString(cmd); - delete[] cmd; - if (err != 0) { - PyErr_Print(); - PyErr_Clear(); - return 0; +void reset_sys_path(std::string_view new_first) { + PyLockGIL _{}; + auto* const path = PySys_GetObject("path"); + nrn_assert(path); + // Clear sys.path + nrn_assert(PyList_SetSlice(path, 0, PyList_Size(path), nullptr) != -1); + // Decode new_first and make a Python unicode string out of it + auto* const ustr = PyUnicode_DecodeFSDefaultAndSize(new_first.data(), new_first.size()); + nrn_assert(ustr); + // Put the decoded string into sys.path + nrn_assert(PyList_Insert(path, 0, ustr) == 0); + // Append basic_sys_path to sys.path + assert(basic_sys_path && PyTuple_Check(basic_sys_path)); // failing in docs build + nrn_assert(PySequence_SetSlice(path, 1, 1 + PyTuple_Size(basic_sys_path), basic_sys_path) == 0); +} +} // namespace + +/** + * @brief Reset sys.path to its value at initialisation and prepend fname. + * + * Calling this with fname empty is appropriate ahead of executing code similar to `python -c + * "..."`, if fname is non-empty then resolve symlinks in it and get the directory name -- this is + * appropriate for `python script.py` compatibility. + */ +static void nrnpython_set_path(std::string_view fname) { + if (fname.empty()) { + reset_sys_path(fname); + } else { + // Figure out what sys.path[0] should be; this involves first resolving symlinks in fname + // and second getting the directory name from it. + auto const realpath = std::filesystem::canonical(fname); + // .string() ensures this is not a wchar_t string on Windows + auto const dirname = realpath.parent_path().string(); + reset_sys_path(dirname); } - return 1; -#else // MINGW not defined - FILE* fp = fopen(fname, "r"); +} + +/** + * @brief Execute a Python script. + * @return 0 on failure, 1 on success. + */ +int nrnpy_pyrun(const char* fname) { + nrnpython_set_path(fname); + auto* const fp = fopen(fname, "r"); if (fp) { int const code = PyRun_AnyFile(fp, fname); fclose(fp); @@ -90,44 +129,10 @@ int nrnpy_pyrun(const char* fname) { std::cerr << "Could not open " << fname << std::endl; return 0; } -#endif // MINGW not defined -} - -static wchar_t** wcargv; - -static void del_wcargv(int argc) { - if (wcargv) { - for (int i = 0; i < argc; ++i) { - PyMem_Free(wcargv[i]); - } - PyMem_Free(wcargv); - wcargv = NULL; - } -} - -static void copy_argv_wcargv(int argc, char** argv) { - del_wcargv(argc); - // basically a copy of code from Modules/python.c - wcargv = (wchar_t**) PyMem_Malloc(sizeof(wchar_t*) * argc); - if (!wcargv) { - fprintf(stderr, "out of memory\n"); - exit(1); - } - for (int i = 0; i < argc; ++i) { - wcargv[i] = Py_DecodeLocale(argv[i], NULL); - if (!wcargv[i]) { - fprintf(stderr, "out of memory\n"); - exit(1); - } - } } -static wchar_t* mywstrdup(char* s) { - size_t sz = mbstowcs(NULL, s, 0); - wchar_t* ws = new wchar_t[sz + 1]; - int count = mbstowcs(ws, s, sz + 1); - return ws; -} +extern PyObject* nrnpy_hoc(); +extern PyObject* nrnpy_nrn(); /** @brief Start the Python interpreter. * @arg b Mode of operation, can be 0 (finalize), 1 (initialize), @@ -138,60 +143,140 @@ static wchar_t* mywstrdup(char* s) { * been initialized. 
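// Illustrative sketch (not part of the patch): reset_sys_path above rebuilds sys.path
// through the CPython list API. The essential prepend step in isolation, assuming the
// interpreter is already initialised and the GIL is held by the caller:
static int prepend_to_sys_path(const char* dir) {
    PyObject* path = PySys_GetObject("path");          // borrowed reference to sys.path
    if (!path) {
        return -1;
    }
    PyObject* entry = PyUnicode_DecodeFSDefault(dir);  // new reference
    if (!entry) {
        return -1;
    }
    int rc = PyList_Insert(path, 0, entry);            // sys.path.insert(0, dir)
    Py_DECREF(entry);
    return rc;
}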
Mode 1 only has an effect if Python is not initialized, * while the other modes only take effect if Python is already initialized. */ -extern "C" int nrnpython_start(int b) { +static int nrnpython_start(int b) { #if USE_PYTHON static int started = 0; - // printf("nrnpython_start %d started=%d\n", b, started); if (b == 1 && !started) { p_nrnpy_pyrun = nrnpy_pyrun; if (nrnpy_nositeflag) { Py_NoSiteFlag = 1; } - // nrnpy_pyhome hopefully holds the python base root and should - // work with virtual environments. - // But use only if not overridden by the PYTHONHOME environment variable. - char* _p_pyhome = getenv("PYTHONHOME"); - if (_p_pyhome == NULL) { - _p_pyhome = nrnpy_pyhome; - } - if (_p_pyhome) { - Py_SetPythonHome(mywstrdup(_p_pyhome)); + // Create a Python configuration, see + // https://docs.python.org/3.8/c-api/init_config.html#python-configuration, so that + // {nrniv,special} -python behaves as similarly as possible to python. In particular this + // affects locale coercion. Under some circumstances Python does not straightforwardly + // handle settings like LC_ALL=C, so using a different configuration can lead to surprising + // differences. + PythonConfigWrapper config; + auto const check = [](const char* desc, PyStatus status) { + if (PyStatus_Exception(status)) { + std::ostringstream oss; + oss << desc; + if (status.err_msg) { + oss << ": " << status.err_msg; + if (status.func) { + oss << " in " << status.func; + } + } + throw std::runtime_error(oss.str()); + } + }; + // Virtual environments are discovered by Python by looking for pyvenv.cfg in the directory + // above sys.executable (https://docs.python.org/3/library/site.html), so we want to make + // sure that sys.executable is the path to a reasonable choice of Python executable. If we + // were to let sys.executable be `/some/path/to/arch/special` then we pick up a surprising + // dependency on whether or not `nrnivmodl` happened to be run in the root directory of the + // virtual environment + auto pyexe = nrnpy_pyexe; +#ifndef NRNPYTHON_DYNAMICLOAD + // In non-dynamic builds, the -pyexe option has no effect on which Python is linked and + // used, but it can be used to change PyConfig.program_name. If -pyexe is not passed then + // we use the Python that was discovered at build time. We have to make an std::string + // because Python's API requires the null terminator. + auto const& default_python = neuron::config::default_python_executable; + if (pyexe.empty() && !default_python.empty()) { + // -pyexe was not passed + pyexe = default_python; } - Py_Initialize(); -#if NRNPYTHON_DYNAMICLOAD - // return from Py_Initialize means there was no site problem - nrnpy_site_problem = 0; #endif - copy_argv_wcargv(nrn_global_argc, nrn_global_argv); - PySys_SetArgv(nrn_global_argc, wcargv); - started = 1; - // see nrnpy_reg.h - for (int i = 0; nrnpy_reg_[i]; ++i) { - (*nrnpy_reg_[i])(); + if (pyexe.empty()) { + throw std::runtime_error("Do not know what to set PyConfig.program_name to"); } - nrnpy_augment_path(); + // Surprisingly, given the documentation, it seems that passing a non-absolute path to + // PyConfig.program_name does not lead to a lookup in $PATH, but rather to the real (nrniv) + // path being placed in sys.executable -- at least on macOS. 
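// Illustrative sketch (not part of the patch): the PyConfig-based start-up used in
// nrnpython_start follows the standard CPython >= 3.8 initialisation sequence. Stripped
// of the NEURON specifics (argc/argv and the program name below are placeholders):
PyConfig cfg;
PyConfig_InitPythonConfig(&cfg);
PyStatus st = PyConfig_SetBytesString(&cfg, &cfg.program_name, "/usr/bin/python3");
if (!PyStatus_Exception(st)) {
    cfg.parse_argv = 0;  // hand argv through without Python interpreting it
    st = PyConfig_SetBytesArgv(&cfg, argc, argv);
}
if (!PyStatus_Exception(st)) {
    st = Py_InitializeFromConfig(&cfg);
}
PyConfig_Clear(&cfg);
if (PyStatus_Exception(st)) {
    Py_ExitStatusException(st);  // or report the failure, as the check() lambda does here
}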
+ if (auto p = std::filesystem::path{pyexe}; !p.is_absolute()) { + std::ostringstream oss; + oss << "Setting PyConfig.program_name to a non-absolute path (" << pyexe + << ") is not portable; try passing an absolute path to -pyexe or NRN_PYTHONEXE"; + throw std::runtime_error(oss.str()); + } + // TODO: in non-dynamic builds then -pyexe cannot change the used Python version, and `nrniv + // -pyexe /path/to/python3.10 -python` may well not use Python 3.10 at all. Should we do + // something about that? + check("Could not set PyConfig.program_name", + PyConfig_SetBytesString(config, &config->program_name, pyexe.c_str())); + // PySys_SetArgv is deprecated in Python 3.11+, write to config.XXX instead. + // nrn_global_argv contains the arguments passed to nrniv/special, which are not valid + // Python arguments, so tell Python not to try and parse them. In future we might like to + // remove the NEURON-specific arguments and pass whatever is left to Python? + config->parse_argv = 0; + check("Could not set PyConfig.argv", + PyConfig_SetBytesArgv(config, nrn_global_argc, nrn_global_argv)); + // Initialise Python + check("Could not initialise Python", Py_InitializeFromConfig(config)); + // Manipulate sys.path, starting from the default values + { + PyLockGIL _{}; + auto* const sys_path = PySys_GetObject("path"); + if (!sys_path) { + throw std::runtime_error("Could not get sys.path from C++"); + } + // Append a path to sys.path based on where libnrniv.so is, if it's not already there. + // Note that this is magic that is specific to launching via nrniv/special and not + // Python, which is unfortunate for consistency... + if (auto const path = python_sys_path_to_append(); !path.empty()) { + auto* ustr = PyUnicode_DecodeFSDefaultAndSize(path.c_str(), path.size()); + assert(ustr); + auto const already_there = PySequence_Contains(sys_path, ustr); + assert(already_there != -1); + if (already_there == 0 && PyList_Append(sys_path, ustr)) { + // TODO need to cover this without breaking sys.path consistency tests + throw std::runtime_error("Could not append " + path + " to sys.path"); + } + } + // To match regular Python, we should also prepend an entry to sys.path: + // from https://docs.python.org/3/library/sys.html#sys.path: + // * python -m module command line: prepend the current working directory. + // * python script.py command line: prepend the script’s directory. If + // it's a symbolic link, resolve symbolic links. + // * python -c code and python (REPL) command lines: prepend an empty + // string, which means the current working directory. + // We only find out later what we are going to do, so for the moment we just save a copy + // of sys.path and then restore + modify a copy of it before each script or command we + // execute. + assert(PyList_Check(sys_path) && !basic_sys_path); + basic_sys_path = PyList_AsTuple(sys_path); + } + started = 1; + nrnpy_hoc(); + nrnpy_nrn(); } if (b == 0 && started) { PyGILState_STATE gilsav = PyGILState_Ensure(); + assert(basic_sys_path); + Py_DECREF(basic_sys_path); + basic_sys_path = nullptr; Py_Finalize(); - del_wcargv(nrn_global_argc); // because of finalize, no PyGILState_Release(gilsav); started = 0; } if (b == 2 && started) { - int i; - copy_argv_wcargv(nrn_global_argc, nrn_global_argv); - PySys_SetArgv(nrn_global_argc, wcargv); - nrnpy_augment_path(); + // There used to be a call to PySys_SetArgv here, which dates back to + // e48d933e03b5c25a454e294deea55e399f8ba1b1 and a comment about sys.argv not being set with + // nrniv -python. 
Today, it seems like this is not needed any more. #if !defined(MINGW) // cannot get this to avoid crashing with MINGW PyOS_ReadlineFunctionPointer = nrnpython_getline; #endif // Is there a -c "command" or file.py arg. - bool python_error_encountered{false}; - for (i = 1; i < nrn_global_argc; ++i) { + bool python_error_encountered{false}, have_reset_sys_path{false}; + for (int i = 1; i < nrn_global_argc; ++i) { char* arg = nrn_global_argv[i]; if (strcmp(arg, "-c") == 0 && i + 1 < nrn_global_argc) { + // sys.path[0] should be an empty string for -c + reset_sys_path(""); + have_reset_sys_path = true; if (PyRun_SimpleString(nrn_global_argv[i + 1])) { python_error_encountered = true; } @@ -200,6 +285,7 @@ extern "C" int nrnpython_start(int b) { if (!nrnpy_pyrun(arg)) { python_error_encountered = true; } + have_reset_sys_path = true; // inside nrnpy_pyrun break; } } @@ -207,6 +293,12 @@ extern "C" int nrnpython_start(int b) { // code. In noninteractive/batch mode that happens immediately, in // interactive mode then we start a Python interpreter first. if (nrn_istty_) { + if (!have_reset_sys_path) { + // sys.path[0] should be 0 for interactive use, but if we're dropping into an + // interactive shell after executing something else then we don't want to mess with + // it. + reset_sys_path(""); + } PyRun_InteractiveLoop(hoc_fin, "stdin"); } return python_error_encountered; @@ -215,16 +307,24 @@ extern "C" int nrnpython_start(int b) { return 0; } -extern "C" void nrnpython_real() { +/** + * @brief Backend to nrnpython(...) in HOC/Python code. + * + * This can be called both with nrniv and python as the top-level executable, with different code + * responsible for initialising Python in the two cases. We trust that Python was initialised + * correctly somewhere higher up the call stack. 
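// Illustrative sketch (not part of the patch): nrnpython_real below executes the
// HOC-supplied string with the GIL held via the PyLockGIL helper from nrnpy_utils.h.
// A minimal standalone RAII guard with the same intent, built on the public
// PyGILState API, looks like:
struct GILGuard {
    PyGILState_STATE state_;
    GILGuard()
        : state_(PyGILState_Ensure()) {}
    ~GILGuard() {
        PyGILState_Release(state_);
    }
};
// usage:
//   {
//       GILGuard lock;
//       PyRun_SimpleString("print('called from C++')");
//   }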
+ */ +static void nrnpython_real() { int retval = 0; #if USE_PYTHON - HocTopContextSet { + HocTopContextSet + { PyLockGIL lock; - retval = PyRun_SimpleString(gargstr(1)) == 0; + retval = (PyRun_SimpleString(hoc_gargstr(1)) == 0); } HocContextRestore #endif - hoc_retpushx(double(retval)); + hoc_retpushx(retval); } static char* nrnpython_getline(FILE*, FILE*, const char* prompt) { @@ -233,21 +333,22 @@ static char* nrnpython_getline(FILE*, FILE*, const char* prompt) { int r = hoc_get_line(); // printf("r=%d c=%d\n", r, hoc_cbufstr->buf[0]); if (r == 1) { - size_t n = strlen(hoc_cbufstr->buf) + 1; + auto const n = std::strlen(hoc_cbufstr->buf) + 1; hoc_ctp = hoc_cbufstr->buf + n - 1; - char* p = static_cast(PyMem_RawMalloc(n)); - if (p == 0) { - return 0; + auto* const p = static_cast(PyMem_RawMalloc(n)); + if (!p) { + return nullptr; } - strcpy(p, hoc_cbufstr->buf); + std::strcpy(p, hoc_cbufstr->buf); return p; } else if (r == EOF) { - char* p = static_cast(PyMem_RawMalloc(2)); - if (p == 0) { - return 0; - } - p[0] = '\0'; - return p; + return static_cast(PyMem_RawCalloc(1, sizeof(char))); } return 0; } + +void nrnpython_reg_real_nrnpython_cpp(neuron::python::impl_ptrs* ptrs) { + ptrs->hoc_nrnpython = nrnpython_real; + ptrs->interpreter_set_path = nrnpython_set_path; + ptrs->interpreter_start = nrnpython_start; +} diff --git a/src/nrnpython/nrnpython.h b/src/nrnpython/nrnpython.h index c55a4c4f3d..c20fed1b7e 100644 --- a/src/nrnpython/nrnpython.h +++ b/src/nrnpython/nrnpython.h @@ -6,13 +6,9 @@ #define MS_WIN32 #define MS_WINDOWS #endif - +#include "neuron/container/data_handle.hpp" +#include "neuron/container/generic_data_handle.hpp" #include <../../nrnconf.h> -#include - -#if defined(NRNPYTHON_DYNAMICLOAD) && NRNPYTHON_DYNAMICLOAD >= 30 -#define PY_LIMITED_API -#endif #if defined(USE_PYTHON) #undef _POSIX_C_SOURCE @@ -21,6 +17,8 @@ #endif /*USE_PYTHON*/ +#include + #define PyString_FromString PyUnicode_FromString #define PyInt_Check PyLong_Check #define PyInt_CheckExact PyLong_CheckExact @@ -31,7 +29,7 @@ static_assert(PY_MAJOR_VERSION > 3 || (PY_MAJOR_VERSION == 3 && PY_MINOR_VERSION >= 8), "Python >= 3.8 required"); -extern PyObject* nrnpy_hoc_pop(); +extern PyObject* nrnpy_hoc_pop(const char* mes); extern int nrnpy_numbercheck(PyObject*); #if defined(__SIZEOF_POINTER__) && __SIZEOF_POINTER__ > __SIZEOF_LONG__ @@ -71,5 +69,30 @@ enum ObjectType { }; enum IteratorState { Begin, NextNotLast, Last }; } // namespace PyHoc +// Declare methods that are used in different translation units within one libnrnpythonX.Y +struct Object; +struct Section; +PyObject* hocobj_call_arg(int); +struct NPySecObj { + PyObject_HEAD + Section* sec_; + char* name_; + PyObject* cell_weakref_; +}; +NPySecObj* newpysechelp(Section* sec); +PyObject* nrnpy_hoc2pyobject(Object* ho); +int nrnpy_ho_eq_po(Object*, PyObject*); +PyObject* nrnpy_ho2po(Object*); +Object* nrnpy_po2ho(PyObject*); +Object* nrnpy_pyobject_in_obj(PyObject*); + +struct Prop; +struct Symbol; +bool nrn_chk_data_handle(const neuron::container::data_handle&); +PyObject* nrn_hocobj_handle(neuron::container::data_handle d); +extern "C" PyObject* nrn_hocobj_ptr(double*); +int nrn_is_hocobj_ptr(PyObject*, neuron::container::data_handle&); +int nrn_pointer_assign(Prop*, Symbol*, PyObject*); +neuron::container::generic_data_handle* nrnpy_setpointer_helper(PyObject*, PyObject*); #endif diff --git a/src/nrnpython/nrnpython_config.h.in b/src/nrnpython/nrnpython_config.h.in deleted file mode 100755 index 50ea7f8efb..0000000000 --- 
a/src/nrnpython/nrnpython_config.h.in +++ /dev/null @@ -1,15 +0,0 @@ -#ifndef H_nrnpython_config_included -#define H_nrnpython_config_included -/* Define if Python available */ -#undef USE_PYTHON -/* Define to sys.api_version if dynamic loading desired */ -/* 1013 is good for 2.5-2.7 */ -#undef NRNPYTHON_DYNAMICLOAD - -#ifndef NRNHOSTCPU -/* Define to the nrnivmodl consistent cpu name */ -#undef NRNHOSTCPU -#endif - -#endif /* H_nrnpython_config_included */ - diff --git a/src/nrnpython/nrnwrap_Python.h b/src/nrnpython/nrnwrap_Python.h index 94fc86d8b6..ef25ee47fa 100644 --- a/src/nrnpython/nrnwrap_Python.h +++ b/src/nrnpython/nrnwrap_Python.h @@ -1,7 +1,5 @@ #pragma once -#undef HAVE_FTIME #undef HAVE_PROTOTYPES -#undef HAVE_DIRENT_H #if defined(__MINGW32__) #undef _hypot #define _hypot hypot diff --git a/src/nrnpython/rxd.cpp b/src/nrnpython/rxd.cpp index 53f5df3ece..5c1117c4a3 100644 --- a/src/nrnpython/rxd.cpp +++ b/src/nrnpython/rxd.cpp @@ -3,6 +3,7 @@ #include #include #include "grids.h" +#include #include "rxd.h" #include <../nrnoc/section.h> #include <../nrnoc/nrn_ansi.h> @@ -12,14 +13,14 @@ #include #include +#include "ocmatrix.h" +#include "ivocvect.h" static void ode_solve(double, double*, double*); extern PyTypeObject* hocobject_type; extern int structure_change_cnt; extern int states_cvode_offset; -extern int _nrnunit_use_legacy_; int prev_structure_change_cnt = 0; -int prev_nrnunit_use_legacy = _nrnunit_use_legacy_; unsigned char initialized = FALSE; /* @@ -73,10 +74,9 @@ int _num_reactions = 0; int _curr_count; int* _curr_indices = NULL; double* _curr_scales = NULL; -double** _curr_ptrs = NULL; +std::vector> _conc_ptrs, _curr_ptrs; int _conc_count; int* _conc_indices = NULL; -double** _conc_ptrs = NULL; /*membrane fluxes*/ int _memb_curr_total = 0; /*number of membrane currents (sum of @@ -94,7 +94,7 @@ int* _memb_species_count; /*array of length _memb_count current*/ /*arrays of size _memb_count by _memb_species_count*/ -double*** _memb_cur_ptrs; /*hoc pointers TODO: replace with index for _curr_ptrs*/ +std::vector>> _memb_cur_ptrs; int** _memb_cur_charges; int*** _memb_cur_mapped; /*array of pairs of indices*/ int*** _memb_cur_mapped_ecs; /*array of pointer into ECS grids*/ @@ -128,7 +128,7 @@ static void transfer_to_legacy() { /*TODO: support 3D*/ int i; for (i = 0; i < _conc_count; i++) { - *(double*) _conc_ptrs[i] = states[_conc_indices[i]]; + *_conc_ptrs[i] = states[_conc_indices[i]]; } } @@ -162,9 +162,7 @@ extern "C" void free_curr_ptrs() { if (_curr_scales != NULL) free(_curr_scales); _curr_scales = NULL; - if (_curr_ptrs != NULL) - free(_curr_ptrs); - _curr_ptrs = NULL; + _curr_ptrs.clear(); } extern "C" void free_conc_ptrs() { @@ -172,9 +170,7 @@ extern "C" void free_conc_ptrs() { if (_conc_indices != NULL) free(_conc_indices); _conc_indices = NULL; - if (_conc_ptrs != NULL) - free(_conc_ptrs); - _conc_ptrs = NULL; + _conc_ptrs.clear(); } @@ -182,7 +178,6 @@ extern "C" void rxd_setup_curr_ptrs(int num_currents, int* curr_index, double* curr_scale, PyHocObject** curr_ptrs) { - int i; free_curr_ptrs(); /* info for NEURON currents - to update states */ _curr_count = num_currents; @@ -192,9 +187,9 @@ extern "C" void rxd_setup_curr_ptrs(int num_currents, _curr_scales = (double*) malloc(sizeof(double) * num_currents); memcpy(_curr_scales, curr_scale, sizeof(double) * num_currents); - _curr_ptrs = (double**) malloc(sizeof(double*) * num_currents); - for (i = 0; i < num_currents; i++) - _curr_ptrs[i] = (double*) curr_ptrs[i]->u.px_; + _curr_ptrs.resize(num_currents); + for 
(int i = 0; i < num_currents; i++) + _curr_ptrs[i] = curr_ptrs[i]->u.px_; } extern "C" void rxd_setup_conc_ptrs(int conc_count, int* conc_index, PyHocObject** conc_ptrs) { @@ -204,10 +199,9 @@ extern "C" void rxd_setup_conc_ptrs(int conc_count, int* conc_index, PyHocObject _conc_count = conc_count; _conc_indices = (int*) malloc(sizeof(int) * conc_count); memcpy(_conc_indices, conc_index, sizeof(int) * conc_count); - - _conc_ptrs = (double**) malloc(sizeof(double*) * conc_count); + _conc_ptrs.resize(conc_count); for (i = 0; i < conc_count; i++) - _conc_ptrs[i] = (double*) conc_ptrs[i]->u.px_; + _conc_ptrs[i] = conc_ptrs[i]->u.px_; } extern "C" void rxd_include_node_flux3D(int grid_count, @@ -337,12 +331,11 @@ void apply_node_flux(int n, states[j] += dt * (double) PyLong_AsLong(result) / scale[i]; } else if (PyInt_Check(result)) { states[j] += dt * (double) PyInt_AsLong(result) / scale[i]; - } - - else { + } else { PyErr_SetString(PyExc_Exception, "node._include_flux callback did not return a number.\n"); } + Py_DECREF(result); } } else { PyErr_SetString(PyExc_Exception, "node._include_flux unrecognised source term.\n"); @@ -492,7 +485,6 @@ extern "C" void set_setup_matrices(fptr setup_matrices) { extern "C" void set_setup_units(fptr setup_units) { _setup_units = setup_units; - _setup_units(); } /* nrn_tree_solve modified from nrnoc/ldifus.c */ @@ -617,9 +609,8 @@ static void free_currents() { free(_memb_cur_mapped[i][j]); } free(_memb_cur_mapped[i]); - free(_memb_cur_ptrs[i]); } - free(_memb_cur_ptrs); + _memb_cur_ptrs.clear(); free(_memb_cur_mapped); free(_memb_species_count); free(_cur_node_indices); @@ -666,7 +657,7 @@ extern "C" void setup_currents(int num_currents, _membrane_lookup = (int*) malloc(sizeof(int) * num_states); memset(_membrane_lookup, SPECIES_ABSENT, sizeof(int) * num_states); - _memb_cur_ptrs = (double***) malloc(sizeof(double**) * num_currents); + _memb_cur_ptrs.resize(num_currents); _memb_cur_mapped_ecs = (int***) malloc(sizeof(int*) * num_currents); _memb_cur_mapped = (int***) malloc(sizeof(int**) * num_currents); induced_currents_ecs_idx = (int*) malloc(sizeof(int) * _memb_curr_total); @@ -676,14 +667,13 @@ extern "C" void setup_currents(int num_currents, memset(induced_currents_ecs_idx, SPECIES_ABSENT, sizeof(int) * _memb_curr_total); for (i = 0, k = 0; i < num_currents; i++) { - _memb_cur_ptrs[i] = (double**) malloc(sizeof(double*) * num_species[i]); - // memcpy(_memb_cur_ptrs[i], &ptrs[k], sizeof(PyHocObject*)*num_species[i]); + _memb_cur_ptrs[i].resize(num_species[i]); _memb_cur_mapped_ecs[i] = (int**) malloc(sizeof(int*) * num_species[i]); _memb_cur_mapped[i] = (int**) malloc(sizeof(int*) * num_species[i]); for (j = 0; j < num_species[i]; j++, k++) { - _memb_cur_ptrs[i][j] = (double*) ptrs[k]->u.px_; + _memb_cur_ptrs[i][j] = ptrs[k]->u.px_; _memb_cur_mapped[i][j] = (int*) malloc(2 * sizeof(int)); _memb_cur_mapped_ecs[i][j] = (int*) malloc(2 * sizeof(int)); @@ -783,10 +773,6 @@ extern "C" int rxd_nonvint_block(int method, int size, double* p1, double* p2, i /*Needed for node.include_flux*/ _setup_matrices(); } - if (prev_nrnunit_use_legacy != _nrnunit_use_legacy_) { - _setup_units(); - prev_nrnunit_use_legacy = _nrnunit_use_legacy_; - } } switch (method) { case 0: @@ -895,7 +881,7 @@ extern "C" void register_rate(int nspecies, } else { react->vptrs = NULL; } - react->state_idx = (int***) malloc(nseg * sizeof(double**)); + react->state_idx = (int***) malloc(nseg * sizeof(int**)); for (i = 0, idx = 0; i < nseg; i++) { react->state_idx[i] = (int**) malloc((nspecies 
+ nparam) * sizeof(int*)); for (j = 0; j < nspecies + nparam; j++) { @@ -1330,7 +1316,7 @@ void _rhs_variable_step(const double* p1, double* p2) { rhs); /*reactions*/ - MEM_ZERO(&ydot[num_states - _rxd_num_zvi], sizeof(double) * _ecs_count); + memset(&ydot[num_states - _rxd_num_zvi], 0, sizeof(double) * _ecs_count); get_all_reaction_rates(states, rhs, ydot); @@ -1416,7 +1402,7 @@ void get_reaction_rates(ICSReactions* react, double* states, double* rates, doub states_for_reaction[i][j] = NAN; } } - MEM_ZERO(result_array[i], react->num_regions * sizeof(double)); + memset(result_array[i], 0, react->num_regions * sizeof(double)); } for (k = 0; i < react->num_species + react->num_params; i++, k++) { for (j = 0; j < react->num_regions; j++) { @@ -1442,7 +1428,7 @@ void get_reaction_rates(ICSReactions* react, double* states, double* rates, doub ecs_params_for_reaction[k] = NAN; } } - MEM_ZERO(ecs_result, react->num_ecs_species * sizeof(double)); + memset(ecs_result, 0, react->num_ecs_species * sizeof(double)); for (i = 0; i < react->num_mult; i++) { mc_mult[i] = react->mc_multiplier[i][segment]; @@ -1522,10 +1508,9 @@ void solve_reaction(ICSReactions* react, double pd; double dt = *dt_ptr; double dx = FLT_EPSILON; - MAT* jacobian = m_get(N, N); - VEC* b = v_get(N); - VEC* x = v_get(N); - PERM* pivot = px_get(N); + auto jacobian = std::make_unique(N, N); + auto b = std::make_unique(N); + auto x = std::make_unique(N); double** states_for_reaction = (double**) malloc(react->num_species * sizeof(double*)); double** states_for_reaction_dx = (double**) malloc(react->num_species * sizeof(double*)); @@ -1581,8 +1566,8 @@ void solve_reaction(ICSReactions* react, states_for_reaction_dx[i][j] = states_for_reaction[i][j]; } } - MEM_ZERO(result_array[i], react->num_regions * sizeof(double)); - MEM_ZERO(result_array_dx[i], react->num_regions * sizeof(double)); + memset(result_array[i], 0, react->num_regions * sizeof(double)); + memset(result_array_dx[i], 0, react->num_regions * sizeof(double)); } for (k = 0; i < react->num_species + react->num_params; i++, k++) { for (j = 0; j < react->num_regions; j++) { @@ -1612,8 +1597,8 @@ void solve_reaction(ICSReactions* react, } if (react->num_ecs_species > 0) { - MEM_ZERO(ecs_result, react->num_ecs_species * sizeof(double)); - MEM_ZERO(ecs_result_dx, react->num_ecs_species * sizeof(double)); + memset(ecs_result, 0, react->num_ecs_species * sizeof(double)); + memset(ecs_result_dx, 0, react->num_ecs_species * sizeof(double)); } for (i = 0; i < react->num_mult; i++) { @@ -1635,9 +1620,9 @@ void solve_reaction(ICSReactions* react, for (j = 0; j < react->num_regions; j++) { if (react->state_idx[segment][i][j] != SPECIES_ABSENT) { if (bval == NULL) - v_set_val(b, idx, dt * result_array[i][j]); + b->elem(idx) = dt * result_array[i][j]; else - v_set_val(b, idx, bval[react->state_idx[segment][i][j]]); + b->elem(idx) = bval[react->state_idx[segment][i][j]]; // set up the changed states array @@ -1662,7 +1647,7 @@ void solve_reaction(ICSReactions* react, if (react->state_idx[segment][jac_i][jac_j] != SPECIES_ABSENT) { pd = (result_array_dx[jac_i][jac_j] - result_array[jac_i][jac_j]) / dx; - m_set_val(jacobian, jac_idx, idx, (idx == jac_idx) - dt * pd); + *jacobian->mep(jac_idx, idx) = (idx == jac_idx) - dt * pd; jac_idx += 1; } result_array_dx[jac_i][jac_j] = 0; @@ -1672,7 +1657,7 @@ void solve_reaction(ICSReactions* react, // pd is our Jacobian approximated if (react->ecs_state[segment][jac_i] != NULL) { pd = (ecs_result_dx[jac_i] - ecs_result[jac_i]) / dx; - m_set_val(jacobian, 
jac_idx, idx, -dt * pd); + *jacobian->mep(jac_idx, idx) = -dt * pd; jac_idx += 1; } ecs_result_dx[jac_i] = 0; @@ -1688,9 +1673,9 @@ void solve_reaction(ICSReactions* react, for (i = 0; i < react->num_ecs_species; i++) { if (react->ecs_state[segment][i] != NULL) { if (bval == NULL) - v_set_val(b, idx, dt * ecs_result[i]); + b->elem(idx) = dt * ecs_result[i]; else - v_set_val(b, idx, cvode_b[react->ecs_index[segment][i]]); + b->elem(idx) = cvode_b[react->ecs_index[segment][i]]; // set up the changed states array @@ -1714,7 +1699,7 @@ void solve_reaction(ICSReactions* react, // pd is our Jacobian approximated if (react->state_idx[segment][jac_i][jac_j] != SPECIES_ABSENT) { pd = (result_array_dx[jac_i][jac_j] - result_array[jac_i][jac_j]) / dx; - m_set_val(jacobian, jac_idx, idx, -dt * pd); + *jacobian->mep(jac_idx, idx) = -dt * pd; jac_idx += 1; } } @@ -1723,10 +1708,10 @@ void solve_reaction(ICSReactions* react, // pd is our Jacobian approximated if (react->ecs_state[segment][jac_i] != NULL) { pd = (ecs_result_dx[jac_i] - ecs_result[jac_i]) / dx; - m_set_val(jacobian, jac_idx, idx, (idx == jac_idx) - dt * pd); + *jacobian->mep(jac_idx, idx) = (idx == jac_idx) - dt * pd; jac_idx += 1; } else { - m_set_val(jacobian, idx, idx, 1.0); + *jacobian->mep(idx, idx) = 1.0; } // reset dx array ecs_states_for_reaction_dx[i] -= dx; @@ -1735,8 +1720,7 @@ void solve_reaction(ICSReactions* react, } } // solve for x, destructively - LUfactor(jacobian, pivot); - LUsolve(jacobian, pivot, b, x); + jacobian->solv(b.get(), x.get(), false); if (bval != NULL) // variable-step { @@ -1744,15 +1728,14 @@ void solve_reaction(ICSReactions* react, for (j = 0; j < react->num_regions; j++) { idx = react->state_idx[segment][i][j]; if (idx != SPECIES_ABSENT) { - bval[idx] = v_get_val(x, jac_idx++); + bval[idx] = x->elem(jac_idx++); } } } for (i = 0; i < react->num_ecs_species; i++) { if (react->ecs_state[segment][i] != NULL) - react->ecs_grid[i]->all_reaction_states[ecsindex[i]++] = v_get_val(x, - jac_idx++); - // cvode_b[react->ecs_index[segment][i]] = v_get_val(x, jac_idx++); + react->ecs_grid[i]->all_reaction_states[ecsindex[i]++] = x->elem(jac_idx++); + // cvode_b[react->ecs_index[segment][i]] = x->elem(jac_idx++); } } else // fixed-step { @@ -1760,21 +1743,16 @@ void solve_reaction(ICSReactions* react, for (j = 0; j < react->num_regions; j++) { idx = react->state_idx[segment][i][j]; if (idx != SPECIES_ABSENT) - states[idx] += v_get_val(x, jac_idx++); + states[idx] += x->elem(jac_idx++); } } for (i = 0; i < react->num_ecs_species; i++) { if (react->ecs_state[segment][i] != NULL) - react->ecs_grid[i]->all_reaction_states[ecsindex[i]++] = v_get_val(x, - jac_idx++); + react->ecs_grid[i]->all_reaction_states[ecsindex[i]++] = x->elem(jac_idx++); } } } free(ecsindex); - m_free(jacobian); - v_free(b); - v_free(x); - px_free(pivot); for (i = 0; i < react->num_species; i++) { free(states_for_reaction[i]); free(states_for_reaction_dx[i]); @@ -1811,7 +1789,7 @@ void do_ics_reactions(double* states, double* b, double* cvode_states, double* c void get_all_reaction_rates(double* states, double* rates, double* ydot) { ICSReactions* react; if (_membrane_flux) - MEM_ZERO(_rxd_induced_currents, sizeof(double) * _memb_curr_total); + memset(_rxd_induced_currents, 0, sizeof(double) * _memb_curr_total); for (react = _reactions; react != NULL; react = react->next) { if (react->icsN + react->ecsN > 0) get_reaction_rates(react, states, rates, ydot); diff --git a/src/nrnpython/rxd.h b/src/nrnpython/rxd.h index d50daa079e..7371bca493 100644 --- 
a/src/nrnpython/rxd.h +++ b/src/nrnpython/rxd.h @@ -1,20 +1,11 @@ #pragma once -#include "matrix2.h" -// mesch defines many macros that interact badly with C++ headers -#undef catch -#undef max -#undef min - #include #include #include #include -/*borrowed from Meschach Version 1.2b*/ -#define v_get_val(x, i) ((x)->ve[(i)]) -#define m_get_val(A, i, j) ((A)->me[(i)][(j)]) -#define SPECIES_ABSENT -1 -#define PREFETCH 4 +#define SPECIES_ABSENT -1 +#define PREFETCH 4 typedef void (*fptr)(void); @@ -74,23 +65,6 @@ typedef struct ICSReactions { struct ICSReactions* next; } ICSReactions; -typedef struct { - /*variables for reactions*/ - double* states_for_reaction; - double* states_for_reaction_dx; - double* ecs_states_for_reaction; - double* ecs_states_for_reaction_dx; - double* result_array; - double* result_array_dx; - double* result_ecs; - double* result_ecs_dx; - MAT* jacobian; - VEC* x; - VEC* b; - PERM* pivot; - -} ReactionVariables; - typedef struct TaskList { void* (*task)(void*); void* args; diff --git a/src/nrnpython/rxd_extracellular.cpp b/src/nrnpython/rxd_extracellular.cpp index e42d31a77c..15eb7db8b5 100644 --- a/src/nrnpython/rxd_extracellular.cpp +++ b/src/nrnpython/rxd_extracellular.cpp @@ -6,6 +6,8 @@ #include "rxd.h" #include #include +#include +#include #define loc(x, y, z) ((z) + (y) *grid->size_z + (x) *grid->size_z * grid->size_y) @@ -288,10 +290,9 @@ void* ecs_do_reactions(void* dataptr) { double* mc_mults_array = NULL; double dx = FLT_EPSILON; double pd; - MAT* jacobian; - VEC* x; - VEC* b; - PERM* pivot; + std::unique_ptr jacobian; + std::vector x{}; + std::vector b{}; for (react = ecs_reactions; react != NULL; react = react->next) { // TODO: This is bad. Need to refactor @@ -315,10 +316,10 @@ void* ecs_do_reactions(void* dataptr) { if (react->num_species_involved == 0) continue; /*allocate data structures*/ - jacobian = m_get(react->num_species_involved, react->num_species_involved); - b = v_get(react->num_species_involved); - x = v_get(react->num_species_involved); - pivot = px_get(jacobian->m); + jacobian = std::make_unique(react->num_species_involved, + react->num_species_involved); + b.resize(react->num_species_involved); + x.resize(react->num_species_involved); states_cache = (double*) malloc(sizeof(double) * react->num_species_involved); params_cache = (double*) malloc(sizeof(double) * react->num_params_involved); states_cache_dx = (double*) malloc(sizeof(double) * react->num_species_involved); @@ -335,7 +336,7 @@ void* ecs_do_reactions(void* dataptr) { states_cache_dx[j] = react->species_states[j][offset_idx]; mc_mults_array[j] = react->mc3d_mults[j][i]; } - MEM_ZERO(results_array, react->num_species_involved * sizeof(double)); + memset(results_array, 0, react->num_species_involved * sizeof(double)); for (k = 0; j < react->num_species_involved + react->num_params_involved; k++, j++) { offset_idx = i + react->mc3d_indices_offsets[j]; @@ -346,35 +347,34 @@ void* ecs_do_reactions(void* dataptr) { for (j = 0; j < react->num_species_involved; j++) { states_cache_dx[j] += dx; - MEM_ZERO(results_array_dx, - react->num_species_involved * sizeof(double)); + memset(results_array_dx, + 0, + react->num_species_involved * sizeof(double)); react->reaction(states_cache_dx, params_cache, results_array_dx, mc_mults_array); - v_set_val(b, j, dt * results_array[j]); + b[j] = dt * results_array[j]; for (k = 0; k < react->num_species_involved; k++) { pd = (results_array_dx[k] - results_array[k]) / dx; - m_set_val(jacobian, k, j, (j == k) - dt * pd); + *jacobian->mep(k, j) = (j == k) 
- dt * pd; } states_cache_dx[j] -= dx; } // solve for x if (react->num_species_involved == 1) { - react->species_states[0][i] += v_get_val(b, 0) / - m_get_val(jacobian, 0, 0); + react->species_states[0][i] += b[0] / jacobian->getval(0, 0); } else { // find entry in leftmost column with largest absolute value // Pivot for (j = 0; j < react->num_species_involved; j++) { for (k = j + 1; k < react->num_species_involved; k++) { - if (abs(m_get_val(jacobian, j, j)) < - abs(m_get_val(jacobian, k, j))) { + if (abs(jacobian->getval(j, j)) < abs(jacobian->getval(k, j))) { for (n = 0; n < react->num_species_involved; n++) { - temp = m_get_val(jacobian, j, n); - m_set_val(jacobian, j, n, m_get_val(jacobian, k, n)); - m_set_val(jacobian, k, n, temp); + temp = jacobian->getval(j, n); + *jacobian->mep(j, n) = jacobian->getval(k, n); + *jacobian->mep(k, n) = temp; } } } @@ -382,28 +382,24 @@ void* ecs_do_reactions(void* dataptr) { for (j = 0; j < react->num_species_involved - 1; j++) { for (k = j + 1; k < react->num_species_involved; k++) { - ge_value = m_get_val(jacobian, k, j) / - m_get_val(jacobian, j, j); + ge_value = jacobian->getval(k, j) / jacobian->getval(j, j); for (n = 0; n < react->num_species_involved; n++) { - val_to_set = m_get_val(jacobian, k, n) - - ge_value * m_get_val(jacobian, j, n); - m_set_val(jacobian, k, n, val_to_set); + val_to_set = jacobian->getval(k, n) - + ge_value * jacobian->getval(j, n); + *jacobian->mep(k, n) = val_to_set; } - v_set_val(b, k, v_get_val(b, k) - ge_value * v_get_val(b, j)); + b[k] = b[k] - ge_value * b[j]; } } for (j = react->num_species_involved - 1; j + 1 > 0; j--) { - v_set_val(x, j, v_get_val(b, j)); + x[j] = b[j]; for (k = j + 1; k < react->num_species_involved; k++) { if (k != j) { - v_set_val(x, - j, - v_get_val(x, j) - - m_get_val(jacobian, j, k) * v_get_val(x, k)); + x[j] = x[j] - jacobian->getval(j, k) * x[k]; } } - v_set_val(x, j, v_get_val(x, j) / m_get_val(jacobian, j, j)); + x[j] = x[j] / jacobian->getval(j, j); } for (j = 0; j < react->num_species_involved; j++) { // I think this should be something like @@ -414,15 +410,11 @@ void* ecs_do_reactions(void* dataptr) { // react->species_indices[j][i] react->species_states[j][index] += // v_get_val(x,j); offset_idx = i + react->mc3d_indices_offsets[j]; - react->species_states[j][offset_idx] += v_get_val(x, j); + react->species_states[j][offset_idx] += x[j]; } } } } - m_free(jacobian); - v_free(b); - v_free(x); - px_free(pivot); SAFE_FREE(states_cache); SAFE_FREE(states_cache_dx); @@ -454,10 +446,10 @@ void* ecs_do_reactions(void* dataptr) { if (react->num_species_involved == 0) continue; /*allocate data structures*/ - jacobian = m_get(react->num_species_involved, react->num_species_involved); - b = v_get(react->num_species_involved); - x = v_get(react->num_species_involved); - pivot = px_get(jacobian->m); + jacobian = std::make_unique(react->num_species_involved, + react->num_species_involved); + b.resize(react->num_species_involved); + x.resize(react->num_species_involved); states_cache = (double*) malloc(sizeof(double) * react->num_species_involved); params_cache = (double*) malloc(sizeof(double) * react->num_params_involved); states_cache_dx = (double*) malloc(sizeof(double) * react->num_species_involved); @@ -471,7 +463,7 @@ void* ecs_do_reactions(void* dataptr) { states_cache[j] = react->species_states[j][i]; states_cache_dx[j] = react->species_states[j][i]; } - MEM_ZERO(results_array, react->num_species_involved * sizeof(double)); + memset(results_array, 0, react->num_species_involved * 
sizeof(double)); for (k = 0; j < react->num_species_involved + react->num_params_involved; k++, j++) { params_cache[k] = react->species_states[j][i]; @@ -480,32 +472,31 @@ void* ecs_do_reactions(void* dataptr) { for (j = 0; j < react->num_species_involved; j++) { states_cache_dx[j] += dx; - MEM_ZERO(results_array_dx, - react->num_species_involved * sizeof(double)); + memset(results_array_dx, + 0, + react->num_species_involved * sizeof(double)); react->reaction(states_cache_dx, params_cache, results_array_dx, NULL); - v_set_val(b, j, dt * results_array[j]); + b[j] = dt * results_array[j]; for (k = 0; k < react->num_species_involved; k++) { pd = (results_array_dx[k] - results_array[k]) / dx; - m_set_val(jacobian, k, j, (j == k) - dt * pd); + *jacobian->mep(k, j) = (j == k) - dt * pd; } states_cache_dx[j] -= dx; } // solve for x if (react->num_species_involved == 1) { - react->species_states[0][i] += v_get_val(b, 0) / - m_get_val(jacobian, 0, 0); + react->species_states[0][i] += b[0] / jacobian->getval(0, 0); } else { // find entry in leftmost column with largest absolute value // Pivot for (j = 0; j < react->num_species_involved; j++) { for (k = j + 1; k < react->num_species_involved; k++) { - if (abs(m_get_val(jacobian, j, j)) < - abs(m_get_val(jacobian, k, j))) { + if (abs(jacobian->getval(j, j)) < abs(jacobian->getval(k, j))) { for (n = 0; n < react->num_species_involved; n++) { - temp = m_get_val(jacobian, j, n); - m_set_val(jacobian, j, n, m_get_val(jacobian, k, n)); - m_set_val(jacobian, k, n, temp); + temp = jacobian->getval(j, n); + *jacobian->mep(j, n) = jacobian->getval(k, n); + *jacobian->mep(k, n) = temp; } } } @@ -513,46 +504,38 @@ void* ecs_do_reactions(void* dataptr) { for (j = 0; j < react->num_species_involved - 1; j++) { for (k = j + 1; k < react->num_species_involved; k++) { - ge_value = m_get_val(jacobian, k, j) / - m_get_val(jacobian, j, j); + ge_value = jacobian->getval(k, j) / jacobian->getval(j, j); for (n = 0; n < react->num_species_involved; n++) { - val_to_set = m_get_val(jacobian, k, n) - - ge_value * m_get_val(jacobian, j, n); - m_set_val(jacobian, k, n, val_to_set); + val_to_set = jacobian->getval(k, n) - + ge_value * jacobian->getval(j, n); + *jacobian->mep(k, n) = val_to_set; } - v_set_val(b, k, v_get_val(b, k) - ge_value * v_get_val(b, j)); + b[k] = b[k] - ge_value * b[j]; } } for (j = react->num_species_involved - 1; j + 1 > 0; j--) { - v_set_val(x, j, v_get_val(b, j)); + x[j] = b[j]; for (k = j + 1; k < react->num_species_involved; k++) { if (k != j) { - v_set_val(x, - j, - v_get_val(x, j) - - m_get_val(jacobian, j, k) * v_get_val(x, k)); + x[j] = x[j] - jacobian->getval(j, k) * x[k]; } } - v_set_val(x, j, v_get_val(x, j) / m_get_val(jacobian, j, j)); + x[j] = x[j] / jacobian->getval(j, j); } for (j = 0; j < react->num_species_involved; j++) { // I think this should be something like - // react->species_states[j][mc3d_indices[i]] += v_get_val(x,j); + // react->species_states[j][mc3d_indices[i]] += x[j]; // Since the grid has a uniform discretization, the mc3d_indices // should be the same length. 
So just need to access the correct // mc3d_indices[i] maybe do two lines?: index = // react->species_indices[j][i] react->species_states[j][index] += - // v_get_val(x,j); - react->species_states[j][i] += v_get_val(x, j); + // x[j]; + react->species_states[j][i] += x[j]; } } } } - m_free(jacobian); - v_free(b); - v_free(x); - px_free(pivot); SAFE_FREE(states_cache); SAFE_FREE(states_cache_dx); @@ -599,7 +582,7 @@ void _fadvance_fixed_step_3D(void) { run_threaded_reactions(threaded_reactions_tasks); for (id = 0, grid = Parallel_grids[0]; grid != NULL; grid = grid->next, id++) { - MEM_ZERO(grid->states_cur, sizeof(double) * grid->size_x * grid->size_y * grid->size_z); + memset(grid->states_cur, 0, sizeof(double) * grid->size_x * grid->size_y * grid->size_z); g = dynamic_cast(grid); if (g) g->do_multicompartment_reactions(NULL); diff --git a/src/nrnpython/rxd_intracellular.cpp b/src/nrnpython/rxd_intracellular.cpp index ca95795c54..093cb728de 100644 --- a/src/nrnpython/rxd_intracellular.cpp +++ b/src/nrnpython/rxd_intracellular.cpp @@ -5,7 +5,9 @@ #include "grids.h" #include "rxd.h" #include +#ifdef HAVE_UNISTD_H #include +#endif #include diff --git a/src/oc/axis.cpp b/src/oc/axis.cpp index e54aa53c12..1045c3838d 100644 --- a/src/oc/axis.cpp +++ b/src/oc/axis.cpp @@ -137,10 +137,6 @@ axis.cpp,v #include "hoc.h" #include "gui-redirect.h" - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - #define CLIP 1e9 #define XS 500. #define YS 400. diff --git a/src/oc/code.cpp b/src/oc/code.cpp index c137f017e7..f3d8a58db4 100644 --- a/src/oc/code.cpp +++ b/src/oc/code.cpp @@ -17,6 +17,7 @@ #include #include #include "nrnfilewrap.h" +#include "utils/enumerate.h" #include "options.h" @@ -61,7 +62,7 @@ using StackDatum = std::variant; /** @brief The stack. @@ -262,12 +263,8 @@ T const& hoc_look_inside_stack(int i) { } template double const& hoc_look_inside_stack(int); template Symbol* const& hoc_look_inside_stack(int); -template int const& hoc_look_inside_stack(int); template Object** const& hoc_look_inside_stack(int); template Object* const& hoc_look_inside_stack(int); -template char** const& hoc_look_inside_stack(int); -template double* const& hoc_look_inside_stack(int); -template std::nullptr_t const& hoc_look_inside_stack(int); namespace { bool stack_entry_is_tmpobject(StackDatum const& entry) { return std::holds_alternative(entry); @@ -288,7 +285,7 @@ void unref_if_tmpobject(StackDatum& entry) { int get_legacy_int_type(StackDatum const& entry) { if (std::holds_alternative(entry)) { return STRING; - } else if (std::holds_alternative(entry)) { + } else if (std::holds_alternative(entry)) { return VAR; } else if (std::holds_alternative(entry)) { return NUMBER; @@ -402,15 +399,13 @@ static void frame_objauto_recover_on_err(Frame* ff) { /* only on error */ static void stack_obtmp_recover_on_err(int tcnt) { if (tobj_count > tcnt) { // unref tmpobjects from the top of the stack until we have the right number left - for (auto stkp = stack.rbegin(); stkp != stack.rend(); ++stkp) { - // What is the index in `stack` of `stkp`? 
- auto const index = stack.size() - 1 - std::distance(stack.rbegin(), stkp); - if (stack_entry_is_tmpobject(*stkp)) { - hoc_stkobj_unref(std::get(*stkp), index); + for (const auto&& [index, stkp]: renumerate(stack)) { + if (stack_entry_is_tmpobject(stkp)) { + hoc_stkobj_unref(std::get(stkp), index); if (tobj_count == tcnt) { return; } - } else if (std::holds_alternative(*stkp)) { + } else if (std::holds_alternative(stkp)) { printf("OBJECTTMP at stack index %ld already unreffed\n", index); } } @@ -443,7 +438,7 @@ void hoc_prstack() { std::size_t i{}; std::ostringstream oss; oss << "interpreter stack: " << stack.size() << '\n'; - for (auto stkp = stack.rbegin(); stkp != stack.rend(); ++stkp, ++i) { + for (auto&& stkp: reverse(stack)) { if (i > 10) { oss << " ...\n"; break; @@ -458,7 +453,8 @@ void hoc_prstack() { } oss << ' ' << cxx_demangle(typeid(decltype(value)).name()) << '\n'; }, - *stkp); + stkp); + ++i; } Printf(oss.str().c_str()); } @@ -836,7 +832,9 @@ void hoc_push_string() { // push double pointer onto stack void hoc_pushpx(double* d) { - push_value(d); + // Trying to promote a raw pointer to a data handle is expensive, so don't + // do that every time we push a double* onto the stack. + hoc_push(neuron::container::data_handle{neuron::container::do_not_search, d}); } // push symbol pointer onto stack @@ -849,6 +847,10 @@ void hoc_pushi(int d) { push_value(d); } +void hoc_push(neuron::container::generic_data_handle handle) { + push_value(std::move(handle)); +} + /* push index onto stack */ void hoc_push_ndim(int d) { push_value(stack_ndim_datum(d)); @@ -902,9 +904,24 @@ double hoc_xpop() { return pop_value(); } +namespace neuron { +/** @brief hoc_get_arg() + */ +container::generic_data_handle oc::detail::hoc_get_arg_helper::impl( + std::size_t narg) { + return cast(get_argument(narg)); +} +/** @brief hoc_pop() + */ +container::generic_data_handle oc::detail::hoc_pop_helper::impl() { + return pop_value(); +} +} // namespace neuron + // pop double pointer and return top elem from stack double* hoc_pxpop() { - return pop_value(); + // All pointers are actually stored on the stack as data handles + return static_cast(hoc_pop>()); } // pop symbol pointer and return top elem from stack @@ -1010,19 +1027,6 @@ void forcode(void) { pc = relative(savepc + 1); /* next statement */ } -static void warn_assign_dynam_unit(const char* name) { - static int first = 1; - if (first) { - char mes[100]; - first = 0; - Sprintf(mes, - "Assignment to %s physical constant %s", - _nrnunit_use_legacy_ ? 
"legacy" : "modern", - name); - hoc_warning(mes, NULL); - } -} - void hoc_shortfor(void) { Inst* savepc = pc; double begin, end, *pval = 0; @@ -1042,9 +1046,6 @@ void hoc_shortfor(void) { execerror("integer iteration variable", sym->name); } else if (sym->subtype == USERDOUBLE) { pval = sym->u.pval; - } else if (sym->subtype == DYNAMICUNITS) { - pval = sym->u.pval + _nrnunit_use_legacy_; - warn_assign_dynam_unit(sym->name); } else { pval = OPVAL(sym); } @@ -1205,9 +1206,6 @@ static void for_segment2(Symbol* sym, int mode) { execerror("integer iteration variable", sym->name); } else if (sym->subtype == USERDOUBLE) { pval = sym->u.pval; - } else if (sym->subtype == DYNAMICUNITS) { - pval = sym->u.pval + _nrnunit_use_legacy_; - warn_assign_dynam_unit(sym->name); } else { pval = OPVAL(sym); } @@ -1586,17 +1584,18 @@ void hoc_Argtype() { itype = -1; } else { auto const& entry = f->argn[iarg - f->nargs]; - itype = std::visit(overloaded{[](double) { return 0; }, - [](Object*) { return 1; }, - [](Object**) { return 1; }, - [](char**) { return 2; }, - [](double*) { return 3; }, - [](auto const& x) -> int { - throw std::runtime_error( - "hoc_Argtype didn't expect argument of type " + - cxx_demangle(typeid(decltype(x)).name())); - }}, - entry); + itype = + std::visit(overloaded{[](double) { return 0; }, + [](Object*) { return 1; }, + [](Object**) { return 1; }, + [](char**) { return 2; }, + [](neuron::container::generic_data_handle const&) { return 3; }, + [](auto const& x) -> int { + throw std::runtime_error( + "hoc_Argtype didn't expect argument of type " + + cxx_demangle(typeid(decltype(x)).name())); + }}, + entry); } hoc_retpushx(itype); } @@ -1634,12 +1633,6 @@ char** hoc_pgargstr(int narg) { get_argument(narg)); } -// return pointer to nth argument -double* hoc_pgetarg(int narg) { - auto const& arg_entry = get_argument(narg); - return cast(arg_entry); -} - // return pointer to nth argument double* hoc_getarg(int narg) { auto& arg_entry = get_argument(narg); @@ -1863,9 +1856,6 @@ void eval(void) /* evaluate variable on stack */ case USERINT: d = (double) (*(sym->u.pvalint)); break; - case DYNAMICUNITS: - d = sym->u.pval[_nrnunit_use_legacy_]; - break; case USERPROPERTY: d = cable_prop_eval(sym); break; @@ -1947,9 +1937,6 @@ void hoc_evalpointer() { case USERFLOAT: execerror("can use pointer only to doubles", sym->name); break; - case DYNAMICUNITS: - d = sym->u.pval + _nrnunit_use_legacy_; - break; case USERPROPERTY: d = cable_prop_eval_pointer(sym); break; @@ -2226,13 +2213,6 @@ void hoc_assign() { } *(sym->u.pvalfloat) = (float) (d2); break; - case DYNAMICUNITS: - if (op) { - d2 = hoc_opasgn(op, sym->u.pval[_nrnunit_use_legacy_], d2); - } - sym->u.pval[_nrnunit_use_legacy_] = (float) (d2); - warn_assign_dynam_unit(sym->name); - break; default: if (op) { d2 = hoc_opasgn(op, *(OPVAL(sym)), d2); @@ -2573,7 +2553,7 @@ void insertcode(Inst* begin, Inst* end, Pfrv f) { } } -#if defined(DOS) || defined(WIN32) || (MAC && !defined(DARWIN)) +#if defined(DOS) || defined(WIN32) static int ntimes; #endif @@ -2591,14 +2571,6 @@ void execute(Inst* p) /* run the machine */ #endif } #endif - -#if MAC && !defined(DARWIN) - /* there is significant overhead here */ - if (++ntimes > 100) { - ntimes = 0; - hoc_check_intupt(1); - } -#endif if (hoc_intset) execerror("interrupted", (char*) 0); /* (*((pc++)->pf))(); DEC 5000 increments pc after the return!*/ diff --git a/src/oc/code.h b/src/oc/code.h index cf280b4ee8..7ecfbb0c82 100644 --- a/src/oc/code.h +++ b/src/oc/code.h @@ -38,6 +38,7 @@ extern void 
hoc_autoobject(void), hocobjret(void), hoc_newobj_ret(void); extern void connectsection(void), add_section(void), range_const(void), range_interpolate(void); extern void clear_sectionlist(void), install_sectionlist(void); extern void rangevareval(void), sec_access(void), mech_access(void); +extern void rangeobjeval(void), rangeobjevalmiddle(void); extern void for_segment(void), for_segment1(void); extern void sec_access_temp(void), sec_access_push(void), sec_access_pop(void); extern void rangepoint(void), forall_section(void), hoc_ifsec(void); diff --git a/src/oc/code2.cpp b/src/oc/code2.cpp index b8da4deb1e..3e804d103c 100644 --- a/src/oc/code2.cpp +++ b/src/oc/code2.cpp @@ -7,7 +7,9 @@ #include "hocparse.h" #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include #include #include "nrnfilewrap.h" @@ -698,32 +700,32 @@ Symbol* hoc_parse_stmt(const char* str, Symlist** psymlist) { return sp; } +/** + * @brief Executing hoc_pointer(&var) will put the address of the variable in this location. + */ +neuron::container::data_handle hoc_varhandle; -extern double* hoc_varpointer; +void hoc_pointer() { + hoc_varhandle = hoc_hgetarg(1); + hoc_ret(); + hoc_pushx(1.); +} -void hoc_pointer(void) { - extern double* hoc_pgetarg(int); - hoc_varpointer = hoc_pgetarg(1); - ret(); - pushx(1.); +neuron::container::data_handle hoc_val_handle(std::string_view s) { + constexpr std::string_view prefix{"{hoc_pointer_(&"}, suffix{")}\n"}; + std::string code; + code.reserve(prefix.size() + suffix.size() + s.size()); + code.append(prefix); + code.append(s); + code.append(")}\n"); + hoc_varhandle = {}; + auto const status = hoc_oc(code.c_str()); + assert(status == 0); + return hoc_varhandle; } double* hoc_val_pointer(const char* s) { - char buf[BUFSIZ]; - hoc_varpointer = 0; - if (strlen(s) > BUFSIZ - 20) { - HocStr* buf; - buf = hocstr_create(strlen(s) + 20); - std::snprintf(buf->buf, buf->size + 1, "{hoc_pointer_(&%s)}\n", s); - auto const code = hoc_oc(buf->buf); - assert(code == 0); - hocstr_delete(buf); - } else { - Sprintf(buf, "{hoc_pointer_(&%s)}\n", s); - auto const code = hoc_oc(buf); - assert(code == 0); - } - return hoc_varpointer; + return static_cast(hoc_val_handle(s)); } void hoc_name_declared(void) { diff --git a/src/oc/fileio.cpp b/src/oc/fileio.cpp index c69a1d2cb3..455d0220a1 100644 --- a/src/oc/fileio.cpp +++ b/src/oc/fileio.cpp @@ -4,7 +4,9 @@ #include #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include "hoc.h" #include "ocmisc.h" #include "hocstr.h" @@ -20,21 +22,6 @@ extern char* neuron_home; NrnFILEWrap* frin; FILE* fout; -#if 0 && MAC -#include - -void debugfile(const char* format, ...) 
{ - va_list args; - static FILE* df; - if (!df) { - df = fopen("debugfile", "w"); - } - va_start(args, format); - vfprintf(df, format, args); - fflush(df); -} -#endif - void hoc_stdout(void) { static int prev = -1; if (ifarg(1)) { @@ -159,39 +146,6 @@ const char* expand_env_var(const char* s) { } } *cp2 = '\0'; -#if MAC && !defined(DARWIN) - /* convert / to : */ - for (cp1 = hs->buf + begin; *cp1; ++cp1) { - if (*cp1 == '/') { - *cp1 = ':'; - } - } - /* if a : in the original name then assume already mac and done */ - /* if $ in original name then done since NEURONHOME already has correct prefix */ - /* if first is : then remove it, otherwise prepend it */ - if (!strchr(s, ':') && !strchr(s, '$')) { - if (hs->buf[begin] == ':') { - begin = 2; - } else { - begin = 0; - hs->buf[0] = ':'; - } - } - for (cp1 = hs->buf + begin, cp2 = cp1; *cp1;) { - if (cp1[0] == ':' && cp1[1] == '.') { - if (cp1[2] == ':') { - cp1 += 2; - continue; - } else if (cp1[2] == '.' && cp1[3] == ':') { - cp1 += 3; - *cp2++ = ':'; - continue; - } - } - *cp2++ = *cp1++; - } - *cp2 = '\0'; -#endif return hs->buf + begin; } @@ -232,11 +186,7 @@ int hoc_xopen1(const char* name, const char* rcs) { errno = EINTR; while (errno == EINTR) { errno = 0; -#if MAC - constexpr auto mode_str = "rb"; -#else constexpr auto mode_str = "r"; -#endif if (!(hoc_fin = nrn_fw_fopen(fname.c_str(), mode_str))) { fname = expand_env_var(fname.c_str()); if (!(hoc_fin = nrn_fw_fopen(fname.c_str(), mode_str))) { @@ -524,8 +474,8 @@ void hoc_sprint1(char** ppbuf, int argn) { /* convert args to right type for con *ppbuf = hs->buf; } -#if defined(WIN32) || defined(MAC) -static FILE* oc_popen(char* cmd, char* type) { +#if defined(WIN32) +static FILE* oc_popen(char const* const cmd, char const* const type) { FILE* fp; char buf[1024]; assert(strlen(cmd) + 20 < 1024); @@ -668,8 +618,7 @@ static int hoc_Load_file(int always, const char* name) { path[0] = '\0'; /* otherwise find the file in the default directories */ f = fopen(base, "r"); /* cwd */ -#if !MAC - if (!f) { /* try HOC_LIBRARY_PATH */ + if (!f) { /* try HOC_LIBRARY_PATH */ char* hlp; hlp = getenv("HOC_LIBRARY_PATH"); while (hlp && *hlp) { @@ -700,7 +649,6 @@ static int hoc_Load_file(int always, const char* name) { } } } -#endif if (!f) { /* try NEURONHOME/lib/hoc */ Sprintf(path, "$(NEURONHOME)/lib/hoc"); assert(strlen(path) + strlen(base) + 1 < hoc_load_file_size_); @@ -770,23 +718,16 @@ void hoc_getcwd(void) { { strcpy(buf, hoc_back2forward(buf)); } #endif len = strlen(buf); -#if defined(MAC) - if (buf[len - 1] != ':') { - buf[len] = ':'; - buf[len + 1] = '\0'; - } -#else if (buf[len - 1] != '/') { buf[len] = '/'; buf[len + 1] = '\0'; } -#endif hoc_ret(); hoc_pushstr(&buf); } void hoc_machine_name(void) { -#if !defined(WIN32) && !defined(MAC) +#if !defined(WIN32) /*----- functions called -----*/ /*----- local variables -----*/ char buf[20]; @@ -813,10 +754,8 @@ static int (*nrnpy_pr_stdoe_callback)(int, char*); static int (*nrnpy_pass_callback)(); extern "C" void nrnpy_set_pr_etal(int (*cbpr_stdoe)(int, char*), int (*cbpass)()) { - if (nrn_is_python_extension) { - nrnpy_pr_stdoe_callback = cbpr_stdoe; - nrnpy_pass_callback = cbpass; - } + nrnpy_pr_stdoe_callback = cbpr_stdoe; + nrnpy_pass_callback = cbpass; } void nrnpy_pass() { @@ -897,7 +836,7 @@ int Fprintf(FILE* stream, const char* fmt, ...) { } /** printf style specification of hoc_execerror message. (512 char limit) **/ -void hoc_execerr_ext(const char* fmt, ...) { +[[noreturn]] void hoc_execerr_ext(const char* fmt, ...) 
{ int size; // vsnprintf returns -1 on error. va_list ap; diff --git a/src/oc/fmenu.cpp b/src/oc/fmenu.cpp deleted file mode 100644 index 9686df22d9..0000000000 --- a/src/oc/fmenu.cpp +++ /dev/null @@ -1,628 +0,0 @@ -#include <../../nrnconf.h> -/* /local/src/master/nrn/src/oc/fmenu.c,v 1.4 1996/02/16 16:19:25 hines Exp */ -/* -fmenu.c,v - * Revision 1.4 1996/02/16 16:19:25 hines - * OCSMALL used to throw out things not needed by teaching programs - * - * Revision 1.3 1995/07/22 13:01:47 hines - * avoid unhandled exceptions in mswindows due to some function stubs - * - * Revision 1.2 1994/11/23 19:52:57 hines - * all nrnoc works in dos with go32 - * - * Revision 1.1.1.1 1994/10/12 17:22:08 hines - * NEURON 3.0 distribution - * - * Revision 2.19 93/02/02 10:34:25 hines - * static functions declared before used - * - * Revision 1.3 92/08/18 07:31:36 hines - * arrays in different objects can have different sizes. - * Now one uses araypt(symbol, SYMBOL) or araypt(symbol, OBJECTVAR) to - * return index of an array variable. - * - * Revision 1.2 91/10/14 17:36:08 hines - * scaffolding for oop in place. Syntax about right. No action yet. - * - * Revision 1.1 91/10/11 11:12:01 hines - * Initial revision - * - * Revision 4.20 91/04/03 16:01:33 hines - * mistyped || - * - * Revision 3.77 90/07/20 09:45:49 hines - * case 3 allows actions to be executed when variable is changed - * - * Revision 3.50 90/02/17 10:12:25 mlh - * lint free on sparc and makfile good for sparc - * - * Revision 3.44 90/01/05 14:57:24 mlh - * min and max along with Jamie's changes that allow person to match - * upper char by typing lower char (works only with turboc) - * - * Revision 3.41 89/12/08 15:34:47 mlh - * infinite loop when searching for non-existent character starting - * at first menu item. - * Corrected with do{}while control structure. - * - * Revision 3.20 89/08/15 08:29:42 mlh - * compiles under turbo-c 1.5 -- some significant bugs found - * - * Revision 3.7 89/07/13 08:21:26 mlh - * stack functions involve specific types instead of Datum - * - * Revision 3.4 89/07/12 10:26:55 mlh - * Lint free - * - * Revision 3.3 89/07/10 15:45:56 mlh - * Lint pass1 is silent. Inst structure changed to union. - * - * Revision 2.0 89/07/07 11:36:43 mlh - * *** empty log message *** - * - * Revision 1.1 89/07/07 11:15:57 mlh - * Initial revision - * -*/ - -/* Copyright 1989,88,87- M.L. Hines, Neurobiology Dept.,DUMC, Durham, NC - * - * REVISION HISTORY: - * - * 5-19-89 let return start entry of number - * 5-04-89 Get it going on the SUN. - * 4-17-89 Added if HOC if FOCEXT statements to distinguish between FOCAL - * and HOC versions. - * - * Synopsis fmenu is a FOCAL menu management function - * - * x fmenu(args) where the following options are available: - * - * fmenu(nmenu,-1) allocates space for nmenu number of menus - * Menu identifier numbers start at 0,1,...nmenu-1 - * fmenu(imenu,0) erase previous menu identified by imenu. - * - * fmenu(imenu,1,var list) add variables specified in list - * to imenu. The variable names will - * be added sequentially in the order - * specified. - * - * fmenu(imenu,2,"prompt","command") add the executable command - * specified by a prompt,command pair - * to imenu. - * - * fmenu(imenu) executes menu imenu, displays, - * navigates through imenu. - * - * - * Version 1.0 written by M.V. Evans and M.L. 
Hines 4-12-89 - */ - -#include - -#if DOS -#include -#include -union REGS regs; -extern int egagrph; /* detect if in graphics mode */ -#else -static int egagrph = 0; -#endif -#include "hoc.h" -#include -#define Ret(a) \ - hoc_ret(); \ - hoc_pushx(a) -#define SPACE '\040' -#define BEEP Printf("\007") -/* structure and functions from getsym.c */ -#include "hocgetsym.h" - -/* Structure for single menu list */ -typedef struct Menuitem { - struct Menuitem* pprevious; /* Pointer to a previous item */ - short row, col; /* Coordinates of each menu item */ - short type; - char* prompt; /* prompt, command used for actions */ - char* command; - Psym* psym; - double symmin; /* min and max value for sym */ - double symmax; - struct Menuitem* nextitem; /* Pointer to next menu item */ -} Menuitem; - -/* menu types */ -#define MENU_VAR 1 -#define MENU_ACTION 2 - -/* Summary of menu functions : - insert_menu(int r, int c, sym *sp ) - Stores row and col coordinates, and the symbol pointer -specified in fcursor() as an item in a structure list. Successive items are appended at the end of -the list. This function returns the pointer for the beginning of the list. display_menu(Menuitem -*menu) - Displays the whole menu as specified by successive calls to fcursor. Recovers information -stored for each menu item and prints the corresponding item variable name and value at its specified -position. destroy_menu(Menuitem *menu) - Frees the space allocated for the whole menu list. - navigate_menu(Menuitem *menu) - Allows user to move around the displayed menu by making use of -the arrow keys. erase_item(Menuitem *pnow) - Erases second line contents in menu -*/ - - -static int cexecute(const char*); -static const char* navigate(int); -static Menuitem* append(); /*common code for appendsym,appendaction*/ -static void appendvar(int, const char*, const char*); -static void appendaction(int, const char*, const char*); -static void destroy(int); -static double enter(int, int, double, int, Menuitem*); -static void prval(int, int, int, double); -static void prs(int, int, int, const char*); -static void undisplay(int); - -/* Structure pointer summary: - *pprev - pointer to previous item structure - *pnow - pointer to current item structure - *nextitem - pointer to next item - forward link - *pprevious - pointer to previous item - reverse link - */ - -static int current_menu = -1; /* current menu number */ -static int maxmenus; -static Menuitem** menusfirst; /* pointers to first menuitem in list*/ -static Menuitem** menuslast; /* pointers to last menuitem in list*/ -static Menuitem** menuscurrent; /* pointers to where navigate starts*/ -static int first = 1; /* emacs_term has not been opened */ - -#define diag(s) hoc_execerror(s, (char*) 0); -#define chk(i) \ - { \ - if (i < 0 || i >= maxmenus) \ - diag("menu number out of range"); \ - } -static void menu_manager(int nmenu) { - int previous; - const char* command; - previous = current_menu; - current_menu = nmenu; - if (previous >= 0) { - undisplay(previous); - } else { - undisplay(current_menu); - } - while ((command = navigate(current_menu)) != (char*) 0) { - if (cexecute(command) == 4) { /* 4 means stop was executed */ - break; - } - } - if (previous >= 0) { - undisplay(current_menu); - } - current_menu = previous; -} - -void hoc_fmenu(void) { - int imenu, flag, i, narg; -#ifdef WIN32 - hoc_execerror("fmenu not available under mswindows.", "Use xpanel series"); -#endif - imenu = *getarg(1); - if (!ifarg(2)) { /* navigate the menu */ - chk(imenu); - menu_manager(imenu); - 
Ret(0.); - return; - } - flag = *getarg(2); - narg = 2; - switch (flag) { - case -1: - if (current_menu != -1) { - diag("can't destroy current menu"); - } - if (maxmenus) { - for (i = 0; i < maxmenus; i++) { - destroy(i); - } - free((char*) menusfirst); - free((char*) menuslast); - } - maxmenus = 0; - menusfirst = (Menuitem**) emalloc((unsigned) (imenu * sizeof(Menuitem*))); - menuslast = (Menuitem**) emalloc((unsigned) (imenu * sizeof(Menuitem*))); - menuscurrent = (Menuitem**) emalloc((unsigned) (imenu * sizeof(Menuitem*))); - - maxmenus = imenu; - for (i = 0; i < maxmenus; i++) { - menusfirst[i] = menuslast[i] = menuscurrent[i] = (Menuitem*) 0; - } - break; - case 0: - chk(imenu); - if (current_menu == imenu) { - diag(" can't destroy current menu"); - } - destroy(imenu); - appendaction(imenu, "Exit", "stop"); - break; - case 1: - while (ifarg(narg = narg + 1)) { - appendvar(imenu, gargstr(narg), (char*) 0); - menuslast[imenu]->symmin = *getarg(narg = narg + 1); - menuslast[imenu]->symmax = *getarg(narg = narg + 1); - } - break; - case 2: - while (ifarg(narg = narg + 1)) { - char *prompt, *command; - prompt = gargstr(narg); - command = gargstr(narg = narg + 1); - appendaction(imenu, prompt, command); - } - break; - case 3: - while (ifarg(narg = narg + 1)) { - appendvar(imenu, gargstr(narg), gargstr(narg + 1)); - menuslast[imenu]->symmin = *getarg(narg = narg + 2); - menuslast[imenu]->symmax = *getarg(narg = narg + 1); - } - break; - default: - diag("illegal argument flag"); - break; - } - Ret(0.); -} - -static void xcursor(int r, int c) { -#if DOS - _BH = 0; - _DH = r; - _DL = c; - _AH = 2; - geninterrupt(0x10); -#endif -} - -static int ibmgetc(void) { /* Copied from ibm.c file in memacs */ -#if DOS - regs.h.ah = 7; - intdos(®s, ®s); - return (int) regs.h.al; -#else - return 0; -#endif -} - -static Menuitem* append(int imenu) { - Menuitem *last, *pnow; - - if (imenu < 0 || imenu >= maxmenus) { - diag("menu number out of range"); - } - last = menuslast[imenu]; - pnow = (Menuitem*) emalloc(sizeof(Menuitem)); - - pnow->pprevious = last; - pnow->nextitem = (Menuitem*) 0; - menuslast[imenu] = pnow; - - if (last) { - int col = last->col, row = last->row; - last->nextitem = pnow; - col += 13; - if (col > 77) { - row += 2; - col = 0; - } - pnow->row = row; - pnow->col = col; - } else { - menusfirst[imenu] = pnow; - pnow->row = 0; - pnow->col = 0; - menuscurrent[imenu] = pnow; - } - - pnow->type = 0; - pnow->prompt = (char*) 0; - pnow->command = (char*) 0; - pnow->psym = (Psym*) 0; - return (pnow); -} - -static void appendvar(int imenu, const char* variable, const char* command) { - Menuitem* item; - int i, len; - Psym* p; - - item = append(imenu); - item->type = MENU_VAR; - item->psym = p = hoc_getsym(variable); - if (command) { - item->command = (char*) emalloc((unsigned) (strlen(command) + 1)); - Strcpy(item->command, command); - } else { - item->command = (char*) 0; - } - std::string buf{p->sym->name}; - for (i = 0; i < p->nsub; i++) { - buf.append(1, '['); - buf.append(std::to_string(p->sub[i])); - buf.append(1, ']'); - } - item->prompt = static_cast(emalloc(buf.size() + 1)); - Strcpy(item->prompt, buf.c_str()); -} - -static void appendaction(int imenu, const char* prompt, const char* command) { - Menuitem* item; - item = append(imenu); - item->type = MENU_ACTION; - item->prompt = (char*) emalloc((unsigned) (strlen(prompt) + 1)); - Strcpy(item->prompt, prompt); - item->command = (char*) emalloc((unsigned) (strlen(command) + 1)); - Strcpy(item->command, command); -} - -static void display(int 
imenu) { - Menuitem *menu, *pnow; - int row, col; - - chk(imenu); - menu = menusfirst[imenu]; - for (pnow = menu; pnow; pnow = pnow->nextitem) { - row = pnow->row; - col = pnow->col; - prs(0, row, col, pnow->prompt); - switch (pnow->type) { - case MENU_VAR: - prval(0, row + 1, col, hoc_getsymval(pnow->psym)); - break; - } - } - xcursor(menuslast[imenu]->row + 2, 0); -} - - -static void destroy(int imenu) { - Menuitem* menu; - Menuitem *pnow, *nextitem; - - menu = menusfirst[imenu]; - menusfirst[imenu] = (Menuitem*) 0; - menuslast[imenu] = (Menuitem*) 0; - menuscurrent[imenu] = (Menuitem*) 0; - for (pnow = menu; pnow; pnow = nextitem) { - nextitem = pnow->nextitem; - if (pnow->prompt) { - free(pnow->prompt); - } - if (pnow->command) { - free((char*) pnow->command); - } - if (pnow->psym) { - free_arrayinfo(pnow->psym->arayinfo); - free((char*) pnow->psym); - } - free((char*) pnow); - } -} - -static const char* navigate(int imenu) { - Menuitem* menu; - Menuitem *pcur, *pnow; - int row, col, key, current_col; - double val; - - menu = menusfirst[imenu]; - if (menu == (Menuitem*) 0) { - return (char*) 0; - } - display(imenu); - pcur = menuscurrent[imenu]; - key = 0; - while (key != 27) { - pnow = pcur; /* pnow is fixed hereafter */ - row = pnow->row + 1; - col = pnow->col; - switch (pnow->type) { - case MENU_VAR: - val = hoc_getsymval(pnow->psym); - prval(1, row, col, val); - break; - case MENU_ACTION: - prs(1, row, col, "execute"); - break; - } - - key = ibmgetc(); - if (key == 27 || key == 3) - goto label; - if (key == 5) { - return "plt(-5)"; - } - if (key == 0) { /* arrow key pressed */ - key = ibmgetc(); - switch (key) { - case 77: /* Right arrow key */ - pcur = pcur->nextitem; - if (pcur == nullptr) - pcur = menu; - break; - case 75: /* Left arrow key */ - pcur = pcur->pprevious; - if (pcur == (Menuitem*) 0) { - pcur = menuslast[imenu]; - } - break; - case 80: /* Down key */ - current_col = pnow->col; - do { - pcur = pcur->nextitem; - if (pcur == (Menuitem*) 0) { - pcur = menu; - } - } while (pcur->col != current_col); - break; - case 72: /* Up key*/ - current_col = pnow->col; - do { - pcur = pcur->pprevious; - if (pcur == (Menuitem*) 0) { - pcur = menuslast[imenu]; - } - } while (pcur->col != current_col); - break; - default: - BEEP; - break; - } - } else if (pnow->type == MENU_VAR && - ((isdigit(key) || key == '-' || key == '+' || key == 015 || key == '.'))) { - prs(0, row, col, ""); - val = enter(row, col, val, key, pnow); - hoc_assignsym(pnow->psym, val); - if (pnow->command) { - prs(0, pnow->row + 1, pnow->col, "executing"); - xcursor(menuslast[imenu]->row + 2, 0); - - return pnow->command; - } - prval(1, row, col, val); - } else if (key == 015 && pnow->type == MENU_ACTION) { - prs(0, pnow->row + 1, pnow->col, "executing"); - xcursor(menuslast[imenu]->row + 2, 0); - - return pnow->command; - } else if (isalpha(key)) { - pcur = pnow; - do { - pcur = pcur->nextitem; - if (pcur == (Menuitem*) 0) { - pcur = menusfirst[imenu]; - } - if (toupper(pcur->prompt[0]) == toupper(key)) - break; - } while (pcur != pnow); - } else { - BEEP; - } - menuscurrent[imenu] = pcur; - - switch (pnow->type) { /* the old one */ - case MENU_VAR: - prval(0, row, col, val); - break; - case MENU_ACTION: - prs(0, row, col, ""); - break; - } - } -label: - xcursor(menuslast[imenu]->row + 2, 0); - return (char*) 0; -} - -static double enter(int row, int col, double defalt, int frstch, Menuitem* pnow) { - char istr[80], *istrptr; - int key; - double input; - char buf[10]; - - istrptr = &istr[0]; - xcursor(row, ++col); 
- if (frstch != 13) { - *istrptr++ = frstch; - Sprintf(buf, "%c", istr[0]); - plprint(buf); - } - for (;;) { - key = ibmgetc(); - if (isdigit(key) || key == '.' || key == 'e' || key == '-' || key == '+') { - Sprintf(buf, "%c", key); - plprint(buf); - *istrptr++ = key; - continue; - } else if (key == 27) { - return (defalt); - } else if (key == '\b') { - if (istrptr > istr) { - Printf("\b \b"); - *(--istrptr) = '\0'; - } - } else if (key == 13) { /*return*/ - *istrptr = '\0'; - if (sscanf(istr, "%lf", &input) == 1) - if (input >= pnow->symmin && input <= pnow->symmax) { - return (input); - } else { /* input out of range */ - BEEP; - Sprintf(istr, "Range %-5g", pnow->symmin); - prs(0, pnow->row, pnow->col, istr); - Sprintf(istr, "To %-5g ", pnow->symmax); - prs(0, pnow->row + 1, pnow->col, istr); - key = ibmgetc(); - BEEP; - prs(0, pnow->row, pnow->col, pnow->prompt); - - return (defalt); - } - else { - return (defalt); - } - } else { - BEEP; - continue; - } - } -} /* end of function enter */ - -static void prval(int oldnew, int row, int col, double val) { - char string[100]; - Sprintf(string, "%g", val); - prs(oldnew, row, col, string); -} - -static void prs(int oldnew, int row, int col, const char* string) { - char buf[100]; - xcursor(row, col); - if (oldnew == 0) { - Sprintf(buf, "%-13s", string); - plprint(buf); - } else { - Sprintf(buf, "%13c", SPACE); - plprint(buf); - xcursor(row, col); - snprintf(buf, 100, "<%s>", string); - plprint(buf); - } -} - -static int cexecute(const char* command) { - int i; - hoc_returning = 0; - hoc_execstr(command); - i = hoc_returning; - hoc_returning = 0; - return i; -} - -#if !DOS -static void clrscr(void) {} -#endif - -static void undisplay(int imenu) { - int i; - if (egagrph != 0) { - xcursor(menusfirst[imenu]->row, 0); - for (i = menuslast[imenu]->row - menusfirst[imenu]->row + 2; i; i--) { - Printf("%80c\n", SPACE); - } - } else { - clrscr(); - } -} - -void hoc_menu_cleanup(void) { - current_menu = -1; -} diff --git a/src/oc/functabl.cpp b/src/oc/functabl.cpp index b66b803adc..cb4f430419 100644 --- a/src/oc/functabl.cpp +++ b/src/oc/functabl.cpp @@ -3,7 +3,7 @@ struct TableArg { int nsize; - double* argvec; /* if nil use min,max */ + double* argvec; /* if nullptr use min,max */ double min; double max; double frac; /* temporary storage */ diff --git a/src/oc/hoc.cpp b/src/oc/hoc.cpp index 0d9d027188..297e50375b 100644 --- a/src/oc/hoc.cpp +++ b/src/oc/hoc.cpp @@ -1,12 +1,12 @@ #include <../../nrnconf.h> - -#include "../nrnpython/nrnpython_config.h" #include "hoc.h" #include "hocstr.h" #include "equation.h" #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include #include #include "parse.hpp" @@ -16,10 +16,12 @@ #include "ocfunc.h" #include "ocmisc.h" #include "nrnmpi.h" +#include "nrnpy.h" #include "nrnfilewrap.h" #include "../nrniv/backtrace_utils.h" #include +#include #include #include #include @@ -33,7 +35,6 @@ char** nrn_global_argv; #if defined(USE_PYTHON) int use_python_interpreter = 0; -int (*p_nrnpython_start)(int); void (*p_nrnpython_finalize)(); #endif int nrn_inpython_; @@ -124,10 +125,6 @@ void add_profile(int i) {} void pr_profile(void) {} #endif -#ifdef MAC -#define READLINE 0 -#endif - #if OCSMALL #define READLINE 0 #endif @@ -198,7 +195,7 @@ const char** gargv; /* global argument list */ int gargc; static int c = '\n'; /* global for use by warning() */ -#if defined(WIN32) || MAC +#if defined(WIN32) void set_intset() { hoc_intset++; } @@ -220,7 +217,7 @@ static int backslash(int c); exit(i); } -#if defined(WIN32) || 
defined(MAC) +#if defined(WIN32) #define HAS_SIGPIPE 0 #else #define HAS_SIGPIPE 1 @@ -643,7 +640,6 @@ int yystart; void hoc_execerror_mes(const char* s, const char* t, int prnt) { /* recover from run-time error */ hoc_in_yyparse = 0; yystart = 1; - hoc_menu_cleanup(); hoc_errno_check(); if (debug_message_ || prnt) { hoc_warning(s, t); @@ -786,7 +782,6 @@ RETSIGTYPE fpecatch(int sig) /* catch floating point exceptions */ execerror("Floating point exception.", (char*) 0); } -#if HAVE_SIGSEGV RETSIGTYPE sigsegvcatch(int sig) /* segmentation violation probably due to arg type error */ { Fprintf(stderr, "Segmentation violation\n"); @@ -797,7 +792,6 @@ RETSIGTYPE sigsegvcatch(int sig) /* segmentation violation probably due to arg t } execerror("Aborting.", (char*) 0); } -#endif #if HAVE_SIGBUS RETSIGTYPE sigbuscatch(int sig) { @@ -1006,8 +1000,8 @@ void inputReadyThread() { void hoc_final_exit(void) { char* buf; #if defined(USE_PYTHON) - if (p_nrnpython_start) { - (*p_nrnpython_start)(0); + if (neuron::python::methods.interpreter_start) { + neuron::python::methods.interpreter_start(0); } #endif bbs_done(); @@ -1016,7 +1010,7 @@ void hoc_final_exit(void) { /* Don't close the plots for the sub-processes when they finish, by default they are then closed when the master process ends */ hoc_close_plot(); -#if READLINE && !defined(MINGW) && !defined(MAC) +#if READLINE && !defined(MINGW) rl_deprep_terminal(); #endif ivoc_cleanup(); @@ -1095,18 +1089,6 @@ int hoc_moreinput() { #endif } #endif // WIN32 -#if MAC - if (gargc == 0) { - fin = nrn_fw_set_stdin(); - infile = 0; - hoc_xopen_file_[0] = 0; -#if defined(USE_PYTHON) - return use_python_interpreter ? 0 : 1; -#else - return 1; -#endif - } -#endif // MAC if (fin && !nrn_fw_eq(fin, stdin)) { IGNORE(nrn_fw_fclose(fin)); } @@ -1171,6 +1153,12 @@ int hoc_moreinput() { hpfi = hoc_print_first_instance; fin = (NrnFILEWrap*) 0; hoc_print_first_instance = 0; + // This is processing HOC code via -c on the commandline. That HOC code could include + // nrnpython(...), so the Python interpreter needs to be configured appropriately for + // that (i.e. sys.path[0] = ''). + if (neuron::python::methods.interpreter_set_path) { + neuron::python::methods.interpreter_set_path({}); + } err = hoc_oc(hs->buf); hoc_print_first_instance = hpfi; hocstr_delete(hs); @@ -1182,12 +1170,11 @@ int hoc_moreinput() { if (!p_nrnpy_pyrun) { hoc_execerror("Python not available to interpret", infile); } - (*p_nrnpy_pyrun)(infile); + if (!p_nrnpy_pyrun(infile)) { + hoc_execerror("Python error", infile); + } return hoc_moreinput(); } else if ((fin = nrn_fw_fopen(infile, "r")) == (NrnFILEWrap*) 0) { -#if OCSMALL - hoc_menu_cleanup(); -#endif Fprintf(stderr, "%d %s: can't open %s\n", nrnmpi_myid_world, progname, infile); #if NRNMPI if (nrnmpi_numprocs_world > 1) { @@ -1202,6 +1189,17 @@ int hoc_moreinput() { hoc_xopen_file_ = static_cast(erealloc(hoc_xopen_file_, hoc_xopen_file_size_)); } strcpy(hoc_xopen_file_, infile); + // This is, unfortunately rather implicitly, how we trigger execution of HOC files on a + // commandline like `nrniv a.hoc b.hoc`. To make HOC treatment similar to Python treatment + // we would pass hoc_xopen_file_ here, which would imply that nrnpython("...") inside + // test.hoc sees sys.path[0] == "/dir/" when we run `nrniv /dir/test.hoc`, however it seems + // that legacy models (183300) assume that sys.path[0] == '' in this context, so we stick + // with that. 
There is no particular reason to follow Python conventions when launching HOC + // scripts, in contrast to Python scripts where we strive to make `nrniv foo.py` and + // `python foo.py` behave in the same way. + if (neuron::python::methods.interpreter_set_path) { + neuron::python::methods.interpreter_set_path({}); + } } return 1; } @@ -1213,9 +1211,7 @@ static SignalType signals[4]; static void set_signals(void) { signals[0] = signal(SIGINT, onintr); signals[1] = signal(SIGFPE, fpecatch); -#if HAVE_SIGSEGV signals[2] = signal(SIGSEGV, sigsegvcatch); -#endif #if HAVE_SIGBUS signals[3] = signal(SIGBUS, sigbuscatch); #endif @@ -1224,9 +1220,7 @@ static void set_signals(void) { static void restore_signals(void) { signals[0] = signal(SIGINT, signals[0]); signals[1] = signal(SIGFPE, signals[1]); -#if HAVE_SIGSEGV signals[2] = signal(SIGSEGV, signals[2]); -#endif #if HAVE_SIGBUS signals[3] = signal(SIGBUS, signals[3]); #endif @@ -1530,9 +1524,6 @@ int hoc_yyparse(void) { #ifdef WIN32 #define INTERVIEWS 1 #endif -#ifdef MAC -#define INTERVIEWS 1 -#endif int hoc_interviews = 0; #if INTERVIEWS @@ -1615,7 +1606,6 @@ static int event_hook(void) { #endif /* READLINE */ #endif /* INTERVIEWS */ -#if 1 || MAC /* On Mac combinations of /n /r /r/n require binary mode (otherwise /r/n is /n/n) @@ -1671,55 +1661,7 @@ static CHAR* fgets_unlimited_nltrans(HocStr* bufstr, NrnFILEWrap* f, int nltrans } return (CHAR*) 0; } -#endif -#if MAC -int hoc_get_line(void) { /* supports re-entry. fill cbuf with next line */ - if (*ctp) { - hoc_execerror("Internal error:", "Not finished with previous input line"); - } - ctp = cbuf; - *ctp = '\0'; - if (pipeflag == 3) { - nrn_inputbuf_getline(); - if (*ctp == '\0') { - return EOF; - } - } else if (pipeflag) { - if (hoc_strgets_need() > hoc_cbufstr->size) { - hocstr_resize(hoc_cbufstr, hoc_strgets_need()); - } - if (hoc_strgets(cbuf, hoc_cbufstr->size - 1) == (char*) 0) { - return EOF; - } - } else { - if (nrn_fw_wrap(fin, stdin) && hoc_interviews && !hoc_in_yyparse) { -#if MAC - for (;;) { - extern CHAR* hoc_console_buffer; - hoc_console_buffer = cbuf; - if (run_til_stdin()) { - // printf("%s", cbuf); - // strcpy(cbuf, hoc_console_buffer); - break; - } else { - return EOF; - } - } -#endif - } else if (hoc_fgets_unlimited(hoc_cbufstr, fin) == (CHAR*) 0) { - return EOF; - } - } - // printf("%d %s", lineno, cbuf); - errno = 0; - lineno++; - ctp = cbuf = hoc_cbufstr->buf; - hoc_ictp = 0; - return 1; -} - -#else int hoc_get_line(void) { /* supports re-entry. fill cbuf with next line */ if (*ctp) { hoc_execerror("Internal error:", "Not finished with previous input line"); @@ -1801,12 +1743,12 @@ int hoc_get_line(void) { /* supports re-entry. fill cbuf with next line */ return EOF; } } -#else +#else // READLINE #if INTERVIEWS if (nrn_fw_eq(fin, stdin) && hoc_interviews && !hoc_in_yyparse) { run_til_stdin()); } -#endif +#endif // INTERVIEWS #if defined(WIN32) if (nrn_fw_eq(fin, stdin)) { if (gets(cbuf) == (char*) 0) { @@ -1815,13 +1757,13 @@ int hoc_get_line(void) { /* supports re-entry. fill cbuf with next line */ } strcat(cbuf, "\n"); } else -#endif +#endif // WIN32 { if (hoc_fgets_unlimited(hoc_cbufstr, fin) == (char*) 0) { return EOF; } } -#endif +#endif // READLINE } errno = 0; lineno++; @@ -1829,7 +1771,6 @@ int hoc_get_line(void) { /* supports re-entry. 
fill cbuf with next line */ hoc_ictp = 0; return 1; } -#endif void hoc_help(void) { #if INTERVIEWS diff --git a/src/oc/hoc_init.cpp b/src/oc/hoc_init.cpp index e4e2ea5dfb..17675d7753 100644 --- a/src/oc/hoc_init.cpp +++ b/src/oc/hoc_init.cpp @@ -5,11 +5,12 @@ #include "parse.hpp" #include #include "equation.h" -#include "nrnunits_modern.h" +#include "nrnunits.h" #include "nrn_ansi.h" #include "ocfunc.h" +#include "oc_mcran4.hpp" extern void hoc_nrnmpi_init(); @@ -19,25 +20,13 @@ extern int numprocs(), myproc(), psync(); #if 0 extern int hoc_co(); #endif -#if DOS || defined(WIN32) /*|| defined(MAC)*/ +#if DOS || defined(WIN32) extern double erf(), erfc(); /* supplied by unix */ #endif #if defined(WIN32) extern void hoc_winio_show(int b); #endif -#if MAC -static double Fabs(double x) { - return (x > 0.) ? x : -x; -} -static double Erf(double x) { - return erf(x); -} -static double Erfc(double x) { - return erfc(x); -} -#endif - static struct { /* Keywords */ const char* name; int kval; @@ -90,19 +79,11 @@ static struct { /* Constants */ {"GAMMA", 0.57721566490153286060}, /* Euler */ {"DEG", 57.29577951308232087680}, /* deg/radian */ {"PHI", 1.61803398874989484820}, /* golden ratio */ - {nullptr, 0}}; - -/* Nov, 2017, from https://physics.nist.gov/cuu/Constants/index.html */ -/* also see FARADAY and gasconstant in ../nrnoc/eion.c */ -static struct { /* Modern, Legacy units constants */ - const char* name; - double cval[2]; -} uconsts[] = {{"FARADAY", {_faraday_codata2018, 96485.309}}, /*coulombs/mole*/ - {"R", {_gasconstant_codata2018, 8.31441}}, /*molar gas constant, joules/mole/deg-K*/ - {"Avogadro_constant", - {_avogadro_number_codata2018, 6.02214129e23}}, /* note that the legacy value in + {"FARADAY", _faraday_codata2018}, /*coulombs/mole*/ + {"R", _gasconstant_codata2018}, /*molar gas constant, joules/mole/deg-K*/ + {"Avogadro_constant", _avogadro_number_codata2018}, /* note that the legacy value in nrnunits.lib.in is 6.022169+23 */ - {0, {0., 0.}}}; + {nullptr, 0}}; static struct { /* Built-ins */ const char* name; @@ -116,15 +97,9 @@ static struct { /* Built-ins */ {"exp", hoc1_Exp}, /* checks argument */ {"sqrt", Sqrt}, /* checks argument */ {"int", integer}, -#if MAC - {"abs", Fabs}, - {"erf", Erf}, - {"erfc", Erfc}, -#else {"abs", fabs}, {"erf", erf}, {"erfc", erfc}, -#endif {0, 0}}; static struct { /* Builtin functions with multiple or variable args */ const char* name; @@ -155,7 +130,6 @@ static struct { /* Builtin functions with multiple or variable args */ {"sprint", hoc_Sprint}, {"graph", hoc_Graph}, {"graphmode", hoc_Graphmode}, - {"fmenu", hoc_fmenu}, {"lw", hoc_Lw}, {"getstr", hoc_Getstr}, {"strcmp", hoc_Strcmp}, @@ -250,9 +224,7 @@ static struct { /* functions that return an object */ } objfun_bltin[] = {{"object_pushed", hoc_object_pushed}, {nullptr, nullptr}}; double hoc_epsilon = 1.e-11; -double hoc_ac_; /*known to the interpreter to evaluate expressions with hoc_oc() */ -double* hoc_varpointer; /* executing hoc_pointer(&var) will put the address of - the variable in this location */ +double hoc_ac_; /*known to the interpreter to evaluate expressions with hoc_oc() */ double hoc_cross_x_, hoc_cross_y_; /* For Graph class in ivoc */ double hoc_default_dll_loaded_; @@ -260,32 +232,30 @@ double hoc_default_dll_loaded_; char* neuron_home; const char* nrn_mech_dll; /* but actually only for NEURON mswin and linux */ int nrn_noauto_dlopen_nrnmech; /* 0 except when binary special. 
*/ -int use_mcell_ran4_; int nrn_xopen_broadcast_; -int _nrnunit_use_legacy_; /* allow dynamic switching between legacy and modern units */ void hoc_init(void) /* install constants and built-ins table */ { int i; Symbol* s; -#if defined(DYNAMIC_UNITS_USE_LEGACY_DEFAULT) - _nrnunit_use_legacy_ = 1; /* legacy as default */ -#else - _nrnunit_use_legacy_ = 0; /* new units as default */ -#endif - { /* but check the environment variable if it exists */ + { const char* envvar = getenv("NRNUNIT_USE_LEGACY"); if (envvar) { + hoc_warning( + "NRNUNIT_USE_LEGACY is deprecated as only modern units are supported with NEURON " + "version >= 9", + "If you want to still use legacy unit you can use a NEURON version < 9"); if (strcmp(envvar, "1") == 0) { - _nrnunit_use_legacy_ = 1; - } else if (strcmp(envvar, "0") == 0) { - _nrnunit_use_legacy_ = 0; + hoc_execerror( + "'NRNUNIT_USE_LEGACY=1' is set but legacy units support is removed with NEURON " + "version >= 9", + nullptr); } } } - use_mcell_ran4_ = 0; + set_use_mcran4(false); nrn_xopen_broadcast_ = 255; extern void hoc_init_space(void); hoc_init_space(); @@ -297,12 +267,6 @@ void hoc_init(void) /* install constants and built-ins table */ s->u.pval = &consts[i].cval; s->subtype = USERDOUBLE; } - for (i = 0; uconsts[i].name; i++) { - s = install(uconsts[i].name, UNDEF, uconsts[i].cval[0], &symlist); - s->type = VAR; - s->u.pval = &uconsts[i].cval[0]; - s->subtype = DYNAMICUNITS; - } for (i = 0; builtins[i].name; i++) { s = install(builtins[i].name, BLTIN, 0.0, &symlist); s->u.ptr = builtins[i].func; @@ -355,23 +319,16 @@ void hoc_unix_mac_pc(void) { #if defined(DARWIN) hoc_pushx(4.); #else -#if MAC - hoc_pushx(2.); -#else #if defined(WIN32) hoc_pushx(3.); #else hoc_pushx(1.); #endif #endif -#endif } void hoc_show_winio(void) { int b; b = (int) chkarg(1, 0., 1.); -#if MAC - hoc_sioux_show(b); -#endif #if defined(WIN32) hoc_winio_show(b); #endif diff --git a/src/oc/hoc_oop.cpp b/src/oc/hoc_oop.cpp index 87d4a64b19..c56f418654 100644 --- a/src/oc/hoc_oop.cpp +++ b/src/oc/hoc_oop.cpp @@ -1,6 +1,8 @@ #include <../../nrnconf.h> #include #include +#include + #include "hocstr.h" #include "parse.hpp" #include "hocparse.h" @@ -9,20 +11,14 @@ #include "hoclist.h" #include "nrn_ansi.h" #include "nrnmpi.h" +#include "nrnpy.h" #include "nrnfilewrap.h" -#include #include "ocfunc.h" #define PDEBUG 0 -#if USE_PYTHON -Symbol* nrnpy_pyobj_sym_; -void (*nrnpy_py2n_component)(Object* o, Symbol* s, int nindex, int isfunc); -void (*nrnpy_hpoasgn)(Object* o, int type); -void* (*nrnpy_opaque_obj2pyobj_p_)(Object*); -#endif - +Symbol* nrnpy_pyobj_sym_{}; #include "section.h" #include "nrniv_mf.h" int section_object_seen; @@ -34,8 +30,9 @@ static int connect_obsec_; static void call_constructor(Object*, Symbol*, int); static void free_objectdata(Objectdata*, cTemplate*); -int hoc_print_first_instance = 1; +std::vector py_exposed_classes{}; +int hoc_print_first_instance = 1; int hoc_max_builtin_class_id = -1; static Symbol* hoc_obj_; @@ -951,6 +948,22 @@ static void range_suffix(Symbol* sym, int nindex, int narg) { hoc_execerror(sym->name, "section property can't have argument"); } hoc_pushs(sym); + } else if (sym->type == RANGEOBJ) { + // must return NMODLObject on stack + assert(sym->subtype == NMODLRANDOM); // the only possibility at the moment + double x{0.5}; + if (narg) { + if (narg > 1) { + hoc_execerr_ext("%s range object can have only one arg length parameter", + sym->name); + } + x = xpop(); + } + Section* sec{nrn_sec_pop()}; + auto const i = node_index(sec, x); + Prop* m = 
nrn_mechanism_check(sym->u.rng.type, sec, i); + Object* ob = nrn_nmodlrandom_wrap(m, sym); + hoc_push_object(ob); } else { hoc_execerror(sym->name, "suffix not a range variable or section property"); } @@ -1024,7 +1037,7 @@ void hoc_object_component() { /* note obp is now on stack twice */ /* hpoasgn will pop both */ } else { - (*nrnpy_py2n_component)(obp, sym0, nindex, isfunc); + neuron::python::methods.py2n_component(obp, sym0, nindex, isfunc); } return; } @@ -1266,9 +1279,19 @@ void hoc_object_component() { hoc_nopop(); /* get rid of iterator statement context */ break; } + case RANGEOBJ: { + assert(sym->subtype == NMODLRANDOM); + if (sym->subtype == NMODLRANDOM) { // NMODL NEURON block RANDOM var + // RANGE type. The void* is a nrnran123_State*. Wrap in a + // NMODLRandom and push_object + Object* o = nrn_pntproc_nmodlrandom_wrap(obp->u.this_pointer, sym); + hoc_pop_defer(); + hoc_push_object(o); + } + break; + } default: if (cplus) { - double* pd; if (nindex) { if (!ISARRAY(sym) || sym->arayinfo->nsub != nindex) { hoc_execerror(sym->name, ":not right number of subscripts"); @@ -1290,9 +1313,9 @@ void hoc_object_component() { } hoc_pushs(sym); (*obp->ctemplate->steer)(obp->u.this_pointer); - pd = hoc_pxpop(); + auto dh = hoc_pop_handle(); hoc_pop_defer(); - hoc_pushpx(pd); + hoc_push(std::move(dh)); } else { hoc_execerror(sym->name, ": can't push that type onto stack"); } @@ -1364,7 +1387,7 @@ void hoc_ob_pointer(void) { } else { x = .5; } - hoc_pushpx(nrn_rangepointer(sec, sym, x)); + hoc_push(nrn_rangepointer(sec, sym, x)); } else if (d_sym->type == VAR && d_sym->subtype == USERPROPERTY) { hoc_pushpx(cable_prop_eval_pointer(hoc_spop())); } else { @@ -1406,7 +1429,11 @@ void hoc_object_asgn() { } *pd = d; } else { - nrn_rangeconst(sec, sym, &d, op); + nrn_rangeconst(sec, + sym, + neuron::container::data_handle{neuron::container::do_not_search, + &d}, + op); } hoc_pushx(d); return; @@ -1460,7 +1487,7 @@ void hoc_object_asgn() { if (op) { hoc_execerror("Invalid assignment operator for PythonObject", nullptr); } - (*nrnpy_hpoasgn)(o, type1); + neuron::python::methods.hpoasgn(o, type1); } break; #endif default: @@ -1563,13 +1590,13 @@ void hoc_endtemplate(Symbol* t) { } } -void class2oc(const char* name, - void* (*cons)(Object*), - void (*destruct)(void*), - Member_func* m, - int (*checkpoint)(void**), - Member_ret_obj_func* mobjret, - Member_ret_str_func* strret) { +void class2oc_base(const char* name, + void* (*cons)(Object*), + void (*destruct)(void*), + Member_func* m, + int (*checkpoint)(void**), + Member_ret_obj_func* mobjret, + Member_ret_str_func* strret) { extern int hoc_main1_inited_; Symbol *tsym, *s; cTemplate* t; @@ -1578,6 +1605,7 @@ void class2oc(const char* name, if (hoc_lookup(name)) { hoc_execerror(name, "already being used as a name"); } + tsym = hoc_install(name, UNDEF, 0.0, &hoc_symlist); tsym->subtype = CPLUSOBJECT; hoc_begintemplate(tsym); @@ -1589,6 +1617,7 @@ void class2oc(const char* name, t->destructor = destruct; t->steer = 0; t->checkpoint = checkpoint; + if (m) for (i = 0; m[i].name; ++i) { s = hoc_install(m[i].name, FUNCTION, 0.0, &hoc_symlist); @@ -1611,6 +1640,17 @@ void class2oc(const char* name, } +void class2oc(const char* name, + void* (*cons)(Object*), + void (*destruct)(void*), + Member_func* m, + int (*checkpoint)(void**), + Member_ret_obj_func* mobjret, + Member_ret_str_func* strret) { + class2oc_base(name, cons, destruct, m, checkpoint, mobjret, strret); + py_exposed_classes.push_back(name); +} + Symbol* hoc_decl(Symbol* s) { Symbol* ss; if 
(templatestackp == templatestack) { @@ -2074,7 +2114,7 @@ void check_obj_type(Object* obj, const char* type_name) { if (obj) { Sprintf(buf, "object type is %s instead of", obj->ctemplate->sym->name); } else { - Sprintf(buf, "object type is nil instead of"); + Sprintf(buf, "object type is nullptr instead of"); } hoc_execerror(buf, type_name); } @@ -2090,11 +2130,9 @@ int is_obj_type(Object* obj, const char* type_name) { void* nrn_opaque_obj2pyobj(Object* ho) { -#if USE_PYTHON // The PyObject* reference is not incremented. Use only as last resort - if (nrnpy_opaque_obj2pyobj_p_) { - return (*nrnpy_opaque_obj2pyobj_p_)(ho); + if (neuron::python::methods.opaque_obj2pyobj) { + return neuron::python::methods.opaque_obj2pyobj(ho); } -#endif return nullptr; } diff --git a/src/oc/hocdec.h b/src/oc/hocdec.h index 3ee0f1acaa..944d4e9d72 100644 --- a/src/oc/hocdec.h +++ b/src/oc/hocdec.h @@ -3,7 +3,7 @@ #define hocdec_h #define INCLUDEHOCH 1 - +#include "neuron/container/generic_data_handle.hpp" #include "nrnapi.h" #include "hocassrt.h" /* hoc_execerror instead of abort */ #include "nrnassrt.h" /* assert in case of side effects (eg. scanf) */ @@ -24,7 +24,6 @@ struct Symbol; struct Arrayinfo; struct Proc; struct Symlist; -struct Datum; struct cTemplate; union Objectdata; struct Object; @@ -143,42 +142,9 @@ struct Symbol { /* symbol table entry */ using hoc_List = hoc_Item; -struct Datum { /* interpreter stack type */ - template - [[nodiscard]] T& literal_value() { - static_assert(std::is_trivially_copyable_v && std::is_trivially_destructible_v && - sizeof(T) <= sizeof(this)); - return *reinterpret_cast(&*this); // Eww - } - - template - explicit operator T() = delete; - - template - T get() { - return literal_value(); - } - - template - Datum& operator=(const T& value) { - literal_value() = value; - return *this; - } - - private: - union { - double val; - Symbol* sym; - int i; - double* pval; /* first used with Eion in NEURON */ - Object** pobj; - Object* obj; /* sections keep this to construct a name */ - char** pstr; - hoc_Item* itm; - hoc_List* lst; - void* _pvoid; /* not used on stack, see nrnoc/point.cpp */ - }; -}; +/** @brief Type of pdata in mechanisms. 
+ */ +using Datum = neuron::container::generic_data_handle; struct cTemplate { Symbol* sym; @@ -194,7 +160,7 @@ struct cTemplate { void* observers; /* hook to c++ ClassObservable */ void* (*constructor)(struct Object*); void (*destructor)(void*); - void (*steer)(void*); /* normally nil */ + void (*steer)(void*); /* normally nullptr */ int (*checkpoint)(void**); }; @@ -258,10 +224,6 @@ struct HocParmUnits { /* units for symbol values */ #include "oc_ansi.h" -void* emalloc(size_t n); -void* ecalloc(size_t n, size_t size); -void* erealloc(void* ptr, size_t n); - extern Inst *hoc_progp, *hoc_progbase, *hoc_prog, *hoc_prog_parse_recover; extern Inst* hoc_pc; diff --git a/src/oc/macprt.cpp b/src/oc/macprt.cpp index cfe987cd04..683bb36a6b 100644 --- a/src/oc/macprt.cpp +++ b/src/oc/macprt.cpp @@ -8,10 +8,6 @@ #include "gui-redirect.h" - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - extern void debugfile(const char*, ...); extern int oc_print_from_dll(char*); extern void single_event_run(); @@ -182,7 +178,6 @@ hoc_axis() { plt(int mode, double x, double y) {} -hoc_menu_cleanup() {} initplot() {} diff --git a/src/oc/math.cpp b/src/oc/math.cpp index 3f9739327f..0469c42b3b 100644 --- a/src/oc/math.cpp +++ b/src/oc/math.cpp @@ -19,10 +19,13 @@ int hoc_errno_count; #ifdef MINGW static const auto errno_enabled = true; static const auto check_fe_except = false; -#elif defined(NVHPC_CHECK_FE_EXCEPTIONS) +#elif defined(NRN_CHECK_FE_EXCEPTIONS) static constexpr auto errno_enabled = false; static constexpr auto check_fe_except = true; +#ifdef math_errhandling +// LLVM-based Intel compiles don't define math_errhandling when -fp-model=fast static_assert(math_errhandling & MATH_ERREXCEPT); +#endif #else static const auto errno_enabled = math_errhandling & MATH_ERRNO; static const auto check_fe_except = !errno_enabled && math_errhandling & MATH_ERREXCEPT; diff --git a/src/oc/mech_api.h b/src/oc/mech_api.h index e0b99c905e..8d3ed11ecd 100644 --- a/src/oc/mech_api.h +++ b/src/oc/mech_api.h @@ -17,4 +17,6 @@ #include "oc_ansi.h" #include "nrnversionmacros.h" #include "scoplib.h" -#include "treeset.h" + +#include // nocmodl uses std::isnan +#include // nocmodl uses std::cerr diff --git a/src/oc/memory.cpp b/src/oc/memory.cpp new file mode 100644 index 0000000000..44a6b93e51 --- /dev/null +++ b/src/oc/memory.cpp @@ -0,0 +1,112 @@ +#include "memory.hpp" + +#include +#include + +// For hoc_warning and hoc_execerror +#include "oc_ansi.h" + +#if HAVE_POSIX_MEMALIGN +#define HAVE_MEMALIGN 1 +#endif +#if defined(DARWIN) /* posix_memalign seems not to work on Darwin 10.6.2 */ +#undef HAVE_MEMALIGN +#endif +#if HAVE_MEMALIGN +#undef _XOPEN_SOURCE /* avoid warnings about redefining this */ +#define _XOPEN_SOURCE 600 +#endif + +static bool emalloc_error = false; + +void* hoc_Emalloc(std::size_t n) { /* check return from malloc */ + void* p = std::malloc(n); + if (p == nullptr) { + emalloc_error = true; + } + return p; +} + +void* hoc_Ecalloc(std::size_t n, std::size_t size) { /* check return from calloc */ + if (n == 0) { + return nullptr; + } + void* p = std::calloc(n, size); + if (p == nullptr) { + emalloc_error = true; + } + return p; +} + +void* hoc_Erealloc(void* ptr, std::size_t size) { /* check return from realloc */ + if (!ptr) { + return hoc_Emalloc(size); + } + void* p = std::realloc(ptr, size); + if (p == nullptr) { + std::free(ptr); + emalloc_error = true; + } + return p; +} + +void hoc_malchk(void) { + if (emalloc_error) { + 
emalloc_error = false; + hoc_execerror("out of memory", nullptr); + } +} + +void* emalloc(std::size_t n) { + void* p = hoc_Emalloc(n); + if (emalloc_error) { + hoc_malchk(); + } + return p; +} + +void* ecalloc(std::size_t n, std::size_t size) { + void* p = hoc_Ecalloc(n, size); + if (emalloc_error) { + hoc_malchk(); + } + return p; +} + +void* erealloc(void* ptr, std::size_t size) { + void* p = hoc_Erealloc(ptr, size); + if (emalloc_error) { + hoc_malchk(); + } + return p; +} + +void* nrn_cacheline_alloc(void** memptr, std::size_t size) { +#if HAVE_MEMALIGN + static bool memalign_is_working = true; + if (memalign_is_working) { + if (posix_memalign(memptr, 64, size) != 0) { + hoc_warning("posix_memalign not working, falling back to using malloc\n", nullptr); + memalign_is_working = false; + *memptr = hoc_Emalloc(size); + hoc_malchk(); + } + } else +#endif + { + *memptr = hoc_Emalloc(size); + hoc_malchk(); + } + return *memptr; +} + +void* nrn_cacheline_calloc(void** memptr, std::size_t nmemb, std::size_t size) { +#if HAVE_MEMALIGN + nrn_cacheline_alloc(memptr, nmemb * size); + std::memset(*memptr, 0, nmemb * size); +#else + *memptr = hoc_Ecalloc(nmemb, size); + hoc_malchk(); +#endif + return *memptr; +} diff --git a/src/oc/memory.hpp b/src/oc/memory.hpp new file mode 100644 index 0000000000..38960e0eba --- /dev/null +++ b/src/oc/memory.hpp @@ -0,0 +1,18 @@ +#pragma once + +// Some functions here are prepend with 'hoc_' but they are unrelated to hoc itself. +#include + +/* check return from malloc */ +void* hoc_Emalloc(std::size_t n); +void* hoc_Ecalloc(std::size_t n, std::size_t size); +void* hoc_Erealloc(void* ptr, std::size_t size); + +void hoc_malchk(void); + +void* emalloc(std::size_t n); +void* ecalloc(std::size_t n, std::size_t size); +void* erealloc(void* ptr, std::size_t size); + +void* nrn_cacheline_alloc(void** memptr, std::size_t size); +void* nrn_cacheline_calloc(void** memptr, std::size_t nmemb, std::size_t size); diff --git a/src/oc/mswinprt.cpp b/src/oc/mswinprt.cpp index 4449fdd73b..905aa101b2 100644 --- a/src/oc/mswinprt.cpp +++ b/src/oc/mswinprt.cpp @@ -2,7 +2,9 @@ #ifdef MINGW +#ifdef HAVE_UNISTD_H #include +#endif #include #include #include @@ -14,10 +16,6 @@ #include "gui-redirect.h" - -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); - extern char* neuron_home; extern char* neuron_home_dos; extern void hoc_quit(); @@ -150,11 +148,9 @@ void hoc_win_exec(void) { void hoc_winio_show(int b) {} -#if !defined(__MWERKS__) int getpid() { return 1; } -#endif void hoc_Plt() { TRY_GUI_REDIRECT_DOUBLE("plt", NULL); diff --git a/src/oc/nrnassrt.h b/src/oc/nrnassrt.h index d7b5a9338d..abd026ae66 100644 --- a/src/oc/nrnassrt.h +++ b/src/oc/nrnassrt.h @@ -12,44 +12,21 @@ has side effects which need to be executed regardles of NDEBUG. 
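The point of nrn_assert, as the surrounding comment says, is that the tested expression may have side effects and therefore must be evaluated even when NDEBUG is defined; the new do { ... } while (0) wrapper additionally makes each expansion a single statement, so the macro stays safe in an unbraced if/else. A small usage sketch, assuming an invented I/O helper that is not part of this diff:

#include <cstdio>
#include "nrnassrt.h"

static void read_header(std::FILE* f) {
    char buf[256];
    // fgets must run in release builds too, so a plain assert() would be wrong;
    // nrn_assert always evaluates its argument and aborts (or calls
    // hoc_execerror in the hocassrt.h form) if it is false.
    nrn_assert(std::fgets(buf, sizeof buf, f) != nullptr);
}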
#if defined(hocassrt_h) /* hoc_execerror form */ #include "oc_ansi.h" -#if defined(__STDC__) #define nrn_assert(ex) \ - { \ + do { \ if (!(ex)) { \ fprintf(stderr, "Assertion failed: file %s, line %d\n", __FILE__, __LINE__); \ - hoc_execerror(#ex, (char*) 0); \ + hoc_execerror(#ex, nullptr); \ } \ - } -#else -#define nrn_assert(ex) \ - { \ - if (!(ex)) { \ - fprintf(stderr, "Assertion failed: file %s, line %d\n", __FILE__, __LINE__); \ - hoc_execerror("ex", (char*) 0); \ - } \ - } -#endif - + } while (0) #else /* abort form */ - -#if defined(__STDC__) #define nrn_assert(ex) \ - { \ + do { \ if (!(ex)) { \ fprintf(stderr, "Assertion failed: file %s, line %d\n", __FILE__, __LINE__); \ abort(); \ } \ - } -#else -#define nrn_assert(ex) \ - { \ - if (!(ex)) { \ - fprintf(stderr, "Assertion failed: file %s, line %d\n", __FILE__, __LINE__); \ - abort(); \ - } \ - } -#endif - + } while (0) #endif diff --git a/src/oc/nrnisaac.cpp b/src/oc/nrnisaac.cpp deleted file mode 100644 index 1aa0050339..0000000000 --- a/src/oc/nrnisaac.cpp +++ /dev/null @@ -1,39 +0,0 @@ -#include <../../nrnconf.h> -#include -#if HAVE_SYS_TYPES_H -#include -#endif -#include -#include -#include "hocdec.h" - -typedef struct isaac64_state Rng; - -void* nrnisaac_new(void) { - Rng* rng; - rng = (Rng*) hoc_Emalloc(sizeof(Rng)); - hoc_malchk(); - return (void*) rng; -} - -void nrnisaac_delete(void* v) { - free(v); -} - -void nrnisaac_init(void* v, unsigned long int seed) { - isaac64_init((Rng*) v, seed); -} - -double nrnisaac_dbl_pick(void* v) { - Rng* rng = (Rng*) v; - double x = isaac64_dbl32(rng); - /*printf("dbl %d %d %d %d %g\n", sizeof(ub8), sizeof(ub4), sizeof(ub2), sizeof(ub1), x);*/ - return x; -} - -uint32_t nrnisaac_uint32_pick(void* v) { - Rng* rng = (Rng*) v; - double x = isaac64_uint32(rng); - /*printf("uint32 %g\n", x);*/ - return x; -} diff --git a/src/oc/nrnisaac.h b/src/oc/nrnisaac.h deleted file mode 100644 index caaa7f07fe..0000000000 --- a/src/oc/nrnisaac.h +++ /dev/null @@ -1,17 +0,0 @@ -#ifndef nrnisaac_h -#define nrnisaac_h - -#include <../../nrnconf.h> -#if defined(HAVE_STDINT_H) -#include -#endif - - -void* nrnisaac_new(void); -void nrnisaac_delete(void* rng); -void nrnisaac_init(void* rng, unsigned long int seed); -double nrnisaac_dbl_pick(void* rng); -uint32_t nrnisaac_uint32_pick(void* rng); - - -#endif diff --git a/src/oc/nrnmpi.h b/src/oc/nrnmpi.h index 69e4ee75a4..259e4fa094 100644 --- a/src/oc/nrnmpi.h +++ b/src/oc/nrnmpi.h @@ -7,20 +7,17 @@ not easily coexist. ParallelContext.subworlds(nsmall) divides the world into nrnmpi_numprocs_world/small subworlds of size nsmall. */ -extern int nrnmpi_numprocs_world; /* size of entire world. total size of all subworlds */ -extern int nrnmpi_myid_world; /* rank in entire world */ -extern int nrnmpi_numprocs; /* size of subworld */ -extern int nrnmpi_myid; /* rank in subworld */ -extern int nrnmpi_numprocs_bbs; /* number of subworlds */ -extern int nrnmpi_myid_bbs; /* rank in nrn_bbs_comm of rank 0 of a subworld */ -extern int nrnmpi_subworld_change_cnt; /* increment from within void nrnmpi_subworld_size(int n) */ -extern int nrnmpi_subworld_id; /* subworld index on all ranks */ -extern int nrnmpi_numprocs_subworld; /* number of ranks in subworld on all ranks */ - -typedef struct { +extern int nrnmpi_numprocs_world; /* size of entire world. 
total size of all subworlds */ +extern int nrnmpi_myid_world; /* rank in entire world */ +extern int nrnmpi_numprocs; /* size of subworld */ +extern int nrnmpi_myid; /* rank in subworld */ +extern int nrnmpi_numprocs_bbs; /* number of subworlds */ +extern int nrnmpi_myid_bbs; /* rank in nrn_bbs_comm of rank 0 of a subworld */ + +struct NRNMPI_Spike { int gid; double spiketime; -} NRNMPI_Spike; +}; #if NRNMPI diff --git a/src/oc/nrnmpiuse.h.in b/src/oc/nrnmpiuse.h.in index 8e0877c1c2..4db1c75155 100755 --- a/src/oc/nrnmpiuse.h.in +++ b/src/oc/nrnmpiuse.h.in @@ -4,9 +4,6 @@ /* define to 1 if you want MPI specific features activated */ #undef NRNMPI -/* define to 1 if you want parallel distributed cells (and gap junctions) */ -#undef PARANEURON - /* define to 1 if you want mpi dynamically loaded instead of linked normally */ #undef NRNMPI_DYNAMICLOAD @@ -19,7 +16,4 @@ /* define if needed */ #undef ALWAYS_CALL_MPI_INIT -/* Number of times to retry a failed open */ -#undef FILE_OPEN_RETRY - #endif diff --git a/src/oc/nrnran123.cpp b/src/oc/nrnran123.cpp deleted file mode 100644 index d9db49351c..0000000000 --- a/src/oc/nrnran123.cpp +++ /dev/null @@ -1,134 +0,0 @@ -#include <../../nrnconf.h> -#include -#include -#include -#include -#include -#include - -static const double SHIFT32 = 1.0 / 4294967297.0; /* 1/(2^32 + 1) */ - -static philox4x32_key_t k = {{0}}; - -struct nrnran123_State { - philox4x32_ctr_t c; - philox4x32_ctr_t r; - char which_; -}; - -void nrnran123_set_globalindex(uint32_t gix) { - k.v[0] = gix; -} - -/* if one sets the global, one should reset all the stream sequences. */ -uint32_t nrnran123_get_globalindex() { - return k.v[0]; -} - -nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2) { - return nrnran123_newstream3(id1, id2, 0); -} -nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3) { - nrnran123_State* s; - s = (nrnran123_State*) ecalloc(sizeof(nrnran123_State), 1); - s->c.v[1] = id3; - s->c.v[2] = id1; - s->c.v[3] = id2; - nrnran123_setseq(s, 0, 0); - return s; -} - -void nrnran123_deletestream(nrnran123_State* s) { - free(s); -} - -void nrnran123_getseq(nrnran123_State* s, uint32_t* seq, char* which) { - *seq = s->c.v[0]; - *which = s->which_; -} - -void nrnran123_setseq(nrnran123_State* s, uint32_t seq, char which) { - if (which > 3 || which < 0) { - s->which_ = 0; - } else { - s->which_ = which; - } - s->c.v[0] = seq; - s->r = philox4x32(s->c, k); -} - -void nrnran123_getids(nrnran123_State* s, uint32_t* id1, uint32_t* id2) { - *id1 = s->c.v[2]; - *id2 = s->c.v[3]; -} - -void nrnran123_getids3(nrnran123_State* s, uint32_t* id1, uint32_t* id2, uint32_t* id3) { - *id3 = s->c.v[1]; - *id1 = s->c.v[2]; - *id2 = s->c.v[3]; -} - -uint32_t nrnran123_ipick(nrnran123_State* s) { - uint32_t rval; - char which = s->which_; - assert(which < 4); - rval = s->r.v[which++]; - if (which > 3) { - which = 0; - s->c.v[0]++; - s->r = philox4x32(s->c, k); - } - s->which_ = which; - return rval; -} - -double nrnran123_dblpick(nrnran123_State* s) { - return nrnran123_uint2dbl(nrnran123_ipick(s)); -} - -double nrnran123_negexp(nrnran123_State* s) { - /* min 2.3283064e-10 to max 22.18071 */ - return -log(nrnran123_dblpick(s)); -} - -/* At cost of a cached value we could compute two at a time. */ -/* But that would make it difficult to transfer to coreneuron for t > 0 */ -double nrnran123_normal(nrnran123_State* s) { - double w, x, y; - double u1, u2; - do { - u1 = nrnran123_dblpick(s); - u2 = nrnran123_dblpick(s); - u1 = 2. * u1 - 1.; - u2 = 2. 
* u2 - 1.; - w = (u1 * u1) + (u2 * u2); - } while (w > 1); - - y = sqrt((-2. * log(w)) / w); - x = u1 * y; - return x; -} - -nrnran123_array4x32 nrnran123_iran(uint32_t seq, uint32_t id1, uint32_t id2) { - return nrnran123_iran3(seq, id1, id2, 0); -} -nrnran123_array4x32 nrnran123_iran3(uint32_t seq, uint32_t id1, uint32_t id2, uint32_t id3) { - nrnran123_array4x32 a; - philox4x32_ctr_t c; - c.v[0] = seq; - c.v[1] = id3; - c.v[2] = id1; - c.v[3] = id2; - philox4x32_ctr_t r = philox4x32(c, k); - a.v[0] = r.v[0]; - a.v[1] = r.v[1]; - a.v[2] = r.v[2]; - a.v[3] = r.v[3]; - return a; -} - -double nrnran123_uint2dbl(uint32_t u) { - /* 0 to 2^32-1 transforms to double value in open (0,1) interval */ - /* min 2.3283064e-10 to max (1 - 2.3283064e-10) */ - return ((double) u + 1.0) * SHIFT32; -} diff --git a/src/oc/nrnran123.h b/src/oc/nrnran123.h deleted file mode 100644 index 0a0acb6a71..0000000000 --- a/src/oc/nrnran123.h +++ /dev/null @@ -1,62 +0,0 @@ -#ifndef nrnran123_h -#define nrnran123_h - -/* interface to Random123 */ -/* http://www.thesalmons.org/john/random123/papers/random123sc11.pdf */ - -/* -The 4x32 generators utilize a uint32x4 counter and uint32x4 key to transform -into an almost cryptographic quality uint32x4 random result. -There are many possibilites for balancing the sharing of the internal -state instances while reserving a uint32 counter for the stream sequence -and reserving other portions of the counter vector for stream identifiers -and global index used by all streams. - -We currently provide a single instance by default in which the policy is -to use the 0th counter uint32 as the stream sequence, words 2, 3 and 4 as the -stream identifier, and word 0 of the key as the global index. Unused words -are constant uint32 0. - -It is also possible to use Random123 directly without reference to this -interface. See Random123-1.02/docs/html/index.html -of the full distribution available from -http://www.deshawresearch.com/resources_random123.html -*/ - -#include - - -typedef struct nrnran123_State nrnran123_State; - -typedef struct nrnran123_array4x32 { - uint32_t v[4]; -} nrnran123_array4x32; - -/* global index. eg. 
run number */ -/* all generator instances share this global index */ -extern void nrnran123_set_globalindex(uint32_t gix); -extern uint32_t nrnran123_get_globalindex(); - -/* minimal data stream */ -extern nrnran123_State* nrnran123_newstream(uint32_t id1, uint32_t id2); -extern nrnran123_State* nrnran123_newstream3(uint32_t id1, uint32_t id2, uint32_t id3); -extern void nrnran123_deletestream(nrnran123_State*); -extern void nrnran123_getseq(nrnran123_State*, uint32_t* seq, char* which); -extern void nrnran123_setseq(nrnran123_State*, uint32_t seq, char which); -extern void nrnran123_getids(nrnran123_State*, uint32_t* id1, uint32_t* id2); -extern void nrnran123_getids3(nrnran123_State*, uint32_t* id1, uint32_t* id2, uint32_t* id3); - -extern double nrnran123_negexp(nrnran123_State*); /* mean 1.0 */ -extern uint32_t nrnran123_ipick(nrnran123_State*); /* uniform 0 to 2^32-1 */ -extern double nrnran123_dblpick(nrnran123_State*); /* uniform open interval (0,1)*/ -/* nrnran123_dblpick minimum value is 2.3283064e-10 and max value is 1-min */ - -/* nrnran123_negexp min value is 2.3283064e-10, max is 22.18071 */ -extern double nrnran123_normal(nrnran123_State*); /* mean 0.0, std 1.0 */ - -/* more fundamental (stateless) (though the global index is still used) */ -extern nrnran123_array4x32 nrnran123_iran(uint32_t seq, uint32_t id1, uint32_t id2); -extern nrnran123_array4x32 nrnran123_iran3(uint32_t seq, uint32_t id1, uint32_t id2, uint32_t id3); -extern double nrnran123_uint2dbl(uint32_t); - -#endif diff --git a/src/oc/nrnunits.h b/src/oc/nrnunits.h new file mode 100644 index 0000000000..e8f777a686 --- /dev/null +++ b/src/oc/nrnunits.h @@ -0,0 +1,30 @@ +#ifndef nrnunits_modern_h +#define nrnunits_modern_h + +/** + NMODL translated MOD files get unit constants typically from + share/lib/nrnunits.lib. But there were other source files that + hardcode some of the constants. Here we gather a few modern units into + a single place (but, unfortunately, also in nrnunits.lib). + + These come from https://physics.nist.gov/cuu/Constants/index.html. + Termed the "2018 CODATA recommended values", they became available + on 20 May 2019 and replace the 2014 CODATA set. + + See oc/hoc_init.c, nrnoc/eion.c, nrniv/kschan.h +**/ + + +constexpr double _electron_charge_codata2018 = 1.602176634e-19; /* coulomb exact*/ +constexpr double _avogadro_number_codata2018 = 6.02214076e+23; /* exact */ +constexpr double _boltzmann_codata2018 = 1.380649e-23; /* joule/K exact */ +constexpr double _faraday_codata2018 = _electron_charge_codata2018 * + _avogadro_number_codata2018; /* 96485.33212... coulomb/mol */ +constexpr double _gasconstant_codata2018 = + _boltzmann_codata2018 * _avogadro_number_codata2018; /* 8.314462618... joule/mol-K */ + +/* e/k in K/millivolt */ +constexpr double _e_over_k_codata2018 = .001 * _electron_charge_codata2018 / + _boltzmann_codata2018; /* 11.604518... K/mV */ + +#endif /* nrnunits_modern_h */ diff --git a/src/oc/nrnunits_modern.h b/src/oc/nrnunits_modern.h deleted file mode 100644 index 50c26b3ae9..0000000000 --- a/src/oc/nrnunits_modern.h +++ /dev/null @@ -1,31 +0,0 @@ -#ifndef nrnunits_modern_h -#define nrnunits_modern_h - -/** - NMODL translated MOD files get unit constants typically from - share/lib/nrnunits.lib.in. But there were other source files that - hardcode some of the constants. Here we gather a few modern units into - a single place (but, unfortunately, also in nrnunits.lib.in). Legacy units - cannot be gathered here because they can differ slightly from place to place. 
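As a quick numerical check of the constexpr definitions in the new nrnunits.h, the composed values reproduce the figures quoted in its own comments; the static_assert below is only an illustration and is not part of either header:

// _faraday_codata2018     = 1.602176634e-19 * 6.02214076e23       ~ 96485.33212 coulomb/mol
// _gasconstant_codata2018 = 1.380649e-23    * 6.02214076e23       ~ 8.314462618 joule/mol-K
// _e_over_k_codata2018    = .001 * 1.602176634e-19 / 1.380649e-23 ~ 11.604518   K/mV
#include "nrnunits.h"
static_assert(_faraday_codata2018 > 96485.33 && _faraday_codata2018 < 96485.34,
              "Faraday constant should compose to ~96485.33212 coulomb/mol");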
- - These come from https://physics.nist.gov/cuu/Constants/index.html. - Termed the "2018 CODATA recommended values", they became available - on 20 May 2019 and replace the 2014 CODATA set. - - See oc/hoc_init.c, nrnoc/eion.c, nrniv/kschan.h -**/ - - -#define _electron_charge_codata2018 1.602176634e-19 /* coulomb exact*/ -#define _avogadro_number_codata2018 6.02214076e+23 /* exact */ -#define _boltzmann_codata2018 1.380649e-23 /* joule/K exact */ -#define _faraday_codata2018 \ - (_electron_charge_codata2018 * _avogadro_number_codata2018) /* 96485.33212... coulomb/mol */ -#define _gasconstant_codata2018 \ - (_boltzmann_codata2018 * _avogadro_number_codata2018) /* 8.314462618... joule/mol-K */ - -/* e/k in K/millivolt */ -#define _e_over_k_codata2018 \ - (.001 * _electron_charge_codata2018 / _boltzmann_codata2018) /* 11.604518... K/mV */ - -#endif /* nrnunits_modern_h */ diff --git a/src/oc/oc_ansi.h b/src/oc/oc_ansi.h index 6876d40157..1499f77512 100644 --- a/src/oc/oc_ansi.h +++ b/src/oc/oc_ansi.h @@ -1,10 +1,15 @@ #pragma once +#include "neuron/container/data_handle.hpp" +#include "neuron/container/generic_data_handle.hpp" + #include #include #include #include #include #include + +#include "memory.hpp" /** * \dir * \brief HOC Interpreter @@ -25,7 +30,6 @@ struct Arrayinfo; struct cTemplate; -struct Datum; struct DoubScal; struct DoubVec; struct HocSymExtension; @@ -35,6 +39,11 @@ union Objectdata; struct Symbol; struct Symlist; struct VoidFunc; +struct Prop; + +namespace neuron { +struct model_sorted_token; +} // nocpout.cpp Symbol* hoc_get_symbol(const char* var); @@ -43,9 +52,6 @@ void ivoc_help(const char*); Symbol* hoc_lookup(const char*); -void* hoc_Ecalloc(std::size_t nmemb, std::size_t size); -void* hoc_Emalloc(size_t size); -void hoc_malchk(); [[noreturn]] void hoc_execerror(const char*, const char*); [[noreturn]] void hoc_execerr_ext(const char* fmt, ...); char* hoc_object_name(Object*); @@ -86,7 +92,6 @@ decltype(auto) invoke_method_that_may_throw(Callable message_prefix, Args&&... a } // namespace neuron::oc double* hoc_getarg(int); -double* hoc_pgetarg(int); int ifarg(int); int vector_instance_px(void*, double**); @@ -131,6 +136,7 @@ extern int nrnignore; */ int hoc_obj_run(const char*, Object*); +void hoc_prstack(); int hoc_argtype(int); int hoc_is_double_arg(int); int hoc_is_pdouble_arg(int); @@ -161,6 +167,11 @@ void hoc_plt(int, double, double); void hoc_plprint(const char*); void hoc_ret(); /* but need to push before returning */ +void hoc_push(neuron::container::generic_data_handle handle); +template +void hoc_push(neuron::container::data_handle const& handle) { + hoc_push(neuron::container::generic_data_handle{handle}); +} void hoc_pushx(double); void hoc_pushstr(char**); void hoc_pushobj(Object**); @@ -170,7 +181,77 @@ void hoc_pushs(Symbol*); void hoc_pushi(int); void hoc_push_ndim(int); int hoc_pop_ndim(); +int hoc_stack_type(); bool hoc_stack_type_is_ndim(); + +namespace neuron::oc::detail { +template +struct hoc_get_arg_helper; +template +struct hoc_pop_helper; +} // namespace neuron::oc::detail + +/** @brief Pop an object of type T from the HOC stack. + * + * The helper type neuron::oc::detail::hoc_pop must be specialised for all + * supported (families of) T. + */ +template +T hoc_pop() { + return neuron::oc::detail::hoc_pop_helper::impl(); +} + +/** @brief Get the nth (1..N) argument on the stack. + * + * This is a templated version of hoc_getarg, hoc_pgetarg and friends. 
+ * + * @todo Should the stack be modified such that this can return const + * references, even for things like data_handle that at the moment are not + * exactly stored (we store generic_data_handle, which can produce a + * data_handle on demand but which does not, at present, actually *contain* + * a data_handle)? + */ +template +[[nodiscard]] T hoc_get_arg(std::size_t narg) { + return neuron::oc::detail::hoc_get_arg_helper::impl(narg); +} + +namespace neuron::oc::detail { +template <> +struct hoc_get_arg_helper { + static neuron::container::generic_data_handle impl(std::size_t); +}; +template +struct hoc_get_arg_helper> { + static neuron::container::data_handle impl(std::size_t narg) { + return neuron::container::data_handle{ + hoc_get_arg(narg)}; + } +}; +template <> +struct hoc_pop_helper { + /** @brief Pop a generic data handle from the HOC stack. + * + * This function is not used from translated MOD files, so we can assume + * that the generic_data_handle is not in permissive mode. + */ + static neuron::container::generic_data_handle impl(); +}; + +template +struct hoc_pop_helper> { + /** @brief Pop a data_handle from the HOC stack. + */ + static neuron::container::data_handle impl() { + return neuron::container::data_handle{hoc_pop()}; + } +}; +} // namespace neuron::oc::detail + +[[nodiscard]] inline double* hoc_pgetarg(int narg) { + return static_cast(hoc_get_arg>(narg)); +} + double hoc_xpop(); Symbol* hoc_spop(); double* hoc_pxpop(); @@ -184,9 +265,28 @@ char** hoc_strpop(); int hoc_ipop(); void hoc_nopop(); +/** @brief Shorthand for hoc_pop>(). + */ +template +neuron::container::data_handle hoc_pop_handle() { + return hoc_pop>(); +} + +/** + * @brief Shorthand for hoc_get_arg>(narg). + * + * Migrating code to be data_handle-aware typically involves replacing hoc_pgetarg with + * hoc_hgetarg. 
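 *
 * For example, extension code that used to grab a raw pointer argument
 * (hypothetical snippet, not taken from this diff):
 *
 *     double* p = hoc_pgetarg(1);
 *
 * can instead keep the typed handle and only drop down to a raw pointer
 * where one is still required:
 *
 *     auto h = hoc_hgetarg<double>(1);        // neuron::container::data_handle<double>
 *     double* p = static_cast<double*>(h);    // legacy raw-pointer view, if needed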
+ */ +template +[[nodiscard]] neuron::container::data_handle hoc_hgetarg(int narg) { + return hoc_get_arg>(narg); +} + [[noreturn]] void hoc_execerror_mes(const char*, const char*, int); void hoc_warning(const char*, const char*); double* hoc_val_pointer(const char*); +neuron::container::data_handle hoc_val_handle(std::string_view); Symbol* hoc_table_lookup(const char*, Symlist*); Symbol* hoc_install(const char*, int, double, Symlist**); extern Objectdata* hoc_objectdata; @@ -211,7 +311,10 @@ void hoc_obj_unref(Object*); /* NULL allowed */ void hoc_dec_refcount(Object**); Object** hoc_temp_objvar(Symbol* template_symbol, void* cpp_object); Object** hoc_temp_objptr(Object*); +Object* hoc_new_object(Symbol* symtemp, void* v); void hoc_new_object_asgn(Object** obp, Symbol* template_symbol, void* cpp_object); +Object* nrn_pntproc_nmodlrandom_wrap(void* pnt, Symbol* sym); +Object* nrn_nmodlrandom_wrap(Prop* prop, Symbol* sym); HocSymExtension* hoc_var_extra(const char*); double check_domain_limits(float*, double); Object* hoc_obj_get(int i); @@ -219,10 +322,6 @@ void hoc_obj_set(int i, Object*); void nrn_hoc_lock(); void nrn_hoc_unlock(); -void* hoc_Erealloc(void* ptr, std::size_t size); - -void* nrn_cacheline_alloc(void** memptr, std::size_t size); -void* nrn_cacheline_calloc(void** memptr, std::size_t nmemb, std::size_t size); [[noreturn]] void nrn_exit(int); void hoc_free_list(Symlist**); int hoc_errno_check(); @@ -262,8 +361,6 @@ int is_vector_arg(int); char* vector_get_label(IvocVect*); void vector_set_label(IvocVect*, char*); -void hoc_regexp_compile(const char*); -int hoc_regexp_search(const char*); Symbol* hoc_install_var(const char*, double*); void hoc_class_registration(); void hoc_spinit(); @@ -272,7 +369,6 @@ int hoc_arayinfo_install(Symbol*, int); void hoc_free_arrayinfo(Arrayinfo*); void hoc_free_val_array(double*, std::size_t); std::size_t hoc_total_array(Symbol*); -void hoc_menu_cleanup(); void frame_debug(); void hoc_oop_initaftererror(); void hoc_init(); @@ -331,7 +427,6 @@ int nrn_is_cable(); void* nrn_opaque_obj2pyobj(Object*); // PyObject reference not incremented Symbol* hoc_get_symbol(const char* var); -extern int _nrnunit_use_legacy_; /* 1:legacy, 0:modern (default) */ void bbs_done(void); int hoc_main1(int, const char**, const char**); char* cxx_char_alloc(std::size_t size); diff --git a/src/oc/oc_mcran4.cpp b/src/oc/oc_mcran4.cpp new file mode 100644 index 0000000000..128ccd7d04 --- /dev/null +++ b/src/oc/oc_mcran4.cpp @@ -0,0 +1,41 @@ +#include "hocdec.h" +#include "mcran4.h" + +int use_mcell_ran4_; + +void set_use_mcran4(bool value) { + use_mcell_ran4_ = value ? 
1 : 0; +} + +bool use_mcran4() { + return use_mcell_ran4_ != 0; +} + +void hoc_mcran4() { + uint32_t idx; + double* xidx; + double x; + xidx = hoc_pgetarg(1); + idx = (uint32_t) (*xidx); + x = mcell_ran4a(&idx); + *xidx = idx; + hoc_ret(); + hoc_pushx(x); +} +void hoc_mcran4init() { + double prev = mcell_lowindex(); + if (ifarg(1)) { + uint32_t idx = (uint32_t) chkarg(1, 0., 4294967295.); + mcell_ran4_init(idx); + } + hoc_ret(); + hoc_pushx(prev); +} +void hoc_usemcran4() { + double prev = (double) use_mcell_ran4_; + if (ifarg(1)) { + use_mcell_ran4_ = (int) chkarg(1, 0., 1.); + } + hoc_ret(); + hoc_pushx(prev); +} diff --git a/src/oc/oc_mcran4.hpp b/src/oc/oc_mcran4.hpp new file mode 100644 index 0000000000..d275e3e9bf --- /dev/null +++ b/src/oc/oc_mcran4.hpp @@ -0,0 +1,5 @@ +void set_use_mcran4(bool value); +bool use_mcran4(); +void hoc_mcran4(); +void hoc_mcran4init(); +void hoc_usemcran4(); diff --git a/src/oc/ocerf.cpp b/src/oc/ocerf.cpp index 19e3f6c9e0..a8aa148b38 100644 --- a/src/oc/ocerf.cpp +++ b/src/oc/ocerf.cpp @@ -1,7 +1,7 @@ #include <../../nrnconf.h> #include #ifndef MINGW -#if DOS || defined(WIN32) /*|| defined(MAC)*/ +#if DOS || defined(WIN32) /****************************************************************************** * * File: erf.cpp diff --git a/src/oc/ocfunc.h b/src/oc/ocfunc.h index 9ec9590323..61a620aa44 100644 --- a/src/oc/ocfunc.h +++ b/src/oc/ocfunc.h @@ -11,7 +11,7 @@ extern void hoc_System(void), hoc_Prmat(void), hoc_solve(void), hoc_eqinit(void) extern void hoc_symbols(void), hoc_PRintf(void), hoc_Xred(void), hoc_Sred(void); extern void hoc_ropen(void), hoc_wopen(void), hoc_xopen(void), hoc_Fscan(void), hoc_Fprint(void); extern void hoc_Graph(void), hoc_Graphmode(void), hoc_Plot(void), hoc_axis(void), hoc_Sprint(void); -extern void hoc_fmenu(void), hoc_Getstr(void), hoc_Strcmp(void); +extern void hoc_Getstr(void), hoc_Strcmp(void); extern void hoc_Lw(void), hoc_machine_name(void), hoc_Saveaudit(void), hoc_Retrieveaudit(void); extern void hoc_plotx(void), hoc_ploty(void), hoc_regraph(void); extern void hoc_startsw(void), hoc_stopsw(void), hoc_object_id(void); @@ -32,7 +32,6 @@ extern void hoc_neuronhome(void), hoc_Execerror(void); extern void hoc_sscanf(void), hoc_save_session(void), hoc_print_session(void); extern void hoc_Chdir(void), hoc_getcwd(void), hoc_Symbol_units(void), hoc_stdout(void); extern void hoc_name_declared(void), hoc_unix_mac_pc(void), hoc_show_winio(void); -extern void hoc_usemcran4(void), hoc_mcran4(void), hoc_mcran4init(void); extern void hoc_nrn_load_dll(void), hoc_nrnversion(void), hoc_object_pushed(void); extern void hoc_mallinfo(void); extern void hoc_Setcolor(void); diff --git a/src/oc/ocmain.cpp b/src/oc/ocmain.cpp index c1a3f62f9a..7be682d581 100644 --- a/src/oc/ocmain.cpp +++ b/src/oc/ocmain.cpp @@ -7,20 +7,13 @@ extern const char* neuron_home; -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj) = NULL; -extern double (*nrnpy_object_to_double_)(Object*) = NULL; - -#if MAC -char hoc_console_buffer[256]; -#endif - #if defined(WIN32) void* cvode_pmem; extern void setneuronhome(const char*); #endif static void setnrnhome(const char* arg) { -#if !defined(WIN32) && !defined(MAC) +#ifndef WIN32 /* Gary Holt's first pass at this was: @@ -48,15 +41,8 @@ static void setnrnhome(const char* arg) { int main(int argc, const char** argv, const char** envp) { int err; -#if MAC - int our_argc = 1; - char* our_argv[1]; - our_argv[0] = "Neuron"; - err = hoc_main1(our_argc, our_argv, envp); -#else setnrnhome(argv[0]); err = 
hoc_main1(argc, argv, envp); -#endif if (!err) { hoc_final_exit(); } diff --git a/src/oc/parse.ypp b/src/oc/parse.ypp index 8b38a4629b..70cccbbce0 100755 --- a/src/oc/parse.ypp +++ b/src/oc/parse.ypp @@ -112,9 +112,9 @@ static void hoc_opasgn_invalid(int op); /* NEWCABLE */ %token SECTIONKEYWORD SECTION CONNECTKEYWORD ACCESSKEYWORD %token RANGEVAR MECHANISM INSERTKEYWORD FORALL NRNPNTVAR FORSEC IFSEC -%token UNINSERTKEYWORD SETPOINTERKEYWORD SECTIONREF +%token UNINSERTKEYWORD SETPOINTERKEYWORD SECTIONREF RANGEOBJ %type sectiondecl sectionname -%type rangevar rangevar1 section section_or_ob +%type rangevar rangevar1 section section_or_ob rangeobj rangeobj1 /* END NEWCABLE */ /* OOP */ @@ -202,6 +202,15 @@ asgn: varname ROP expr ; /* OOP */ + +rangeobj: rangeobj1 + { code(sec_access_push); codesym((Symbol *)0);} + | section '.' rangeobj1 + ; +rangeobj1: RANGEOBJ {pushs($1); pushi(CHECK);} wholearray + { $$ = $3;} + ; + object: OBJECTVAR {pushi(OBJECTVAR);pushs($1); pushi(CHECK);} wholearray {$$ = $3; code(hoc_objectvar); spop(); codesym($1);} | OBJECTARG @@ -216,6 +225,10 @@ object: OBJECTVAR {pushi(OBJECTVAR);pushs($1); pushi(CHECK);} wholearray | HOCOBJFUNCTION begin '(' arglist ')' { $$ = $2; code(call); codesym($1); codei($4); code(hoc_known_type); codei(OBJECTVAR); pushi(OBJECTVAR);} + | rangeobj + { code(rangeobjevalmiddle); codesym(spop()); pushi(OBJECTVAR);} + | rangeobj '(' expr ')' + {TPD; code(rangeobjeval); codesym(spop()); pushi(OBJECTVAR);} ; ob: ob1 { spop(); } @@ -975,7 +988,7 @@ ckvar: VAR anyname: STRING|VAR|UNDEF|FUNCTION|PROCEDURE|FUN_BLTIN|SECTION|RANGEVAR |NRNPNTVAR|OBJECTVAR|TEMPLATE|OBFUNCTION|AUTO|AUTOOBJ|SECTIONREF |MECHANISM|BLTIN|STRFUNCTION|HOCOBJFUNCTION|ITERATOR|STRINGFUNC - |OBJECTFUNC + |OBJECTFUNC|RANGEOBJ ; %% /* end of grammar */ diff --git a/src/oc/plt.cpp b/src/oc/plt.cpp index 5362dbff07..b3cf4e6f27 100644 --- a/src/oc/plt.cpp +++ b/src/oc/plt.cpp @@ -6,8 +6,6 @@ added plots in fig format \ */ #include "hoc.h" #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); extern void Fig_file(const char*, int); diff --git a/src/oc/regexp.cpp b/src/oc/regexp.cpp deleted file mode 100644 index 4387fa1613..0000000000 --- a/src/oc/regexp.cpp +++ /dev/null @@ -1,466 +0,0 @@ -#include <../../nrnconf.h> -/* /local/src/master/nrn/src/oc/regexp.cpp,v 1.1.1.1 1994/10/12 17:22:13 hines Exp */ -/* -regexp.cpp,v - * Revision 1.1.1.1 1994/10/12 17:22:13 hines - * NEURON 3.0 distribution - * - * Revision 2.19 93/02/02 10:34:37 hines - * static functions declared before used - * - * Revision 1.3 92/07/31 12:11:31 hines - * following merged from hoc - * The regular expression has been augmented with - * {istart-iend} where istart and iend are integers. The expression matches - * any integer that falls in this range. - * - * Revision 1.2 92/01/30 08:17:19 hines - * bug fixes found in hoc incorporated. if()return, no else, objectcenter - * warnings. 
- * - * Revision 1.1 91/10/11 11:12:16 hines - * Initial revision - * - * Revision 3.108 90/10/24 09:44:14 hines - * saber warnings gone - * - * Revision 3.58 90/05/17 16:30:52 jamie - * changed global functions to start with hoc_ - * moved regexp.cpp from project 'neuron' to 'hoc' - * - * Revision 1.25 89/08/31 10:28:46 mlh - * regular expressions for issection() - * differences between standard regular expressions are: - * allways match from beginning to end of target (implicit ^ and $) - * change [] to <> - * eliminate \( - * - * Revision 1.2 89/08/31 09:22:17 mlh - * works as in e.cpp and lint free - * - * Revision 1.1 89/08/31 08:24:59 mlh - * Initial revision - * -*/ - -/* regular expression match for section names - grabbed prototype from e.cpp - Use by first compiling the search string with hoc_regexp_compile(pattern) - Then checking target strings one at a time with hoc_regexp_search(target) -*/ - -#include -#include "hocdec.h" -#define CABLESECTION 1 -/* Always match from beginning of string (implicit ^), - Always match end of string (implicit $), - change [] to <>, - eliminate \( -*/ - -#define STAR 01 -#define SUFF '.' -#define TILDE '~' - -#define EREGEXP 24 -#define error(enum) hoc_execerror("search string format error", pattern) -#define CBRA 1 -#define CCHR 2 -#define CDOT 4 -#define CCL 6 -#define NCCL 8 -#define CDOL 10 -#define CEOF 11 -#define CKET 12 -#if CABLESECTION -#define INTRANGE 14 -#endif -#define NBRA 5 -#define ESIZE 256 -#define eof '\0' -static char expbuf[ESIZE + 4]; -static const char* pattern = ""; -static char* loc1; -static char* loc2; -static char* locs; -static char* braslist[NBRA]; -static char* braelist[NBRA]; -static int circfl; -#if CABLESECTION -static int int_range_start[NBRA]; -static int int_range_stop[NBRA]; -#endif - -static int advance(char* lp, char* ep); -static int hoc_cclass(char* set, char c, int af); - -void hoc_regexp_compile(const char* pat) { - char* cp = (char*) pat; - int c; - char* ep; - char* lastep = 0; -#if (!CABLESECTION) - char bracket[NBRA], *bracketp; - int nbra; -#else - int int_range_index = 0; -#endif - int cclcnt; - int tempc; - - - if (!cp) { - pattern = ""; - error(EREGEXP); - } - if (pattern == cp && strcmp(pattern, cp)) { - /* if previous pattern != cp then may have been freed */ - return; - } - pattern = cp; - ep = expbuf; -#if (!CABLESECTION) - bracketp = bracket; - nbra = 0; -#endif - if ((c = *cp++) == '\n') { - cp--; - c = eof; - } - if (c == eof) { - if (*ep == 0) - error(EREGEXP); - return; - } -#if CABLESECTION - circfl = 1; -#else - circfl = 0; - if (c == '^') { - c = *cp++; - circfl++; - } -#endif - if (c == '*') - goto cerror; - cp--; - for (;;) { - if (ep >= &expbuf[ESIZE]) - goto cerror; - c = *cp++; - if (c == '\n') { - cp--; - c = eof; - } - if (c == eof) { -#if CABLESECTION - *ep++ = CDOL; -#endif - *ep++ = CEOF; - return; - } - if (c != '*') - lastep = ep; - switch (c) { - case '\\': -#if (!CABLESECTION) - if ((c = *cp++) == '(') { - if (nbra >= NBRA) - goto cerror; - *bracketp++ = nbra; - *ep++ = CBRA; - *ep++ = nbra++; - continue; - } - if (c == ')') { - if (bracketp <= bracket) - goto cerror; - *ep++ = CKET; - *ep++ = *--bracketp; - continue; - } -#endif - *ep++ = CCHR; - if (c == '\n') - goto cerror; - *ep++ = c; - continue; - - case '.': - *ep++ = CDOT; - continue; - - case '\n': - goto cerror; - - case '*': - if (*lastep == CBRA || *lastep == CKET) - error(EREGEXP); - *lastep |= STAR; - continue; - -#if (!CABLESECTION) - case '$': - tempc = *cp; - if (tempc != eof && tempc != '\n') - goto 
defchar; - *ep++ = CDOL; - continue; -#endif - -#if CABLESECTION - case '{': { - char* cp1 = cp; - if (int_range_index >= NBRA) - goto cerror; - *ep++ = INTRANGE; - do { - if (!(*cp >= '0' && *cp <= '9') && *cp != '-') { - error(EREGEXP); - } - } while (*(++cp) != '}'); - cp++; - if (2 != sscanf(cp1, - "%d-%d", - int_range_start + int_range_index, - int_range_stop + int_range_index)) { - error(EREGEXP); - } - *ep++ = int_range_index++; - } - continue; -#endif -#if CABLESECTION - case '<': -#else - case '[': -#endif - *ep++ = CCL; - *ep++ = 0; - cclcnt = 1; - if ((c = *cp++) == '^') { - c = *cp++; - ep[-2] = NCCL; - } - do { - if (c == '\n') - goto cerror; - /* - * Handle the escaped '-' - */ - if (c == '-' && *(ep - 1) == '\\') - *(ep - 1) = '-'; - /* - * Handle ranges of characters (e.g. a-z) - */ - else if ((tempc = *cp++) != ']' && c == '-' && cclcnt > 1 && tempc != '\n' && - (c = *(ep - 1)) <= tempc) { - while (++c <= tempc) { - *ep++ = c; - cclcnt++; - if (ep >= &expbuf[ESIZE]) - goto cerror; - } - } - /* - * Normal case. Add character to buffer - */ - else { - cp--; - *ep++ = c; - cclcnt++; - if (ep >= &expbuf[ESIZE]) - goto cerror; - } -#if CABLESECTION - } while ((c = *cp++) != '>'); -#else - - } while ((c = *cp++) != ']'); -#endif - lastep[1] = cclcnt; - continue; - -#if (!CABLESECTION) - defchar: -#endif - default: - *ep++ = CCHR; - *ep++ = c; - } - } -cerror: - expbuf[0] = 0; - error(EREGEXP); -} - -int hoc_regexp_search(const char* tar) { /*return true if target matches pattern*/ - char* target = (char*) tar; - char *p1, *p2, c; - -#if 1 - if (target == (char*) 0) { - return (0); - } - p1 = target; - locs = (char*) 0; -#else /* in e, apparently for searches within or at begining of string */ - if (gf) { - if (circfl) - return (0); - p1 = linebuf; - p2 = genbuf; - while (*p1++ = *p2++) - ; - locs = p1 = loc2; - } else { - if (addr == zero) - return (0); - p1 = getline(*addr); - locs = NULL; - } -#endif - p2 = expbuf; - if (circfl) { - loc1 = p1; - return (advance(p1, p2)); - } - /* fast check for first character */ - if (*p2 == CCHR) { - c = p2[1]; - do { - if (*p1 != c) - continue; - if (advance(p1, p2)) { - loc1 = p1; - return (1); - } - } while (*p1++); - return (0); - } - /* regular algorithm */ - do { - if (advance(p1, p2)) { - loc1 = p1; - return (1); - } - } while (*p1++); - return (0); -} - -static int advance(char* lp, char* ep) { - char* curlp; - - for (;;) - switch (*ep++) { - case CCHR: - if (*ep++ == *lp++) - continue; - return (0); - - case CDOT: - if (*lp++) - continue; - return (0); - - case CDOL: - if (*lp == 0) - continue; - return (0); - - case CEOF: - loc2 = lp; - return (1); - -#if CABLESECTION - case INTRANGE: { - int start, stop, num; - start = int_range_start[*ep]; - stop = int_range_stop[*ep++]; - num = *lp++ - '0'; - if (num < 0 || num > 9) { - return (0); - } - while (*lp >= '0' && *lp <= '9') { - num = 10 * num + *lp - '0'; - ++lp; - } - if (num >= start && num <= stop) { - continue; - } - } - return (0); -#endif - - case CCL: - if (hoc_cclass(ep, *lp++, 1)) { - ep += *ep; - continue; - } - return (0); - - case NCCL: - if (hoc_cclass(ep, *lp++, 0)) { - ep += *ep; - continue; - } - return (0); - - case CBRA: - braslist[*ep++] = lp; - continue; - - case CKET: - braelist[*ep++] = lp; - continue; - - case CDOT | STAR: - curlp = lp; - /*EMPTY*/ - while (*lp++) - ; - goto star; - - case CCHR | STAR: - curlp = lp; - /*EMPTY*/ - while (*lp++ == *ep) - ; - ep++; - goto star; - - case CCL | STAR: - case NCCL | STAR: - curlp = lp; - /*EMPTY*/ - while 
(hoc_cclass(ep, *lp++, ep[-1] == (CCL | STAR))) - ; - ep += *ep; - goto star; - - star: - do { - lp--; - if (lp == locs) - break; - if (advance(lp, ep)) - return (1); - } while (lp > curlp); - return (0); - - default: - error(EREGEXP); - } -} - -static int hoc_cclass(char* set, char c, int af) { - int n; - - if (c == 0) - return (0); - n = *set++; - while (--n) - if (*set++ == c) - return (af); - return (!af); -} diff --git a/src/oc/scoprand.cpp b/src/oc/scoprand.cpp index c7683b2ae8..70651f5608 100644 --- a/src/oc/scoprand.cpp +++ b/src/oc/scoprand.cpp @@ -1,6 +1,8 @@ #include <../../nrnconf.h> #include +#ifdef HAVE_UNISTD_H #include +#endif /* this was removed from the scopmath library since there could be multiple copies of the static value below. One in neuron.exe and the @@ -21,7 +23,8 @@ static char RCSid[] = "random.cpp,v 1.4 1999/01/04 12:46:49 hines Exp"; #endif #include -#include +#include "oc_mcran4.hpp" +#include "mcran4.h" #include "scoplib.h" static uint32_t value = 1; @@ -56,8 +59,7 @@ static uint32_t value = 1; *--------------------------------------------------------------------------- */ double scop_random(void) { - extern int use_mcell_ran4_; - if (use_mcell_ran4_) { + if (use_mcran4()) { /*perhaps 4 times slower but much higher quality*/ return mcell_ran4a(&value); } else { diff --git a/src/oc/settext.cpp b/src/oc/settext.cpp index d3ee14bc34..63ad51c376 100644 --- a/src/oc/settext.cpp +++ b/src/oc/settext.cpp @@ -5,8 +5,6 @@ hoc_pushx(a); #include "gui-redirect.h" -extern Object** (*nrnpy_gui_helper_)(const char* name, Object* obj); -extern double (*nrnpy_object_to_double_)(Object*); int newstyle; unsigned int text_style = 0, text_size = 1, text_orient = 0; diff --git a/src/oc/symbol.cpp b/src/oc/symbol.cpp index eaba0b4f80..605a7ba2a6 100644 --- a/src/oc/symbol.cpp +++ b/src/oc/symbol.cpp @@ -2,17 +2,6 @@ /* /local/src/master/nrn/src/oc/symbol.cpp,v 1.9 1999/02/25 18:01:58 hines Exp */ /* version 7.2.1 2-jan-89 */ -#if HAVE_POSIX_MEMALIGN -#define HAVE_MEMALIGN 1 -#endif -#if defined(DARWIN) /* posix_memalign seems not to work on Darwin 10.6.2 */ -#undef HAVE_MEMALIGN -#endif -#if HAVE_MEMALIGN -#undef _XOPEN_SOURCE /* avoid warnings about redefining this */ -#define _XOPEN_SOURCE 600 -#endif - #include "hoc.h" #include "hocdec.h" #include "hoclist.h" @@ -25,15 +14,9 @@ #include #include -#if MAC -#undef HAVE_MALLOC_H -#endif #if HAVE_MALLOC_H #include #endif -#if HAVE_ALLOC_H -#include /* at least for turbo C 2.0 */ -#endif #include "nrnmpiuse.h" @@ -179,96 +162,6 @@ void hoc_link_symbol(Symbol* sp, Symlist* list) { sp->next = nullptr; } -static int emalloc_error = 0; - -void hoc_malchk(void) { - if (emalloc_error) { - emalloc_error = 0; - execerror("out of memory", nullptr); - } -} - -void* hoc_Emalloc(size_t n) { /* check return from malloc */ - void* p = malloc(n); - if (p == nullptr) - emalloc_error = 1; - return p; -} - -void* emalloc(size_t n) { - void* p = hoc_Emalloc(n); - if (emalloc_error) { - hoc_malchk(); - } - return p; -} - -void* hoc_Ecalloc(size_t n, size_t size) { /* check return from calloc */ - if (n == 0) { - return nullptr; - } - void* p = calloc(n, size); - if (p == nullptr) - emalloc_error = 1; - return p; -} - -void* ecalloc(size_t n, size_t size) { - void* p = hoc_Ecalloc(n, size); - if (emalloc_error) { - hoc_malchk(); - } - return p; -} - -void* nrn_cacheline_alloc(void** memptr, size_t size) { -#if HAVE_MEMALIGN - static int memalign_is_working = 1; - if (memalign_is_working) { - if (posix_memalign(memptr, 64, size) != 0) { - 
fprintf(stderr, "posix_memalign not working, falling back to using malloc\n"); - memalign_is_working = 0; - *memptr = hoc_Emalloc(size); - hoc_malchk(); - } - } else -#endif - *memptr = hoc_Emalloc(size); - hoc_malchk(); - return *memptr; -} - -void* nrn_cacheline_calloc(void** memptr, size_t nmemb, size_t size) { -#if HAVE_MEMALIGN - nrn_cacheline_alloc(memptr, nmemb * size); - memset(*memptr, 0, nmemb * size); -#else - *memptr = hoc_Ecalloc(nmemb, size); - hoc_malchk(); -#endif - return *memptr; -} - -void* hoc_Erealloc(void* ptr, size_t size) { /* check return from realloc */ - if (!ptr) { - return hoc_Emalloc(size); - } - void* p = realloc(ptr, size); - if (p == nullptr) { - free(ptr); - emalloc_error = 1; - } - return p; -} - -void* erealloc(void* ptr, size_t size) { - void* p = hoc_Erealloc(ptr, size); - if (emalloc_error) { - hoc_malchk(); - } - return p; -} - void hoc_free_symspace(Symbol* s1) { /* frees symbol space. Marks it UNDEF */ if (s1 && s1->cpublic != 2) { switch (s1->type) { diff --git a/src/oc/wrap_sprintf.h b/src/oc/wrap_sprintf.h index 2c1ca445f7..bba7ddd80f 100644 --- a/src/oc/wrap_sprintf.h +++ b/src/oc/wrap_sprintf.h @@ -1,4 +1,6 @@ #include +#include // std::forward + namespace neuron { /** * @brief Redirect sprintf to snprintf if the buffer size can be deduced. diff --git a/src/oc/xred.cpp b/src/oc/xred.cpp index d7ab88b28f..5dfbbf1c59 100644 --- a/src/oc/xred.cpp +++ b/src/oc/xred.cpp @@ -132,9 +132,6 @@ void hoc_Sred(void) { int hoc_sred(const char* prompt, char* defalt, char* charlist) { char istr[80], c[2], instring[40], *result; -#if !defined(HAVE_STRSTR) - extern char* strstr(); -#endif for (;;) { /* cycle until done */ IGNORE(fprintf(stderr, "%s (%s)", prompt, defalt)); /* print prompt */ @@ -169,22 +166,4 @@ int hoc_sred(const char* prompt, char* defalt, char* charlist) { return 0; } -#if !defined(HAVE_STRSTR) -char* strstr(char* cs, char* ct) { - char *strchr_ptr, *cs_ptr; - int ct_len; - - ct_len = strlen(ct); - - for (cs_ptr = cs; - ((strchr_ptr = (char*) strchr(cs_ptr, ct[0])) && (ct_len <= strlen(strchr_ptr))); - cs_ptr = strchr_ptr + 1) { - if (memcmp(ct, strchr_ptr, ct_len) == 0) { - return strchr_ptr; - } - } - - return (char*) 0; -} -#endif #endif diff --git a/src/parallel/bbs.cpp b/src/parallel/bbs.cpp index e04dd5cc04..95962ccbe2 100644 --- a/src/parallel/bbs.cpp +++ b/src/parallel/bbs.cpp @@ -1,6 +1,5 @@ #include <../../nrnconf.h> #include "nrnmpi.h" -#include "bbsconf.h" #include #include #include diff --git a/src/parallel/bbsclimpi.cpp b/src/parallel/bbsclimpi.cpp index 7147ff6ae0..4e95b5b336 100644 --- a/src/parallel/bbsclimpi.cpp +++ b/src/parallel/bbsclimpi.cpp @@ -1,13 +1,14 @@ #include <../../nrnconf.h> -#include "../nrnpython/nrnpython_config.h" #include -#include "bbsconf.h" #ifdef NRNMPI // to end of file #include #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include +#include "nrnpy.h" #include "oc2iv.h" #include "bbs.h" #include "bbsrcli.h" @@ -17,17 +18,7 @@ extern void nrnmpi_int_broadcast(int*, int, int); #define debug 0 -#if defined(USE_PYTHON) -extern int (*p_nrnpython_start)(int); -#endif - -#if defined(HAVE_STL) -#if defined(HAVE_SSTREAM) // the standard ... 
#include -#else -#include -#include -#endif struct ltint { bool operator()(int i, int j) const { @@ -37,18 +28,14 @@ struct ltint { class KeepArgs: public std::map {}; -#endif - int BBSClient::sid_; BBSClient::BBSClient() { - sendbuf_ = nil; - recvbuf_ = nil; + sendbuf_ = nullptr; + recvbuf_ = nullptr; request_ = nrnmpi_newbuf(100); nrnmpi_ref(request_); -#if defined(HAVE_STL) keepargs_ = new KeepArgs(); -#endif BBSClient::start(); } @@ -56,9 +43,7 @@ BBSClient::~BBSClient() { nrnmpi_unref(sendbuf_); nrnmpi_unref(recvbuf_); nrnmpi_unref(request_); -#if defined(HAVE_STL) delete keepargs_; -#endif } void BBSClient::perror(const char* s) { @@ -134,7 +119,7 @@ void BBSClient::post(const char* key) { nrnmpi_pkstr(key, sendbuf_); nrnmpi_bbssend(sid_, POST, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; } void BBSClient::post_todo(int parentid) { @@ -146,7 +131,7 @@ void BBSClient::post_todo(int parentid) { nrnmpi_pkint(parentid, sendbuf_); nrnmpi_bbssend(sid_, POST_TODO, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; } void BBSClient::post_result(int id) { @@ -158,7 +143,7 @@ void BBSClient::post_result(int id) { nrnmpi_pkint(id, sendbuf_); nrnmpi_bbssend(sid_, POST_RESULT, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; } int BBSClient::get(const char* key, int type) { @@ -271,19 +256,15 @@ int BBSClient::look_take_result(int pid) { } void BBSClient::save_args(int userid) { -#if defined(HAVE_STL) nrnmpi_ref(sendbuf_); keepargs_->insert(std::pair(userid, sendbuf_)); - -#endif post_todo(working_id_); } void BBSClient::return_args(int userid) { -#if defined(HAVE_STL) KeepArgs::iterator i = keepargs_->find(userid); nrnmpi_unref(recvbuf_); - recvbuf_ = nil; + recvbuf_ = nullptr; if (i != keepargs_->end()) { recvbuf_ = (*i).second; nrnmpi_ref(recvbuf_); @@ -291,7 +272,6 @@ void BBSClient::return_args(int userid) { upkbegin(); BBSImpl::return_args(userid); } -#endif } void BBSClient::done() { @@ -312,8 +292,8 @@ void BBSClient::done() { } } #if defined(USE_PYTHON) - if (p_nrnpython_start) { - (*p_nrnpython_start)(0); + if (neuron::python::methods.interpreter_start) { + neuron::python::methods.interpreter_start(0); } #endif BBSImpl::done(); diff --git a/src/parallel/bbsconf.h.in b/src/parallel/bbsconf.h.in deleted file mode 100755 index d139df47c9..0000000000 --- a/src/parallel/bbsconf.h.in +++ /dev/null @@ -1,11 +0,0 @@ -#ifndef H_bbsconfig_included -#define H_bbsconfig_included 1 - -/* following are relevant to src/parallel implmentation of ParallelContext */ -/* Set to 1 if the standard template library exists */ -#undef HAVE_STL -/* Set to 1 if SIGPOLL is a possible signal */ -#undef HAVE_SIGPOLL - -#endif /* H_config_included */ - diff --git a/src/parallel/bbsdirectmpi.cpp b/src/parallel/bbsdirectmpi.cpp index fc1e4f67e5..4e62dbe0bf 100644 --- a/src/parallel/bbsdirectmpi.cpp +++ b/src/parallel/bbsdirectmpi.cpp @@ -1,10 +1,11 @@ #include <../../nrnconf.h> #include -#include "bbsconf.h" #ifdef NRNMPI // to end of file #include #include +#ifdef HAVE_UNISTD_H #include +#endif #include #include "oc2iv.h" #include "bbs.h" @@ -14,13 +15,7 @@ extern void nrnmpi_int_broadcast(int*, int, int); -#if defined(HAVE_STL) -#if defined(HAVE_SSTREAM) // the standard ... 
#include -#else -#include -#include -#endif #define debug 0 @@ -32,27 +27,20 @@ struct ltint { class KeepArgs: public std::map {}; -#endif - - BBSDirect::BBSDirect() { if (!BBSDirectServer::server_) { BBSDirectServer::server_ = new BBSDirectServer(); } - sendbuf_ = nil; - recvbuf_ = nil; + sendbuf_ = nullptr; + recvbuf_ = nullptr; BBSDirect::start(); -#if defined(HAVE_STL) keepargs_ = new KeepArgs(); -#endif } BBSDirect::~BBSDirect() { nrnmpi_unref(sendbuf_); nrnmpi_unref(recvbuf_); -#if defined(HAVE_STL) delete keepargs_; -#endif } void BBSDirect::perror(const char* s) { @@ -82,7 +70,7 @@ void BBSDirect::context() { } nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; } int BBSDirect::upkint() { @@ -178,7 +166,7 @@ void BBSDirect::post(const char* key) { nrnmpi_pkstr(key, sendbuf_); BBSDirectServer::server_->post(key, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; BBSDirectServer::handle(); } @@ -190,7 +178,7 @@ void BBSDirect::post_todo(int parentid) { nrnmpi_pkint(parentid, sendbuf_); BBSDirectServer::server_->post_todo(parentid, nrnmpi_myid_bbs, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; BBSDirectServer::handle(); } @@ -202,7 +190,7 @@ void BBSDirect::post_result(int id) { nrnmpi_pkint(id, sendbuf_); BBSDirectServer::server_->post_result(id, sendbuf_); nrnmpi_unref(sendbuf_); - sendbuf_ = nil; + sendbuf_ = nullptr; BBSDirectServer::handle(); } @@ -263,26 +251,21 @@ int BBSDirect::master_take_result(int pid) { } void BBSDirect::save_args(int userid) { -#if defined(HAVE_STL) nrnmpi_ref(sendbuf_); keepargs_->insert(std::pair(userid, sendbuf_)); - -#endif post_todo(working_id_); } void BBSDirect::return_args(int userid) { -#if defined(HAVE_STL) KeepArgs::iterator i = keepargs_->find(userid); nrnmpi_unref(recvbuf_); - recvbuf_ = nil; + recvbuf_ = nullptr; if (i != keepargs_->end()) { recvbuf_ = (*i).second; keepargs_->erase(i); nrnmpi_upkbegin(recvbuf_); BBSImpl::return_args(userid); } -#endif } bool BBSDirect::look_take(const char* key) { diff --git a/src/parallel/bbslocal.cpp b/src/parallel/bbslocal.cpp index 6d7a8688a6..15ae4b8ee1 100644 --- a/src/parallel/bbslocal.cpp +++ b/src/parallel/bbslocal.cpp @@ -1,20 +1,13 @@ #include <../../nrnconf.h> -#include "bbsconf.h" #include #include "oc2iv.h" #include "bbslocal.h" #include "bbslsrv.h" #include -#if defined(HAVE_STL) -#if defined(HAVE_SSTREAM) // the standard ... 
#include #include #include -#else -#include -#include -#endif struct ltint { bool operator()(int i, int j) const { @@ -24,8 +17,6 @@ struct ltint { class KeepArgs: public std::map {}; -#endif - static MessageValue* posting_; static MessageValue* taking_; static BBSLocalServer* server_; @@ -33,20 +24,16 @@ static BBSLocalServer* server_; BBSLocal::BBSLocal() { if (!server_) { server_ = new BBSLocalServer(); - posting_ = nil; - taking_ = nil; + posting_ = nullptr; + taking_ = nullptr; } start(); -#if defined(HAVE_STL) keepargs_ = new KeepArgs(); -#endif } BBSLocal::~BBSLocal() { // need to unref anything in keepargs_; -#if defined(HAVE_STL) delete keepargs_; -#endif } void BBSLocal::context() {} @@ -148,19 +135,19 @@ void BBSLocal::pkpickle(const char* s, size_t n) { void BBSLocal::post(const char* key) { server_->post(key, posting_); Resource::unref(posting_); - posting_ = nil; + posting_ = nullptr; } bool BBSLocal::look_take(const char* key) { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; bool b = server_->look_take(key, &taking_); return b; } bool BBSLocal::look(const char* key) { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; bool b = server_->look(key, &taking_); return b; } @@ -169,7 +156,7 @@ void BBSLocal::take(const char* key) { // blocking int id; for (;;) { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; if (server_->look_take(key, &taking_)) { return; } else if ((id = server_->look_take_todo(&taking_)) != 0) { @@ -183,32 +170,32 @@ void BBSLocal::take(const char* key) { // blocking void BBSLocal::post_todo(int parentid) { server_->post_todo(parentid, posting_); Resource::unref(posting_); - posting_ = nil; + posting_ = nullptr; } void BBSLocal::post_result(int id) { server_->post_result(id, posting_); Resource::unref(posting_); - posting_ = nil; + posting_ = nullptr; } int BBSLocal::look_take_result(int pid) { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; int id = server_->look_take_result(pid, &taking_); return id; } int BBSLocal::look_take_todo() { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; int id = server_->look_take_todo(&taking_); return id; } int BBSLocal::take_todo() { Resource::unref(taking_); - taking_ = nil; + taking_ = nullptr; int id = look_take_todo(); if (id == 0) { perror("take_todo blocking"); @@ -218,14 +205,11 @@ int BBSLocal::take_todo() { void BBSLocal::save_args(int userid) { server_->post_todo(working_id_, posting_); -#if defined(HAVE_STL) keepargs_->insert(std::pair(userid, posting_)); -#endif - posting_ = nil; + posting_ = nullptr; } void BBSLocal::return_args(int userid) { -#if defined(HAVE_STL) KeepArgs::iterator i = keepargs_->find(userid); assert(i != keepargs_->end()); Resource::unref(taking_); @@ -233,7 +217,6 @@ void BBSLocal::return_args(int userid) { keepargs_->erase(i); taking_->init_unpack(); BBSImpl::return_args(userid); -#endif } void BBSLocal::done() { diff --git a/src/parallel/bbslsrv.cpp b/src/parallel/bbslsrv.cpp index 402423dd4a..b98dd212d1 100644 --- a/src/parallel/bbslsrv.cpp +++ b/src/parallel/bbslsrv.cpp @@ -1,5 +1,4 @@ #include <../../nrnconf.h> -#include "bbsconf.h" #include #include #include "bbslsrv.h" @@ -11,18 +10,9 @@ #define VECTOR 4 #define PICKLE 5 -#if defined(HAVE_STL) -#if defined(HAVE_SSTREAM) // the standard ... 
#include #include #include -#else -#include -#include -#include -#include -#include -#endif // debug is 0 1 or 2 #define debug 0 @@ -69,7 +59,7 @@ WorkItem::WorkItem(int id, MessageValue* m) { id_ = id; val_ = m; val_->ref(); - parent_ = nil; + parent_ = nullptr; } WorkItem::~WorkItem() { @@ -99,18 +89,9 @@ class MessageList: public std::multimap class WorkList: public std::map {}; class ReadyList: public std::set {}; class ResultList: public std::multimap {}; -#else -class MessageList {}; -class WorkList {}; -class ReadyList {}; -class ResultList {}; -static void nostl() { - hoc_execerror("BBSLocalServer not working", "Compiled without STL"); -} -#endif MessageItem::MessageItem() { - next_ = nil; + next_ = nullptr; type_ = 0; } @@ -129,9 +110,9 @@ MessageItem::~MessageItem() { } MessageValue::MessageValue() { - first_ = nil; - last_ = nil; - unpack_ = nil; + first_ = nullptr; + last_ = nullptr; + unpack_ = nullptr; } MessageValue::~MessageValue() { @@ -250,17 +231,14 @@ int MessageValue::upkpickle(char* s, size_t* n) { } BBSLocalServer::BBSLocalServer() { -#if defined(HAVE_STL) messages_ = new MessageList(); work_ = new WorkList(); todo_ = new ReadyList(); results_ = new ResultList(); next_id_ = 1; -#endif } BBSLocalServer::~BBSLocalServer() { -#if defined(HAVE_STL) delete todo_; delete results_; @@ -268,11 +246,9 @@ BBSLocalServer::~BBSLocalServer() { // need to unref MessageValue in messages_ and delete WorkItem in work_ delete messages_; delete work_; -#endif } bool BBSLocalServer::look_take(const char* key, MessageValue** val) { -#if defined(HAVE_STL) MessageList::iterator m = messages_->find(key); if (m != messages_->end()) { *val = (MessageValue*) ((*m).second); @@ -286,15 +262,11 @@ bool BBSLocalServer::look_take(const char* key, MessageValue** val) { } #if debug printf("fail srvr_look_take |%s|\n", key); -#endif -#else - nostl(); #endif return false; } bool BBSLocalServer::look(const char* key, MessageValue** val) { -#if defined(HAVE_STL) MessageList::iterator m = messages_->find(key); if (m != messages_->end()) { *val = (MessageValue*) ((*m).second); @@ -304,32 +276,24 @@ bool BBSLocalServer::look(const char* key, MessageValue** val) { #endif return true; } else { - val = nil; + val = nullptr; } #if debug printf("srvr_look false |%s|\n", key); -#endif -#else - nostl(); #endif return false; } void BBSLocalServer::post(const char* key, MessageValue* val) { -#if defined(HAVE_STL) MessageList::iterator m = messages_->insert( std::pair(newstr(key), val)); Resource::ref(val); #if debug printf("srvr_post |%s|\n", key); #endif -#else - nostl(); -#endif } void BBSLocalServer::post_todo(int parentid, MessageValue* val) { -#if defined(HAVE_STL) WorkItem* w = new WorkItem(next_id_++, val); WorkList::iterator p = work_->find(parentid); if (p != work_->end()) { @@ -340,13 +304,9 @@ void BBSLocalServer::post_todo(int parentid, MessageValue* val) { #if debug printf("srvr_post_todo id=%d pid=%d\n", w->id_, parentid); #endif -#else - nostl(); -#endif } void BBSLocalServer::post_result(int id, MessageValue* val) { -#if defined(HAVE_STL) WorkList::iterator i = work_->find(id); WorkItem* w = (WorkItem*) ((*i).second); val->ref(); @@ -356,11 +316,9 @@ void BBSLocalServer::post_result(int id, MessageValue* val) { #if debug printf("srvr_post_done id=%d pid=%d\n", id, w->parent_ ? 
w->parent_->id_ : 0); #endif -#endif } int BBSLocalServer::look_take_todo(MessageValue** m) { -#if defined(HAVE_STL) ReadyList::iterator i = todo_->begin(); if (i != todo_->end()) { WorkItem* w = (*i); @@ -377,14 +335,9 @@ int BBSLocalServer::look_take_todo(MessageValue** m) { #endif return 0; } -#else - nostl(); - return 0; -#endif } int BBSLocalServer::look_take_result(int pid, MessageValue** m) { -#if defined(HAVE_STL) ResultList::iterator i = results_->find(pid); if (i != results_->end()) { WorkItem* w = (WorkItem*) ((*i).second); @@ -405,8 +358,4 @@ int BBSLocalServer::look_take_result(int pid, MessageValue** m) { #endif return 0; } -#else - nostl(); - return 0; -#endif } diff --git a/src/parallel/bbssrv2mpi.cpp b/src/parallel/bbssrv2mpi.cpp index 8a624ede13..0a2776445e 100644 --- a/src/parallel/bbssrv2mpi.cpp +++ b/src/parallel/bbssrv2mpi.cpp @@ -1,5 +1,4 @@ #include <../../nrnconf.h> -#include "bbsconf.h" #include #if NRNMPI // to end of file #include @@ -14,17 +13,8 @@ void nrnbbs_context_wait(); BBSDirectServer* BBSDirectServer::server_; -#if defined(HAVE_STL) -#if defined(HAVE_SSTREAM) // the standard ... #include #include -#else -#include -#include -#include -#include -#include -#endif #define debug 0 @@ -78,7 +68,7 @@ WorkItem::WorkItem(int id, bbsmpibuf* buf, int cid) { id_ = id; buf_ = buf; cid_ = cid; - parent_ = nil; + parent_ = nullptr; } WorkItem::~WorkItem() { @@ -109,17 +99,8 @@ class WorkList: public std::map {}; class LookingToDoList: public std::set {}; class ReadyList: public std::set {}; class ResultList: public std::multimap {}; -#else -class MessageList {}; -class PendingList {}; -class WorkList {}; -class LookingToDoList {}; -class ReadyList {}; -class ResultList {}; -#endif BBSDirectServer::BBSDirectServer() { -#if defined(HAVE_STL) messages_ = new MessageList(); work_ = new WorkList(); todo_ = new ReadyList(); @@ -128,13 +109,11 @@ BBSDirectServer::BBSDirectServer() { looking_todo_ = new LookingToDoList(); send_context_ = new LookingToDoList(); next_id_ = FIRSTID; - context_buf_ = nil; + context_buf_ = nullptr; remaining_context_cnt_ = 0; -#endif } BBSDirectServer::~BBSDirectServer() { -#if defined(HAVE_STL) delete todo_; delete results_; delete looking_todo_; @@ -144,7 +123,6 @@ BBSDirectServer::~BBSDirectServer() { delete messages_; delete work_; delete send_context_; -#endif } bool BBSDirectServer::look_take(const char* key, bbsmpibuf** recv) { @@ -152,9 +130,8 @@ bool BBSDirectServer::look_take(const char* key, bbsmpibuf** recv) { printf("DirectServer::look_take |%s|\n", key); #endif bool b = false; -#if defined(HAVE_STL) nrnmpi_unref(*recv); - *recv = nil; + *recv = nullptr; MessageList::iterator m = messages_->find(key); if (m != messages_->end()) { b = true; @@ -166,7 +143,6 @@ bool BBSDirectServer::look_take(const char* key, bbsmpibuf** recv) { } #if debug printf("DirectServer::look_take |%s| recv=%p return %d\n", key, (*recv), b); -#endif #endif return b; } @@ -177,8 +153,7 @@ bool BBSDirectServer::look(const char* key, bbsmpibuf** recv) { #endif bool b = false; nrnmpi_unref(*recv); - *recv = nil; -#if defined(HAVE_STL) + *recv = nullptr; MessageList::iterator m = messages_->find(key); if (m != messages_->end()) { b = true; @@ -189,24 +164,20 @@ bool BBSDirectServer::look(const char* key, bbsmpibuf** recv) { } #if debug printf("DirectServer::look |%s| recv=%p return %d\n", key, (*recv), b); -#endif #endif return b; } void BBSDirectServer::put_pending(const char* key, int cid) { -#if defined(HAVE_STL) #if debug printf("put_pending |%s| %d\n", key, 
cid); #endif char* s = newstr(key); pending_->insert(std::pair(s, cid)); -#endif } bool BBSDirectServer::take_pending(const char* key, int* cid) { bool b = false; -#if defined(HAVE_STL) PendingList::iterator p = pending_->find(key); if (p != pending_->end()) { *cid = (*p).second; @@ -218,12 +189,10 @@ bool BBSDirectServer::take_pending(const char* key, int* cid) { delete[] s; b = true; } -#endif return b; } void BBSDirectServer::post(const char* key, bbsmpibuf* send) { -#if defined(HAVE_STL) int cid; #if debug printf("DirectServer::post |%s| send=%p\n", key, send); @@ -235,17 +204,13 @@ void BBSDirectServer::post(const char* key, bbsmpibuf* send) { std::pair(newstr(key), send)); nrnmpi_ref(send); } -#endif } void BBSDirectServer::add_looking_todo(int cid) { -#if defined(HAVE_STL) looking_todo_->insert(cid); -#endif } void BBSDirectServer::post_todo(int pid, int cid, bbsmpibuf* send) { -#if defined(HAVE_STL) #if debug printf("BBSDirectServer::post_todo pid=%d cid=%d send=%p\n", pid, cid, send); #endif @@ -271,11 +236,9 @@ void BBSDirectServer::post_todo(int pid, int cid, bbsmpibuf* send) { #endif todo_->insert(w); } -#endif } void BBSDirectServer::context(bbsmpibuf* send) { -#if defined(HAVE_STL) int cid, j; #if debug printf("numprocs_bbs=%d\n", nrnmpi_numprocs_bbs); @@ -291,7 +254,7 @@ void BBSDirectServer::context(bbsmpibuf* send) { Printf("some workers did not receive previous context\n"); send_context_->erase(send_context_->begin(), send_context_->end()); nrnmpi_unref(context_buf_); - context_buf_ = nil; + context_buf_ = nullptr; } remaining_context_cnt_ = nrnmpi_numprocs_bbs - 1; for (j = 1; j < nrnmpi_numprocs_bbs; ++j) { @@ -315,7 +278,6 @@ void BBSDirectServer::context(bbsmpibuf* send) { nrnmpi_ref(context_buf_); handle(); } -#endif } void nrnbbs_context_wait() { @@ -333,7 +295,6 @@ void BBSDirectServer::context_wait() { } bool BBSDirectServer::send_context(int cid) { -#if defined(HAVE_STL) LookingToDoList::iterator i = send_context_->find(cid); if (i != send_context_->end()) { send_context_->erase(i); @@ -343,16 +304,14 @@ bool BBSDirectServer::send_context(int cid) { nrnmpi_bbssend(cid, CONTEXT + 1, context_buf_); if (--remaining_context_cnt_ <= 0) { nrnmpi_unref(context_buf_); - context_buf_ = nil; + context_buf_ = nullptr; } return true; } -#endif return false; } void BBSDirectServer::post_result(int id, bbsmpibuf* send) { -#if defined(HAVE_STL) #if debug printf("DirectServer::post_result id=%d send=%p\n", id, send); #endif @@ -362,16 +321,14 @@ void BBSDirectServer::post_result(int id, bbsmpibuf* send) { nrnmpi_unref(w->buf_); w->buf_ = send; results_->insert(std::pair(w->parent_ ? 
w->parent_->id_ : 0, w)); -#endif } int BBSDirectServer::look_take_todo(bbsmpibuf** recv) { -#if defined(HAVE_STL) #if debug printf("DirectServer::look_take_todo\n"); #endif nrnmpi_unref(*recv); - *recv = nil; + *recv = nullptr; ReadyList::iterator i = todo_->begin(); if (i != todo_->end()) { WorkItem* w = (WorkItem*) (*i); @@ -388,18 +345,14 @@ int BBSDirectServer::look_take_todo(bbsmpibuf** recv) { } else { return 0; } -#else - return 0; -#endif } int BBSDirectServer::look_take_result(int pid, bbsmpibuf** recv) { #if debug printf("DirectServer::look_take_result pid=%d\n", pid); #endif -#if defined(HAVE_STL) nrnmpi_unref(*recv); - *recv = nil; + *recv = nullptr; ResultList::iterator i = results_->find(pid); if (i != results_->end()) { WorkItem* w = (WorkItem*) ((*i).second); @@ -416,9 +369,6 @@ int BBSDirectServer::look_take_result(int pid, bbsmpibuf** recv) { } else { return 0; } -#else - return 0; -#endif } #endif // NRNMPI diff --git a/src/parallel/bbssrvmpi.cpp b/src/parallel/bbssrvmpi.cpp index 894d48e651..53aef3cd3b 100644 --- a/src/parallel/bbssrvmpi.cpp +++ b/src/parallel/bbssrvmpi.cpp @@ -1,9 +1,10 @@ #include <../../nrnconf.h> -#include "bbsconf.h" #include #ifdef NRNMPI // to end of file #include +#ifdef HAVE_UNISTD_H #include +#endif #include "bbssrv2mpi.h" #include "bbssrv.h" @@ -65,7 +66,7 @@ void BBSDirectServer::handle1(int size, int tag, int cid) { bbsmpibuf* send; char* key; int index; - send = nil; + send = nullptr; recv = nrnmpi_newbuf(size); nrnmpi_ref(recv); tag = nrnmpi_bbsrecv(cid, recv); @@ -104,7 +105,7 @@ void BBSDirectServer::handle1(int size, int tag, int cid) { nrnmpi_bbssend(cid, LOOK_YES, send); nrnmpi_unref(send); } else { - nrnmpi_bbssend(cid, LOOK_NO, nil); + nrnmpi_bbssend(cid, LOOK_NO, nullptr); } break; case LOOK_TAKE: @@ -119,7 +120,7 @@ void BBSDirectServer::handle1(int size, int tag, int cid) { nrnmpi_bbssend(cid, LOOK_TAKE_YES, send); nrnmpi_unref(send); } else { - nrnmpi_bbssend(cid, LOOK_TAKE_NO, nil); + nrnmpi_bbssend(cid, LOOK_TAKE_NO, nullptr); } break; case TAKE: diff --git a/src/parallel/ocbbs.cpp b/src/parallel/ocbbs.cpp index e36b616785..0df1804fdf 100644 --- a/src/parallel/ocbbs.cpp +++ b/src/parallel/ocbbs.cpp @@ -1,5 +1,4 @@ #include <../../nrnconf.h> -#include "bbsconf.h" #include #include "classreg.h" #include "oc2iv.h" @@ -12,6 +11,7 @@ #include "section.h" #include "membfunc.h" #include "multicore.h" +#include "nrnpy.h" #include "utils/profile/profiler_interface.h" #include #include @@ -33,13 +33,9 @@ extern int nrn_set_timeout(int timeout); extern void nrnmpi_gid_clear(int); double nrnmpi_rtcomp_time_; extern double nrn_multisend_receive_time(int); -char* (*nrnpy_po2pickle)(Object*, size_t*); -Object* (*nrnpy_pickle2po)(char*, size_t); -char* (*nrnpy_callpicklef)(char*, size_t, int, size_t*); -Object* (*nrnpympi_alltoall_type)(int, int); extern void nrn_prcellstate(int gid, const char* suffix); double nrnmpi_step_wait_; -#if PARANEURON +#if NRNMPI double nrnmpi_transfer_wait_; double nrnmpi_splitcell_wait_; #endif @@ -53,6 +49,7 @@ static void nrnmpi_dbl_broadcast(double*, int, int) {} extern double* nrn_mech_wtime_; extern int nrn_nthread; extern void nrn_thread_partition(int, Object*); +extern Object** nrn_get_thread_partition(int); extern int nrn_allow_busywait(int); extern int nrn_how_many_processors(); extern size_t nrncore_write(); @@ -127,8 +124,8 @@ static int submit_help(OcBBS* bbs) { } else { Object* ob = *hoc_objgetarg(i++); size_t size; - if (nrnpy_po2pickle) { - pname = (*nrnpy_po2pickle)(ob, &size); + if 
(neuron::python::methods.po2pickle) { + pname = neuron::python::methods.po2pickle(ob, &size); } if (pname) { style = 3; @@ -165,9 +162,9 @@ static int submit_help(OcBBS* bbs) { if (hoc_is_str_arg(i)) { bbs->pkint(0); // hoc statement style bbs->pkstr(gargstr(i)); - } else if (nrnpy_po2pickle) { + } else if (neuron::python::methods.po2pickle) { size_t size; - pname = (*nrnpy_po2pickle)(*hoc_objgetarg(i), &size); + pname = neuron::python::methods.po2pickle(*hoc_objgetarg(i), &size); bbs->pkint(3); // pyfun with no arg style bbs->pkpickle(pname, size); bbs->pkint(0); // argtypes @@ -290,7 +287,7 @@ static void pack_help(int i, OcBBS* bbs) { bbs->pkvec(n, px); } else { // must be a PythonObject size_t size; - char* s = nrnpy_po2pickle(*hoc_objgetarg(i), &size); + char* s = neuron::python::methods.po2pickle(*hoc_objgetarg(i), &size); bbs->pkpickle(s, size); delete[] s; } @@ -376,8 +373,8 @@ static Object** upkpyobj(void* v) { OcBBS* bbs = (OcBBS*) v; size_t n; char* s = bbs->upkpickle(&n); - assert(nrnpy_pickle2po); - Object* po = (*nrnpy_pickle2po)(s, n); + assert(neuron::python::methods.pickle2po); + Object* po = neuron::python::methods.pickle2po(s, n); delete[] s; return hoc_temp_objptr(po); } @@ -388,8 +385,8 @@ static Object** pyret(void* v) { } Object** BBS::pyret() { assert(impl_->pickle_ret_); - assert(nrnpy_pickle2po); - Object* po = (*nrnpy_pickle2po)(impl_->pickle_ret_, impl_->pickle_ret_size_); + assert(neuron::python::methods.pickle2po); + Object* po = neuron::python::methods.pickle2po(impl_->pickle_ret_, impl_->pickle_ret_size_); delete[] impl_->pickle_ret_; impl_->pickle_ret_ = 0; impl_->pickle_ret_size_ = 0; @@ -397,14 +394,14 @@ Object** BBS::pyret() { } static Object** py_alltoall_type(int type) { - assert(nrnpympi_alltoall_type); + assert(neuron::python::methods.mpi_alltoall_type); // for py_gather, py_broadcast, and py_scatter, // the second arg refers to the root rank of the operation (default 0) int size = 0; if (ifarg(2)) { size = int(chkarg(2, -1, 2.14748e9)); } - Object* po = (*nrnpympi_alltoall_type)(size, type); + Object* po = neuron::python::methods.mpi_alltoall_type(size, type); return hoc_temp_objptr(po); } @@ -473,7 +470,7 @@ static double vtransfer_time(void* v) { int mode = ifarg(1) ? int(chkarg(1, 0., 2.)) : 0; if (mode == 2) { return nrnmpi_rtcomp_time_; -#if PARANEURON +#if NRNMPI } else if (mode == 1) { return nrnmpi_splitcell_wait_; } else { @@ -514,7 +511,7 @@ static double wait_time(void* v) { static double step_time(void* v) { double w = ((OcBBS*) v)->integ_time(); -#if PARANEURON +#if NRNMPI w -= nrnmpi_transfer_wait_ + nrnmpi_splitcell_wait_; #endif return w; @@ -525,7 +522,7 @@ static double step_wait(void* v) { nrnmpi_step_wait_ = chkarg(1, -1.0, 0.0); } double w = nrnmpi_step_wait_; -#if PARANEURON +#if NRNMPI // sadly, no calculation of transfer and multisplit barrier times. #endif if (w < 0.) { @@ -702,7 +699,7 @@ static double spike_stat(void* v) { static double maxhist(void* v) { OcBBS* bbs = (OcBBS*) v; - IvocVect* vec = ifarg(1) ? vector_arg(1) : nil; + IvocVect* vec = ifarg(1) ? 
vector_arg(1) : nullptr; if (vec) { hoc_obj_ref(vec->obj_); } @@ -938,6 +935,11 @@ static double partition(void*) { return 0.0; } +static Object** get_partition(void*) { + return nrn_get_thread_partition(int(chkarg(1, 0, nrn_nthread - 1))); + ; +} + static double thread_stat(void*) { // nrn_thread_stat was called here but didn't do anything return 0.0; @@ -994,6 +996,20 @@ static double nrncorewrite_argvec(void*) { return double(nrncore_write()); } +static double print_memory_stats(void*) { + neuron::container::MemoryUsage local_memory_usage = neuron::container::local_memory_usage(); + +#if NRNMPI + neuron::container::MemoryStats memory_stats; + nrnmpi_memory_stats(memory_stats, local_memory_usage); + nrnmpi_print_memory_stats(memory_stats); +#else + print_memory_usage(local_memory_usage); +#endif + + return 1.0; +} + static double nrncorewrite_argappend(void*) { if (ifarg(2) && !hoc_is_double_arg(2)) { hoc_execerror( @@ -1104,6 +1120,7 @@ static Member_func members[] = {{"submit", submit}, {"nrncore_write", nrncorewrite_argappend}, {"nrnbbcore_register_mapping", nrnbbcore_register_mapping}, {"nrncore_run", nrncorerun}, + {"print_memory_stats", print_memory_stats}, {0, 0}}; @@ -1113,6 +1130,7 @@ static Member_ret_obj_func retobj_members[] = {{"upkvec", upkvec}, {"gid2obj", gid2obj}, {"gid2cell", gid2cell}, {"gid_connect", gid_connect}, + {"get_partition", get_partition}, {"upkpyobj", upkpyobj}, {"pyret", pyret}, {"py_alltoall", py_alltoall}, @@ -1139,7 +1157,7 @@ static void destruct(void* v) { } void ParallelContext_reg() { - class2oc("ParallelContext", cons, destruct, members, nil, retobj_members, retstr_members); + class2oc("ParallelContext", cons, destruct, members, nullptr, retobj_members, retstr_members); } char* BBSImpl::execute_helper(size_t* size, int id, bool exec) { @@ -1163,7 +1181,7 @@ char* BBSImpl::execute_helper(size_t* size, int id, bool exec) { nrnmpi_int_broadcast(&size, 1, 0); nrnmpi_char_broadcast(s, size, 0); } - hoc_obj_run(s, nil); + hoc_obj_run(s, nullptr); delete[] s; break; default: { @@ -1171,7 +1189,7 @@ char* BBSImpl::execute_helper(size_t* size, int id, bool exec) { int i, j; size_t npickle; Symbol* fname = 0; - Object* ob = nil; + Object* ob = nullptr; char* sarg[20]; // upto 20 argument may be strings int ns = 0; // number of args that are strings int narg = 0; // total number of args @@ -1193,7 +1211,7 @@ char* BBSImpl::execute_helper(size_t* size, int id, bool exec) { if (ob->index == i) { break; } - ob = nil; + ob = nullptr; } if (!ob) { fprintf(stderr, "%s[%d] is not an Object in this process\n", s, i); @@ -1273,21 +1291,21 @@ char* BBSImpl::execute_helper(size_t* size, int id, bool exec) { nrnmpi_int_broadcast(&size, 1, 0); nrnmpi_char_broadcast(s, size, 0); } - assert(nrnpy_pickle2po); - Object* po = nrnpy_pickle2po(s, n); + assert(neuron::python::methods.pickle2po); + Object* po = neuron::python::methods.pickle2po(s, n); delete[] s; hoc_pushobj(hoc_temp_objptr(po)); } } if (style == 3) { - assert(nrnpy_callpicklef); + assert(neuron::python::methods.call_picklef); if (pickle_ret_) { delete[] pickle_ret_; pickle_ret_ = 0; pickle_ret_size_ = 0; } if (exec) { - rs = (*nrnpy_callpicklef)(s, npickle, narg, size); + rs = neuron::python::methods.call_picklef(s, npickle, narg, size); } hoc_ac_ = 0.; } else { diff --git a/src/parallel/subworld.cpp b/src/parallel/subworld.cpp index 789cf6c9ba..c9b82ef2a3 100644 --- a/src/parallel/subworld.cpp +++ b/src/parallel/subworld.cpp @@ -27,7 +27,7 @@ void BBSImpl::subworld_worker_execute() { nrnmpi_int_broadcast(&size, 
1, 0); // includes terminator char* s = new char[size]; nrnmpi_char_broadcast(s, size, 0); - hoc_obj_run(s, nil); + hoc_obj_run(s, nullptr); delete[] s; // printf("%d leave subworld_worker_execute\n", nrnmpi_myid_world); return; @@ -36,7 +36,7 @@ void BBSImpl::subworld_worker_execute() { int npickle; char* s; Symbol* fname = 0; - Object* ob = nil; + Object* ob = nullptr; char* sarg[20]; // up to 20 arguments may be strings int ns = 0; // number of args that are strings int narg = 0; // total number of args @@ -89,7 +89,7 @@ void BBSImpl::subworld_worker_execute() { char* s; s = new char[n]; nrnmpi_char_broadcast(s, n, 0); - Object* po = nrnpy_pickle2po(s, size_t(n)); + Object* po = neuron::python::methods.pickle2po(s, size_t(n)); delete[] s; hoc_pushobj(hoc_temp_objptr(po)); } @@ -97,7 +97,7 @@ void BBSImpl::subworld_worker_execute() { if (style == 3) { size_t size; - char* rs = (*nrnpy_callpicklef)(s, size_t(npickle), narg, &size); + char* rs = neuron::python::methods.call_picklef(s, size_t(npickle), narg, &size); assert(rs); delete[] rs; } else { diff --git a/src/scopmath/f2c.h b/src/scopmath/f2c.h index 2c2a02c128..61ff45830f 100644 --- a/src/scopmath/f2c.h +++ b/src/scopmath/f2c.h @@ -157,9 +157,6 @@ struct Namelist { int nvars; }; typedef struct Namelist Namelist; -#ifdef MAC -#undef abs -#endif #define abs(x) ((x) >= 0 ? (x) : -(x)) #define dabs(x) (doublereal) abs(x) #define min(a, b) ((a) <= (b) ? (a) : (b)) diff --git a/src/scopmath/row_view.hpp b/src/scopmath/row_view.hpp new file mode 100644 index 0000000000..a033b37648 --- /dev/null +++ b/src/scopmath/row_view.hpp @@ -0,0 +1,35 @@ +#pragma once +#include "neuron/container/non_owning_soa_identifier.hpp" +namespace neuron::scopmath { +template +struct row_view { + row_view(MechRange* ml, std::size_t iml) + : m_iml{iml} + , m_ml{ml} {} + [[nodiscard]] double& operator[](container::field_index ind) { + return m_ml->data(m_iml, ind); + } + [[nodiscard]] double const& operator[](container::field_index ind) const { + return m_ml->data(m_iml, ind); + } + /** + * @brief Wrapper taking plain int indices. + * @see @ref simeq for why :-( + */ + [[nodiscard]] double& operator[](int col) { + return m_ml->data(m_iml, container::field_index{col, 0}); + } + + /** + * @brief Wrapper taking plain int indices. 
+ * @see @ref simeq for why :-( + */ + [[nodiscard]] double const& operator[](int col) const { + return m_ml->data(m_iml, container::field_index{col, 0}); + } + + private: + std::size_t m_iml{}; + MechRange* m_ml{}; +}; +} // namespace neuron::scopmath diff --git a/src/scopmath/scoplib.h b/src/scopmath/scoplib.h index e844e899f8..91f6cf191b 100644 --- a/src/scopmath/scoplib.h +++ b/src/scopmath/scoplib.h @@ -12,6 +12,7 @@ #include "euler_thread.hpp" #include "newton.hpp" #include "newton_thread.hpp" +#include "row_view.hpp" #include "runge.hpp" #include "simeq.hpp" #include "sparse.hpp" diff --git a/src/sparse13/CMakeLists.txt b/src/sparse13/CMakeLists.txt new file mode 100644 index 0000000000..b2adc31dc2 --- /dev/null +++ b/src/sparse13/CMakeLists.txt @@ -0,0 +1,3 @@ +add_library(sparse13 STATIC spalloc.cpp spbuild.cpp spfactor.cpp spoutput.cpp spsolve.cpp + sputils.cpp) +set_property(TARGET sparse13 PROPERTY POSITION_INDEPENDENT_CODE ON) diff --git a/src/sparse13/cspalloc.cpp b/src/sparse13/cspalloc.cpp deleted file mode 100644 index 29b7ee44c8..0000000000 --- a/src/sparse13/cspalloc.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spalloc.cpp" diff --git a/src/sparse13/cspbuild.cpp b/src/sparse13/cspbuild.cpp deleted file mode 100644 index b002c1e5c2..0000000000 --- a/src/sparse13/cspbuild.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spbuild.cpp" diff --git a/src/sparse13/cspfactor.cpp b/src/sparse13/cspfactor.cpp deleted file mode 100644 index 2a9d91aaaa..0000000000 --- a/src/sparse13/cspfactor.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spfactor.cpp" diff --git a/src/sparse13/cspmatrix.h b/src/sparse13/cspmatrix.h deleted file mode 100755 index 7200d07b55..0000000000 --- a/src/sparse13/cspmatrix.h +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spmatrix.h" diff --git a/src/sparse13/cspoutput.cpp b/src/sparse13/cspoutput.cpp deleted file mode 100644 index 59325c189a..0000000000 --- a/src/sparse13/cspoutput.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spoutput.cpp" diff --git a/src/sparse13/cspredef.h b/src/sparse13/cspredef.h deleted file mode 100644 index 2df7687583..0000000000 --- a/src/sparse13/cspredef.h +++ /dev/null @@ -1,49 +0,0 @@ -/* mostly generated from -cat temp | /usr/bin/tr -cs "[:alpha:]" "[\n*]" | sort | uniq | grep '^sp' > sp_redef.h -where temp is the last part of spmatrix.h -*/ -#define spClear cmplx_spClear -#define spCondition cmplx_spCondition -#define spCreate cmplx_spCreate -#define spDeleteRowAndCol cmplx_spDeleteRowAndCol -#define spDestroy cmplx_spDestroy -#define spDeterminant cmplx_spDeterminant -#define spElementCount cmplx_spElementCount -#define spError cmplx_spError -#define spFactor cmplx_spFactor -#define spFileMatrix cmplx_spFileMatrix -#define spFileStats cmplx_spFileStats -#define spFileVector cmplx_spFileVector -#define spFillinCount cmplx_spFillinCount -#define spGetAdmittance cmplx_spGetAdmittance -#define spGetElement cmplx_spGetElement -#define spGetInitInfo cmplx_spGetInitInfo -#define spGetOnes cmplx_spGetOnes -#define spGetQuad cmplx_spGetQuad -#define spGetSize cmplx_spGetSize -#define spInitialize cmplx_spInitialize -#define spInstallInitInfo cmplx_spInstallInitInfo -#define spLargestElement cmplx_spLargestElement -#define spMNA_Preorder cmplx_spMNA_Preorder -#define spMultTransposed cmplx_spMultTransposed -#define spMultiply cmplx_spMultiply -#define spNorm cmplx_spNorm -#define spOrderAndFactor cmplx_spOrderAndFactor -#define 
spPartition cmplx_spPartition -#define spPrint cmplx_spPrint -#define spPseudoCondition cmplx_spPseudoCondition -#define spRoundoff cmplx_spRoundoff -#define spScale cmplx_spScale -#define spSetComplex cmplx_spSetComplex -#define spSetReal cmplx_spSetReal -#define spSolve cmplx_spSolve -#define spSolveTransposed cmplx_spSolveTransposed -#define spStripFills cmplx_spStripFills -#define spWhereSingular cmplx_spWhereSingular -#define spcGetFillin cmplx_spcGetFillin -#define spcGetElement cmplx_spcGetElement -#define spcLinkRows cmplx_spcLinkRows -#define spcFindElementInCol cmplx_spcFindElementInCol -#define spcCreateElement cmplx_spcCreateElement -#define spcRowExchange cmplx_spcRowExchange -#define spcColExchange cmplx_spcColExchange diff --git a/src/sparse13/cspsolve.cpp b/src/sparse13/cspsolve.cpp deleted file mode 100644 index f1aeba7d67..0000000000 --- a/src/sparse13/cspsolve.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "spsolve.cpp" diff --git a/src/sparse13/csputils.cpp b/src/sparse13/csputils.cpp deleted file mode 100644 index 0f0363b8ae..0000000000 --- a/src/sparse13/csputils.cpp +++ /dev/null @@ -1,2 +0,0 @@ -#define cmplx_spPrefix -#include "sputils.cpp" diff --git a/src/sparse13/spalloc.cpp b/src/sparse13/spalloc.cpp index b8223ef4c4..ab85ebefd9 100644 --- a/src/sparse13/spalloc.cpp +++ b/src/sparse13/spalloc.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX ALLOCATION MODULE * diff --git a/src/sparse13/spbuild.cpp b/src/sparse13/spbuild.cpp index 5d4260ed1b..3b7e9a1da2 100644 --- a/src/sparse13/spbuild.cpp +++ b/src/sparse13/spbuild.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX BUILD MODULE * diff --git a/src/sparse13/spconfig.h b/src/sparse13/spconfig.h index e0b5348bbc..ae52e16216 100644 --- a/src/sparse13/spconfig.h +++ b/src/sparse13/spconfig.h @@ -415,7 +415,6 @@ * do not provide the standard header files yet. */ #endif -#if defined(HAVE_LIMITS_H) #include #include #define MACHINE_RESOLUTION DBL_EPSILON @@ -423,37 +422,6 @@ #define SMALLEST_REAL DBL_MIN #define LARGEST_SHORT_INTEGER SHRT_MAX #define LARGEST_LONG_INTEGER LONG_MAX -#else /* do not have limits.h */ /* NOT defined(__STDC__) */ - -/* VAX machine constants */ -#ifdef vax -#define MACHINE_RESOLUTION 6.93889e-18 -#define LARGEST_REAL 1.70141e+38 -#define SMALLEST_REAL 2.938743e-39 -#define LARGEST_SHORT_INTEGER 32766 -#define LARGEST_LONG_INTEGER 2147483646 -#endif - -/* hp9000 machine constants */ -#ifdef hpux -/* These values are correct for hp9000/300. Should be correct for others. */ -#define MACHINE_RESOLUTION 8.9e-15 -#define LARGEST_REAL 1.79769313486231e+308 -#define SMALLEST_REAL 2.22507385850721e-308 -#define LARGEST_SHORT_INTEGER 32766 -#define LARGEST_LONG_INTEGER 2147483646 -#endif - -/* Sun machine constants */ -#ifdef sun -/* These values are rumored to be the correct values. 
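Looking back at the src/scopmath/row_view.hpp hunk above: the wrapper exists so scopmath kernels can keep writing x[i] against one instance (row) of a mechanism's structure-of-arrays data, whether x is a raw double* or a row_view. A rough sketch of that call pattern, with the kernel, ml and iml names purely illustrative:

// A scopmath-style kernel written against "anything indexable by int".
template <typename Row>
double weighted_sum(Row&& x, const double* w, int n) {
    double s = 0.0;
    for (int i = 0; i < n; ++i) {
        // For a row_view, x[i] forwards to ml->data(iml, field_index{i, 0}).
        s += w[i] * x[i];
    }
    return s;
}
// Called either with a plain array:                weighted_sum(raw_row, w, n)
// or with the new wrapper (hypothetical ml, iml):  weighted_sum(neuron::scopmath::row_view{ml, iml}, w, n)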
*/ -#define MACHINE_RESOLUTION 8.9e-15 -#define LARGEST_REAL 1.79769313486231e+308 -#define SMALLEST_REAL 2.22507385850721e-308 -#define LARGEST_SHORT_INTEGER 32766 -#define LARGEST_LONG_INTEGER 2147483646 -#endif -#endif /* NOT defined(__STDC__) */ /* * ANNOTATION diff --git a/src/sparse13/spdefs.h b/src/sparse13/spdefs.h index e3fff9ac78..f328b9d247 100644 --- a/src/sparse13/spdefs.h +++ b/src/sparse13/spdefs.h @@ -388,7 +388,7 @@ * MEMORY ALLOCATION */ #if 1 -#include +#include "spmatrix.h" #include #else #if !defined(__MWERKS__) diff --git a/src/sparse13/spfactor.cpp b/src/sparse13/spfactor.cpp index e6d343dcc8..c1d04bda2f 100644 --- a/src/sparse13/spfactor.cpp +++ b/src/sparse13/spfactor.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX FACTORIZATION MODULE * @@ -67,6 +64,7 @@ static char RCSid[] = "@(#)$Header$"; #include "spconfig.h" #include "spdefs.h" #include "spmatrix.h" +#include /* avoid "declared implicitly `extern' and later `static' " warnings. */ static int FactorComplexMatrix(MatrixPtr Matrix); @@ -896,10 +894,10 @@ static void MarkowitzProducts(MatrixPtr Matrix, int Step) for (I = Step; I <= Size; I++) { /* If chance of overflow, use real numbers. */ - if ((*pMarkowitzRow > LARGEST_SHORT_INTEGER AND * pMarkowitzCol != 0) OR(*pMarkowitzCol > LARGEST_SHORT_INTEGER AND * pMarkowitzRow != 0)) { + if ((*pMarkowitzRow > SHRT_MAX AND * pMarkowitzCol != 0) OR(*pMarkowitzCol > SHRT_MAX AND * pMarkowitzRow != 0)) { fProduct = (double)(*pMarkowitzRow++) * (double)(*pMarkowitzCol++); - if (fProduct >= (double)LARGEST_LONG_INTEGER) - *pMarkowitzProduct++ = LARGEST_LONG_INTEGER; + if (fProduct >= (double)LONG_MAX) + *pMarkowitzProduct++ = LONG_MAX; else *pMarkowitzProduct++ = fProduct; } else { @@ -1228,7 +1226,7 @@ static ElementPtr QuicklySearchDiagonal(MatrixPtr Matrix, int Step) /* Begin `QuicklySearchDiagonal'. */ NumberOfTies = -1; - MinMarkowitzProduct = LARGEST_LONG_INTEGER; + MinMarkowitzProduct = LONG_MAX; pMarkowitzProduct = &(Matrix->MarkowitzProd[Matrix->Size + 2]); Matrix->MarkowitzProd[Matrix->Size + 1] = Matrix->MarkowitzProd[Step]; @@ -1409,7 +1407,7 @@ static ElementPtr QuicklySearchDiagonal(MatrixPtr Matrix, int Step) /* Begin `QuicklySearchDiagonal'. */ ChosenPivot = NULL; - MinMarkowitzProduct = LARGEST_LONG_INTEGER; + MinMarkowitzProduct = LONG_MAX; pMarkowitzProduct = &(Matrix->MarkowitzProd[Matrix->Size + 2]); Matrix->MarkowitzProd[Matrix->Size + 1] = Matrix->MarkowitzProd[Step]; @@ -1563,7 +1561,7 @@ static ElementPtr SearchDiagonal(MatrixPtr Matrix, int Step) /* Begin `SearchDiagonal'. */ ChosenPivot = NULL; - MinMarkowitzProduct = LARGEST_LONG_INTEGER; + MinMarkowitzProduct = LONG_MAX; pMarkowitzProduct = &(Matrix->MarkowitzProd[Size + 2]); Matrix->MarkowitzProd[Size + 1] = Matrix->MarkowitzProd[Step]; @@ -1674,7 +1672,7 @@ static ElementPtr SearchEntireMatrix(MatrixPtr Matrix, int Step) /* Begin `SearchEntireMatrix'. */ ChosenPivot = NULL; LargestElementMag = 0.0; - MinMarkowitzProduct = LARGEST_LONG_INTEGER; + MinMarkowitzProduct = LONG_MAX; /* Start search of matrix on column by column basis. */ for (I = Step; I <= Size; I++) { @@ -2570,10 +2568,10 @@ static void UpdateMarkowitzNumbers(MatrixPtr Matrix, ElementPtr pPivot) --MarkoRow[Row]; /* Form Markowitz product while being cautious of overflows. 
*/ - if ((MarkoRow[Row] > LARGEST_SHORT_INTEGER AND MarkoCol[Row] != 0) OR(MarkoCol[Row] > LARGEST_SHORT_INTEGER AND MarkoRow[Row] != 0)) { + if ((MarkoRow[Row] > SHRT_MAX AND MarkoCol[Row] != 0) OR(MarkoCol[Row] > SHRT_MAX AND MarkoRow[Row] != 0)) { Product = MarkoCol[Row] * MarkoRow[Row]; - if (Product >= (double)LARGEST_LONG_INTEGER) - Matrix->MarkowitzProd[Row] = LARGEST_LONG_INTEGER; + if (Product >= (double)LONG_MAX) + Matrix->MarkowitzProd[Row] = LONG_MAX; else Matrix->MarkowitzProd[Row] = Product; } else @@ -2587,10 +2585,10 @@ static void UpdateMarkowitzNumbers(MatrixPtr Matrix, ElementPtr pPivot) --MarkoCol[Col]; /* Form Markowitz product while being cautious of overflows. */ - if ((MarkoRow[Col] > LARGEST_SHORT_INTEGER AND MarkoCol[Col] != 0) OR(MarkoCol[Col] > LARGEST_SHORT_INTEGER AND MarkoRow[Col] != 0)) { + if ((MarkoRow[Col] > SHRT_MAX AND MarkoCol[Col] != 0) OR(MarkoCol[Col] > SHRT_MAX AND MarkoRow[Col] != 0)) { Product = MarkoCol[Col] * MarkoRow[Col]; - if (Product >= (double)LARGEST_LONG_INTEGER) - Matrix->MarkowitzProd[Col] = LARGEST_LONG_INTEGER; + if (Product >= (double)LONG_MAX) + Matrix->MarkowitzProd[Col] = LONG_MAX; else Matrix->MarkowitzProd[Col] = Product; } else diff --git a/src/sparse13/spoutput.cpp b/src/sparse13/spoutput.cpp index fe660424b6..534025bcd2 100644 --- a/src/sparse13/spoutput.cpp +++ b/src/sparse13/spoutput.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX OUTPUT MODULE * @@ -57,6 +54,7 @@ static char RCSid[] = "$Header$"; #include "spconfig.h" #include "spdefs.h" #include "spmatrix.h" +#include #if DOCUMENTATION @@ -184,7 +182,7 @@ void spPrint(char* eMatrix, int PrintReordered, int Data, int Header) else printf("Matrix before factorization:\n"); - SmallestElement = LARGEST_REAL; + SmallestElement = DBL_MAX; SmallestDiag = SmallestElement; } @@ -667,7 +665,7 @@ int spFileStats(char* eMatrix, char* File, char* Label) /* Search matrix. 
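For reference, the spfactor.cpp edits above only swap the library's private LARGEST_SHORT_INTEGER and LARGEST_LONG_INTEGER macros for the standard SHRT_MAX and LONG_MAX limits; the overflow guard itself is unchanged. Restated as a stand-alone sketch (the helper name is illustrative):

#include <climits>

// Markowitz product of a row count and a column count. If either count is large
// enough that a long multiplication might overflow, compute the product in double
// and clamp it to LONG_MAX, as the factorization code above does.
long markowitz_product(long rows, long cols) {
    if ((rows > SHRT_MAX && cols != 0) || (cols > SHRT_MAX && rows != 0)) {
        const double product = static_cast<double>(rows) * static_cast<double>(cols);
        return product >= static_cast<double>(LONG_MAX) ? LONG_MAX : static_cast<long>(product);
    }
    return rows * cols;
}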
*/ NumberOfElements = 0; LargestElement = 0.0; - SmallestElement = LARGEST_REAL; + SmallestElement = DBL_MAX; for (I = 1; I <= Size; I++) { pElement = Matrix->FirstInCol[I]; diff --git a/src/sparse13/spsolve.cpp b/src/sparse13/spsolve.cpp index 08afbadaf3..ee70d3b769 100644 --- a/src/sparse13/spsolve.cpp +++ b/src/sparse13/spsolve.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX SOLVE MODULE * diff --git a/src/sparse13/sputils.cpp b/src/sparse13/sputils.cpp index 5ba99a9a31..66984fef77 100644 --- a/src/sparse13/sputils.cpp +++ b/src/sparse13/sputils.cpp @@ -1,6 +1,3 @@ -#ifdef HAVE_CONFIG_H -#include <../../nrnconf.h> -#endif /* * MATRIX UTILITY MODULE * @@ -69,6 +66,7 @@ static char RCSid[] = "@(#)$Header$"; #include "spconfig.h" #include "spdefs.h" #include "spmatrix.h" +#include extern void spcLinkRows(MatrixPtr); extern void spcRowExchange(MatrixPtr, int row1, int row2); @@ -1985,8 +1983,8 @@ RealNumber spRoundoff(char* eMatrix, RealNumber Rho) Reid = 3.01 * Matrix->Size; if (Gear < Reid) - return (MACHINE_RESOLUTION * Rho * Gear); + return (DBL_EPSILON * Rho * Gear); else - return (MACHINE_RESOLUTION * Rho * Reid); + return (DBL_EPSILON * Rho * Reid); } #endif diff --git a/src/sundials/shared/nvector_parallel.c b/src/sundials/shared/nvector_parallel.c index 92aff83d31..6b843651e0 100755 --- a/src/sundials/shared/nvector_parallel.c +++ b/src/sundials/shared/nvector_parallel.c @@ -19,19 +19,12 @@ #include #include -/* for NRNMPI_DYNAMICLOAD */ #include -#if NRNMPI_DYNAMICLOAD extern void nrnmpi_dbl_allreduce_vec(double* src, double* dest, int cnt, int type); extern void nrnmpi_long_allreduce_vec(long* src, long* dest, int cnt, int type); extern int nrnmpi_numprocs; -#endif #include "nvector_parallel.h" -#if NRNMPI_DYNAMICLOAD -#else -extern MPI_Comm nrnmpi_comm; -#endif #include "sundialsmath.h" #include "sundialstypes.h" @@ -92,12 +85,7 @@ N_Vector N_VNewEmpty_Parallel(MPI_Comm comm, /* Compute global length as sum of local lengths */ n = local_length; -#if NRNMPI_DYNAMICLOAD nrnmpi_long_allreduce_vec(&n, &Nsum, 1, 1); -#else - comm = nrnmpi_comm; - MPI_Allreduce(&n, &Nsum, 1, PVEC_INTEGER_MPI_TYPE, MPI_SUM, comm); -#endif if (Nsum != global_length) { printf(BAD_N); return(NULL); @@ -411,11 +399,7 @@ void N_VSpace_Parallel(N_Vector v, long int *lrw, long int *liw) int npes; comm = NV_COMM_P(v); -#if NRNMPI_DYNAMICLOAD npes = nrnmpi_numprocs; -#else - MPI_Comm_size(comm, &npes); -#endif *lrw = NV_GLOBLENGTH_P(v); *liw = 2 * npes; @@ -881,26 +865,8 @@ static realtype VAllReduce_Parallel(realtype d, int op, MPI_Comm comm) * min if op = 3. 
* The operation is over all processors in the communicator */ - realtype out = 0.0; - -#if NRNMPI_DYNAMICLOAD nrnmpi_dbl_allreduce_vec(&d, &out, 1, op); -#else - switch (op) { - case 1: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_SUM, comm); - break; - - case 2: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_MAX, comm); - break; - - case 3: MPI_Allreduce(&d, &out, 1, PVEC_REAL_MPI_TYPE, MPI_MIN, comm); - break; - - default: break; - } -#endif - return(out); } diff --git a/src/sundials/shared/nvector_parallel.h b/src/sundials/shared/nvector_parallel.h index 9e0ccc688f..8eaa2a0efe 100755 --- a/src/sundials/shared/nvector_parallel.h +++ b/src/sundials/shared/nvector_parallel.h @@ -49,13 +49,7 @@ #ifndef _NVECTOR_PARALLEL_H #define _NVECTOR_PARALLEL_H -#if NRNMPI_DYNAMICLOAD -#define MPI_DOUBLE double -#define MPI_LONG long #define MPI_Comm int -#else -#include -#endif #if defined(__cplusplus) extern "C" { diff --git a/src/sundials/sundials_config.h.in b/src/sundials/sundials_config.h.in index 0a7d88d2d1..49977f3819 100755 --- a/src/sundials/sundials_config.h.in +++ b/src/sundials/sundials_config.h.in @@ -6,39 +6,9 @@ /* Define to 1 if you have the header file. */ #undef HAVE_DLFCN_H -/* Define to 1 if you have the header file. */ -#undef HAVE_FLOAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_INTTYPES_H - /* Define to 1 if you have the `m' library (-lm). */ #undef HAVE_LIBM -/* Define to 1 if you have the header file. */ -#undef HAVE_MATH_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_MEMORY_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDINT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STDLIB_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRINGS_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_STRING_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_STAT_H - -/* Define to 1 if you have the header file. */ -#undef HAVE_SYS_TYPES_H - /* Define to 1 if you have the header file. 
*/ #undef HAVE_UNISTD_H diff --git a/src/utils/enumerate.h b/src/utils/enumerate.h new file mode 100644 index 0000000000..1a0b38f2ad --- /dev/null +++ b/src/utils/enumerate.h @@ -0,0 +1,145 @@ +#pragma once + +#include +#include +#include + +template ())), + typename = decltype(std::end(std::declval()))> +void apply_to_first(T&& iterable, value_type&& value, F&& f) { + auto it = std::find(std::begin(std::forward(iterable)), + std::end(std::forward(iterable)), + std::forward(value)); + if (it != std::end(std::forward(iterable))) { + f(it); + } +} + +template +void erase_first(T&& iterable, value_type&& value) { + apply_to_first(std::forward(iterable), + std::forward(value), + [&iterable](const auto& it) { iterable.erase(it); }); +} + +template ())), + typename = decltype(std::begin(std::declval())), + typename = decltype(std::end(std::declval()))> +constexpr auto range(T&& iterable) { + struct iterator { + std::size_t i; + TIter iter; + bool operator!=(const iterator& other) const { + return iter != other.iter; + } + void operator++() { + ++i; + ++iter; + } + auto operator*() const { + return i; + } + }; + struct iterable_wrapper { + T iterable; + auto begin() { + return iterator{0, std::begin(iterable)}; + } + auto end() { + return iterator{0, std::end(iterable)}; + } + }; + return iterable_wrapper{std::forward(iterable)}; +} + +template ())), + typename = decltype(std::rend(std::declval()))> +constexpr auto reverse(T&& iterable) { + struct iterator { + TIter iter; + bool operator!=(const iterator& other) const { + return iter != other.iter; + } + void operator++() { + ++iter; + } + auto&& operator*() const { + return *iter; + } + }; + struct iterable_wrapper { + T iterable; + auto begin() { + return iterator{std::rbegin(iterable)}; + } + auto end() { + return iterator{std::rend(iterable)}; + } + }; + return iterable_wrapper{std::forward(iterable)}; +} + +template ())), + typename = decltype(std::end(std::declval()))> +constexpr auto enumerate(T&& iterable) { + struct iterator { + std::size_t i; + TIter iter; + bool operator!=(const iterator& other) const { + return iter != other.iter; + } + void operator++() { + ++i; + ++iter; + } + auto operator*() const { + return std::tie(i, *iter); + } + }; + struct iterable_wrapper { + T iterable; + auto begin() { + return iterator{0, std::begin(iterable)}; + } + auto end() { + return iterator{0, std::end(iterable)}; + } + }; + return iterable_wrapper{std::forward(iterable)}; +} + +template ())), + typename = decltype(std::rend(std::declval()))> +constexpr auto renumerate(T&& iterable) { + struct iterator { + std::size_t i; + TIter iter; + bool operator!=(const iterator& other) const { + return iter != other.iter; + } + void operator++() { + --i; + ++iter; + } + auto operator*() const { + return std::tie(i, *iter); + } + }; + struct iterable_wrapper { + T iterable; + auto begin() { + return iterator{std::size(iterable) - 1, std::rbegin(iterable)}; + } + auto end() { + return iterator{std::size(iterable) - 1, std::rend(iterable)}; + } + }; + return iterable_wrapper{std::forward(iterable)}; +} diff --git a/src/utils/profile/profiler_interface.h b/src/utils/profile/profiler_interface.h index 86bb94e0ff..e48a58b174 100644 --- a/src/utils/profile/profiler_interface.h +++ b/src/utils/profile/profiler_interface.h @@ -8,289 +8,10 @@ #pragma once -#include -#include - -#if defined(NRN_CALIPER) -#include -#endif - -#if defined(CRAYPAT) -#include -#endif - -#if defined(TAU) -#include -#endif - -#if defined(LIKWID_PERFMON) -#include -#endif +#include 
"coreneuron/utils/profile/profiler_interface.h" namespace nrn { -namespace detail { - -/*! \class Instrumentor - * \brief Instrumentation infrastructure for benchmarking and profiling. - * - * The Instrumentor class exposes static methods that can be used to - * toggle with fine-grained resolution the profiling of specific - * areas within the code. - */ -template -struct Instrumentor { -#pragma clang diagnostic push -#pragma clang diagnostic ignored "-Wunused-value" - /*! \fn phase_begin - * \brief Activate the collection of profiling data within a code region. - * - * This function semantically defines the beginning of a region - * of code that the user wishes to profile. - * Loops through all enabled profilers and calls the relevant - * `phase_begin` function. - * This function should have a non-empty implementation only for - * profilers that allow multiple code regions with different names - * to be profiled concurrently. - * - * @param name the (unique) identifier of the code region to be profiled - */ - inline static void phase_begin(const char* name) { - std::initializer_list{(TProfilerImpl::phase_begin(name), 0)...}; - } - - /*! \fn phase_end - * \brief Deactivate the collection of profiling data within a code region. - * - * This function semantically defines the end of a region - * of code that the user wishes to profile. - * Loops through all enabled profilers and calls the relevant - * `phase_end` function. - * This function should have a non-empty implementation only for - * profilers that allow multiple code regions with different names - * to be profiled concurrently. - * - * @param name the (unique) identifier of the code region to be profiled - */ - inline static void phase_end(const char* name) { - std::initializer_list{(TProfilerImpl::phase_end(name), 0)...}; - } - - /*! \fn start_profile - * \brief Globally activate the collection of profiling data. - * - * Activate the collection of profiler data without defining - * a region of interest with a given name, as opposed to `phase_begin`. - * Loops through all enabled profilers and calls the relevant - * `start_profile` function. - * This function should have a non-empty implementation only for - * profilers that expose simply a global begin/end interface, without - * named regions. - */ - inline static void start_profile() { - std::initializer_list{(TProfilerImpl::start_profile(), 0)...}; - } - - /*! \fn stop_profile - * \brief Globally deactivate the collection of profiling data. - * - * Deactivate the collection of profiler data without defining - * a region of interest with a given name, as opposed to `phase_end`. - * Loops through all enabled profilers and calls the relevant - * `stop_profile` function. - * This function should have a non-empty implementation only for - * profilers that expose simply a global begin/end interface, without - * named regions. - */ - inline static void stop_profile() { - std::initializer_list{(TProfilerImpl::stop_profile(), 0)...}; - } - - /*! \fn init_profile - * \brief Initialize the profiler. - * - * Initialize a profiler's internal structure, without activating yet - * any data collection, similar in concept to MPI_Init. - * Loops through all enabled profilers and calls the relevant - * `init_profile` function. - * This function should have a non-empty implementation only for - * profilers that require special initialization, typically before - * any memory allocation is done. 
- */ - inline static void init_profile() { - std::initializer_list{(TProfilerImpl::init_profile(), 0)...}; - } - - /*! \fn finalize_profile - * \brief Finalize the profiler. - * - * Finalize a profiler's internal structure, without activating yet - * any data collection, similar in concept to MPI_Finalize. - * Loops through all enabled profilers and calls the relevant - * `finalize_profile` function. - * This function should have a non-empty implementation only for - * profilers that require special finalization. - */ - inline static void finalize_profile() { - std::initializer_list{(TProfilerImpl::finalize_profile(), 0)...}; - } -#pragma clang diagnostic pop -}; - -#if defined(NRN_CALIPER) - -struct Caliper { - inline static void phase_begin(const char* name) { - CALI_MARK_BEGIN(name); - }; - - inline static void phase_end(const char* name) { - CALI_MARK_END(name); - }; - - inline static void start_profile(){}; - - inline static void stop_profile(){}; - - inline static void init_profile(){}; - - inline static void finalize_profile(){}; -}; - -#endif - -#if defined(CRAYPAT) - -struct CrayPat { - inline static void phase_begin(const char* name){}; - - inline static void phase_end(const char* name){}; - - inline static void start_profile() { - PAT_record(PAT_STATE_ON); - }; - - inline static void stop_profile() { - PAT_record(PAT_STATE_OFF); - }; - - inline static void init_profile(){}; - - inline static void finalize_profile(){}; -}; -#endif - -#if defined(TAU) - -struct Tau { - inline static void phase_begin(const char* name){}; - - inline static void phase_end(const char* name){}; +namespace Instrumentor = coreneuron::Instrumentor; - inline static void start_profile() { - TAU_ENABLE_INSTRUMENTATION(); - }; - - inline static void stop_profile() { - TAU_DISABLE_INSTRUMENTATION(); - }; - - inline static void init_profile(){}; - - inline static void finalize_profile(){}; -}; - -#endif - -#if defined(LIKWID_PERFMON) - -struct Likwid { - inline static void phase_begin(const char* name) { - LIKWID_MARKER_START(name); - }; - - inline static void phase_end(const char* name) { - LIKWID_MARKER_STOP(name); - }; - - inline static void start_profile(){}; - - inline static void stop_profile(){}; - - inline static void init_profile() { - LIKWID_MARKER_INIT; - -#pragma omp parallel - { LIKWID_MARKER_THREADINIT; } - }; - - inline static void finalize_profile() { - LIKWID_MARKER_CLOSE; - }; -}; - -#endif - -struct NullInstrumentor { - inline static void phase_begin(const char* name){}; - inline static void phase_end(const char* name){}; - inline static void start_profile(){}; - inline static void stop_profile(){}; - inline static void init_profile(){}; - inline static void finalize_profile(){}; -}; - -using InstrumentorImpl = detail::Instrumentor< -#if defined NRN_CALIPER - detail::Caliper, -#endif -#if defined(CRAYPAT) - detail::CrayPat, -#endif -#if defined(TAU) - detail::Tau, -#endif -#if defined(LIKWID_PERFMON) - detail::Likwid, -#endif - detail::NullInstrumentor>; -} // namespace detail - -namespace Instrumentor { -struct phase { - const char* phase_name; - phase(const char* name) - : phase_name(name) { - detail::InstrumentorImpl::phase_begin(phase_name); - } - ~phase() { - detail::InstrumentorImpl::phase_end(phase_name); - } -}; - -inline static void start_profile() { - detail::InstrumentorImpl::start_profile(); -} - -inline static void stop_profile() { - detail::InstrumentorImpl::stop_profile(); -} - -inline static void phase_begin(const char* name) { - detail::InstrumentorImpl::phase_begin(name); } 
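The profiler wrapper is reduced in this hunk to a namespace alias onto CoreNEURON's implementation, so callers keep the interface documented in the code being deleted here: an RAII phase guard plus explicit begin/end and init/finalize hooks. A minimal calling sketch, assuming the CoreNEURON header preserves that interface; run_timestep is a placeholder:

#include "utils/profile/profiler_interface.h"

void run_timestep() {
    // RAII region: phase_begin("timestep") on entry, phase_end("timestep") on scope exit.
    nrn::Instrumentor::phase p("timestep");
    // ... work to be profiled ...
}

void profiled_run() {
    nrn::Instrumentor::init_profile();    // one-time backend setup (no-op for most backends)
    nrn::Instrumentor::start_profile();   // enable collection for begin/end style backends
    run_timestep();
    nrn::Instrumentor::stop_profile();
    nrn::Instrumentor::finalize_profile();
}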
- -inline static void phase_end(const char* name) { - detail::InstrumentorImpl::phase_end(name); -} - -inline static void init_profile() { - detail::InstrumentorImpl::init_profile(); -} - -inline static void finalize_profile() { - detail::InstrumentorImpl::finalize_profile(); -} -} // namespace Instrumentor - -} // namespace nrn diff --git a/test/CMakeLists.txt b/test/CMakeLists.txt index b5e9f45df4..86d011562b 100644 --- a/test/CMakeLists.txt +++ b/test/CMakeLists.txt @@ -13,8 +13,17 @@ include(NeuronTestHelper) # ============================================================================= # Test executables # ============================================================================= -add_executable(testneuron common/catch2_main.cpp unit_tests/basic.cpp - unit_tests/oc/hoc_interpreter.cpp) +add_executable( + testneuron + common/catch2_main.cpp + unit_tests/basic.cpp + unit_tests/iovec.cpp + unit_tests/container/container.cpp + unit_tests/container/generic_data_handle.cpp + unit_tests/container/mechanism.cpp + unit_tests/container/node.cpp + unit_tests/utils/enumerate.cpp + unit_tests/oc/hoc_interpreter.cpp) set(catch2_targets testneuron) if(NRN_ENABLE_THREADS) add_executable(nrn-benchmarks common/catch2_main.cpp benchmarks/threads/test_multicore.cpp) @@ -27,7 +36,6 @@ foreach(target ${catch2_targets}) if(NOT MINGW) target_link_libraries(${target} ${CMAKE_DL_LIBS}) endif() - target_compile_definitions(testneuron PUBLIC "USE_PYTHON=${USE_PYTHON}") endforeach() # ============================================================================= @@ -43,6 +51,20 @@ nrn_add_test( GROUP unit_tests NAME testneuron COMMAND $) +# Extra tests from testneuron that need manual checking because they call std::terminate. +nrn_add_test( + GROUP unit_tests + NAME testneuron_soa_erase_calls_terminate + COMMAND $ + "Deleting a row from a frozen SoA container causes a fatal error") +string( + JOIN " " + regex "neuron::container::owning_identifier destructor could" + "not delete from the underlying storage:" + "neuron::container::Node::storage\\[frozen_count=1,sorted=false\\]: erase\\(\\) called on a" + "frozen structure \\[std::runtime_error\\]\\. 
This is not recoverable, aborting\\.") +set_tests_properties(unit_tests::testneuron_soa_erase_calls_terminate + PROPERTIES PASS_REGULAR_EXPRESSION "${regex}") if(TARGET nrn-benchmarks AND NRN_ENABLE_PERFORMANCE_TESTS) nrn_add_test( GROUP unit_tests @@ -50,6 +72,7 @@ if(TARGET nrn-benchmarks AND NRN_ENABLE_PERFORMANCE_TESTS) PROCESSORS 16 COMMAND $ --processors=16) endif() + # ============================================================================= # Add ringtest # ============================================================================= @@ -115,7 +138,7 @@ if(NRN_ENABLE_MPI) "-x;NRN_PROBE_VARIABLE=NRN_PROBE_VALUE") set(probe_command ${MPIEXEC_EXECUTABLE} ${MPIEXEC_NUMPROC_FLAG} 1 ${MPIEXEC_PREFLAGS} ${option} - ${PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS} -c + ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS} -c "import os\; print(os.environ['NRN_PROBE_VARIABLE'])") execute_process( COMMAND ${probe_command} @@ -158,8 +181,8 @@ foreach(mpiexec "" "_mpiexec") if(NRN_HAVE_OPENMPI2_OR_LESS AND NOT NRN_ENABLE_MPI_DYNAMIC) set(python ${mpi_init_prefix${mpiexec}} nrniv ${mpi_init_suffix${mpiexec}} -python) else() - set(python ${mpi_init_prefix${mpiexec}} ${preload_sanitizer${mpiexec}} ${PYTHON_EXECUTABLE} - ${mpi_init_suffix${mpiexec}}) + set(python ${mpi_init_prefix${mpiexec}} ${preload_sanitizer${mpiexec}} + ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${mpi_init_suffix${mpiexec}}) endif() nrn_add_test( GROUP mpi_init @@ -179,7 +202,7 @@ endforeach() # ============================================================================= # Add pytest # ============================================================================= -if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) +if(NRN_ENABLE_PYTHON) # use `--capture=tee-sys`: combines 'sys' and '-s', capturing sys.stdout/stderr and passing it # along to the actual sys.stdout/stderr set(pytest_args --capture=tee-sys) @@ -188,18 +211,52 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) list(APPEND pytest_args --cov-report=xml --cov=neuron) endif() set(pytest -m pytest ${pytest_args}) - # TODO: consider allowing the group-related parts to be dropped here + + nrn_add_test( + GROUP unit_tests + NAME python_unit_tests + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${pytest} + ${PROJECT_SOURCE_DIR}/test/unit_tests/hoc_python + PRELOAD_SANITIZER + SCRIPT_PATTERNS "test/unit_tests/hoc_python/*.py") + + # In brief: + # + # * tests and mod files under pytest/ are only compatible with NEURON + # * tests and mod files under pytest_coreneuron/ are compatible with both NEURON and CoreNEURON + # + # So, pytest/ should generally be viewed as a staging area en route to pytest_coreneuron. As the + # name implies, tests in these directories are executed using pytest. 
+ nrn_add_test_group(NAME pytest MODFILE_PATTERNS test/pytest/*.mod) nrn_add_test_group( CORENEURON - NAME pynrn - MODFILE_PATTERNS test/pynrn/*.mod) + NAME pytest_coreneuron + MODFILE_PATTERNS test/pytest_coreneuron/*.mod) + # In case of multiple Python versions, run these tests with all of them + foreach(val RANGE ${NRN_PYTHON_ITERATION_LIMIT}) + list(GET NRN_PYTHON_EXECUTABLES ${val} exe) + list(GET NRN_PYTHON_VERSIONS ${val} pyver) + foreach(group pytest pytest_coreneuron) + nrn_add_test( + GROUP ${group} + NAME basic_tests_py${pyver} + PRELOAD_SANITIZER + COMMAND "${exe}" ${pytest} "./test/${group}" + SCRIPT_PATTERNS "test/${group}/*.json" "test/${group}/*.py") + endforeach() + endforeach() + + # Add some tests that are specifically aimed at NEURON - Python integration, covering different + # ways NEURON can be made to execute Python code. + add_subdirectory(pyinit) + + nrn_add_test_group(NAME datahandle MODFILE_PATTERNS test/datahandle/*.mod) nrn_add_test( - GROUP pynrn - NAME basic_tests + GROUP datahandle + NAME datahandle_tests PRELOAD_SANITIZER - ENVIRONMENT COVERAGE_FILE=.coverage.basic_tests - COMMAND ${PYTHON_EXECUTABLE} ${pytest} ./test/pynrn - SCRIPT_PATTERNS test/pynrn/*.json test/pynrn/*.py) + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${pytest} ./test/datahandle + SCRIPT_PATTERNS test/datahandle/*.py) # Mostly to increase coverage nrn_add_test_group(NAME coverage_tests MODFILE_PATTERNS test/cover/mod/*.mod) @@ -208,13 +265,13 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) NAME cover_tests PRELOAD_SANITIZER ENVIRONMENT COVERAGE_FILE=.coverage.cover_tests - COMMAND ${PYTHON_EXECUTABLE} ${pytest} ./test/cover + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${pytest} ./test/cover SCRIPT_PATTERNS test/cover/*.py test/cover/*.json) nrn_add_test_group( NAME example_nmodl MODFILE_PATTERNS *.mod *.inc SIM_DIRECTORY share/examples/nrniv/nmodl) - set(py_exe ${PYTHON_EXECUTABLE}) + set(py_exe ${NRN_DEFAULT_PYTHON_EXECUTABLE}) set(py_preload PRELOAD_SANITIZER) set(hoc_exe special) foreach(ext hoc py) @@ -235,7 +292,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) # independent run of each hoc and python file in test/hoc/* folders nrn_add_test_group( NAME hoctests - MODFILE_PATTERNS *.mod + MODFILE_PATTERNS *.mod *.inc SIM_DIRECTORY test/hoctests) set(hoctest_utils expect_err.hoc) foreach(ext hoc py) @@ -262,6 +319,8 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) # GCC. Other compilers produce larger differences. 
if(${CMAKE_CXX_COMPILER_ID} STREQUAL "Intel") set(change_test_tolerance NRN_RXD_TEST_TOLERANCE=1e-8) + elseif(${CMAKE_CXX_COMPILER_ID} STREQUAL "IntelLLVM") + set(change_test_tolerance NRN_RXD_TEST_TOLERANCE=3e-8) elseif(NRN_HAVE_NVHPC_COMPILER) set(change_test_tolerance NRN_RXD_TEST_TOLERANCE=1e-4) endif() @@ -270,9 +329,9 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) NAME rxd_tests PRELOAD_SANITIZER ENVIRONMENT COVERAGE_FILE=.coverage.rxd_tests ${change_test_tolerance} - COMMAND ${PYTHON_EXECUTABLE} ${pytest} ./test/rxd) + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${pytest} ./test/rxd) if(NRN_ENABLE_MPI) - nrn_find_python_module(mpi4py) + nrn_find_python_module(MODULE mpi4py) if(mpi4py_FOUND) nrn_add_test( GROUP rxdmod_tests @@ -281,12 +340,12 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) ENVIRONMENT COVERAGE_FILE=.coverage.rxd_mpi_tests ${change_test_tolerance} COMMAND ${MPIEXEC_NAME} ${MPIEXEC_NUMPROC_FLAG} 1 ${MPIEXEC_PREFLAGS} - ${preload_sanitizer_mpiexec} ${PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS} ${pytest} - ./test/rxd --mpi) + ${preload_sanitizer_mpiexec} ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS} + ${pytest} ./test/rxd --mpi) endif() endif() endif() - nrn_add_test_group(NAME parallel MODFILE_PATTERNS test/pynrn/*.mod) + nrn_add_test_group(NAME parallel MODFILE_PATTERNS test/pytest_coreneuron/*.mod) nrn_add_test( GROUP parallel NAME subworld @@ -300,17 +359,17 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) NAME partrans PROCESSORS 2 REQUIRES mpi - SCRIPT_PATTERNS test/pynrn/test_partrans.py + SCRIPT_PATTERNS test/pytest_coreneuron/test_partrans.py COMMAND ${MPIEXEC_NAME} ${MPIEXEC_NUMPROC_FLAG} 2 ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} - nrniv ${MPIEXEC_POSTFLAGS} -mpi -python test/pynrn/test_partrans.py) + nrniv ${MPIEXEC_POSTFLAGS} -mpi -python test/pytest_coreneuron/test_partrans.py) nrn_add_test( GROUP parallel NAME netpar PROCESSORS 2 REQUIRES mpi - SCRIPT_PATTERNS test/pynrn/test_hoc_po.py test/pynrn/test_netpar.py + SCRIPT_PATTERNS test/pytest_coreneuron/test_hoc_po.py test/pytest_coreneuron/test_netpar.py COMMAND ${MPIEXEC_NAME} ${MPIEXEC_NUMPROC_FLAG} 2 ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} - nrniv ${MPIEXEC_POSTFLAGS} -mpi -python test/pynrn/test_netpar.py) + nrniv ${MPIEXEC_POSTFLAGS} -mpi -python test/pytest_coreneuron/test_netpar.py) nrn_add_test( GROUP parallel NAME bas @@ -328,10 +387,11 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) PROCESSORS 2 REQUIRES mpi ENVIRONMENT "NRN_PYTEST_ARGS=${pytest_arg_string}" - SCRIPT_PATTERNS test/pynrn/run_pytest.py test/pynrn/test_nrntest_fast.json - test/pynrn/test_nrntest_fast.py + SCRIPT_PATTERNS + test/pytest_coreneuron/run_pytest.py test/pytest_coreneuron/test_nrntest_fast.json + test/pytest_coreneuron/test_nrntest_fast.py COMMAND ${MPIEXEC_NAME} ${MPIEXEC_NUMPROC_FLAG} 2 ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} - special ${MPIEXEC_POSTFLAGS} -mpi -python test/pynrn/run_pytest.py) + special ${MPIEXEC_POSTFLAGS} -mpi -python test/pytest_coreneuron/run_pytest.py) # CoreNEURON's reports require MPI and segfault if it is not initialised. This is a crude # workaround. 
if(CORENRN_ENABLE_REPORTING) @@ -372,7 +432,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) -python) else() set(modtests_preload_sanitizer PRELOAD_SANITIZER) - set(modtests_launch_py ${PYTHON_EXECUTABLE} ${pytest}) + set(modtests_launch_py ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${pytest}) set(modtests_launch_hoc nrniv ${nrniv_mpi_arg}) set(modtests_launch_py_mpi ${MPIEXEC_NAME} @@ -381,7 +441,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} ${preload_sanitizer_mpiexec} - ${PYTHON_EXECUTABLE} + ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS}) set(modtests_launch_py_mpi_subworlds ${MPIEXEC_NAME} @@ -390,15 +450,12 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} ${preload_sanitizer_mpiexec} - ${PYTHON_EXECUTABLE} + ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS}) endif() - # External coreneuron can be used for testing but for simplicity we are testing only submodule - # builds (in near future we want to support only internal builds anyway). This test uses the - # standard NEURON installation that does not have a `special` statically linked against - # CoreNEURON, so we cannot run this in static builds. - if(NOT nrn_using_ext_corenrn AND CORENRN_ENABLE_SHARED) + # This test uses the standard NEURON installation (without nrnivmodl having been run) + if(CORENRN_ENABLE_SHARED) nrn_add_test_group( CORENEURON NAME coreneuron_standalone @@ -446,6 +503,14 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) SCRIPT_PATTERNS test/nmodl/test_kinetic.py ENVIRONMENT ${sonata_zero_gid_env} ${nrnpython_mpi_env} COMMAND ${modtests_launch_py} test/nmodl/test_kinetic.py) + nrn_add_test( + GROUP nmodl_tests + NAME test_random + REQUIRES ${modtests_preload_sanitizer} + SCRIPT_PATTERNS test/nmodl/test_random.py + ENVIRONMENT ${sonata_zero_gid_env} ${nrnpython_mpi_env} + COMMAND ${modtests_launch_py} test/nmodl/test_random.py) + nrn_add_test_group( CORENEURON NAME coreneuron_modtests @@ -453,15 +518,23 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) SCRIPT_PATTERNS test/coreneuron/test_spikes.py MODFILE_PATTERNS "test/coreneuron/mod files/*.mod" "test/coreneuron/mod files/axial.inc" - test/pynrn/unitstest.mod test/pynrn/version_macros.mod test/gjtests/natrans.mod) + test/pytest_coreneuron/unitstest.mod test/pytest_coreneuron/version_macros.mod + test/gjtests/natrans.mod) nrn_add_test( GROUP coreneuron_modtests NAME version_macros REQUIRES coreneuron ${modtests_preload_sanitizer} - SCRIPT_PATTERNS test/pynrn/test_version_macros.py + SCRIPT_PATTERNS test/pytest_coreneuron/test_version_macros.py ENVIRONMENT COVERAGE_FILE=.coverage.coreneuron_version_macros NRN_CORENEURON_ENABLE=true ${sonata_zero_gid_env} ${nrnpython_mpi_env} - COMMAND ${modtests_launch_py} test/pynrn/test_version_macros.py) + COMMAND ${modtests_launch_py} test/pytest_coreneuron/test_version_macros.py) + nrn_add_test( + GROUP coreneuron_modtests + NAME inputpresyn_py + REQUIRES coreneuron mpi ${modtests_preload_sanitizer} + SCRIPT_PATTERNS test/coreneuron/test_inputpresyn.py + PROCESSORS 2 + COMMAND ${modtests_launch_py_mpi} test/coreneuron/test_inputpresyn.py) # In GPU builds run all of the tests on both CPU and GPU set(coreneuron_modtests_gpu_env CORENRN_ENABLE_GPU=true) foreach(processor cpu gpu) @@ -486,7 +559,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) nrn_add_test( GROUP coreneuron_modtests NAME direct_hoc_${processor} - REQUIRES coreneuron ${processor} ${modtests_preload_sanitizer} + REQUIRES coreneuron ${processor} SCRIPT_PATTERNS test/coreneuron/test_direct.hoc ENVIRONMENT 
${processor_env} COMMAND ${modtests_launch_hoc} test/coreneuron/test_direct.hoc) @@ -504,14 +577,15 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) ENVIRONMENT ${modtests_processor_env} ${nrnpython_mpi_env} COVERAGE_FILE=.coverage.coreneuron_spikes_file_mode_py NRN_TEST_SPIKES_FILE_MODE=1 COMMAND ${modtests_launch_py} test/coreneuron/test_spikes.py) + # TODO this test does some NEURON vs. NEURON testing too nrn_add_test( GROUP coreneuron_modtests NAME fast_imem_py_${processor} REQUIRES coreneuron ${processor} ${modtests_preload_sanitizer} - SCRIPT_PATTERNS test/pynrn/test_fast_imem.py + SCRIPT_PATTERNS test/pytest_coreneuron/test_fast_imem.py ENVIRONMENT ${modtests_processor_env} ${nrnpython_mpi_env} COVERAGE_FILE=.coverage.coreneuron_fast_imem_py - COMMAND ${modtests_launch_py} test/pynrn/test_fast_imem.py) + COMMAND ${modtests_launch_py} test/pytest_coreneuron/test_fast_imem.py) nrn_add_test( GROUP coreneuron_modtests NAME datareturn_py_${processor} @@ -568,6 +642,24 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) ENVIRONMENT ${modtests_processor_env} ${nrnpython_mpi_env} COVERAGE_FILE=.coverage.coreneuron_test_ba_py COMMAND ${modtests_launch_py} test/coreneuron/test_ba.py) + nrn_add_test( + GROUP coreneuron_modtests + NAME test_nmodlrandom_py_${processor} + REQUIRES coreneuron ${processor} ${modtests_preload_sanitizer} + SCRIPT_PATTERNS test/coreneuron/test_nmodlrandom.py + ENVIRONMENT ${modtests_processor_env} ${nrnpython_mpi_env} + COVERAGE_FILE=.coverage.coreneuron_test_nmodlrandom_py + COMMAND ${modtests_launch_py} test/coreneuron/test_nmodlrandom.py) + nrn_add_test( + GROUP coreneuron_modtests + NAME test_nmodlrandom_syntax_py_${processor} + REQUIRES coreneuron ${processor} ${modtests_preload_sanitizer} + SCRIPT_PATTERNS test/coreneuron/test_nmodlrandom_syntax.py + ENVIRONMENT + ${modtests_processor_env} ${nrnpython_mpi_env} + COVERAGE_FILE=.coverage.coreneuron_test_nmodlrandom_syntax_py + NMODL_BINARY=${CORENRN_NMODL_BINARY} + COMMAND ${modtests_launch_py} test/coreneuron/test_nmodlrandom_syntax.py) nrn_add_test( GROUP coreneuron_modtests NAME test_natrans_py_${processor} @@ -577,7 +669,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) COVERAGE_FILE=.coverage.coreneuron_test_natrans_py COMMAND ${modtests_launch_py} test/gjtests/test_natrans.py) if(NRN_ENABLE_MPI) - nrn_find_python_module(mpi4py) + nrn_find_python_module(MODULE mpi4py) if(mpi4py_FOUND) nrn_add_test( GROUP coreneuron_modtests @@ -614,15 +706,6 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) PROCESSORS 2 ENVIRONMENT ${processor_env} NRN_TEST_SPIKES_NRNMPI_INIT=1 NRN_TEST_SPIKES_FILE_MODE=1 COMMAND ${launch_spikes_mpi_file_mode_py} test/coreneuron/test_spikes.py) - nrn_add_test( - GROUP coreneuron_modtests - NAME inputpresyn_py_${processor} - REQUIRES coreneuron ${processor} ${modtests_preload_sanitizer} - PROCESSORS 2 - COMMAND - ${MPIEXEC_NAME} ${MPIEXEC_NUMPROC_FLAG} 2 ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS} - special ${MPIEXEC_POSTFLAGS} -mpi -python - ${PROJECT_SOURCE_DIR}/test/coreneuron/test_inputpresyn.py) nrn_add_test( GROUP coreneuron_modtests NAME test_subworlds_py_${processor} @@ -680,7 +763,7 @@ if(NRN_ENABLE_PYTHON AND PYTEST_FOUND) GROUP nrnmusic NAME music_tests PROCESSORS 2 - COMMAND ${PYTHON_EXECUTABLE} test/music_tests/runtests.py + COMMAND ${NRN_DEFAULT_PYTHON_EXECUTABLE} test/music_tests/runtests.py SCRIPT_PATTERNS test/music_tests/*.music test/music_tests/*.py) endif() endif() @@ -699,10 +782,11 @@ function(add_modlunit_test mod_file) "${TESTS}" PARENT_SCOPE) endfunction() 
-add_modlunit_test("${PROJECT_SOURCE_DIR}/test/pynrn/unitstest.mod") +add_modlunit_test("${PROJECT_SOURCE_DIR}/test/pytest_coreneuron/unitstest.mod") add_modlunit_test("${PROJECT_SOURCE_DIR}/src/nrnoc/hh.mod") add_modlunit_test("${PROJECT_SOURCE_DIR}/src/nrnoc/stim.mod") add_modlunit_test("${PROJECT_SOURCE_DIR}/src/nrnoc/pattern.mod") +add_modlunit_test("${PROJECT_SOURCE_DIR}/test/hoctests/rand.mod") set_property(TEST modlunit_pattern PROPERTY WILL_FAIL ON) set_tests_properties(${TESTS} PROPERTIES ENVIRONMENT "${NRN_RUN_FROM_BUILD_DIR_ENV}") diff --git a/test/benchmarks/threads/test_multicore.cpp b/test/benchmarks/threads/test_multicore.cpp index f078634989..ca1288c148 100644 --- a/test/benchmarks/threads/test_multicore.cpp +++ b/test/benchmarks/threads/test_multicore.cpp @@ -12,8 +12,6 @@ #include #include -extern int use_cachevec; - /* @brief * Test multicore implementation: * * parallel mode (std::threads) @@ -51,14 +49,9 @@ TEST_CASE("Multicore unit and performance testing", "[NEURON][multicore]") { SECTION("Test parallel mode", "[NEURON][multicore][parallel]") { static std::vector cache_sim_times; - static std::vector no_cache_sim_times; - GIVEN("we do prun() over each nof_threads{nof_threads_range} with cachevec{0,1}") { - auto cache_efficient = GENERATE(0, 1); + GIVEN("we do prun() over each nof_threads{nof_threads_range}") { auto nof_threads = GENERATE_COPY(from_range(nof_threads_range)); - THEN("we run the simulation with " + std::to_string(nof_threads) + - " threads, cachevec is " + std::to_string(cache_efficient)) { - nrn_cachevec(cache_efficient); - REQUIRE(use_cachevec == cache_efficient); + THEN("we run the simulation with " + std::to_string(nof_threads) + " threads") { nrn_threads_create(nof_threads, 1); REQUIRE(nrn_nthread == nof_threads); REQUIRE(nof_worker_threads() == (nof_threads > 1 ? nof_threads : 0)); @@ -66,14 +59,12 @@ TEST_CASE("Multicore unit and performance testing", "[NEURON][multicore]") { REQUIRE(hoc_oc("prun()") == 0); auto end = std::chrono::high_resolution_clock::now(); auto duration = std::chrono::duration_cast(end - start); - (cache_efficient ? cache_sim_times : no_cache_sim_times) - .push_back(duration.count()); + cache_sim_times.push_back(duration.count()); REQUIRE(nof_worker_threads() == (nof_threads > 1 ? 
nof_threads : 0)); } } THEN("we assert all simulations ran") { REQUIRE(cache_sim_times.size() == nof_threads_range.size()); - REQUIRE(no_cache_sim_times.size() == nof_threads_range.size()); } THEN("we print the results") { std::cout << "[parallel][simulation times] : " << std::endl; @@ -83,15 +74,12 @@ TEST_CASE("Multicore unit and performance testing", "[NEURON][multicore]") { << "\t\t" << "cache=1" << std::endl; for (auto i = 0; i < cache_sim_times.size(); ++i) { - std::cout << nof_threads_range[i] << "\t" << no_cache_sim_times[i] << "\t" - << cache_sim_times[i] << std::endl; + std::cout << nof_threads_range[i] << "\t" << cache_sim_times[i] << std::endl; } } THEN("we check that the more threads we have the faster the simulation runs") { if (nof_threads_range.size() > 2) { REQUIRE(std::is_sorted(cache_sim_times.rbegin(), cache_sim_times.rend())); - REQUIRE(std::is_sorted(no_cache_sim_times.rbegin(), no_cache_sim_times.rend())); - REQUIRE(cache_sim_times < no_cache_sim_times); THEN( "we check that the standard deviaton is above 25% from the mean for simulation " "vectors") { @@ -106,24 +94,10 @@ TEST_CASE("Multicore unit and performance testing", "[NEURON][multicore]") { return a + (b - cache_mean) * (b - cache_mean); }) / cache_sim_times.size()); - const auto no_cache_mean = - std::accumulate(no_cache_sim_times.begin(), no_cache_sim_times.end(), 0.0) / - no_cache_sim_times.size(); - const auto no_cache_std_dev = std::sqrt( - std::accumulate(no_cache_sim_times.begin(), - no_cache_sim_times.end(), - 0.0, - [no_cache_mean](double a, double b) { - return a + (b - no_cache_mean) * (b - no_cache_mean); - }) / - no_cache_sim_times.size()); REQUIRE(cache_std_dev / cache_mean > 0.2); - REQUIRE(no_cache_std_dev / no_cache_mean > 0.2); // print the standard deviations std::cout << "[parallel][cache][standard deviation] : " << cache_std_dev << std::endl; - std::cout << "[parallel][no-cache][standard deviation] : " << no_cache_std_dev - << std::endl; } } else { WARN("Not enough threads to test parallel performance KPI"); @@ -132,67 +106,54 @@ TEST_CASE("Multicore unit and performance testing", "[NEURON][multicore]") { } SECTION("Test serial mode", "[NEURON][multicore][serial]") { - WHEN("cache efficient is set to 1") { - nrn_cachevec(1); - THEN("we check cachevec is 1") { - REQUIRE(use_cachevec == 1); - } - static std::vector sim_times; - GIVEN("we do prun() over each nof_threads{nof_threads_range} with serial mode on") { - auto nof_threads = GENERATE_COPY(from_range(nof_threads_range)); - THEN("we run the serial simulation with " << nof_threads << " threads") { - nrn_threads_create(nof_threads, 0); - REQUIRE(nrn_nthread == nof_threads); - REQUIRE(nof_worker_threads() == 0); - auto start = std::chrono::high_resolution_clock::now(); - REQUIRE(hoc_oc("prun()") == 0); - auto end = std::chrono::high_resolution_clock::now(); - auto duration = std::chrono::duration_cast(end - - start); - sim_times.push_back(duration.count()); - REQUIRE(nof_worker_threads() == 0); - } - } - THEN("we assert all simulations ran") { - REQUIRE(sim_times.size() == nof_threads_range.size()); + static std::vector sim_times; + GIVEN("we do prun() over each nof_threads{nof_threads_range} with serial mode on") { + auto nof_threads = GENERATE_COPY(from_range(nof_threads_range)); + THEN("we run the serial simulation with " << nof_threads << " threads") { + nrn_threads_create(nof_threads, 0); + REQUIRE(nrn_nthread == nof_threads); + REQUIRE(nof_worker_threads() == 0); + auto start = std::chrono::high_resolution_clock::now(); + 
REQUIRE(hoc_oc("prun()") == 0); + auto end = std::chrono::high_resolution_clock::now(); + auto duration = std::chrono::duration_cast(end - start); + sim_times.push_back(duration.count()); + REQUIRE(nof_worker_threads() == 0); } - THEN("we print the results") { - std::cout << "[serial][simulation times] : " << std::endl; - std::cout << "nt" - << "\t" - << "cache=1" << std::endl; - for (auto i = 0; i < sim_times.size(); ++i) { - std::cout << nof_threads_range[i] << "\t" << sim_times[i] << std::endl; - } + } + THEN("we assert all simulations ran") { + REQUIRE(sim_times.size() == nof_threads_range.size()); + } + THEN("we print the results") { + std::cout << "[serial][simulation times] : " << std::endl; + std::cout << "nt" + << "\t" + << "cache=1" << std::endl; + for (auto i = 0; i < sim_times.size(); ++i) { + std::cout << nof_threads_range[i] << "\t" << sim_times[i] << std::endl; } - THEN("we assert sim_times have under 10% standard deviation from the mean") { - if (nof_threads_range.size() > 2) { - const auto mean = std::accumulate(sim_times.begin(), sim_times.end(), 0.0) / - sim_times.size(); - const auto sq_sum = std::inner_product(sim_times.begin(), - sim_times.end(), - sim_times.begin(), - 0.0); - const auto stdev = std::sqrt(sq_sum / sim_times.size() - mean * mean); + } + THEN("we assert sim_times have under 10% standard deviation from the mean") { + if (nof_threads_range.size() > 2) { + const auto mean = std::accumulate(sim_times.begin(), sim_times.end(), 0.0) / + sim_times.size(); + const auto sq_sum = + std::inner_product(sim_times.begin(), sim_times.end(), sim_times.begin(), 0.0); + const auto stdev = std::sqrt(sq_sum / sim_times.size() - mean * mean); - std::cout << "[serial][standard deviation] : " << stdev << std::endl; - REQUIRE(stdev < 0.1 * mean); - } else { - WARN("Not enough threads to test serial performance KPI"); - } + std::cout << "[serial][standard deviation] : " << stdev << std::endl; + REQUIRE(stdev < 0.1 * mean); + } else { + WARN("Not enough threads to test serial performance KPI"); } } } SECTION("Test busywait parallel mode", "[NEURON][multicore][parallel][busywait]") { - WHEN("busywait is set to 1 and cache efficient is set to 1") { + WHEN("busywait is set to 1") { THEN("set thread_busywait to 1") { REQUIRE(hoc_oc("pc.thread_busywait(1)") == 0); } - THEN("set cachevec to 1") { - nrn_cachevec(1); - REQUIRE(use_cachevec == 1); - } static std::vector sim_times; GIVEN("we do prun() over each nof_threads{nof_threads_range} with serial mode on") { auto nof_threads = GENERATE_COPY(from_range(nof_threads_range)); diff --git a/test/common/catch2_main.cpp b/test/common/catch2_main.cpp index 2eefe1a780..09f9a7a715 100644 --- a/test/common/catch2_main.cpp +++ b/test/common/catch2_main.cpp @@ -8,6 +8,8 @@ #include "ocfunc.h" #include "section.h" +#include + int ivocmain_session(int, const char**, const char**, int); extern int nrn_main_launch; extern int nrn_nobanner_; @@ -24,7 +26,21 @@ int PROCESSORS{0}; int MAX_PROCESSORS{nrn_how_many_processors()}; } // namespace nrn::test +#ifdef NRN_COVERAGE_ENABLED +// works with AppleClang 14, other sources suggest __gcov_flush. +extern "C" void __gcov_dump(); +#endif +namespace { +void new_handler() { +#ifdef NRN_COVERAGE_ENABLED + __gcov_dump(); +#endif +} +} // namespace + int main(int argc, char* argv[]) { + std::set_terminate(new_handler); + // global setup... 
nrn_main_launch = 2; int argc_nompi = 2; diff --git a/test/coreneuron/mod files/noisychan.mod b/test/coreneuron/mod files/noisychan.mod new file mode 100644 index 0000000000..780feea691 --- /dev/null +++ b/test/coreneuron/mod files/noisychan.mod @@ -0,0 +1,59 @@ +: The idea is that the voltage follows the change in e because of high +: conductance. So every negexp event causes voltage to pass threshold + +NEURON { + SUFFIX noisychan + NONSPECIFIC_CURRENT i + RANGE tau, invl + RANDOM ran +} + +UNITS { + (mA) = (milliamp) + (mV) = (millivolt) + (S) = (siemens) +} + +PARAMETER { + g = .1 (S/cm2) + tau = .1 (ms) + invl = 1 (ms) + emax = 50 (mV) +} + +ASSIGNED { + v (mV) + i (mA/cm2) + tnext (ms) +} + +STATE { + e (mV) +} + +INITIAL { + random_setseq(ran, 0) + e = -65(mV) + tnext = negexp() +} + +BEFORE STEP { + if (t > tnext) { + tnext = tnext + negexp() + e = emax + } +} + +BREAKPOINT { + SOLVE conduct METHOD cnexp + i = g*(v - e) +} + +DERIVATIVE conduct { + e' = (-65(mV) - e)/tau +} + +FUNCTION negexp()(ms) { + negexp = invl*random_negexp(ran) +} + diff --git a/test/coreneuron/mod files/watchrange.mod b/test/coreneuron/mod files/watchrange.mod index a710a67e20..dd051ac23a 100644 --- a/test/coreneuron/mod files/watchrange.mod +++ b/test/coreneuron/mod files/watchrange.mod @@ -106,6 +106,18 @@ VERBATIM ENDVERBATIM } +DESTRUCTOR { +VERBATIM + { + nrnran123_State** pv = (nrnran123_State**)(&_p_ran); + if (*pv) { + nrnran123_deletestream(*pv); + *pv = nullptr; + } + } +ENDVERBATIM +} + VERBATIM static void bbcore_write(double* z, int* d, int* zz, int* offset, _threadargsproto_) { if (d) { @@ -115,6 +127,11 @@ static void bbcore_write(double* z, int* d, int* zz, int* offset, _threadargspro nrnran123_getids3(*pv, di, di+1, di+2); nrnran123_getseq(*pv, di+3, &which); di[4] = (int)which; +#if NRNBBCORE + /* CoreNEURON does not call DESTRUCTOR so ... */ + nrnran123_deletestream(*pv); + *pv = nullptr; +#endif } *offset += 5; } diff --git a/test/coreneuron/mod files/watchrange2.mod b/test/coreneuron/mod files/watchrange2.mod new file mode 100644 index 0000000000..13100e45a4 --- /dev/null +++ b/test/coreneuron/mod files/watchrange2.mod @@ -0,0 +1,76 @@ +: for testing multiple WATCH statements activated as same time +: high, low, and mid regions watch a random uniform variable. +: The random variable ranges from 0 to 1 and changes at random times in +: the neighborhood of interval tick. 
+ +NEURON { + THREADSAFE + POINT_PROCESS Bounce2 + RANGE r, result, n_high, n_low, n_mid, tick, x, t1 + RANDOM ran +} + +PARAMETER { + tick = 0.25 (ms) + LowThresh = 0.3 + HighThresh = 0.7 +} + +ASSIGNED { + x (1) + t1 (ms) + r (1) + n_high (1) + n_mid (1) + n_low (1) + result (1) +} + +DEFINE Low 1 +DEFINE Mid 2 +DEFINE High 3 +DEFINE Clock 4 + +INITIAL { + random_setseq(ran, 0) + n_high = 0 + n_mid = 0 + n_low = 0 + r = uniform() + t1 = t + x = 0 + net_send(0, Mid) + net_send(0, Clock) +} + +:AFTER SOLVE { +: result = t1*100/1(ms) + x +:} + +NET_RECEIVE(w) { + t1 = t + if (flag == Clock) { + r = uniform() + net_send(tick*(uniform() + .5), Clock) + } + if (flag == High) { + x = High + n_high = n_high + 1 + WATCH (r < LowThresh) Low + WATCH (r < HighThresh ) Mid + }else if (flag == Mid) { + x = Mid + n_mid = n_mid + 1 + WATCH (r < LowThresh) Low + WATCH (r > HighThresh) High + }else if (flag == Low) { + x = Low + n_low = n_low + 1 + WATCH (r > HighThresh) High + WATCH (r > LowThresh) Mid + } +} + +FUNCTION uniform() { + uniform = random_uniform(ran) +} diff --git a/test/coreneuron/test_ba.py b/test/coreneuron/test_ba.py index c5337ef9db..3ef94bee0c 100644 --- a/test/coreneuron/test_ba.py +++ b/test/coreneuron/test_ba.py @@ -125,7 +125,6 @@ def test_ba(): cmp(r, std) coreneuron.enable = True - h.CVode().cache_efficient(1) r = run(m) cmp(r, std) diff --git a/test/coreneuron/test_datareturn.py b/test/coreneuron/test_datareturn.py index b17da4650e..ec1fb3abcb 100644 --- a/test/coreneuron/test_datareturn.py +++ b/test/coreneuron/test_datareturn.py @@ -172,7 +172,6 @@ def run(tstop, mode): std = model.data() print("CoreNEURON run") - h.CVode().cache_efficient(1) coreneuron.enable = True coreneuron.verbose = 0 coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) diff --git a/test/coreneuron/test_direct.hoc b/test/coreneuron/test_direct.hoc index 4a524fc10e..6d7cf67ce0 100644 --- a/test/coreneuron/test_direct.hoc +++ b/test/coreneuron/test_direct.hoc @@ -23,7 +23,6 @@ proc test_direct_memory_transfer() { localobj po, pc, ic, tv, vvec, i_mem, tvstd soma { insert Sample } cvode.use_fast_imem(1) - cvode.cache_efficient(1) // record results of a run vvec = new Vector() diff --git a/test/coreneuron/test_direct.py b/test/coreneuron/test_direct.py index cbb37f8405..add5fe1c6b 100644 --- a/test/coreneuron/test_direct.py +++ b/test/coreneuron/test_direct.py @@ -18,7 +18,6 @@ def test_direct_memory_transfer(): h.soma.insert("Sample") h.cvode.use_fast_imem(1) - h.cvode.cache_efficient(1) v = h.Vector() v.record(h.soma(0.5)._ref_v, sec=h.soma) diff --git a/test/coreneuron/test_fornetcon.py b/test/coreneuron/test_fornetcon.py index e668da5dfe..1acc0e0043 100644 --- a/test/coreneuron/test_fornetcon.py +++ b/test/coreneuron/test_fornetcon.py @@ -83,7 +83,6 @@ def get_weights(): weight_std = get_weights() print("CoreNEURON run") - h.CVode().cache_efficient(1) coreneuron.enable = True coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) diff --git a/test/coreneuron/test_inputpresyn.py b/test/coreneuron/test_inputpresyn.py index 12d180b4fd..a1974a5fc5 100644 --- a/test/coreneuron/test_inputpresyn.py +++ b/test/coreneuron/test_inputpresyn.py @@ -60,7 +60,6 @@ def same(): spikes = sortspikes(spiketime, gidvec) assert spikes_std == spikes - h.CVode().cache_efficient(1) from neuron import coreneuron coreneuron.enable = 1 diff --git a/test/coreneuron/test_netmove.py b/test/coreneuron/test_netmove.py index 32d3539e67..5cbe1858e9 100644 --- 
a/test/coreneuron/test_netmove.py +++ b/test/coreneuron/test_netmove.py @@ -76,7 +76,6 @@ def run(tstop, mode): stdlist = [cell.result() for cell in cells] print("CoreNEURON run") - h.CVode().cache_efficient(1) coreneuron.enable = True coreneuron.verbose = 0 coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) diff --git a/test/coreneuron/test_nmodlrandom.py b/test/coreneuron/test_nmodlrandom.py new file mode 100644 index 0000000000..067f26979e --- /dev/null +++ b/test/coreneuron/test_nmodlrandom.py @@ -0,0 +1,169 @@ +# augmented to also checkpoint verify when RANDOM is permuted +from neuron.tests.utils.strtobool import strtobool +import os + +from neuron import h, coreneuron + +pc = h.ParallelContext() + + +class Cell: + def __init__(self, gid): + self.soma = h.Section(name="soma", cell=self) + self.soma.insert("noisychan") + if gid % 2 == 0: + # CoreNEURON permutation not the identity if cell topology not homogeneous + self.dend = h.Section(name="dend", cell=self) + self.dend.connect(self.soma(0.5)) + self.dend.insert("noisychan") + self.gid = gid + pc.set_gid2node(gid, pc.id()) + pc.cell(gid, h.NetCon(self.soma(0.5)._ref_v, None, sec=self.soma)) + + +def model(): + nslist = [h.NetStim() for _ in range(3)] + cells = [Cell(gid) for gid in range(10, 15)] + for gid, ns in enumerate(nslist): + ns.start = 0 + ns.number = 1e9 + ns.interval = 1 + ns.noise = 1 + pc.set_gid2node(gid, pc.id()) + pc.cell(gid, h.NetCon(ns, None)) + spiketime = h.Vector() + spikegid = h.Vector() + pc.spike_record(-1, spiketime, spikegid) + return nslist, spiketime, spikegid, cells + + +def pspike(m): + print("spike raster") + for i, t in enumerate(m[1]): + print("%.5f %g" % (t, m[2][i])) + + +def run(tstop, m): + pc.set_maxstep(10) + h.finitialize(-65) + pc.psolve(tstop) + + +def chk(std, m): + assert std[0].eq(m[1]) + assert std[1].eq(m[2]) + + +def test_embeded_run(): + m = model() + run(5, m) + std = [m[1].c(), m[2].c()] + pc.psolve(7) + std2 = [m[1].c(), m[2].c()] + + coreneuron.enable = True + coreneuron.verbose = 0 + coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) + run(5, m) + chk(std, m) + + coreneuron.enable = False + pc.psolve(7) + chk(std2, m) + + +def sortspikes(spiketime, gidvec): + return sorted(zip(spiketime, gidvec)) + + +def test_chkpnt(): + import shutil, os, platform, subprocess + + # clear out the old if any exist + shutil.rmtree("coredat", ignore_errors=True) + + m = model() + + # std spikes from 0-5 and 5-10 + run(5, m) + std1 = [m[1].c(), m[2].c()] + m[1].resize(0) + m[2].resize(0) + pc.psolve(10) + std2 = [m[1].c(), m[2].c()] + pspike(m) + + # Files for coreneuron runs + h.finitialize(-65) + pc.nrncore_write("coredat") + + def runcn(tstop, perm, args): + exe = os.path.join(os.getcwd(), platform.machine(), "special-core") + common = [ + "-d", + "coredat", + "--voltage", + "1000", + "--verbose", + "0", + "--cell-permute", + str(perm), + ] + + gpu_run = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) + if gpu_run: + common.append("--gpu") + + cmd = [exe] + ["--tstop", "{:g}".format(tstop)] + common + args + print(" ".join(cmd)) + subprocess.run( + cmd, + check=True, + shell=False, + ) + + runcn(5, 1, ["--checkpoint", "coredat/chkpnt", "-o", "coredat"]) + runcn(10, 1, ["--restore", "coredat/chkpnt", "-o", "coredat/chkpnt"]) + cmp_spks(sortspikes(std2[0], std2[1]), "coredat") + + # cleanup + shutil.rmtree("coredat", ignore_errors=True) + + +def cmp_spks(spikes, dir): # modified from test_pointer.py + import os, subprocess, 
shutil + + # sorted nrn standard spikes into dir/out.spk + with open(os.path.join(dir, "temp"), "w") as f: + for spike in spikes: + f.write("{:.8g}\t{}\n".format(spike[0], int(spike[1]))) + + # sometimes roundoff to %.8g gives different sort. + def help(cmd, name_in, name_out): + # `cmd` is some generic utility, which does not need to have a + # sanitizer runtime pre-loaded. LD_PRELOAD=/path/to/libtsan.so can + # cause problems for *nix utilities, so drop it if it was present. + env = os.environ.copy() + try: + del env["LD_PRELOAD"] + except KeyError: + pass + subprocess.run( + [ + shutil.which(cmd), + os.path.join(dir, name_in), + os.path.join(dir, name_out), + ], + check=True, + env=env, + shell=False, + ) + + help("sortspike", "temp", "nrn.spk") + help("sortspike", "chkpnt/out.dat", "chkpnt/out.spk") + help("cmp", "chkpnt/out.spk", "nrn.spk") + + +if __name__ == "__main__": + test_embeded_run() + test_chkpnt() diff --git a/test/coreneuron/test_nmodlrandom_syntax.py b/test/coreneuron/test_nmodlrandom_syntax.py new file mode 100644 index 0000000000..9e04092a7b --- /dev/null +++ b/test/coreneuron/test_nmodlrandom_syntax.py @@ -0,0 +1,99 @@ +from neuron import h +import subprocess +from pathlib import Path +import os + + +# default args generate accepted nmodl string +def modfile( + s0=":s0", + s1="RANDOM rv1, rv2", + s2=":s2", + s3=":s3", + s4="x1 = random_negexp(rv1, 1.0)", + s5=":s5", + s6="foo = random_negexp(rv1, 1.0)", +): + txt = """ +: temp.mod file with format elements to test for RANDOM syntax errors + +%s : 0 error if randomvar is mentioned + +NEURON { + SUFFIX temp + RANGE x1 + %s : 1 declare randomvars +} + +%s : 2 error if randomvar is mentioned + +ASSIGNED { + x1 +} + +BEFORE STEP { + %s : 3 error if assign or eval a randomvar + %s : 4 random_function accepted but ranvar must be first arg +} + +FUNCTION foo(arg) { + %s : 5 LOCAL ranvar makes it a double in this scope + %s : 6 random_function accepted but ranvar must be first arg + foo = arg +} +""" % ( + s0, + s1, + s2, + s3, + s4, + s5, + s6, + ) + return txt + + +def run(cmd): + result = subprocess.run(cmd, capture_output=True) + return result + + +def chk_nmodl(txt, program="nocmodl", rcode=False): + f = open("temp.mod", "w") + f.write(txt) + f.close() + result = run([program, "temp.mod"]) + ret = (result.returncode == 0) == rcode + if ret: + Path("temp.mod").unlink(missing_ok=True) + Path("temp.cpp").unlink(missing_ok=True) + else: + print("chk_nmodl ", program, " return code ", result.returncode) + print(txt) + print(result.stderr.decode()) + print(result.stdout.decode()) + return (result.returncode == 0) == rcode + + +def test_syntax(): + # nmodl could be external installation (not in PATH) + nmodl_binary = os.environ.get("NMODL_BINARY", "nmodl") + for program in ["nocmodl", nmodl_binary]: + foo = False + assert chk_nmodl(modfile(), program, rcode=True) + assert chk_nmodl(modfile(s0="ASSIGNED{rv1}"), program) + foo = True if program == "nocmodl" else False + assert chk_nmodl(modfile(s0="LOCAL rv1"), program, rcode=foo) + assert chk_nmodl(modfile(s2="ASSIGNED{rv1}"), program) + assert chk_nmodl(modfile(s2="LOCAL rv1"), program) + assert chk_nmodl(modfile(s3="rv1 = 1"), program) + assert chk_nmodl(modfile(s3="x1 = rv1"), program) + assert chk_nmodl(modfile(s4="foo(rv1)"), program) + assert chk_nmodl(modfile(s4="random_negexp()"), program) + assert chk_nmodl(modfile(s4="random_negexp(1.0)"), program) + assert chk_nmodl(modfile(s5="LOCAL rv1"), program) + assert chk_nmodl(modfile(s4="random_negexp(rv1, rv2)"), program) + + 
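test_syntax() above drives both nocmodl and the NMODL transpiler (the latter resolved through the NMODL_BINARY environment variable, which the CMake side sets to CORENRN_NMODL_BINARY) over a matrix of legal and illegal RANDOM usages, keyed purely on the translator's exit status. A stripped-down sketch of that exit-status probe, with an illustrative file name and the same cleanup as chk_nmodl above (a sketch, not part of the test suite):

import subprocess
from pathlib import Path

def translator_accepts(mod_text, translator="nocmodl"):
    # Write a throwaway mod file, run the translator on it, report whether it
    # was accepted, and clean up whatever the translator produced.
    Path("probe.mod").write_text(mod_text)
    try:
        result = subprocess.run([translator, "probe.mod"], capture_output=True)
    finally:
        Path("probe.mod").unlink(missing_ok=True)
        Path("probe.cpp").unlink(missing_ok=True)
    return result.returncode == 0

# e.g. translator_accepts(modfile()) should hold, while
# translator_accepts(modfile(s3="rv1 = 1")) should not.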
+if __name__ == "__main__": + test_syntax() diff --git a/test/coreneuron/test_pointer.py b/test/coreneuron/test_pointer.py index 1c08a351d2..3d56fd8a06 100644 --- a/test/coreneuron/test_pointer.py +++ b/test/coreneuron/test_pointer.py @@ -1,6 +1,8 @@ -import os from neuron.tests.utils.strtobool import strtobool from neuron import h +import os +import platform +import shutil import subprocess from subprocess import PIPE @@ -126,33 +128,12 @@ class Model: def __init__(self, ncell, nsec): self.cells = [Cell(i, nsec) for i in range(ncell)] self.update_pointers() - # Setup callback to update dipole POINTER for cache_efficiency - # The PtrVector is used only to support the callback. - self._callback_setup = h.PtrVector(1) - self._callback_setup.ptr_update_callback(self.update_pointers) def update_pointers(self): for cell in self.cells: cell.update_pointers() -def srun(cmd): - print("--------------------") - print(cmd) - r = subprocess.run(cmd, shell=True, stdout=PIPE, stderr=PIPE) - if r.returncode != 0: - print(r) - r.check_returncode() - - -def runcn(args): - import platform - - cpu = platform.machine() - cmd = cpu + "/special-core " + args - srun(cmd) - - def test_axial(): m = Model(5, 5) cvode.use_fast_imem(1) @@ -203,7 +184,6 @@ def run(tstop): std = run(tstop) - cvode.cache_efficient(1) chk(std, run(tstop)) from neuron import coreneuron @@ -217,8 +197,6 @@ def run(tstop): coreneuron.cell_permute = perm chk(std, run(tstop)) coreneuron.enable = False - - m._callback_setup = None # get rid of the callback first. del m @@ -227,18 +205,33 @@ def run_coreneuron_offline_checkpoint_restore(spikes_std): tpnts = [5.0, 10.0] for perm in [0, 1]: print("\n\ncell_permute ", perm) - common = "-d coredat --voltage 1000 --verbose 0 --cell-permute %d" % (perm,) + common = [ + "-d", + "coredat", + "--voltage", + "1000", + "--verbose", + "0", + "--cell-permute", + str(perm), + ] + + def run(tstop, args): + exe = os.path.join(os.getcwd(), platform.machine(), "special-core") + subprocess.run( + [exe] + ["--tstop", "{:g}".format(tstop)] + common + args, + check=True, + shell=False, + ) + # standard full run - runcn(common + " --tstop %g" % float(tpnts[-1]) + " -o coredat") + run(tpnts[-1], ["-o", "coredat"]) # sequence of checkpoints for i, tpnt in enumerate(tpnts): - tend = tpnt - restore = " --restore coredat/chkpnt%d" % (i,) if i > 0 else "" - checkpoint = " --checkpoint coredat/chkpnt%d" % (i + 1,) - outpath = " -o coredat/chkpnt%d" % (i + 1,) - runcn( - common + " --tstop %g" % (float(tend),) + outpath + restore + checkpoint - ) + restore = ["--restore", "coredat/chkpnt{}".format(i)] if i > 0 else [] + checkpoint = ["--checkpoint", "coredat/chkpnt{}".format(i + 1)] + outpath = ["-o", "coredat/chkpnt{}".format(i + 1)] + run(tpnt, outpath + restore + checkpoint) # compare spikes cmp_spks( @@ -251,7 +244,7 @@ def test_checkpoint(): return # clear out the old - srun("rm -r -f coredat") + shutil.rmtree("coredat", ignore_errors=True) m = Model(5, 5) # file mode CoreNEURON real cells need gids @@ -271,7 +264,6 @@ def test_checkpoint(): spktime = h.Vector() spkgid = h.Vector() pc.spike_record(-1, spktime, spkgid) - cvode.cache_efficient(1) pc.set_maxstep(10) h.finitialize(-65) pc.nrncore_write("coredat") @@ -298,7 +290,6 @@ def run(tstop): coreneuron.enable = False # Delete model before launching the CoreNEURON simulation offline - m._callback_setup = None pc.gid_clear() del m @@ -316,22 +307,39 @@ def run(tstop): def cmp_spks(spikes, dir, chkpntdirs): # sorted nrn standard spikes into dir/out.spk - f = open(dir + 
"/temp", "w") - for spike in spikes: - f.write("%.8g\t%d\n" % (spike[0], int(spike[1]))) - f.close() + with open(os.path.join(dir, "temp"), "w") as f: + for spike in spikes: + f.write("{:.8g}\t{}\n".format(spike[0], int(spike[1]))) # sometimes roundoff to %.8g gives different sort. - srun("sortspike {}/temp {}/nrn.spk".format(dir, dir)) - - srun("sortspike {}/out.dat {}/out.spk".format(dir, dir)) - srun("cmp {}/out.spk {}/nrn.spk".format(dir, dir)) + def help(cmd, name_in, name_out): + # `cmd` is some generic utility, which does not need to have a + # sanitizer runtime pre-loaded. LD_PRELOAD=/path/to/libtsan.so can + # cause problems for *nix utilities, so drop it if it was present. + env = os.environ.copy() + try: + del env["LD_PRELOAD"] + except KeyError: + pass + subprocess.run( + [ + shutil.which(cmd), + os.path.join(dir, name_in), + os.path.join(dir, name_out), + ], + check=True, + env=env, + shell=False, + ) - cmd = "cat" - for i in chkpntdirs: - cmd = cmd + " " + i + "/out.dat" - srun(cmd + " > " + dir + "/temp") - srun("sortspike {}/temp {}/chkptout.spk".format(dir, dir)) - srun("cmp {}/out.spk {}/chkptout.spk".format(dir, dir)) + help("sortspike", "temp", "nrn.spk") + help("sortspike", "out.dat", "out.spk") + help("cmp", "out.spk", "nrn.spk") + with open(os.path.join(dir, "temp"), "wb") as ofile: + for subdir in chkpntdirs: + with open(os.path.join(subdir, "out.dat"), "rb") as ifile: + shutil.copyfileobj(ifile, ofile) + help("sortspike", "temp", "chkptout.spk") + help("cmp", "out.spk", "chkptout.spk") if __name__ == "__main__": diff --git a/test/coreneuron/test_psolve.py b/test/coreneuron/test_psolve.py index 0411160e05..b196f26f82 100644 --- a/test/coreneuron/test_psolve.py +++ b/test/coreneuron/test_psolve.py @@ -48,7 +48,6 @@ def run(tstop): coreneuron.enable = True coreneuron.verbose = 0 coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) - h.CVode().cache_efficient(True) run(h.tstop) if vvec_std.eq(vvec) == 0: for i, x in enumerate(vvec_std): @@ -95,7 +94,6 @@ def test_NetStim_noise(): spikegid.resize(0) from neuron import coreneuron - h.CVode().cache_efficient(True) coreneuron.verbose = 0 coreneuron.enable = True for cell in cells.values(): diff --git a/test/coreneuron/test_spikes.py b/test/coreneuron/test_spikes.py index 677cffe81d..dc2185c804 100644 --- a/test/coreneuron/test_spikes.py +++ b/test/coreneuron/test_spikes.py @@ -1,5 +1,6 @@ from neuron.tests.utils.strtobool import strtobool import os +import tempfile # Hacky, but it's non-trivial to pass commandline arguments to pytest tests. 
enable_gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) @@ -55,7 +56,6 @@ def test_spikes( h.tstop = 10 h.cvode.use_fast_imem(1) - h.cvode.cache_efficient(1) pc = h.ParallelContext() @@ -76,13 +76,6 @@ def test_spikes( nrn_spike_t = nrn_spike_t.to_python() nrn_spike_gids = nrn_spike_gids.to_python() - # CORENEURON run - from neuron import coreneuron - - coreneuron.enable = True - coreneuron.gpu = enable_gpu - coreneuron.file_mode = file_mode - coreneuron.verbose = 0 corenrn_all_spike_t = h.Vector() corenrn_all_spike_gids = h.Vector() @@ -123,11 +116,20 @@ def run(mode): assert nrn_spike_t == corenrn_all_spike_t_py assert nrn_spike_gids == corenrn_all_spike_gids_py - if file_mode is False: - for mode in [0, 1, 2]: + # CORENEURON run + from neuron import coreneuron + + with coreneuron(enable=True, gpu=enable_gpu, file_mode=file_mode, verbose=0): + run_modes = [0] if file_mode else [0, 1, 2] + for mode in run_modes: run(mode) - else: - run(0) + # Make sure that file mode also works with custom coreneuron.data_path + if file_mode: + temp_coreneuron_data_folder = tempfile.TemporaryDirectory( + "coreneuron_input" + ) # auto removed + coreneuron.data_path = temp_coreneuron_data_folder.name + run(0) return h diff --git a/test/coreneuron/test_subworlds.py b/test/coreneuron/test_subworlds.py index 8fc4320ae9..ae9fbc34bf 100644 --- a/test/coreneuron/test_subworlds.py +++ b/test/coreneuron/test_subworlds.py @@ -57,7 +57,6 @@ def test_subworlds(): pc.set_gid2node(gid, pc.id()) pc.cell(gid, spike_detector) - h.cvode.cache_efficient(1) h.finitialize(-65) pc.set_maxstep(10) pc.psolve(250.0) diff --git a/test/coreneuron/test_units.py b/test/coreneuron/test_units.py index cd35abcb47..742aeb2f9d 100644 --- a/test/coreneuron/test_units.py +++ b/test/coreneuron/test_units.py @@ -1,4 +1,5 @@ from neuron.tests.utils.strtobool import strtobool +from neuron.expect_hocerr import expect_err import os from neuron import h @@ -7,6 +8,12 @@ def test_units(): + # should just emit warning + h.nrnunit_use_legacy(0) + + # should generate an error + expect_err("h.nrnunit_use_legacy(1)") + s = h.Section() pp = h.UnitsTest(s(0.5)) h.ion_style("na_ion", 1, 2, 1, 1, 0, sec=s) @@ -18,7 +25,6 @@ def test_units(): from neuron import coreneuron - h.CVode().cache_efficient(1) coreneuron.enable = True coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) pc.set_maxstep(10) diff --git a/test/coreneuron/test_watchrange.py b/test/coreneuron/test_watchrange.py index fe5bfce203..d121e87a3c 100644 --- a/test/coreneuron/test_watchrange.py +++ b/test/coreneuron/test_watchrange.py @@ -1,5 +1,6 @@ # Basically want to test that net_move statement doesn't get # mixed up with other instances. +# Augmented to also test RANDOM (Bounce2) with coreneuron permutation. 
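Bounce2 (watchrange2.mod above) declares RANDOM ran in its NEURON block, so every instance owns its own Random123 stream; from Python that stream is addressed directly through the point process, replacing the h.Random/noiseFromRandom123 plumbing still used for the original Bounce. A minimal sketch of driving it by hand, mirroring the Cell(gid, bounce=2) branch below (assumes the mechanism has been built with nrnivmodl):

from neuron import h

soma = h.Section(name="soma")
syn = h.Bounce2(soma(0.5))
syn.ran.set_ids(1, 0, 1)  # per-instance Random123 identifiers, as in Cell below
h.finitialize(-65)        # INITIAL block calls random_setseq(ran, 0)
while h.t < 10:
    h.fadvance()
print(syn.n_high, syn.n_mid, syn.n_low)  # RANGE counters updated in NET_RECEIVE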
from neuron.tests.utils.strtobool import strtobool import os @@ -13,7 +14,7 @@ class Cell: - def __init__(self, gid): + def __init__(self, gid, bounce=0): self.soma = h.Section(name="soma", cell=self) if gid % 2 == 0: # CoreNEURON permutation not the identity if cell topology not homogeneous @@ -21,11 +22,13 @@ def __init__(self, gid): self.dend.connect(self.soma(0.5)) self.gid = gid pc.set_gid2node(gid, pc.id()) - self.r = h.Random() - self.r.Random123(gid, 0, 0) - self.syn = h.Bounce(self.soma(0.5)) + if bounce == 0: + self.syn = h.Bounce(self.soma(0.5)) + self.syn.noiseFromRandom123(gid, 0, 1) + else: + self.syn = h.Bounce2(self.soma(0.5)) + self.syn.ran.set_ids(gid, 0, 1) pc.cell(gid, h.NetCon(self.soma(0.5)._ref_v, None, sec=self.soma)) - self.syn.noiseFromRandom123(gid, 0, 1) self.t1vec = h.Vector() self.t1vec.record(self.syn._ref_t1, sec=self.soma) self.xvec = h.Vector() @@ -44,7 +47,7 @@ def result(self): ) -def test_watchrange(): +def watchrange(): from neuron import coreneuron coreneuron.enable = False @@ -88,7 +91,6 @@ def run(tstop, mode): stdlist = [cell.result() for cell in cells] print("CoreNEURON run") - h.CVode().cache_efficient(1) coreneuron.enable = True coreneuron.verbose = 0 coreneuron.gpu = bool(strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false"))) @@ -146,16 +148,26 @@ def runassert(mode): for mode in [0, 1, 2]: runassert(mode) + # replace Bounce with Bounce2 (Uses RANDOM declaration) + pc.gid_clear() + cells = [Cell(gid, bounce=2) for gid in gids] + for mode in [0, 1, 2]: + runassert(mode) + coreneuron.enable = False # teardown pc.gid_clear() return stdlist, tvec +def test_watchrange(): + watchrange() + + if __name__ == "__main__": from neuron import gui - stdlist, tvec = test_watchrange() + stdlist, tvec = watchrange() g = h.Graph() print("n_high n_mid n_low") for i, result in enumerate(stdlist): diff --git a/test/coreneuron/unit/CMakeLists.txt b/test/coreneuron/unit/CMakeLists.txt index 742b762732..0b099c3eac 100644 --- a/test/coreneuron/unit/CMakeLists.txt +++ b/test/coreneuron/unit/CMakeLists.txt @@ -12,37 +12,20 @@ add_compile_definitions(${CORENRN_COMPILE_DEFS}) add_compile_options(${CORENRN_EXTRA_CXX_FLAGS}) add_link_options(${CORENRN_EXTRA_LINK_FLAGS}) -if(NOT Boost_USE_STATIC_LIBS) - add_definitions(-DBOOST_TEST_DYN_LINK=TRUE) -endif() - set(CMAKE_BUILD_RPATH ${CMAKE_BINARY_DIR}/bin/${CMAKE_HOST_SYSTEM_PROCESSOR}) - -set(Boost_NO_BOOST_CMAKE TRUE) -# Minimum set by needing the multi-argument version of BOOST_AUTO_TEST_CASE. 
-find_package(Boost 1.59 QUIET COMPONENTS filesystem system atomic unit_test_framework) - -if(Boost_FOUND) - if(CORENRN_ENABLE_UNIT_TESTS) - include_directories(${PROJECT_SOURCE_DIR}/src ${Boost_INCLUDE_DIRS}) - add_library(coreneuron-unit-test INTERFACE) - target_compile_options(coreneuron-unit-test - INTERFACE ${CORENEURON_BOOST_UNIT_TEST_COMPILE_FLAGS}) - target_include_directories(coreneuron-unit-test SYSTEM INTERFACE ${Boost_INCLUDE_DIRS} - ${CMAKE_BINARY_DIR}/include) - target_link_libraries(coreneuron-unit-test INTERFACE coreneuron-all) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/cmdline_interface) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/interleave_info) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/alignment) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/queueing) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/solver) - # lfp test uses nrnmpi_* wrappers but does not load the dynamic MPI library TODO: re-enable - # after NEURON and CoreNEURON dynamic MPI are merged - if(NOT CORENRN_ENABLE_MPI_DYNAMIC) - add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lfp) - endif() +if(CORENRN_ENABLE_UNIT_TESTS) + include_directories(${PROJECT_SOURCE_DIR}/src) + add_library(coreneuron-unit-test INTERFACE) + target_include_directories(coreneuron-unit-test SYSTEM INTERFACE ${CMAKE_BINARY_DIR}/include) + target_link_libraries(coreneuron-unit-test INTERFACE coreneuron-all) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/cmdline_interface) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/interleave_info) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/alignment) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/queueing) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/solver) + # lfp test uses nrnmpi_* wrappers but does not load the dynamic MPI library TODO: re-enable after + # NEURON and CoreNEURON dynamic MPI are merged + if(NOT NRN_ENABLE_MPI_DYNAMIC) + add_subdirectory(${CMAKE_CURRENT_SOURCE_DIR}/lfp) endif() - message(STATUS "Boost found, unit tests enabled") -else() - message(STATUS "Boost not found, unit tests disabled") endif() diff --git a/test/coreneuron/unit/alignment/CMakeLists.txt b/test/coreneuron/unit/alignment/CMakeLists.txt index 89da4da146..bf821b9d20 100644 --- a/test/coreneuron/unit/alignment/CMakeLists.txt +++ b/test/coreneuron/unit/alignment/CMakeLists.txt @@ -4,6 +4,6 @@ # See top-level LICENSE file for details. 
# ============================================================================= add_executable(alignment_test_bin alignment.cpp) -target_link_libraries(alignment_test_bin coreneuron-unit-test) +target_link_libraries(alignment_test_bin coreneuron-unit-test Catch2::Catch2) add_test(NAME alignment_test COMMAND $) cpp_cc_configure_sanitizers(TARGET alignment_test_bin TEST alignment_test) diff --git a/test/coreneuron/unit/alignment/alignment.cpp b/test/coreneuron/unit/alignment/alignment.cpp index a31db74095..9a7b5ebadf 100644 --- a/test/coreneuron/unit/alignment/alignment.cpp +++ b/test/coreneuron/unit/alignment/alignment.cpp @@ -7,12 +7,12 @@ */ #include "coreneuron/utils/memory.h" -#include -#define BOOST_TEST_MODULE PaddingCheck -#include +#define CATCH_CONFIG_MAIN +#include #include #include +#include template struct data { @@ -20,117 +20,121 @@ struct data { static const int chunk = n; }; -typedef boost::mpl::list, data> chunk_default_data_type; - -typedef boost::mpl::list, - data, - data, - data, - data, - data, - data, - data, - data, - data> +typedef std::tuple, data> chunk_default_data_type; + +typedef std::tuple, + data, + data, + data, + data, + data, + data, + data, + data, + data> chunk_data_type; -BOOST_AUTO_TEST_CASE(padding_simd) { +TEST_CASE("padding_simd", "[PaddingCheck]") { /** AOS test */ int pad = coreneuron::soa_padded_size<1>(11, 1); - BOOST_CHECK_EQUAL(pad, 11); + REQUIRE(pad == 11); /** SOA tests with 11 */ pad = coreneuron::soa_padded_size<1>(11, 0); - BOOST_CHECK_EQUAL(pad, 11); + REQUIRE(pad == 11); pad = coreneuron::soa_padded_size<2>(11, 0); - BOOST_CHECK_EQUAL(pad, 12); + REQUIRE(pad == 12); pad = coreneuron::soa_padded_size<4>(11, 0); - BOOST_CHECK_EQUAL(pad, 12); + REQUIRE(pad == 12); pad = coreneuron::soa_padded_size<8>(11, 0); - BOOST_CHECK_EQUAL(pad, 16); + REQUIRE(pad == 16); pad = coreneuron::soa_padded_size<16>(11, 0); - BOOST_CHECK_EQUAL(pad, 16); + REQUIRE(pad == 16); pad = coreneuron::soa_padded_size<32>(11, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); /** SOA tests with 32 */ pad = coreneuron::soa_padded_size<1>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); pad = coreneuron::soa_padded_size<2>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); pad = coreneuron::soa_padded_size<4>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); pad = coreneuron::soa_padded_size<8>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); pad = coreneuron::soa_padded_size<16>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); pad = coreneuron::soa_padded_size<32>(32, 0); - BOOST_CHECK_EQUAL(pad, 32); + REQUIRE(pad == 32); /** SOA tests with 33 */ pad = coreneuron::soa_padded_size<1>(33, 0); - BOOST_CHECK_EQUAL(pad, 33); + REQUIRE(pad == 33); pad = coreneuron::soa_padded_size<2>(33, 0); - BOOST_CHECK_EQUAL(pad, 34); + REQUIRE(pad == 34); pad = coreneuron::soa_padded_size<4>(33, 0); - BOOST_CHECK_EQUAL(pad, 36); + REQUIRE(pad == 36); pad = coreneuron::soa_padded_size<8>(33, 0); - BOOST_CHECK_EQUAL(pad, 40); + REQUIRE(pad == 40); pad = coreneuron::soa_padded_size<16>(33, 0); - BOOST_CHECK_EQUAL(pad, 48); + REQUIRE(pad == 48); pad = coreneuron::soa_padded_size<32>(33, 0); - BOOST_CHECK_EQUAL(pad, 64); + REQUIRE(pad == 64); } /// Even number is randomly depends of the TYPE!!! and the number of elements. /// This test work for 64 bits type not for 32 bits. 
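The values asserted in padding_simd above are consistent with reading soa_padded_size<chunk>(n, 0) as rounding the element count up to the next multiple of the SIMD chunk in the SoA layout, while the AoS case (layout 1) is returned unpadded. A quick arithmetic cross-check of that reading in plain Python, independent of the C++ API:

def padded(n, chunk):
    # round n up to the next multiple of chunk (ceiling division)
    return -(-n // chunk) * chunk

assert [padded(11, c) for c in (1, 2, 4, 8, 16, 32)] == [11, 12, 12, 16, 16, 32]
assert [padded(33, c) for c in (1, 2, 4, 8, 16, 32)] == [33, 34, 36, 40, 48, 64]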
-BOOST_AUTO_TEST_CASE_TEMPLATE(memory_alignment_simd_false, T, chunk_default_data_type) { - const int c = T::chunk; +TEMPLATE_LIST_TEST_CASE("memory_alignment_simd_false", + "[memory_alignment_simd_false]", + chunk_default_data_type) { + const int c = TestType::chunk; int total_size_chunk = coreneuron::soa_padded_size(247, 0); int ne = 6 * total_size_chunk; - typename T::value_type* data = - (typename T::value_type*) coreneuron::ecalloc_align(ne, sizeof(typename T::value_type), 16); + typename TestType::value_type* data = (typename TestType::value_type*) + coreneuron::ecalloc_align(ne, sizeof(typename TestType::value_type), 16); for (int i = 1; i < 6; i += 2) { bool b = coreneuron::is_aligned((data + i * total_size_chunk), 16); - BOOST_CHECK_EQUAL(b, 0); + REQUIRE_FALSE(b); } for (int i = 0; i < 6; i += 2) { bool b = coreneuron::is_aligned((data + i * total_size_chunk), 16); - BOOST_CHECK_EQUAL(b, 1); + REQUIRE(b); } free_memory(data); } -BOOST_AUTO_TEST_CASE_TEMPLATE(memory_alignment_simd_true, T, chunk_data_type) { - const int c = T::chunk; +TEMPLATE_LIST_TEST_CASE("memory_alignment_simd_true", + "[memory_alignment_simd_true]", + chunk_data_type) { + const int c = TestType::chunk; int total_size_chunk = coreneuron::soa_padded_size(247, 0); int ne = 6 * total_size_chunk; - typename T::value_type* data = - (typename T::value_type*) coreneuron::ecalloc_align(ne, sizeof(typename T::value_type), 16); + typename TestType::value_type* data = (typename TestType::value_type*) + coreneuron::ecalloc_align(ne, sizeof(typename TestType::value_type), 16); for (int i = 0; i < 6; ++i) { bool b = coreneuron::is_aligned((data + i * total_size_chunk), 16); - BOOST_CHECK_EQUAL(b, 1); + REQUIRE(b); } free_memory(data); diff --git a/test/coreneuron/unit/cmdline_interface/CMakeLists.txt b/test/coreneuron/unit/cmdline_interface/CMakeLists.txt index cc98ad78d7..c2448d4e7b 100644 --- a/test/coreneuron/unit/cmdline_interface/CMakeLists.txt +++ b/test/coreneuron/unit/cmdline_interface/CMakeLists.txt @@ -4,6 +4,6 @@ # See top-level LICENSE file for details. 
# ============================================================================= add_executable(cmd_interface_test_bin test_cmdline_interface.cpp) -target_link_libraries(cmd_interface_test_bin coreneuron-unit-test) +target_link_libraries(cmd_interface_test_bin coreneuron-unit-test Catch2::Catch2) add_test(NAME cmd_interface_test COMMAND $) cpp_cc_configure_sanitizers(TARGET cmd_interface_test_bin TEST cmd_interface_test) diff --git a/test/coreneuron/unit/cmdline_interface/test_cmdline_interface.cpp b/test/coreneuron/unit/cmdline_interface/test_cmdline_interface.cpp index ccd9e1f66d..bd5e94bf66 100644 --- a/test/coreneuron/unit/cmdline_interface/test_cmdline_interface.cpp +++ b/test/coreneuron/unit/cmdline_interface/test_cmdline_interface.cpp @@ -7,14 +7,14 @@ */ #include "coreneuron/apps/corenrn_parameters.hpp" -#define BOOST_TEST_MODULE cmdline_interface -#include +#define CATCH_CONFIG_MAIN +#include #include using namespace coreneuron; -BOOST_AUTO_TEST_CASE(cmdline_interface) { +TEST_CASE("cmdline_interface") { const char* argv[] = { "nrniv-core", @@ -80,55 +80,55 @@ BOOST_AUTO_TEST_CASE(cmdline_interface) { corenrn_param_test.parse(argc, const_cast(argv)); // discarding const as CLI11 // interface is not const - BOOST_CHECK(corenrn_param_test.seed == -1); // testing default value + REQUIRE(corenrn_param_test.seed == -1); // testing default value - BOOST_CHECK(corenrn_param_test.spikebuf == 100); + REQUIRE(corenrn_param_test.spikebuf == 100); - BOOST_CHECK(corenrn_param_test.threading == true); + REQUIRE(corenrn_param_test.threading == true); - BOOST_CHECK(corenrn_param_test.dt == 0.02); + REQUIRE(corenrn_param_test.dt == 0.02); - BOOST_CHECK(corenrn_param_test.tstop == 0.1); + REQUIRE(corenrn_param_test.tstop == 0.1); - BOOST_CHECK(corenrn_param_test.prcellgid == 12); + REQUIRE(corenrn_param_test.prcellgid == 12); #ifdef CORENEURON_ENABLE_GPU - BOOST_CHECK(corenrn_param_test.gpu == true); + REQUIRE(corenrn_param_test.gpu == true); #else - BOOST_CHECK(corenrn_param_test.gpu == false); + REQUIRE(corenrn_param_test.gpu == false); #endif - BOOST_CHECK(corenrn_param_test.dt_io == 0.2); + REQUIRE(corenrn_param_test.dt_io == 0.2); - BOOST_CHECK(corenrn_param_test.forwardskip == 0.02); + REQUIRE(corenrn_param_test.forwardskip == 0.02); - BOOST_CHECK(corenrn_param_test.celsius == 25.12); + REQUIRE(corenrn_param_test.celsius == 25.12); - BOOST_CHECK(corenrn_param_test.mpi_enable == true); + REQUIRE(corenrn_param_test.mpi_enable == true); - BOOST_CHECK(corenrn_param_test.cell_interleave_permute == 2); + REQUIRE(corenrn_param_test.cell_interleave_permute == 2); - BOOST_CHECK(corenrn_param_test.voltage == -32); + REQUIRE(corenrn_param_test.voltage == -32); - BOOST_CHECK(corenrn_param_test.nwarp == 8); + REQUIRE(corenrn_param_test.nwarp == 8); - BOOST_CHECK(corenrn_param_test.multisend == true); + REQUIRE(corenrn_param_test.multisend == true); - BOOST_CHECK(corenrn_param_test.mindelay == 0.1); + REQUIRE(corenrn_param_test.mindelay == 0.1); - BOOST_CHECK(corenrn_param_test.ms_phases == 1); + REQUIRE(corenrn_param_test.ms_phases == 1); - BOOST_CHECK(corenrn_param_test.ms_subint == 2); + REQUIRE(corenrn_param_test.ms_subint == 2); - BOOST_CHECK(corenrn_param_test.spkcompress == 32); + REQUIRE(corenrn_param_test.spkcompress == 32); - BOOST_CHECK(corenrn_param_test.multisend == true); + REQUIRE(corenrn_param_test.multisend == true); // Reset all parameters to their default values. corenrn_param_test.reset(); // Should match a default-constructed set of parameters. 
- BOOST_CHECK_EQUAL(corenrn_param_test.voltage, corenrn_parameters{}.voltage); + REQUIRE(corenrn_param_test.voltage == corenrn_parameters{}.voltage); // Everything has its default value, and the first `false` says not to // include default values in the output, so this should be empty - BOOST_CHECK(corenrn_param_test.config_to_str(false, false).empty()); + REQUIRE(corenrn_param_test.config_to_str(false, false).empty()); } diff --git a/test/coreneuron/unit/interleave_info/CMakeLists.txt b/test/coreneuron/unit/interleave_info/CMakeLists.txt index cda875eaec..5e16d0760a 100644 --- a/test/coreneuron/unit/interleave_info/CMakeLists.txt +++ b/test/coreneuron/unit/interleave_info/CMakeLists.txt @@ -4,6 +4,6 @@ # See top-level LICENSE file for details. # ============================================================================= add_executable(interleave_info_bin check_constructors.cpp) -target_link_libraries(interleave_info_bin coreneuron-unit-test) +target_link_libraries(interleave_info_bin coreneuron-unit-test Catch2::Catch2) add_test(NAME interleave_info_constructor_test COMMAND $) cpp_cc_configure_sanitizers(TARGET interleave_info_bin TEST interleave_info_constructor_test) diff --git a/test/coreneuron/unit/interleave_info/check_constructors.cpp b/test/coreneuron/unit/interleave_info/check_constructors.cpp index 00353072ca..d780f772a4 100644 --- a/test/coreneuron/unit/interleave_info/check_constructors.cpp +++ b/test/coreneuron/unit/interleave_info/check_constructors.cpp @@ -7,42 +7,41 @@ */ #include "coreneuron/permute/cellorder.hpp" -#define BOOST_TEST_MODULE cmdline_interface -#include +#define CATCH_CONFIG_MAIN +#include +#include using namespace coreneuron; -BOOST_AUTO_TEST_CASE(interleave_info_test) { - size_t nwarp = 4; - size_t nstride = 6; +TEST_CASE("interleave_info_test", "[interleave_info]") { + const size_t nwarp = 4; + const size_t nstride = 6; InterleaveInfo info1; - int data1[] = {11, 37, 45, 2, 18, 37, 7, 39, 66, 33}; - size_t data2[] = {111, 137, 245, 12, 118, 237, 199, 278, 458}; + std::array data1 = {11, 37, 45, 2, 18, 37, 7, 39, 66, 33}; + std::array data2 = {111, 137, 245, 12, 118, 237, 199, 278, 458}; info1.nwarp = nwarp; info1.nstride = nstride; // to avoid same values, different sub-array is used to initialize different members - copy_align_array(info1.stridedispl, data1, nwarp + 1); - copy_align_array(info1.stride, data1 + 1, nstride); - copy_align_array(info1.firstnode, data1 + 1, nwarp + 1); - copy_align_array(info1.lastnode, data1 + 1, nwarp + 1); + copy_align_array(info1.stridedispl, data1.data(), nwarp + 1); + copy_align_array(info1.stride, data1.data() + 1, nstride); + copy_align_array(info1.firstnode, data1.data() + 1, nwarp + 1); + copy_align_array(info1.lastnode, data1.data() + 1, nwarp + 1); // check if copy_array works - BOOST_CHECK_NE(info1.firstnode, info1.lastnode); - BOOST_CHECK_EQUAL_COLLECTIONS(info1.firstnode, - info1.firstnode + nwarp + 1, - info1.lastnode, - info1.lastnode + nwarp + 1); + REQUIRE(info1.firstnode != info1.lastnode); + REQUIRE(std::equal( + info1.firstnode, info1.firstnode + nwarp + 1, info1.lastnode, info1.lastnode + nwarp + 1)); - copy_align_array(info1.cellsize, data1 + 4, nwarp); - copy_array(info1.nnode, data2, nwarp); - copy_array(info1.ncycle, data2 + 1, nwarp); - copy_array(info1.idle, data2 + 2, nwarp); - copy_array(info1.cache_access, data2 + 3, nwarp); - copy_array(info1.child_race, data2 + 4, nwarp); + copy_align_array(info1.cellsize, data1.data() + 4, nwarp); + copy_array(info1.nnode, data2.data(), nwarp); + 
copy_array(info1.ncycle, data2.data() + 1, nwarp); + copy_array(info1.idle, data2.data() + 2, nwarp); + copy_array(info1.cache_access, data2.data() + 3, nwarp); + copy_array(info1.child_race, data2.data() + 4, nwarp); // copy constructor InterleaveInfo info2(info1); @@ -58,27 +57,13 @@ BOOST_AUTO_TEST_CASE(interleave_info_test) { // test few members for (size_t i = 0; i < infos.size(); i++) { - BOOST_CHECK_EQUAL(info1.nwarp, infos[i]->nwarp); - BOOST_CHECK_EQUAL(info1.nstride, infos[i]->nstride); - - BOOST_CHECK_EQUAL_COLLECTIONS(info1.stridedispl, - info1.stridedispl + nwarp + 1, - infos[i]->stridedispl, - infos[i]->stridedispl + nwarp + 1); - - BOOST_CHECK_EQUAL_COLLECTIONS(info1.stride, - info1.stride + nstride, - infos[i]->stride, - infos[i]->stride + nstride); - - BOOST_CHECK_EQUAL_COLLECTIONS(info1.cellsize, - info1.cellsize + nwarp, - infos[i]->cellsize, - infos[i]->cellsize + nwarp); - - BOOST_CHECK_EQUAL_COLLECTIONS(info1.child_race, - info1.child_race + nwarp, - infos[i]->child_race, - infos[i]->child_race + nwarp); + REQUIRE(info1.nwarp == infos[i]->nwarp); + REQUIRE(info1.nstride == infos[i]->nstride); + + REQUIRE( + std::equal(info1.stridedispl, info1.stridedispl + nwarp + 1, infos[i]->stridedispl)); + REQUIRE(std::equal(info1.stride, info1.stride + nstride, infos[i]->stride)); + REQUIRE(std::equal(info1.cellsize, info1.cellsize + nwarp, infos[i]->cellsize)); + REQUIRE(std::equal(info1.child_race, info1.child_race + nwarp, infos[i]->child_race)); } } diff --git a/test/coreneuron/unit/lfp/CMakeLists.txt b/test/coreneuron/unit/lfp/CMakeLists.txt index 34231b9f96..56600060b5 100644 --- a/test/coreneuron/unit/lfp/CMakeLists.txt +++ b/test/coreneuron/unit/lfp/CMakeLists.txt @@ -4,7 +4,7 @@ # See top-level LICENSE file for details. # ============================================================================= add_executable(lfp_test_bin lfp.cpp) -target_link_libraries(lfp_test_bin coreneuron-unit-test) +target_link_libraries(lfp_test_bin coreneuron-unit-test Catch2::Catch2) add_test(NAME lfp_test COMMAND $) cpp_cc_configure_sanitizers(TARGET lfp_test_bin TEST lfp_test) set_property( diff --git a/test/coreneuron/unit/lfp/lfp.cpp b/test/coreneuron/unit/lfp/lfp.cpp index 4357f044b6..24d60cf3ec 100644 --- a/test/coreneuron/unit/lfp/lfp.cpp +++ b/test/coreneuron/unit/lfp/lfp.cpp @@ -10,8 +10,8 @@ #include "coreneuron/io/reports/report_event.hpp" #include "coreneuron/mpi/nrnmpi.h" -#define BOOST_TEST_MODULE LFPTest -#include +#define CATCH_CONFIG_MAIN +#include #include @@ -29,7 +29,7 @@ double integral(F f, double a, double b, int n) { } -BOOST_AUTO_TEST_CASE(LFP_PointSource_LineSource) { +TEST_CASE("LFP_PointSource_LineSource") { #if NRNMPI nrnmpi_init(nullptr, nullptr, false); #endif @@ -74,15 +74,17 @@ BOOST_AUTO_TEST_CASE(LFP_PointSource_LineSource) { // TEST of analytic vs numerical integration std::clog << "ANALYTIC line source " << analytic_circling_lfp << " vs NUMERIC line source LFP " << numeric_circling_lfp << "\n"; - BOOST_REQUIRE_CLOSE(analytic_circling_lfp, numeric_circling_lfp, 1.0e-6); + REQUIRE(Approx(analytic_circling_lfp).margin(1.0e-6) == numeric_circling_lfp); // TEST of LFP Flooring - BOOST_REQUIRE((approaching_elec[1] < 0.866e-6) ? 
analytic_approaching_lfp == 1.0e6 : true); + if (approaching_elec[1] < 0.866e-6) { + REQUIRE(analytic_approaching_lfp == 1.0e6); + } vals[k] = analytic_circling_lfp; } // TEST of SYMMETRY of LFP FORMULA for (size_t k = 0; k < 5; k++) { - BOOST_REQUIRE(std::abs((vals[k] - vals[k + 5]) / - std::max(std::abs(vals[k]), std::abs(vals[k + 5]))) < 1.0e-12); + REQUIRE(std::abs((vals[k] - vals[k + 5]) / + std::max(std::abs(vals[k]), std::abs(vals[k + 5]))) < 1.0e-12); } std::vector> segments_starts = {{0., 0., 1.}, {0., 0., 0.5}, @@ -102,15 +104,18 @@ BOOST_AUTO_TEST_CASE(LFP_PointSource_LineSource) { segments_starts, segments_ends, radii, indices, electrodes, 1.0); lfpp.template lfp>({0.0, 1.0, 2.0, 3.0}); std::vector res_point_source = lfpp.lfp_values(); - BOOST_REQUIRE_CLOSE(res_line_source[0], res_point_source[0], 1.0); - BOOST_REQUIRE_CLOSE(res_line_source[1], res_point_source[1], 1.0); + REQUIRE(res_line_source[0] == Approx(res_point_source[0]).margin(1.0)); + REQUIRE(res_line_source[1] == Approx(res_point_source[1]).margin(1.0)); #if NRNMPI nrnmpi_finalize(); #endif } #ifdef ENABLE_SONATA_REPORTS -BOOST_AUTO_TEST_CASE(LFP_ReportEvent) { +#define CATCH_CONFIG_MAIN +#include + +TEST_CASE("LFP_ReportEvent") { const std::string report_name = "compartment_report"; const std::vector gids = {42, 134}; const std::vector segment_ids = {0, 1, 2, 3, 4}; @@ -123,18 +128,17 @@ BOOST_AUTO_TEST_CASE(LFP_ReportEvent) { for (const auto& gid: gids) { mapinfo->mappingvec.push_back(new CellMapping(gid)); for (const auto& segment: segment_ids) { - mapinfo->mappingvec.back()->add_segment_lfp_factor(segment, - {segment + 1.0, segment + 2.0}); + std::vector lfp_factors{segment + 1.0, segment + 2.0}; + mapinfo->mappingvec.back()->add_segment_lfp_factor(segment, lfp_factors); } } mapinfo->prepare_lfp(); // Total number of electrodes 2 gids * 2 factors - BOOST_REQUIRE_EQUAL(mapinfo->_lfp.size(), 4); CellMapping* c42 = mapinfo->mappingvec[0]; CellMapping* c134 = mapinfo->mappingvec[1]; - BOOST_REQUIRE_EQUAL(c42->lfp_factors.size(), 5); - BOOST_REQUIRE_EQUAL(c134->num_electrodes(), 2); + REQUIRE(c42->lfp_factors.size() == 5); + REQUIRE(c134->num_electrodes() == 2); // Pass _lfp variable to vars_to_report size_t offset_lfp = 0; @@ -175,8 +179,8 @@ BOOST_AUTO_TEST_CASE(LFP_ReportEvent) { ReportEvent event(dt, tstart, vars_to_report, report_name.data(), report_dt, report_type); event.lfp_calc(&nt); - BOOST_REQUIRE_CLOSE(mapinfo->_lfp[0], 5.5, 1.0); - BOOST_REQUIRE_CLOSE(mapinfo->_lfp[3], 7.0, 1.0); + REQUIRE(mapinfo->_lfp[0] == Approx(5.5).margin(1.0)); + REQUIRE(mapinfo->_lfp[3] == Approx(7.0).margin(1.0)); delete mapinfo; delete nt.nrn_fast_imem; diff --git a/test/coreneuron/unit/queueing/CMakeLists.txt b/test/coreneuron/unit/queueing/CMakeLists.txt index 05b2a12f2a..08250a479b 100644 --- a/test/coreneuron/unit/queueing/CMakeLists.txt +++ b/test/coreneuron/unit/queueing/CMakeLists.txt @@ -4,6 +4,6 @@ # See top-level LICENSE file for details. 
# ============================================================================= add_executable(queuing_test_bin test_queueing.cpp) -target_link_libraries(queuing_test_bin coreneuron-unit-test) +target_link_libraries(queuing_test_bin coreneuron-unit-test Catch2::Catch2) add_test(NAME queuing_test COMMAND $) cpp_cc_configure_sanitizers(TARGET queuing_test_bin TEST queuing_test) diff --git a/test/coreneuron/unit/queueing/test_queueing.cpp b/test/coreneuron/unit/queueing/test_queueing.cpp index 4c6e08a0d8..6f6ef6739a 100644 --- a/test/coreneuron/unit/queueing/test_queueing.cpp +++ b/test/coreneuron/unit/queueing/test_queueing.cpp @@ -8,8 +8,8 @@ #include "coreneuron/network/netcvode.hpp" #include "coreneuron/network/tqueue.hpp" -#define BOOST_TEST_MODULE QueueingTest -#include +#define CATCH_CONFIG_MAIN +#include #include #include @@ -17,7 +17,7 @@ using namespace coreneuron; // UNIT TESTS -BOOST_AUTO_TEST_CASE(priority_queue_nq_dq) { +TEST_CASE("priority_queue_nq_dq") { TQueue tq = TQueue(); const int num = 8; int cnter = 0; @@ -25,7 +25,7 @@ BOOST_AUTO_TEST_CASE(priority_queue_nq_dq) { for (int i = 0; i < num; ++i) tq.insert(static_cast(i), NULL); - BOOST_CHECK(tq.pq_que_.size() == (num - 1)); + REQUIRE(tq.pq_que_.size() == (num - 1)); // dequeue items with time <= 5.0. Should be 6 events: from 0. to 5. TQItem* item = NULL; @@ -33,8 +33,8 @@ BOOST_AUTO_TEST_CASE(priority_queue_nq_dq) { ++cnter; delete item; } - BOOST_CHECK(cnter == 6); - BOOST_CHECK(tq.pq_que_.size() == (num - 6 - 1)); + REQUIRE(cnter == 6); + REQUIRE(tq.pq_que_.size() == (num - 6 - 1)); // dequeue the rest while ((item = tq.atomic_dq(8.0)) != NULL) { @@ -42,12 +42,12 @@ BOOST_AUTO_TEST_CASE(priority_queue_nq_dq) { delete item; } - BOOST_CHECK(cnter == num); - BOOST_CHECK(tq.pq_que_.empty()); - BOOST_CHECK(tq.least() == NULL); + REQUIRE(cnter == num); + REQUIRE(tq.pq_que_.empty()); + REQUIRE(tq.least() == NULL); } -BOOST_AUTO_TEST_CASE(tqueue_ordered_test) { +TEST_CASE("tqueue_ordered_test") { TQueue tq = TQueue(); const int num = 10; int cnter = 0; @@ -63,41 +63,41 @@ BOOST_AUTO_TEST_CASE(tqueue_ordered_test) { TQItem* item = NULL; // dequeue all items and check that previous item time <= current item time while ((item = tq.atomic_dq(10.0)) != NULL) { - BOOST_CHECK(time <= item->t_); + REQUIRE(time <= item->t_); ++cnter; time = item->t_; delete item; } - BOOST_CHECK(cnter == num); - BOOST_CHECK(tq.pq_que_.empty()); - BOOST_CHECK(tq.least() == NULL); + REQUIRE(cnter == num); + REQUIRE(tq.pq_que_.empty()); + REQUIRE(tq.least() == NULL); } -BOOST_AUTO_TEST_CASE(tqueue_move_nolock) {} +TEST_CASE("tqueue_move_nolock") {} -BOOST_AUTO_TEST_CASE(tqueue_remove) {} +TEST_CASE("tqueue_remove") {} -BOOST_AUTO_TEST_CASE(threaddata_interthread_send) { +TEST_CASE("threaddata_interthread_send") { NetCvodeThreadData nt{}; const size_t num = 6; for (size_t i = 0; i < num; ++i) nt.interthread_send(static_cast(i), NULL, NULL); - BOOST_CHECK(nt.inter_thread_events_.size() == num); + REQUIRE(nt.inter_thread_events_.size() == num); } /* -BOOST_AUTO_TEST_CASE(threaddata_enqueue){ +TEST_CASE(threaddata_enqueue){ NetCvode n = NetCvode(); const int num = 6; for(int i = 0; i < num; ++i) n.p[1].interthread_send(static_cast(i), NULL, NULL); - BOOST_CHECK(n.p[1].inter_thread_events_.size() == num); + REQUIRE(n.p[1].inter_thread_events_.size() == num); //enqueue the inter_thread_events_ n.p[1].enqueue(&n, &(n.p[1])); - BOOST_CHECK(n.p[1].inter_thread_events_.empty()); - BOOST_CHECK(n.p[1].tqe_->pq_que_.size() == num); + 
REQUIRE(n.p[1].inter_thread_events_.empty()); + REQUIRE(n.p[1].tqe_->pq_que_.size() == num); //cleanup priority queue TQItem* item = NULL; diff --git a/test/coreneuron/unit/solver/CMakeLists.txt b/test/coreneuron/unit/solver/CMakeLists.txt index f8bc522879..74af549094 100644 --- a/test/coreneuron/unit/solver/CMakeLists.txt +++ b/test/coreneuron/unit/solver/CMakeLists.txt @@ -4,6 +4,6 @@ # See top-level LICENSE file for details. # ============================================================================= add_executable(test-solver test_solver.cpp) -target_link_libraries(test-solver coreneuron-unit-test) +target_link_libraries(test-solver coreneuron-unit-test Catch2::Catch2) add_test(NAME test-solver COMMAND $) cpp_cc_configure_sanitizers(TARGET test-solver TEST test-solver) diff --git a/test/coreneuron/unit/solver/test_solver.cpp b/test/coreneuron/unit/solver/test_solver.cpp index 52d4c3e7de..dcdd93d6f7 100644 --- a/test/coreneuron/unit/solver/test_solver.cpp +++ b/test/coreneuron/unit/solver/test_solver.cpp @@ -11,8 +11,8 @@ #include "coreneuron/permute/node_permute.h" #include "coreneuron/sim/multicore.hpp" -#define BOOST_TEST_MODULE CoreNEURON solver -#include +#define CATCH_CONFIG_MAIN +#include #include #include @@ -22,8 +22,6 @@ #include using namespace coreneuron; -namespace utf = boost::unit_test; - struct SolverData { std::vector d, rhs; @@ -158,15 +156,15 @@ struct SetupThreads { } // Check we didn't mess up populating any parent indices for (auto i = 0; i < nt.end; ++i) { - BOOST_REQUIRE(parent_indices[i] != magic_index_value); + REQUIRE(parent_indices[i] != magic_index_value); // Root nodes should come first for --cell-permute=0 if (i < nt.ncell) { - BOOST_REQUIRE(parent_indices[i] == -1); + REQUIRE(parent_indices[i] == -1); } } if (interleave_permute_type) { nt._permute = interleave_order(nt.id, nt.ncell, nt.end, parent_indices); - BOOST_REQUIRE(nt._permute); + REQUIRE(nt._permute); permute_data(vec_a, nt.end, nt._permute); permute_data(vec_b, nt.end, nt._permute); // This isn't done in CoreNEURON because these are reset every @@ -191,8 +189,8 @@ struct SetupThreads { std::cout << "\n...no more warnings expected" << std::endl; } // Make sure we produced the number of cells we were aiming for - BOOST_REQUIRE(total_cells == config.num_cells); - BOOST_REQUIRE(num_cells_remaining == 0); + REQUIRE(total_cells == config.num_cells); + REQUIRE(num_cells_remaining == 0); } ~SetupThreads() { @@ -236,9 +234,9 @@ struct SetupThreads { } delete[] inv_permute; for (auto i = 0; i < nt.end; ++i) { - BOOST_REQUIRE(sd.d[i] != magic_double_value); - BOOST_REQUIRE(sd.parent_index[i] != magic_index_value); - BOOST_REQUIRE(sd.rhs[i] != magic_double_value); + REQUIRE(sd.d[i] != magic_double_value); + REQUIRE(sd.parent_index[i] != magic_index_value); + REQUIRE(sd.rhs[i] != magic_double_value); } } return ret; @@ -285,24 +283,21 @@ void compare_solver_data( // CellPermute0_CPU is the simplest version of the solver, it should always // be present and it's a good reference to use constexpr auto ref_impl = SolverImplementation::CellPermute0_CPU; - BOOST_REQUIRE(solver_data.find(ref_impl) != solver_data.end()); + REQUIRE(solver_data.find(ref_impl) != solver_data.end()); auto const& ref_data = solver_data.at(ref_impl); for (auto const& [impl, impl_data]: solver_data) { // Must have compatible numbers of threads. 
- BOOST_REQUIRE(impl_data.size() == ref_data.size()); + REQUIRE(impl_data.size() == ref_data.size()); std::cout << "Comparing " << impl << " to " << ref_impl << std::endl; for (auto n_thread = 0ul; n_thread < impl_data.size(); ++n_thread) { // Must have compatible numbers of segments/data entries - BOOST_REQUIRE(impl_data[n_thread].d.size() == ref_data[n_thread].d.size()); - BOOST_REQUIRE(impl_data[n_thread].parent_index.size() == - ref_data[n_thread].parent_index.size()); - BOOST_REQUIRE(impl_data[n_thread].rhs.size() == ref_data[n_thread].rhs.size()); - BOOST_TEST(impl_data[n_thread].d == ref_data[n_thread].d, - boost::test_tools::per_element()); - BOOST_TEST(impl_data[n_thread].parent_index == ref_data[n_thread].parent_index, - boost::test_tools::per_element()); - BOOST_TEST(impl_data[n_thread].rhs == ref_data[n_thread].rhs, - boost::test_tools::per_element()); + REQUIRE(impl_data[n_thread].d.size() == ref_data[n_thread].d.size()); + REQUIRE(impl_data[n_thread].parent_index.size() == + ref_data[n_thread].parent_index.size()); + REQUIRE(impl_data[n_thread].rhs.size() == ref_data[n_thread].rhs.size()); + CHECK_THAT(impl_data[n_thread].d, Catch::Approx(ref_data[n_thread].d)); + REQUIRE(impl_data[n_thread].parent_index == ref_data[n_thread].parent_index); + CHECK_THAT(impl_data[n_thread].rhs, Catch::Approx(ref_data[n_thread].rhs)); } } } @@ -321,39 +316,38 @@ auto compare_all_active_implementations(Args&&... args) { // from the pseudorandom seeded tests. constexpr double default_tolerance = 2e-11; -// May need to add some different tolerances here -BOOST_AUTO_TEST_CASE(SingleCellAndThread, *utf::tolerance(default_tolerance)) { +TEST_CASE("SingleCellAndThread", "[solver][single-thread]") { constexpr std::size_t segments = 32; ToyModelConfig config{}; config.num_segments_per_cell = segments; auto const solver_data = compare_all_active_implementations(config); for (auto const& [impl, data]: solver_data) { - BOOST_REQUIRE(data.size() == 1); // nthreads - BOOST_REQUIRE(data[0].d.size() == segments); - BOOST_REQUIRE(data[0].parent_index.size() == segments); - BOOST_REQUIRE(data[0].rhs.size() == segments); + REQUIRE(data.size() == 1); // nthreads + REQUIRE(data[0].d.size() == segments); + REQUIRE(data[0].parent_index.size() == segments); + REQUIRE(data[0].rhs.size() == segments); } } -BOOST_AUTO_TEST_CASE(UnbalancedCellSingleThread, *utf::tolerance(default_tolerance)) { +TEST_CASE("UnbalancedCellSingleThread", "[solver][single-thread]") { ToyModelConfig config{}; config.num_segments_per_cell = 19; // not a nice round number compare_all_active_implementations(config); } -BOOST_AUTO_TEST_CASE(LargeCellSingleThread, *utf::tolerance(default_tolerance)) { +TEST_CASE("LargeCellSingleThread", "[solver][single-thread]") { ToyModelConfig config{}; config.num_segments_per_cell = 4096; compare_all_active_implementations(config); } -BOOST_AUTO_TEST_CASE(ManySmallCellsSingleThread, *utf::tolerance(default_tolerance)) { +TEST_CASE("ManySmallCellsSingleThread", "[solver][single-thread]") { ToyModelConfig config{}; config.num_cells = 1024; compare_all_active_implementations(config); } -BOOST_AUTO_TEST_CASE(ManySmallCellsMultiThread, *utf::tolerance(default_tolerance)) { +TEST_CASE("ManySmallCellsMultiThread", "[solver][multi-thread]") { ToyModelConfig config{}; config.num_cells = 1024; config.num_threads = 2; @@ -379,13 +373,13 @@ auto random_config() { return config; } -BOOST_AUTO_TEST_CASE(LargeCellSingleThreadRandom, *utf::tolerance(default_tolerance)) { +TEST_CASE("LargeCellSingleThreadRandom", 
"[solver][single-thread][random]") { auto config = random_config(); config.num_segments_per_cell = 4096; compare_all_active_implementations(config); } -BOOST_AUTO_TEST_CASE(ManySmallCellsSingleThreadRandom, *utf::tolerance(default_tolerance)) { +TEST_CASE("ManySmallCellsSingleThreadRandom", "[solver][single-thread][random]") { auto config = random_config(); config.num_cells = 1024; compare_all_active_implementations(config); diff --git a/test/cover/test_netcvode.py b/test/cover/test_netcvode.py index 073c7c6142..19ac3f2612 100644 --- a/test/cover/test_netcvode.py +++ b/test/cover/test_netcvode.py @@ -83,7 +83,7 @@ def node(): def ev(*arg): print("ev t=%g v=%g x=%g nc.x=%g" % (h.t, s(0.5).v, src.x, nc.x)) ref_t, ref_x = results[arg[0]][arg[1]] - assert h.t == ref_t + assert math.isclose(h.t, ref_t, rel_tol=1e-15) assert math.isclose(src.x, ref_x, rel_tol=1e-13) def run(): diff --git a/test/datahandle/opaque_token.mod b/test/datahandle/opaque_token.mod new file mode 100644 index 0000000000..95c5b8ca78 --- /dev/null +++ b/test/datahandle/opaque_token.mod @@ -0,0 +1,16 @@ +COMMENT + In mechanism libraries, cannot use + auto const token = nrn_ensure_model_data_are_sorted(); + because the reference is incomplete (from include/neuron/model_data_fwd.hpp). + So use an opaque version of the reference if need to call a function with + an argument of type neuron::model_sorted_token const& +ENDCOMMENT + +NEURON { SUFFIX nothing } + +PROCEDURE opaque_advance(){ + VERBATIM + auto const token = nrn_ensure_model_data_are_sorted_opaque(); + nrn_fixed_step(token); + ENDVERBATIM +} diff --git a/test/datahandle/ptr.mod b/test/datahandle/ptr.mod new file mode 100644 index 0000000000..fa6f1538a3 --- /dev/null +++ b/test/datahandle/ptr.mod @@ -0,0 +1,14 @@ +NEURON { + POINT_PROCESS Pnt + POINTER p +} + +ASSIGNED { + p +} + +PROCEDURE pr() { + printf("*p = %g\n", p) +} + + diff --git a/test/datahandle/test_1.py b/test/datahandle/test_1.py new file mode 100644 index 0000000000..f85feb180f --- /dev/null +++ b/test/datahandle/test_1.py @@ -0,0 +1,198 @@ +from neuron import h +from neuron.expect_hocerr import expect_err, set_quiet + +pc = h.ParallelContext() + + +def test_1(): + a = h.Section(name="axon") + a.nseg = 5 + rv = {seg.x: seg._ref_v for seg in a.allseg()} + + def set(val): + for seg in a.allseg(): + seg.v = val + 10.0 * seg.x + + def cmp(val): + set(val) + # test evaluation + for x in rv: + assert a(x).v == rv[x][0] + # test assignment + x = 0.5 + y = a(x).v * 2.0 + rv[x][0] = y + assert a(x).v == y + + cmp(10) + + a.nseg *= 3 # simulations now 9 times more accurate spatially + cmp(20) + + a.nseg = 5 + cmp(30) + + +def test_2(): + a = h.Section(name="axon") + a.nseg = 5 + rv = {seg.x: seg._ref_v for seg in a.allseg()} + h.finitialize(-65) + assert rv[0.3][0] == -65.0 + expect_err("print(rv[0.3][1])") + expect_err("rv[0.3][1] = 50.0") + a.nseg = 3 + print(rv[0.3]) + expect_err("print(rv[0.3][0])") + expect_err("rv[0.3][0] = 0.1") + assert rv[0.5][0] == -65.0 + del a + expect_err("print(rv[0.5][0])") + + del rv + locals() + + +def test_3(): + a = h.Section(name="axon") + a.nseg = 5 + pv = a(0.1)._ref_v + a.nseg = 1 + v = h.Vector() + expect_err("v.record(pv, v, sec=a)") + del v, a + locals() + + +def test_4(): + a = h.Section(name="axon") + pnt = h.Pnt(0.5) + expect_err("print(pnt.p)") + expect_err("pnt.p = 5.0") + pnt._ref_p = a(0.5)._ref_v + a.v = 25 + print(pnt.p, a.v) + print(pnt.p, a.v) + print(pnt.p, a.v) + a.nseg = 5 + pnt._ref_p = a(0.1)._ref_v + print(pnt.p, a(0.1)._ref_v) + a.nseg = 1 + 
expect_err("print(pnt.p, a(0.1).v)") + + del pnt, a + locals() + + +def test_py2n_component(): + print("test py2n_component use of nrnpy_hoc_pop()") + + a = h.Section(name="axon") + a.nseg = 5 + a.v = 50.0 + + class Pntr: + def __init__(self, ref): + self.p = ref + self.bas = [25] + + def foo(self, bar, bas): + print(bar, bas) + + p = Pntr(a(0.1)._ref_v) + assert p.p[0] == a(0.1).v + print("p.p", p.p) + h( + r""" +proc tst_py2n_component1() { + nseg = $3 + $o1.foo(&$&2, 1) +} + +func tst_py2n_component2_nseg() { + printf("inside tst_py2n_component2_nseg\n") + nseg = $1 + return nseg +} + +proc tst_py2n_component2() { + printf("nseg = %d\n", nseg) + nseg = 1 + $o1.foo(&$&2, tst_py2n_component2_nseg(1)) + printf("nseg = %d\n", nseg) +} + +proc tst_py2n_component3() { + nseg = 1 + printf("%g\n",$&1) +} + +""" + ) + + a.nseg = 5 + h.tst_py2n_component1(p, a(0.1)._ref_v, 5, sec=a) + vref = a(0.1)._ref_v + print("Expect: arg 1 error: Invalid data handle") + expect_err("h.tst_py2n_component1(p, vref, 1, sec=a)") + print("Expect: Invalid pointer (arg 1)") + expect_err("h.tst_py2n_component1(p, vref, 1, sec=a)") + + a.nseg = 5 + print("Expect: arg 1 error: Invalid data handle") + expect_err("h.tst_py2n_component2(p, a(.1)._ref_v, sec=a)") + + a.nseg = 5 + print("Expect: hoc argument 1 is an invalid datahandle") + expect_err("h.tst_py2n_component3(a(.1)._ref_v, sec=a)") + + del p, vref, a + locals() + print("leaving test_py2n_component") + + +def test_hocobj_getitem(): + v = h.Vector(5).indgen() + assert v.x[2] == 2.0 # will call nrnpy_hoc_pxpop when VAR on stack + # There appears no way to do that with a datahandle though + a = h.Section(name="axon") + h("objref test_hocobj_getitem_vref") + h.test_hocobj_getitem_vref = a(0.5)._ref_v + assert h.test_hocobj_getitem_vref is None + + +def test_array_dim_change(): + # not really a datapointer test, but... 
+ print("declare double array_dim_change[5][2]") + h("double array_dim_change[5][2]") + x2 = h.array_dim_change[3] + print("declare double array_dim_change[10]") + h("double array_dim_change[10]") + expect_err("print(x2[1])") + + print("declare double array_dim_change[5][2]") + h("double array_dim_change[5][2]") + x2 = h.array_dim_change[4] + print("x2 is ", x2) + print("declare double array_dim_change[2][2]") + h("double array_dim_change[2][2]") + print("x2 is ", x2) + expect_err("print('x2[1] is ', x2[1])") + print("x2 is ", x2) + expect_err("x2[1] = 5.0") + + del x2 + locals() + print("leaving test_array_dim_change") + + +if __name__ == "__main__": + set_quiet(False) + test_1() + test_2() + test_3() + test_4() + test_py2n_component() + test_hocobj_getitem() + test_array_dim_change() + h.topology() diff --git a/test/datahandle/test_token.py b/test/datahandle/test_token.py new file mode 100644 index 0000000000..0a6fa26859 --- /dev/null +++ b/test/datahandle/test_token.py @@ -0,0 +1,36 @@ +from neuron import h + +# model - membrane action potential +def model(): + s = h.Section() + s.L = s.diam = h.sqrt(100.0 / h.PI) + s.insert("hh") + ic = h.IClamp(s(0.5)) + ic.delay = 0.5 + ic.dur = 0.1 + ic.amp = 0.3 + return s, ic + + +def run(method): + h.finitialize(-65.0) + while h.t < 1.0: + method() + + +def test_run(): + s, ic = model() + tvec = h.Vector().record(h._ref_t, sec=s) + vvec = h.Vector().record(s(0.5)._ref_v, sec=s) + + run(h.fadvance) + tstd = tvec.c() + vstd = vvec.c() + + run(h.opaque_advance) + assert tvec.eq(tstd) + assert vvec.eq(vstd) + + +if __name__ == "__main__": + test_run() diff --git a/test/external/CMakeLists.txt b/test/external/CMakeLists.txt index cb6423c4c9..7a9c8f3e63 100644 --- a/test/external/CMakeLists.txt +++ b/test/external/CMakeLists.txt @@ -19,7 +19,7 @@ FetchContent_Declare( FetchContent_Declare( nrntest GIT_REPOSITORY https://github.com/neuronsimulator/nrntest - GIT_TAG a669e7172b3fd4210731b29870d22fb267b96bfb + GIT_TAG 8f7cf8f9301bfef386601309747e2440a3c11830 SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/tests/nrntest) FetchContent_Declare( @@ -53,8 +53,8 @@ endif() if("channel-benchmark" IN_LIST NRN_ENABLE_MODEL_TESTS) FetchContent_Declare( channel-benchmark - GIT_REPOSITORY git@bbpgitlab.epfl.ch:hpc/channel-benchmark.git - GIT_TAG 85e282834ec04c48adbd4f38d04bbbce6fb99afc + GIT_REPOSITORY https://github.com/bluebrain/nmodlbench.git + GIT_TAG 2313db91599bcdd83e4291ab508d1e4474e87f25 SOURCE_DIR ${PROJECT_SOURCE_DIR}/external/tests/channel-benchmark) FetchContent_MakeAvailable(channel-benchmark) add_subdirectory(channel-benchmark) diff --git a/test/external/olfactory-bulb-3d/CMakeLists.txt b/test/external/olfactory-bulb-3d/CMakeLists.txt index 0c143949a2..6fb6990cce 100644 --- a/test/external/olfactory-bulb-3d/CMakeLists.txt +++ b/test/external/olfactory-bulb-3d/CMakeLists.txt @@ -6,7 +6,7 @@ if(NRN_ENABLE_CORENEURON AND NOT CORENRN_ENABLE_SHARED) set(olfactory_bulb_3d_neuron_args -mpi -python) else() set(preload_sanitizer PRELOAD_SANITIZER) - set(launch_executable ${preload_sanitizer_mpiexec} ${PYTHON_EXECUTABLE}) + set(launch_executable ${preload_sanitizer_mpiexec} ${NRN_DEFAULT_PYTHON_EXECUTABLE}) set(extra_env NEURON_INIT_MPI=1) endif() list(APPEND olfactory_bulb_3d_neuron_args bulb3dtest.py) diff --git a/test/external/ringtest/CMakeLists.txt b/test/external/ringtest/CMakeLists.txt index bf9e2830d0..d082f1fd99 100644 --- a/test/external/ringtest/CMakeLists.txt +++ b/test/external/ringtest/CMakeLists.txt @@ -15,8 +15,8 @@ set(ringtest_prefix ${MPIEXEC_NAME} 
${MPIEXEC_NUMPROC_FLAG} ${ringtest_mpi_ranks ${MPIEXEC_OVERSUBSCRIBE} ${MPIEXEC_PREFLAGS}) set(ringtest_special ${ringtest_prefix} special ${MPIEXEC_POSTFLAGS} -mpi -python ringtest.py) set(ringtest_special_core ${ringtest_prefix} special-core ${MPIEXEC_POSTFLAGS}) -set(ringtest_python ${ringtest_prefix} ${preload_sanitizer_mpiexec} ${PYTHON_EXECUTABLE} - ${MPIEXEC_POSTFLAGS} ringtest.py) +set(ringtest_python ${ringtest_prefix} ${preload_sanitizer_mpiexec} + ${NRN_DEFAULT_PYTHON_EXECUTABLE} ${MPIEXEC_POSTFLAGS} ringtest.py) # Step 2 -- add configurations to the group (e.g. here NEURON without MPI) When CoreNEURON is # enabled then TABLE statements are disabled in hh.mod, which causes slight numerical differences in diff --git a/test/external/tqperf/CMakeLists.txt b/test/external/tqperf/CMakeLists.txt index 1345582e29..90fb400b61 100644 --- a/test/external/tqperf/CMakeLists.txt +++ b/test/external/tqperf/CMakeLists.txt @@ -11,7 +11,7 @@ if("tqperf-heavy" IN_LIST NRN_ENABLE_MODEL_TESTS) else() set(tqperf_mpi_ranks 2) endif() -set(python_executable ${preload_sanitizer_mpiexec} ${PYTHON_EXECUTABLE}) +set(python_executable ${preload_sanitizer_mpiexec} ${NRN_DEFAULT_PYTHON_EXECUTABLE}) set(special_executable special) set(python_requires coreneuron_shared) set(special_requires coreneuron) diff --git a/test/gjtests/test_natrans.py b/test/gjtests/test_natrans.py index 0dbcd8bb47..53077d1cd5 100644 --- a/test/gjtests/test_natrans.py +++ b/test/gjtests/test_natrans.py @@ -75,7 +75,6 @@ def test_natrans(): target.sgid = sgid cvode = h.CVode() - cvode.cache_efficient(1) pc.set_maxstep(10) pc.setup_transfer() diff --git a/test/hoctests/rand.mod b/test/hoctests/rand.mod new file mode 100644 index 0000000000..a0147a2291 --- /dev/null +++ b/test/hoctests/rand.mod @@ -0,0 +1,43 @@ +NEURON { + SUFFIX rantst + RANGE x1, x2 + RANDOM ran1, ran2 +} + +ASSIGNED { + x1 x2 +} + +INITIAL { + random_setseq(ran1, 5) + random_setseq(ran2, 5) + x1 = random_uniform(ran1) +} + +BEFORE STEP { + x2 = random_normal(ran2) +} + +FUNCTION uniform0() { + uniform0 = random_uniform(ran1) +} + +FUNCTION uniform2(min, max) { + uniform2 = random_uniform(ran1, min, max) +} + +FUNCTION negexp0() { + negexp0 = random_negexp(ran1) +} + +FUNCTION negexp1(mean) { + negexp1 = random_negexp(ran1, mean) +} + +FUNCTION normal0() { + normal0 = random_normal(ran1) +} + +FUNCTION normal2(mean, std) { + normal2 = random_normal(ran1, mean, std) +} diff --git a/test/hoctests/rand_art.mod b/test/hoctests/rand_art.mod new file mode 100644 index 0000000000..bdeac87022 --- /dev/null +++ b/test/hoctests/rand_art.mod @@ -0,0 +1,50 @@ +NEURON { + ARTIFICIAL_CELL RanArt + RANGE x1, x2, mean + RANDOM ran1, ran2 +} + +PARAMETER { mean = .1 (ms) } + +ASSIGNED { + x1 x2 +} + +INITIAL { + random_setseq(ran1, 5) + random_setseq(ran2, 5) + x1 = random_uniform(ran1) + net_send(mean*negexp0(), 1) +} + +NET_RECEIVE(w){ + if (flag == 1) { + net_send(mean*negexp0(), 1) + net_event(t) + x2 = t + } +} + +FUNCTION uniform0() { + uniform0 = random_uniform(ran1) +} + +FUNCTION uniform2(min, max) { + uniform2 = random_uniform(ran1, min, max) +} + +FUNCTION negexp0() { + negexp0 = random_negexp(ran1) +} + +FUNCTION negexp1(mean) { + negexp1 = random_negexp(ran1, mean) +} + +FUNCTION normal0() { + normal0 = random_normal(ran1) +} + +FUNCTION normal2(mean, std) { + normal2 = random_normal(ran1, mean, std) +} diff --git a/test/hoctests/rand_pp.mod b/test/hoctests/rand_pp.mod new file mode 100644 index 0000000000..046f8bcf96 --- /dev/null +++ b/test/hoctests/rand_pp.mod @@ -0,0 
+1,43 @@ +NEURON { + POINT_PROCESS RanPP + RANGE x1, x2 + RANDOM ran1, ran2 +} + +ASSIGNED { + x1 x2 +} + +INITIAL { + random_setseq(ran1, 5) + random_setseq(ran2, 5) + x1 = random_uniform(ran1) +} + +BEFORE STEP { + x2 = random_normal(ran2) +} + +FUNCTION uniform0() { + uniform0 = random_uniform(ran1) +} + +FUNCTION uniform2(min, max) { + uniform2 = random_uniform(ran1, min, max) +} + +FUNCTION negexp0() { + negexp0 = random_negexp(ran1) +} + +FUNCTION negexp1(mean) { + negexp1 = random_negexp(ran1, mean) +} + +FUNCTION normal0() { + normal0 = random_normal(ran1) +} + +FUNCTION normal2(mean, std) { + normal2 = random_normal(ran1, mean, std) +} diff --git a/test/hoctests/sdata.inc b/test/hoctests/sdata.inc new file mode 100644 index 0000000000..b4b0284b01 --- /dev/null +++ b/test/hoctests/sdata.inc @@ -0,0 +1,98 @@ +: Tests for exploring setdata +NEURON { + RANGE a, b, c + POINTER p +} + +PARAMETER { + a = 0 + b = 1 + c[3] +} + +ASSIGNED { p } + +INITIAL { + a = 1 + b = 2 + c[1] = 3 +} + +PROCEDURE AA() { : prior to A to get some extra lines of coverage + A(1,2,3) +} + +PROCEDURE A(x,y,z) { + a = x + b = y + c[1] = z +} + +PROCEDURE Aexp() { + printf("exp(a) is %g\n", exp(a)) +} + +PROCEDURE Aexp2(x) { + LOCAL a + a = x + printf("exp(%g) is %g\n", a, exp(a)) +} + +FUNCTION Aexcept(x) { : can test exception +VERBATIM + hoc_execerr_ext("exception in Aexcept, x=%g", _lx); +ENDVERBATIM + Aexcept = x +} + +FUNCTION C(i) { + C = c[i] +} + +PROCEDURE d(x) { + e(x) +} + +PROCEDURE e(x) { + a = x +} + +FUNCTION f(x) { + if (x > 0) { + f = g(x) + }else{ + f = 0 + } +} + +PROCEDURE g(x) { + b = b + f(x-1) +} + +FUNCTION h(x) { +VERBATIM + _lh = _lx*_lx; +ENDVERBATIM +} + +FUNCTION k(x) { + if (x > 0) { + k = k(x-1) + x + } else { + k = 0 + } +} + +PROCEDURE P() { + a = p +} + +FUNCTION foo(x) { + foo = ft(x) +} + +FUNCTION bar() { + bar = ft(a) +} + +FUNCTION_TABLE ft(arg) diff --git a/test/hoctests/sdata.mod b/test/hoctests/sdata.mod new file mode 100644 index 0000000000..1d0758b226 --- /dev/null +++ b/test/hoctests/sdata.mod @@ -0,0 +1,6 @@ +: Tests for exploring setdata +NEURON { + SUFFIX sdata +} + +INCLUDE "sdata.inc" diff --git a/test/hoctests/sdata_pp.mod b/test/hoctests/sdata_pp.mod new file mode 100644 index 0000000000..5169251b66 --- /dev/null +++ b/test/hoctests/sdata_pp.mod @@ -0,0 +1,6 @@ +: Tests for exploring setdata +NEURON { + POINT_PROCESS SData +} + +INCLUDE "sdata.inc" diff --git a/test/hoctests/sdata_ppts.mod b/test/hoctests/sdata_ppts.mod new file mode 100644 index 0000000000..06dac743d1 --- /dev/null +++ b/test/hoctests/sdata_ppts.mod @@ -0,0 +1,7 @@ +: Tests for exploring setdata +NEURON { + THREADSAFE + POINT_PROCESS SDataTS +} + +INCLUDE "sdata.inc" diff --git a/test/hoctests/sdata_ts.mod b/test/hoctests/sdata_ts.mod new file mode 100644 index 0000000000..e33345eb02 --- /dev/null +++ b/test/hoctests/sdata_ts.mod @@ -0,0 +1,7 @@ +: Tests for exploring setdata +NEURON { + THREADSAFE + SUFFIX sdatats +} + +INCLUDE "sdata.inc" diff --git a/test/hoctests/tests/test_cvinterp.py b/test/hoctests/tests/test_cvinterp.py new file mode 100644 index 0000000000..afcbdc2cbb --- /dev/null +++ b/test/hoctests/tests/test_cvinterp.py @@ -0,0 +1,60 @@ +# cvode and ida should interpolate correctly +# Consider a simulation with linearly increasing solution (charging capacitor) +# If during a free running large time step, one requests the value of +# the voltage in the middle of that time step, the voltage should be +# the average of the voltages at the beginning and end of the time step. 
+# Prior to this PR, IDA failed this test + +from neuron import h + +h.load_file("stdrun.hoc") +import math + + +def model(): + s = h.Section(name="s") + s.L = s.diam = h.sqrt(100.0 / h.PI) + ic = h.IClamp(s(0.5)) + ic.dur = 10 + ic.amp = 0.01 + return s, ic + + +def run(ida): + h.cvode_active(1) + h.cvode.use_daspk(ida) + h.v_init = 0.0 + h.run() + + +def points(vecs): + return [x for x in zip(vecs[0], vecs[1])] + + +def test(): + m = model() + vref = m[0](0.5)._ref_v + freerun = [h.Vector().record(h._ref_t), h.Vector().record(vref)] + for ida in [0, 1]: + run(ida) + n = len(freerun[0]) + std = [v.c() for v in freerun] + pts = points(freerun)[-3:-1] + midpnt = [(pts[0][i] + pts[1][i]) / 2 for i in range(2)] + trec = h.Vector([midpnt[0]]) + rec = [h.Vector().record(h._ref_t, trec), h.Vector().record(vref, trec)] + run(ida) + print(midpnt) + print(points(rec)) + + assert len(freerun[0]) == n # rec does not add to original freerun + for i, s in enumerate(std): # and rec did not affect freerun. + for j, v in enumerate(s): + assert math.isclose(v, freerun[i][j]) + + for i in range(2): + assert math.isclose(midpnt[i], rec[i][0]) + + +if __name__ == "__main__": + test() diff --git a/test/hoctests/tests/test_hocGUI2.py b/test/hoctests/tests/test_hocGUI2.py new file mode 100644 index 0000000000..70598e9d47 --- /dev/null +++ b/test/hoctests/tests/test_hocGUI2.py @@ -0,0 +1,142 @@ +# tests of GUI with hoc variables that go out of scope or move + +from neuron import config, h, gui +from neuron.expect_hocerr import expect_err, set_quiet +import os + +set_quiet(False) + +h( + """ +var1 = 0.0 +proc act1() { print "hoc var1 = ", var1 } +proc actvec() { print "hoc vec.x[0] = ", $o1.x[0] } +proc actcell() {forall {print secname(), " ", g_pas(.5)}} +""" +) + + +class Cell: + def __init__(self, id): + self.id = id + s = self.soma = h.Section(name="soma", cell=self) + s.diam = 10.0 + s.L = 100.0 / h.PI / s(0.5).diam + s.insert("pas") + s.g_pas = 0.001 + s.e_pas = -65.0 + + def __str__(self): + return "Cell_%d" % self.id + + +class GUI: + def __init__(self, cell): + self.cell = cell + self.vec = h.Vector(1) # for hoc ref variable self.vec._ref_x[0] + self.build() + self.map() + + def build(self): + self.box = h.HBox() + self.box.intercept(1) + self.box.ref(self) + + h.xpanel("") + h.xstatebutton("hoc var1", h._ref_var1, "act1()") + h.xcheckbox("hoc var1", h._ref_var1, "act1()") + h.xmenu("hoc var1") + h.xcheckbox("hoc var1", h._ref_var1, "act1()") + h.xmenu() + h.xslider(h._ref_var1, 0, 1, 0, 1) + h.xvalue("hoc var1", h._ref_var1, 1, "act1()") + + h.xstatebutton("vec.x[0]", self.vec._ref_x[0], "actvec(%s)" % self.vec) + h.xcheckbox("vec.x[0]", self.vec._ref_x[0], "actvec(%s)" % self.vec) + h.xmenu("vec.x[0]") + h.xcheckbox("vec.x[0]", self.vec._ref_x[0], "actvec(%s)" % self.vec) + h.xmenu() + h.xslider(self.vec._ref_x[0], 0, 1, 0, 1) + h.xvalue("vec.x[0]", self.vec._ref_x[0], 1, "actvec(%s)" % self.vec) + + h.xstatebutton( + "cell.soma(.5).pas.g", self.cell.soma(0.5).pas._ref_g, "actcell()" + ) + h.xcheckbox("cell.soma(.5).pas.g", self.cell.soma(0.5).pas._ref_g, "actcell()") + h.xmenu("cell.soma(.5).pas.g") + h.xcheckbox("cell.soma(.5).pas.g", self.cell.soma(0.5).pas._ref_g, "actcell()") + h.xmenu() + h.xslider(self.cell.soma(0.5).pas._ref_g, 0, 1, 0, 1) + h.xvalue("cell.soma(.5).pas.g", self.cell.soma(0.5).pas._ref_g, 1, "actcell()") + h.xpanel() + + self.box.intercept(0) + + def map(self): + self.box.map("GUI test with %s" % str(self.cell)) + + def act1(self, vec): + print("hoc vec.x[0] = ", vec.x[0]) + + 
+def test1(): # statebutton, checkbox, slider + cells = [Cell(i) for i in range(5)] + gui = GUI(cells[3]) + h("delete var1") # does NOT gray out the items + gui.vec = None + gui.cell = None + # sliders do not get grayed out + return gui + + +def test2(): # Graph.xexpr + cells = [Cell(i) for i in range(5)] + for cell in cells: + cell.soma.uninsert("pas") + cell.soma.insert("hh") + ic = cell.ic = h.IClamp(cell.soma(0.5)) + ic.delay = 0.5 + ic.dur = 0.1 + ic.amp = 0.3 + h.newPlotV() + if config.arguments["NRN_ENABLE_INTERVIEWS"] and "DISPLAY" in os.environ: + expect_err('h.graphItem.xexpr("_pysec.Cell_3.soma(0.5).ina", 1)') + h.graphItem.xexpr("_pysec.Cell_3.soma.ina(0.5)", 1) + h.run() + h.graphItem.exec_menu("View = plot") + cells = cells[3] # moves cell[3] to beginning of soa by deleting all others + h.run() + return cells + + +def test_Hinton(): # PlotShape.hinton + class Net: + def __init__(self, netid): + self.netid = netid + self.cells = [ + [Cell(i + 10 * j + 100 * netid) for i in range(10)] for j in range(10) + ] + + net = [Net(i) for i in range(3)] + + def pnet(net): + sl = h.SectionList() # leave empty so only hinton plot + s = h.PlotShape(sl) + for i, cellrow in enumerate(net.cells): + for j, cell in enumerate(cellrow): + s.hinton(cell.soma(0.5)._ref_v, i, j, 1, 1) + cell.soma(0.5).v = i / 10.0 + j / 10.0 + + s.size(-1, 11, -1, 11) + s.scale(0, 2) + return s, sl, net + + rval = pnet(net[1]) + + return rval # net[1] is only remaining Net (in rval) + + +if __name__ == "__main__": + gui = test1() + cells = test2() + net = test_Hinton() diff --git a/test/hoctests/tests/test_kschan.json b/test/hoctests/tests/test_kschan.json new file mode 100644 index 0000000000..911ad4fcd4 --- /dev/null +++ b/test/hoctests/tests/test_kschan.json @@ -0,0 +1,1875 @@ +{ + "khh inserted": "c7fac0a91458c013fc1d958de60c4b82", + "khh same except for na_ion": "dd67b008494c0ad9cc15272e5d4dec5d", + "nahh now": "e347cfdcaf29a795d128c8e6d448ace7", + "cb.ks.pr()": "7086b928209ac8d065c50226b66a9e92", + "nahh cvode=True": [ + [ + 0.0, + 0.00245193773986701, + 0.00490387547973402, + 0.011343926658017558, + 0.017783977836301094, + 0.02422402901458463, + 0.03442215740629855, + 0.05959292637493886, + 0.0717927520065163, + 0.08399257763809374, + 0.09619240326967118, + 0.09999999999999779, + 0.10000000000000112, + 0.10263805068250757, + 0.10527610136501403, + 0.11205936160250875, + 0.11884262184000347, + 0.12562588207749817, + 0.13240914231499287, + 0.13919240255248758, + 0.15226722278104374, + 0.1653420430095999, + 0.17841686323815606, + 0.19149168346671222, + 0.2205861694524643, + 0.24968065543821635, + 0.2768889711481657, + 0.304097286858115, + 0.3313056025680643, + 0.3585139182780136, + 0.37525437399062056, + 0.3919948297032275, + 0.4087352854158345, + 0.42547574112844144, + 0.4422161968410484, + 0.45895665255365536, + 0.4685026085643592, + 0.47804856457506306, + 0.4875945205857669, + 0.49714047659647076, + 0.5066864326071746, + 0.5210660174659459, + 0.5354456023247173, + 0.5498251871834886, + 0.5642047720422599, + 0.5785843569010313, + 0.5929639417598026, + 0.6073435266185739, + 0.6217231114773453, + 0.6361026963361166, + 0.6504822811948879, + 0.6648618660536593, + 0.6792414509124306, + 0.6936210357712019, + 0.7080006206299733, + 0.7223802054887446, + 0.745902837978681, + 0.7694254704686173, + 0.7929481029585537, + 0.81647073544849, + 0.8399933679384264, + 0.8635160004283627, + 0.8870386329182991, + 0.9105612654082355, + 0.9340838978981718, + 0.9576065303881082, + 0.9971074388490605, + 1.0 + ], + [ + -65.0, + 
-64.26484111153752, + -63.53089400597157, + -61.60862994684805, + -59.693587447610454, + -57.78622684869641, + -54.78162733894752, + -47.44281638250003, + -43.923664068893245, + -40.42547971951952, + -36.94382264194653, + -35.86018255657097, + -35.86018255657097, + -35.90082319848319, + -35.940222689051474, + -36.03568498464055, + -36.1226627732539, + -36.199698266090564, + -36.26566771729424, + -36.31957746558762, + -36.38574444995952, + -36.39748086911035, + -36.34853899987099, + -36.23254921100638, + -35.70317801563712, + -34.74329305521833, + -33.39016154402204, + -31.525963189927655, + -29.06741314493461, + -25.914719167503286, + -23.579832370127882, + -20.906268510967095, + -17.863529577751116, + -14.424432214913185, + -10.570911591208795, + -6.299280611440655, + -3.6819900003844763, + -0.9443230219848133, + 1.8997829803973731, + 4.831465377889435, + 7.8266072797326585, + 12.391377960424135, + 16.914709246024483, + 21.26616111118005, + 25.314373159763356, + 28.945880002554883, + 32.081532129323456, + 34.68490450668356, + 36.761733164052245, + 38.3520430966959, + 39.51802808542601, + 40.33127278420815, + 40.8622889734875, + 41.173818843521396, + 41.31780455634654, + 41.3350255167126, + 41.16764401415044, + 40.83173522931841, + 40.377735615974345, + 39.83391465243114, + 39.21753555745409, + 38.539831235711155, + 37.80808784291536, + 37.027134102231216, + 36.20064198740306, + 35.3319092157213, + 33.786997763595046, + 33.66993536143933 + ] + ], + "kchan with single": "a4600b6156ca96ce9338c37c0f464c8a", + "kchan failed to turn off single": "a4600b6156ca96ce9338c37c0f464c8a", + "kchan without single": "0e6f04378e8456893795c88970e056d3", + "before remove transition": "277655b8ea0d4483a3449f9a1c2c951b", + "after remove transition": "35033bca7e44b8b88945fdc3f71cd23a", + "KSTrans 1<->2 with cai": "a3ee87362e20cb7fafecfdae59305539", + "KSTrans 1<->2 change to cao": "5b1729b60c4f8f5f134498975a70be78", + "KSTrans 1<->2 change to cli": "b41397bb15644a328ee0fb8a0d4a587d", + "KSTrans 1<->2 has no ligand": "292155541558e8ed3e218a9122242562", + "bug? cl_ion not used but still ligand 0": "db46a89fe983ac7a3d30ea05be7a7e39", + "bug? 
4 ligands (cl_ion, 2 u238_ion, ca_ion), none in use": "e901ab7ab099e838de6636b72728d2ff", + "nahh cvode=False": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35058176245748, + -43.190787735212204, + -36.118088285063514, + -36.45179206325389, + -36.639683180337215, + -36.638457002083015, + -36.40717873794613, + -35.9065893307845, + -35.097384670820446, + -33.937842305000196, + -32.38113474668251, + -30.37270696340593, + -27.848279175113895, + -24.73347116525542, + -20.946869840499335, + -16.409664074547237, + -11.066410190793526, + -4.92132412488944, + 1.9115414024555495, + 9.162291238688454, + 16.40208546500034, + 23.119453306206903, + 28.86297968027117, + 33.37339782615963, + 36.626014123954135, + 38.775016546643236, + 40.05562683288041, + 40.70252387329813, + 40.907402655512314, + 40.80908948399294, + 40.500713597347094, + 40.0419194886514, + 39.470080601687044, + 38.80863611614157, + 38.07263962395035, + 37.27221551004364, + 36.41462453874912, + 35.505466340289104, + 34.54936780351269 + ] + ], + "hh": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35058176245748, + -43.190787735212204, + -36.118088285063514, + -36.45179206325389, + -36.639683180337215, + -36.638457002083015, + -36.40717873794613, + -35.9065893307845, + -35.097384670820446, + -33.937842305000196, + -32.38113474668251, + -30.37270696340593, + -27.848279175113895, + -24.73347116525542, + -20.946869840499335, + -16.409664074547237, + -11.066410190793526, + -4.92132412488944, + 1.9115414024555495, + 9.162291238688454, + 16.40208546500034, + 23.119453306206903, + 28.86297968027117, + 33.37339782615963, + 36.626014123954135, + 38.775016546643236, + 40.05562683288041, + 40.70252387329813, + 40.907402655512314, + 40.80908948399294, + 40.500713597347094, + 40.0419194886514, + 39.470080601687044, + 38.80863611614157, + 38.07263962395035, + 37.27221551004364, + 36.41462453874912, + 35.505466340289104, + 34.54936780351269 + ] 
+ ], + "nahh vs hh": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35058176245748, + -43.190787735212204, + -36.118088285063514, + -36.45179206325389, + -36.639683180337215, + -36.638457002083015, + -36.40717873794613, + -35.906589330785195, + -35.097384670821135, + -33.9378423050009, + -32.381134746683244, + -30.372706963406717, + -27.848279175111397, + -24.73347116525313, + -20.946869840497147, + -16.409664074553778, + -11.066410190799454, + -4.921324124887538, + 1.9115414024406947, + 9.16229123869197, + 16.402085464998397, + 23.119453306218556, + 28.862979680284397, + 33.37339782615999, + 36.626014123947385, + 38.77501654663574, + 40.05562683287618, + 40.7025238732948, + 40.907402655510545, + 40.80908948399219, + 40.50071359734767, + 40.04191948865277, + 39.470080601688025, + 38.80863611614215, + 38.072639623950714, + 37.2722155100439, + 36.41462453875059, + 35.50546634029127, + 34.54936780351494 + ] + ], + "coarse table": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35057353731332, + -43.19076273919519, + -36.118028197359834, + -36.45167173400669, + -36.63947244128102, + -36.638128246613746, + -36.40670319977631, + -35.90593007950302, + -35.09650942697628, + -33.93671594774348, + -32.37972444552205, + -30.37098184083644, + -27.8462162625112, + -24.731043766261713, + -20.944035487124513, + -16.406381298081506, + -11.062654000808466, + -4.917103112284723, + 1.9161169789007042, + 9.16700494886599, + 16.406639164162556, + 23.123529470149464, + 28.866345208426143, + 33.37596605596701, + 36.6278312215249, + 38.776209648584945, + 40.05634863168854, + 40.70291069895333, + 40.90755915588483, + 40.80909001334211, + 40.500606449434905, + 40.04173507382194, + 39.46983867084725, + 38.80835011480037, + 38.0723175638188, + 37.271862861083356, + 36.4142457058574, + 35.505065323089674, + 34.54894623396785 + ] + ], + "fine table": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 
0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35058143790863, + -43.19078651524354, + -36.11808543414796, + -36.45178654560699, + -36.63967374119015, + -36.63844218343882, + -36.40715696227037, + -35.90655919395056, + -35.09734452466214, + -33.93779051399031, + -32.38106968513363, + -30.372626869350217, + -27.84818194699513, + -24.733354587981978, + -20.94673248325976, + -16.4095047429514, + -11.066228101641586, + -4.921120162323548, + 1.9117619297986241, + 9.162517970869775, + 16.40230425318992, + 23.11964938003416, + 28.86314186600052, + 33.3735216799336, + 36.626101836097845, + 38.77507422246805, + 40.0556617534158, + 40.70254261257757, + 40.90741028375061, + 40.80908955731465, + 40.50070848453129, + 40.04191068211437, + 39.47006901959207, + 38.808622348727006, + 38.07262408172849, + 37.27219845537888, + 36.41460619485241, + 35.50544685740321, + 34.54934732153218 + ] + ], + "KSChanTable": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.350581727560694, + -43.190787560770694, + -36.11808787054618, + -36.45179112417501, + -36.63968116757228, + -36.638453183738804, + -36.40717219767617, + -35.906579415530615, + -35.09737066180619, + -33.93782350187764, + -32.381109551183634, + -30.372673868790475, + -27.84823627514335, + -24.733416159613824, + -20.946800496588263, + -16.409578321155717, + -11.066307286554071, + -4.921204578242057, + 1.9116742464128356, + 9.16243062737357, + 16.402221959601523, + 23.11957683357874, + 28.863082631401827, + 33.37347698363254, + 36.62607055881491, + 38.77505400446827, + 40.05564987617797, + 40.70253663471813, + 40.90740833024499, + 40.80909031467276, + 40.50071106595489, + 40.041914542463445, + 39.47007384052239, + 38.80862792360521, + 38.07263025306411, + 37.2722051364123, + 36.414613291685534, + 35.505454321140185, + 34.54935509637728 + ] + ], + "KSChanTable limits": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 
0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.33849228491098, + -43.163643068339915, + -36.07097489587372, + -36.37369005447766, + -36.51904941385565, + -36.46351831946079, + -36.16587776241627, + -35.586251558753055, + -34.68418043750873, + -33.41611225852698, + -31.732669691487146, + -29.576118014334263, + -26.87870412696572, + -23.563112518835887, + -19.547188103990457, + -14.756613561048631, + -9.150346455243032, + -2.762216492042823, + 4.247043365888272, + 11.552556791042054, + 18.685252708047464, + 25.13484789672232, + 30.499942004292606, + 34.591448169600156, + 37.423046410577776, + 39.205008775262876, + 40.19845812369661, + 40.63585653566567, + 40.693786757949844, + 40.49425984751048, + 40.11665962682792, + 39.61079324533732, + 39.007407314645505, + 38.32555625855712, + 37.577362159875726, + 36.770934098199, + 35.91209785997389, + 35.00539669639432, + 34.05466009572817 + ] + ], + "kchan without single cvode=True": [ + [ + 0.0, + 0.0027138443813536513, + 0.005427688762707303, + 0.012497719672848798, + 0.019567750582990293, + 0.026637781493131788, + 0.03803632185741194, + 0.055176791278722916, + 0.07231726070003389, + 0.08945773012134486, + 0.09999999999999779, + 0.10000000000000112, + 0.1029099907014636, + 0.10581998140292609, + 0.11305637171381606, + 0.12029276202470604, + 0.12752915233559603, + 0.13476554264648602, + 0.142001932957376, + 0.156152689337463, + 0.17030344571755002, + 0.18445420209763702, + 0.19860495847772403, + 0.22870181004821277, + 0.2587986616187015, + 0.28889551318919027, + 0.318992364759679, + 0.3379262672558148, + 0.3568601697519506, + 0.3757940722480864, + 0.4075885053962256, + 0.4393829385443647, + 0.45712250256401116, + 0.4748620665836576, + 0.49260163060330403, + 0.5103411946229505, + 0.5280807586425968, + 0.5458203226622433, + 0.55809408291673, + 0.5703678431712167, + 0.5826416034257034, + 0.59491536368019, + 0.6071891239346767, + 0.6194628841891634, + 0.6383136675491474, + 0.6508417401965236, + 0.6633698128438998, + 0.675897885491276, + 0.6884259581386523, + 0.7009540307860285, + 0.7134821034334047, + 0.7260101760807809, + 0.7586374237787784, + 0.7774648679384102, + 0.796292312098042, + 0.8151197562576737, + 0.8339472004173055, + 0.8527746445769373, + 0.8716020887365691, + 0.8904295328962009, + 0.9189698572512912, + 0.934101671072843, + 0.9492334848943949, + 1.0 + ], + [ + -65.0, + -64.18646893974743, + -63.37442173683355, + -61.265462310626305, + -59.16517900529351, + -57.07410699935114, + -53.72224075938467, + -48.72484743816321, + -43.77539189932893, + -38.86553349845051, + -35.86150319791169, + -35.86150319791169, + -35.90624370002775, + -35.94946275932843, + -36.050211120092214, + -36.14114860041652, + -36.22054975345217, + -36.287065691146644, + -36.33949794648734, + -36.396676305630784, + -36.38793872376296, + -36.30543391887692, + -36.14113154886276, + -35.484371558983845, + 
-34.3458430524747, + -32.63946323849545, + -30.264369516679647, + -28.37505912332529, + -26.137045071566778, + -23.50899357839703, + -18.0969136138025, + -11.274228077786306, + -6.810622872774583, + -1.897118742975757, + 3.3994496775855527, + 8.955345974805287, + 14.585573940163279, + 20.057109689544195, + 23.61688815328194, + 26.90496994574964, + 29.861310971662572, + 32.44587073815056, + 34.64139723529966, + 36.45281751496347, + 38.54241491524826, + 39.52944178963125, + 40.25066413245361, + 40.751931245039685, + 41.0754814485946, + 41.25815305421991, + 41.330368731021615, + 41.31617873152482, + 40.99784149517678, + 40.681581238398074, + 40.29660888716977, + 39.85736796408913, + 39.37260594580294, + 38.847538245824936, + 38.28572886748986, + 37.690047699471194, + 36.72789377540983, + 36.19091785177815, + 35.63641734732693, + 33.6574907875631 + ] + ], + "KSTrans cvode=True single=True": [ + [ + 0.0, + 0.002714579857699548, + 0.005429159715399096, + 0.01250802339214606, + 0.019586887068893024, + 0.02666575074563999, + 0.038074488617193376, + 0.055177667890721635, + 0.07228084716424989, + 0.08938402643777814, + 0.09999999999999779, + 0.10000000000000112, + 0.10291059249212993, + 0.10582118498425874, + 0.11307661749030269, + 0.12033204999634664, + 0.1275874825023906, + 0.13484291500843457, + 0.14209834751447853, + 0.15626672473703923, + 0.17043510195959993, + 0.18460347918216063, + 0.19877185640472134, + 0.22886651969260813, + 0.2589611829804949, + 0.2890558462683817, + 0.3191505095562685, + 0.3380751675621692, + 0.3569998255680699, + 0.3759244835739706, + 0.40711703029352764, + 0.43830957701308465, + 0.4560611389921882, + 0.47381270097129174, + 0.4915642629503953, + 0.5093158249294988, + 0.5270673869086023, + 0.5448189488877059, + 0.5571679236164115, + 0.5695168983451172, + 0.5818658730738229, + 0.5942148478025285, + 0.6065638225312342, + 0.6189127972599399, + 0.6390053707152302, + 0.6516300658765157, + 0.6642547610378012, + 0.6768794561990867, + 0.6895041513603722, + 0.7021288465216577, + 0.7240395290725938, + 0.7459502116235299, + 0.767860894174466, + 0.7897715767254021, + 0.8116822592763382, + 0.8335929418272743, + 0.8555036243782104, + 0.8774143069291465, + 0.9107030791479962, + 0.9439918513668459, + 0.9772806235856957, + 1.0 + ], + [ + -65.0, + -64.18623645893358, + -63.3739568108875, + -61.262334953249876, + -59.15940601945265, + -57.06570585934258, + -53.71082295307949, + -48.72420347968739, + -43.78529170086838, + -38.88577682636021, + -35.860522519901686, + -35.860522519901686, + -35.905220286495904, + -35.948396186229715, + -36.04925802543033, + -36.14025316406032, + -36.21964272577292, + -36.28606789974167, + -36.33832113129902, + -36.394826527253656, + -36.38517757291258, + -36.30149090614295, + -36.13570315386551, + -35.47579171470582, + -34.333602337548584, + -32.62289230177675, + -30.242622621840198, + -28.350394587764843, + -26.109317489550204, + -23.47805764870901, + -18.172588984630906, + -11.510256480457944, + -7.069226195790272, + -2.1753640553417957, + 3.1072608014785095, + 8.657983810800609, + 14.294521227280963, + 19.785227482513168, + 23.384350454658975, + 26.713270299175083, + 29.709589302187315, + 32.3311677796415, + 34.559170164493345, + 36.397608295036264, + 38.61867929346114, + 39.594433572000426, + 40.3037820374152, + 40.793219381426326, + 41.10548844853454, + 41.27776611049866, + 41.33102325817382, + 41.16698343549704, + 40.856744323481344, + 40.441767713040356, + 39.94695288717403, + 39.38817093819083, + 38.77500156136339, + 38.112912921460946, + 
37.02196872103532, + 35.83871275247566, + 34.57410947748538, + 33.668960797953524 + ] + ], + "KSTrans cvode=True single=False": [ + [ + 0.0, + 0.002709768417113118, + 0.005419536834226236, + 0.012467737126800708, + 0.01951593741937518, + 0.026564137711949654, + 0.037929262088732305, + 0.055097845142384624, + 0.07226642819603694, + 0.08943501124968926, + 0.09999999999999779, + 0.10000000000000112, + 0.10291145582236776, + 0.1058229116447344, + 0.11307991550945612, + 0.12033691937417784, + 0.12759392323889956, + 0.1348509271036213, + 0.142107930968343, + 0.15627534804233148, + 0.17044276511631995, + 0.18461018219030842, + 0.1987775992642969, + 0.22903970883675928, + 0.2593018184092217, + 0.28956392798168407, + 0.31982603755414646, + 0.3387490256211881, + 0.3576720136882297, + 0.37659500175527133, + 0.4082425221547906, + 0.4398900425543099, + 0.4575896418581874, + 0.4752892411620649, + 0.4929888404659424, + 0.5106884397698199, + 0.5283880390736975, + 0.546087638377575, + 0.5583664363719241, + 0.5706452343662731, + 0.5829240323606222, + 0.5952028303549712, + 0.6074816283493203, + 0.6197604263436693, + 0.638740762004745, + 0.6512852258987768, + 0.6638296897928085, + 0.6763741536868403, + 0.688918617580872, + 0.7014630814749038, + 0.7203417997511373, + 0.7392205180273709, + 0.7725059776170974, + 0.795951860064221, + 0.8193977425113447, + 0.8523399062881889, + 0.8747121928093768, + 0.8970844793305648, + 0.9194567658517527, + 0.9418290523729407, + 0.9642013388941286, + 0.9865736254153166, + 1.0 + ], + [ + -65.0, + -64.1877602666191, + -63.37700436972419, + -61.27474347411282, + -59.181132492958355, + -57.09670622041916, + -53.754981303073464, + -48.75001932178034, + -43.79336850882914, + -38.87663940544805, + -35.866991165425695, + -35.866991165425695, + -35.91200372966857, + -35.955494472645746, + -36.057133989724306, + -36.14890908476081, + -36.22908175206404, + -36.296294096587005, + -36.349339650322335, + -36.40740587183988, + -36.39939402836742, + -36.31744169185362, + -36.153510434608705, + -35.493109152933705, + -34.345534786248635, + -32.62334558222328, + -30.223975751739708, + -28.326681257358466, + -26.080295194662764, + -23.4434938293419, + -18.04084473810003, + -11.239905426288574, + -6.783793705974767, + -1.880526831727696, + 3.403206016191021, + 8.944357511885288, + 14.559274064730252, + 20.016774744953846, + 23.577771396760312, + 26.867484841563723, + 29.825756650898406, + 32.41241607074128, + 34.61007657880926, + 36.423545772890414, + 38.52691260206173, + 39.513421346446485, + 40.23392115326073, + 40.734347045851486, + 41.05700949154364, + 41.23879945351103, + 41.31206517486708, + 41.21127720501345, + 40.75809586829616, + 40.29283191918134, + 39.73984910470677, + 38.84561439873791, + 38.17791860693501, + 37.4645617499352, + 36.70535557626778, + 35.90398401465756, + 35.06577588472098, + 34.19406272609299, + 33.65550633247177 + ] + ], + "KSTrans cvode=False single=True": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, 
+ 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61693840588803, + -50.35058176245748, + -43.190787735212204, + -36.118088285063514, + -36.45179206325389, + -36.639683180337215, + -36.638457002083015, + -36.40717873794613, + -35.9065893307845, + -35.097384670820446, + -33.937842305000196, + -32.38113474668251, + -30.37270696340593, + -27.848279175113895, + -24.73347116525542, + -20.946869840499335, + -16.409664074547237, + -11.066410190793526, + -4.92132412488944, + 1.9115414024555495, + 9.162291238688454, + 16.40208546500034, + 23.119453306206903, + 28.86297968027117, + 33.37339782615963, + 36.626014123954135, + 38.775016546643236, + 40.05562683288041, + 40.70252387329813, + 40.907402655512314, + 40.80908948399294, + 40.500713597347094, + 40.0419194886514, + 39.470080601687044, + 38.80863611614157, + 38.07263962395035, + 37.27221551004364, + 36.41462453874912, + 35.505466340289104, + 34.54936780351269 + ] + ], + "KSTrans cvode=False single=False": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.618094153388654, + -50.35331957004725, + -43.19554637871293, + -36.12533991235038, + -36.461609726084326, + -36.652196654645756, + -36.65387759522344, + -36.425824092027604, + -35.9289090786643, + -35.123988299281976, + -33.96953035937861, + -32.41893323890815, + -30.417901759760593, + -27.90244075129113, + -24.798447882793944, + -21.024706403093568, + -16.50236960890041, + -11.17546930330218, + -5.046865255555875, + 1.7718268027648048, + 9.01398290198184, + 16.253758465186838, + 22.98075273301613, + 28.74174378996713, + 33.273534063204586, + 36.54728935111428, + 38.71440952508067, + 40.00906435711161, + 40.66613162356864, + 40.878074673197304, + 40.784574163129875, + 40.479474091262134, + 40.02294019882796, + 39.45269777509161, + 38.792413422880266, + 38.057284770176736, + 37.257527143950945, + 36.40045933736523, + 35.49171880280428, + 34.53595780412696 + ] + ], + "khh4 ivtype=0 ion=NonSpecific": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 
0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -56.92264886480129, + -48.967421990009726, + -41.115817020806084, + -33.33584552753264, + -32.92149473978101, + -32.29898575791654, + -31.396019260540804, + -30.135068302292193, + -28.432425744367603, + -26.19643593432356, + -23.32619300460945, + -19.712994074820607, + -15.248552102593335, + -9.846260979579451, + -3.4828285414206865, + 3.7383611292730956, + 11.522006809975645, + 19.36590902125762, + 26.653165959524607, + 32.84052558036581, + 37.63378137193018, + 41.03605389014615, + 43.2628919976747, + 44.61011241984578, + 45.353984535696355, + 45.70709897544323, + 45.81456099133785, + 45.7679422062151, + 45.62219756224159, + 45.40938704300586, + 45.148054129901276, + 44.84904934356837, + 44.518939606289855, + 44.161932239056085, + 43.780937104448626, + 43.378146197736974, + 42.95534857298508, + 42.51410153521215, + 42.055823915034765, + 41.581846925276864 + ] + ], + "khh4 ivtype=0 ion=k": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.61702352922038, + -50.350783819875204, + -43.19115530485599, + -36.11872628671981, + -36.45285606643255, + -36.641324347111066, + -36.640830998309696, + -36.41045697220133, + -35.91097256961707, + -35.1031194181889, + -33.94524068813066, + -32.39059888693879, + -30.384758700819685, + -27.863595087950504, + -24.75292012093821, + -20.971544075201837, + -16.440882609406156, + -11.10565474431586, + -4.970052827944583, + 1.852234096889334, + 9.092093133022857, + 16.321724496705333, + 23.03052153223972, + 28.76732297279475, + 33.27244840648859, + 36.52047913201449, + 38.66499630698969, + 39.94093883079571, + 40.58299450432298, + 40.78303015804343, + 40.68009412588732, + 40.36752538224106, + 39.905142993847385, + 39.33045713495111, + 38.66701001663449, + 37.92992937949733, + 37.12938856099484, + 36.27267466583069, + 35.36539303032151, + 34.41215742246829 + ] + ], + "khh4 ivtype=1 ion=NonSpecific": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, 
+ 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -56.92264886480129, + -48.967421990009726, + -41.115817020806084, + -33.33584552753264, + -32.92149473978101, + -32.29898575791654, + -31.396019260540804, + -30.135068302292193, + -28.432425744367603, + -26.19643593432356, + -23.32619300460945, + -19.712994074820607, + -15.248552102593335, + -9.846260979579451, + -3.4828285414206865, + 3.7383611292730956, + 11.522006809975645, + 19.36590902125762, + 26.653165959524607, + 32.84052558036581, + 37.63378137193018, + 41.03605389014615, + 43.2628919976747, + 44.61011241984578, + 45.353984535696355, + 45.70709897544323, + 45.81456099133785, + 45.7679422062151, + 45.62219756224159, + 45.40938704300586, + 45.148054129901276, + 44.84904934356837, + 44.518939606289855, + 44.161932239056085, + 43.780937104448626, + 43.378146197736974, + 42.95534857298508, + 42.51410153521215, + 42.055823915034765, + 41.581846925276864 + ] + ], + "khh4 ivtype=1 ion=k": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.59151808830934, + -50.29999001142348, + -43.11501251825745, + -36.01675948706978, + -36.32374691398067, + -36.4830790517387, + -36.450557345289006, + -36.18414016787205, + -35.64323725440067, + -34.78696403010512, + -33.57173657224199, + -31.948564334013327, + -29.86045652844534, + -27.24057838322941, + -24.01229173747115, + -20.09313432696096, + -15.406185489637394, + -9.90362227249964, + -3.606403343620764, + 3.343829188612731, + 10.643050907651677, + 17.83503795468507, + 24.404588969407058, + 29.92694870599027, + 34.18822577269685, + 37.20661400218113, + 39.162117996467536, + 40.297174825754574, + 40.84160486166514, + 40.9775607620104, + 40.83437347081322, + 40.49766951297116, + 40.02184720937168, + 39.440869055094346, + 38.776048538873226, + 38.041139083579225, + 37.245466063361704, + 36.395783277289574, + 35.49734781915183, + 34.554533419857584 + ] + ], + "khh4 ivtype=2 ion=NonSpecific": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 
0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -56.92264886480129, + -48.967421990009726, + -41.115817020806084, + -33.33584552753264, + -32.92149473978101, + -32.29898575791654, + -31.396019260540804, + -30.135068302292193, + -28.432425744367603, + -26.19643593432356, + -23.32619300460945, + -19.712994074820607, + -15.248552102593335, + -9.846260979579451, + -3.4828285414206865, + 3.7383611292730956, + 11.522006809975645, + 19.36590902125762, + 26.653165959524607, + 32.84052558036581, + 37.63378137193018, + 41.03605389014615, + 43.2628919976747, + 44.61011241984578, + 45.353984535696355, + 45.70709897544323, + 45.81456099133785, + 45.7679422062151, + 45.62219756224159, + 45.40938704300586, + 45.148054129901276, + 44.84904934356837, + 44.518939606289855, + 44.161932239056085, + 43.780937104448626, + 43.378146197736974, + 42.95534857298508, + 42.51410153521215, + 42.055823915034765, + 41.581846925276864 + ] + ], + "khh4 ivtype=2 ion=k": [ + [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984 + ], + [ + -65.0, + -57.51786282312795, + -50.13013684023657, + -42.832012108320036, + -35.60894365285124, + -35.78945905002407, + -35.81383627036566, + -35.63372279570802, + -35.201777496687846, + -34.4709436338266, + -33.392661874939364, + -31.914454637277142, + -29.977368917067103, + -27.5139606492686, + -24.447956875439914, + -20.697588665646453, + -16.185919166188974, + -10.862876464505057, + -4.743184837262516, + 2.042578710659906, + 9.204253601824274, + 16.296502515647866, + 22.8055199608176, + 28.29682704806782, + 32.54090654373155, + 35.54196663452814, + 37.47190800245638, + 38.57070559947728, + 39.06902952363845, + 39.15111548101207, + 38.948547391279355, + 38.54907068512086, + 38.00908543539925, + 37.36450833002551, + 36.638588675405444, + 35.84698178462745, + 35.000841536268105, + 34.10863229444654, + 33.177164854758416, + 32.21218074323528, + 31.218680285698994 + ] + ] +} \ No newline at end of file diff --git a/test/hoctests/tests/test_kschan.py b/test/hoctests/tests/test_kschan.py index 7f5494fc40..3774b37e56 100644 --- a/test/hoctests/tests/test_kschan.py +++ b/test/hoctests/tests/test_kschan.py @@ -1,35 +1,255 @@ +import math from neuron import h, gui +from neuron.expect_hocerr import expect_err +from neuron import expect_hocerr +import numpy as np +import os, sys, hashlib -# Cover KSChan::state_consist(int shift) in nrniv/kschan.cpp +expect_hocerr.quiet = False + +from neuron.tests.utils.capture_stdout import capture_stdout +from neuron.tests.utils.checkresult import Chk + +# Avoid needing different results depending on NRN_ENABLE_CORENEURON +if hasattr(h, "usetable_hh"): + h.usetable_hh = False + +# Create a helper for managing reference results +dir_path = os.path.dirname(os.path.realpath(__file__)) 
+chk = Chk(os.path.join(dir_path, "test_kschan.json")) + +if sys.argv[0].split("/")[-1] == "nrniv": + + def chkstdout(key, capture): + print(key, " not checked") + +else: + def chkstdout(key, capture): + chk(key, capture) + + +def chkpr(key): + chkstdout(key, capture_stdout("h.ks.pr()", True)) + + +# Cover KSChan::state_consist(int shift) in nrniv/kschan.cpp h.load_file("chanbild.hoc") -cb = h.ChannelBuild() -cb.khh() # HH potassium channel -s = h.Section(name="soma") -s.insert("khh") # exists in soma and has one state -# Generally one does not modify the name/structure of an inserted channel -h.psection() -cb.nahh() # now called nahh and has two states (HH sodium channel) -h.psection() - -# to cover the "shift" fragments. Need a POINT_PROCESS KSChan -# copy from nrn/share/demo/singhhchan.hoc -h( + +# For checking if a run is doing something useful. +# Activate the graphics by commenting out the "return" statement in +# hrun below. Need to press the "Continue" button after each pause. +# If the graph seems incorrect or unexpected, you can stop by hitting +# (ctrl)C in the terminal window and then pressing the "Continue" button. +# From the exception you can determine which hrun you are stopping at. +trec = h.Vector() +vrec = h.Vector() +grph = h.Graph() + + +def hrun(name, t_tol=0.0, v_tol=0.0, v_tol_per_time=0.0): + """ + Run the simulation, then compare against reference results. + The reference results are loaded from the `name` key in test_kschan.json. + Time values are compared using the relative tolerance `t_tol`. + Voltage values are linearly interpolated to the reference times, then + compared using a relative tolerance of v_tol + t * v_tol_per_time. + + The reference files are typically generated with GCC, and the tolerances + are typically driven by the requirements of NVHPC and oneAPI... 
""" -{objref ks, ksvec, ksgate, ksstates, kstransitions, tobj} + h.run() + ref_data = chk.get(name) + if ref_data is None: + raise Exception("No reference data for key: " + key) + ref_tv, ref_vv = ref_data + new_tv, new_vv = trec.to_python(), vrec.to_python() + assert len(ref_tv) == len(ref_vv) + assert len(ref_tv) == len(new_tv) + assert len(ref_tv) == len(new_vv) + match = True + max_diff_t, max_diff_v = 0.0, 0.0 + # Interpolate the new v values to the reference t values + def interp(new_t, old_t, old_v): + assert np.all(np.diff(old_t) > 0) + return np.interp(new_t, old_t, old_v) + + interp_vv = interp(ref_tv, new_tv, new_vv) + for ref_t, ref_v, new_t, new_v in zip(ref_tv, ref_vv, new_tv, interp_vv): + if not math.isclose(ref_t, new_t, rel_tol=t_tol): + diff_t = abs(ref_t - new_t) / max(abs(ref_t), abs(new_t)) + max_diff_t = max(max_diff_t, diff_t) + print("t diff", ref_t, new_t, diff_t, ">", t_tol) + match = False + v_tol_for_this_t = v_tol + v_tol_per_time * ref_t + if not math.isclose(ref_v, new_v, rel_tol=v_tol_for_this_t): + diff_v = abs(ref_v - new_v) / max(abs(ref_v), abs(new_v)) + max_diff_v = max(max_diff_v, diff_v) + print("v diff at t", ref_t, ref_v, new_v, diff_v, ">", v_tol_for_this_t) + match = False + if not match: + print("summary for", name) + if max_diff_t: + print("max t diff", max_diff_t) + if max_diff_v: + print("max v diff", max_diff_v) + assert match + return + grph.erase() + trec.printf() + vrec.printf() + vrec.line(grph, trec) + vrec.resize(0) + trec.resize(0) + grph.exec_menu("View = plot") + h.continue_dialog("continue") + + +def cell(): + s = h.Section(name="soma") + s.L = 3.18 + s.diam = 10 + s.insert("hh") + ic = h.IClamp(s(0.5)) + ic.dur = 0.1 + ic.amp = 0.3 + trec.record(h._ref_t, sec=s) + vrec.record(s(0.5)._ref_v, sec=s) + return s, ic + + +def test_1(): + cb = h.ChannelBuild() + cb.khh() # HH potassium channel + s, ic = cell() + s.insert("khh") # exists in soma and has one state + chkstdout("khh inserted", capture_stdout("h.psection()", True)) + # It is not supported (anymore) to change the number of variables + # of a mechanism while instances of that mechanism are active. + # In this case the change would be from 1 state to 2 states. + expect_err("cb.nahh()") # cb changes name and inserted na_ion before failure + cb.ks.name("khh") # change name back + chkstdout("khh same except for na_ion", capture_stdout("h.psection()", True)) + s.uninsert("khh") + cb.nahh() # try again + s.insert("nahh") + chkstdout("nahh now", capture_stdout("h.psection()", True)) + chkstdout("cb.ks.pr()", capture_stdout("cb.ks.pr()", True)) + assert cb.ks.ntrans() == 2.0 + assert cb.ks.nstate() == 2.0 + assert cb.ks.ngate() == 2.0 + assert cb.ks.nligand() == 0.0 + assert cb.ks.gate(0).nstate() == 1 + assert cb.ks.gate(0).power() == 3 + assert cb.ks.gate(0).power(3) == 3 + assert cb.ks.gate(0).sindex() == 0 + assert cb.ks.gate(0).index() == 0 + assert cb.ks.trans(1).index() == 1 + assert cb.ks.trans(0).ftype(0) == 3 + assert cb.ks.trans(0).ftype(1) == 2 + assert cb.ks.state(0).gate().index() == 0 + assert cb.ks.state(1).gate().index() == 1 + expect_err("cb.ks.trans(cb.ks.state(0), cb.ks.state(1))") + + # cover interface. 
Should verify return + cb.ks.trans(0).ab(h.Vector().indgen(-80, 60, 10), h.Vector(), h.Vector()) + cb.ks.trans(0).inftau(h.Vector().indgen(-80, 60, 10), h.Vector(), h.Vector()) + assert cb.ks.trans(0).f(0, -10) == 3.157187089473768 + assert cb.ks.trans(0).src() == cb.ks.state(0) + assert cb.ks.trans(0).target() == cb.ks.state(0) + assert cb.ks.trans(0).parm(0).to_python() == [1.0, 0.1, -40.0] + assert cb.ks.trans(0).parm(1).to_python() == [4.0, -0.05555555555555555, -65.0] + + expect_err("h.KSState()") # kss_cons + expect_err("h.KSGate()") # ksg_cons + expect_err("h.KSTrans()") # kst_cons + + # cover many mechanism runtime interface functions + h.tstop = 1 + for cvode in [1, 0]: + h.cvode_active(cvode) + hrun( + "nahh cvode={}".format(bool(cvode)), + t_tol=8e-9 if cvode else 0.0, + v_tol=2e-9 if cvode else 3e-11, + ) + + s.uninsert("nahh") + kss = cb.ks.add_hhstate("xxx") + assert cb.ks.nstate() == 3 + assert kss.name() == "xxx" + cb.ks.remove_state(kss.index()) + assert cb.ks.nstate() == 2 + kss = cb.ks.add_hhstate("xxx") + cb.ks.remove_state(kss) + cb.ks.ion("NonSpecific") + + cb.nahh() + s.insert("hh") + hrun("hh", v_tol=3e-11) # used to be the reference run + std = (trec.c(), vrec.c()) + # nahh gives same results as sodium channel in hh (usetable_hh is on) + s.gnabar_hh = 0 + s.insert("nahh") + s.gmax_nahh = 0.12 + + # used to compare to the reference run with 1e-9 tolerance + hrun("nahh vs hh", v_tol=7e-12) + # table + cb.ks.usetable(1) + # used to compare to the reference run with 0.5 tolerance + hrun("coarse table", v_tol=1e-11) + cb.ks.usetable(1, 1000, -80, 60) + # used to compare to the reference run with 1e-3 tolerance + hrun("fine table", v_tol=5e-12) + # cover usetable return info + vmin = h.ref(0) + vmax = h.ref(1) + n = cb.ks.usetable(vmin, vmax) + assert n == 1000 and vmin[0] == -80 and vmax[0] == 60 + + # cover KSChanTable + cb.ks.usetable(0) + xvec = h.Vector().indgen(-80, 60, 0.1) + avec = h.Vector() + bvec = h.Vector() + cb.ks.trans(0).ab(xvec, avec, bvec) + cb.ks.trans(0).set_f(0, 7, avec, xvec[0], xvec[xvec.size() - 1]) + # used to compare to the reference run with 1e-3 tolerance + hrun("KSChanTable", v_tol=4e-12) + aref = h.ref(0) + bref = h.ref(0) + cb.ks.trans(0).parm(0, aref, bref) + assert aref[0] == xvec[0] and bref[0] == xvec[xvec.size() - 1] + + # cover some table limit code. + cb.ks.usetable(1, 200, -50, 30) + # used to compare to the reference run with 20 tolerance (!) + hrun("KSChanTable limits", v_tol=2e-12) + + del cb, s, kss, ic, std + locals() + + +def mk_khh(chan_name, is_pnt=1): + # to cover the "shift" fragments. Need a POINT_PROCESS KSChan + # copy from nrn/share/demo/singhhchan.hoc (concept of shift is + # obsolete but such a channel is still needed for testing). 
+ h( + """ { ion_register("k", 1) } objref ks, ksvec, ksgate, ksstates, kstransitions, tobj { ksvec = new Vector() ksstates = new List() kstransitions = new List() - ks = new KSChan(1) + ks = new KSChan(%d) } // khh0 Point Process (Allow Single Channels) // k ohmic ion current // ik (mA/cm2) = khh0.g * (v - ek)*(0.01/area) { - ks.name("khh0") + ks.name("%s") ks.ion("k") ks.iv_type(0) ks.gmax(0.036) @@ -82,12 +302,169 @@ tobj.set_f(1, 2, ksvec.c.append(0.5, -0.0125, -65)) } { ksstates.remove_all kstransitions.remove_all } -{ ks.single(1) } -""" -) - -kchan = h.khh0(s(0.5)) -h.psection() -h.ks.single(0) -h.psection() -h.ks.single(1) +{ ks.single(%d) } + """ + % (is_pnt, chan_name, is_pnt) + ) + + +def test_2(): + print("test_2") + mk_khh("khh0") + s, ic = cell() + kchan = h.khh0(s(0.5)) + chkstdout("kchan with single", capture_stdout("h.psection()", True)) + assert kchan.nsingle(10) == 10.0 + assert kchan.nsingle() == 10.0 + expect_err("h.ks.single(0)") + chkstdout("kchan failed to turn off single", capture_stdout("h.psection()", True)) + del kchan + locals() + h.ks.single(0) + kchan = h.khh0(s(0.5)) + s.gkbar_hh = 0 + kchan.gmax = 0.036 + chkstdout("kchan without single", capture_stdout("h.psection()", True)) + h.cvode_active(1) + # At least executes KSChan::mulmat + hrun( + "kchan without single cvode=True", t_tol=2e-7, v_tol=1e-11, v_tol_per_time=5e-7 + ) + h.cvode_active(0) + + # location coverage + assert kchan.has_loc() == True + assert kchan.get_loc() == 0.5 + h.pop_section() + assert kchan.get_segment() == s(0.5) + kchan.loc(s(1)) + assert kchan.get_segment() == s(1) + + # remove transition and state + del kchan + locals() + assert h.ks.nstate() == 5 + assert h.ks.ntrans() == 4 + chkpr("before remove transition") + h.ks.remove_transition(0) + chkpr("after remove transition") + assert h.ks.ntrans() == 3 + h.ks.remove_transition(h.ks.add_transition(0, 1)) + h.ks.add_transition(0, 1) + h.ks.remove_transition(h.ks.trans(h.ks.state(0), h.ks.state(1))) + h.ks.remove_state(0) + assert h.ks.nstate() == 4 + assert h.ks.vres(0.01) == 0.01 + assert h.ks.rseed(10) == 10 + + del s, ic + locals() + + +def test_3(): + print("test_3") + # ligand tests (mostly for coverage) start with fresh channel. + mk_khh("khh2") + h.ion_register("ca", 2) + h.ion_register("cl", -1) + s, ic = cell() + + # replace 1<->2 transition with ligand sensitive transition + expect_err('h.ks.trans(h.ks.state(1), h.ks.state(2)).type(2, "ca")') + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(3, "cai") + chkpr("KSTrans 1<->2 with cai") + assert h.ks.ligand(0) == "ca_ion" + assert h.ks.trans(h.ks.state(1), h.ks.state(2)).ligand() == "cai" + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(2, "cao") + chkpr("KSTrans 1<->2 change to cao") + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(3, "cli") + chkpr("KSTrans 1<->2 change to cli") + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(0) + chkpr("KSTrans 1<->2 has no ligand") + + # try for a few more lines of coverage by using ligands for two KSTrans + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(3, "cai") + h.ks.trans(h.ks.state(2), h.ks.state(3)).type(3, "cli") + + for cvon in [1, 0]: + for singleon in [1, 0]: + h.cvode_active(cvon) + h.ks.single(singleon) + kchan = h.khh2(s(0.5)) + t_tol = 0.0 + tols = {} + if cvon: + tols["v_tol"] = 1e-12 + if singleon: + tols["t_tol"] = 6e-8 + tols["v_tol_per_time"] = 1e-8 + else: + # seems a bit high? 
+ tols["t_tol"] = 2e-6 + tols["v_tol_per_time"] = 6e-8 + else: + tols["v_tol"] = 8e-11 + hrun( + "KSTrans cvode={} single={}".format(bool(cvon), bool(singleon)), **tols + ) + del kchan + locals() + + h.ks.trans(h.ks.state(2), h.ks.state(3)).type(3, "cai") + h.ks.trans(h.ks.state(2), h.ks.state(3)).type(0) + chkpr("bug? cl_ion not used but still ligand 0") + h.ion_register("u238", 3) + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(3, "u238i") + h.ks.trans(h.ks.state(2), h.ks.state(3)).type(2, "u238o") + h.ks.trans(h.ks.state(1), h.ks.state(2)).type(0) + h.ks.trans(h.ks.state(2), h.ks.state(3)).type(0) + chkpr("bug? 4 ligands (cl_ion, 2 u238_ion, ca_ion), none in use") + + del s, ic + locals() + + +def test_4(): + print("test_4") + # KSChan.iv_type tests, mostly for coverage + mk_khh("khh3") + kpnt = h.ks + kpnt.single(0) + mk_khh("khh4", is_pnt=False) + kden = h.ks + s, ic = cell() + + for ivtype in range(3): + for ion in ["NonSpecific", "k"]: + kpnt.ion(ion) + kpnt.iv_type(ivtype) + kchan = h.khh3(s(0.5)) + kden.ion(ion) + kden.iv_type(ivtype) + s.insert("khh4") + s.gkbar_hh = 0 + if ivtype == 2 and ion == "k": + s(0.5).khh4.pmax = 0.00025 # works + assert s.gmax_khh4 == 0.00025 + s.gmax_khh4 = 0.00025 # bug python should know about pmax_khh4 + kchan.pmax = 2.5e-10 + else: + s.gmax_khh4 = 0.036 / 2.0 + kchan.gmax = 0.036 / 2.0 + hrun("khh4 ivtype={} ion={}".format(ivtype, ion), v_tol=2e-7) + s.uninsert("khh4") + del kchan + locals() + + del s, ic + locals() + + +if __name__ == "__main__": + test_1() + test_2() + test_3() + test_4() + + chk.save() + print("DONE") diff --git a/test/hoctests/tests/test_loadbal.hoc b/test/hoctests/tests/test_loadbal.hoc new file mode 100644 index 0000000000..1ced8175c3 --- /dev/null +++ b/test/hoctests/tests/test_loadbal.hoc @@ -0,0 +1,18 @@ +proc exit_with_error() { + nrnpython("import sys; sys.exit(1)") +} + +objref mt +mt = new MechanismType(0) + +// check for non ion type +mt.select("hh") +if (mt.is_ion() != 0) { + exit_with_error() +} + +// check for ion type +mt.select("k_ion") +if (mt.is_ion() != 1) { + exit_with_error() +} diff --git a/test/hoctests/tests/test_mechfunc.py b/test/hoctests/tests/test_mechfunc.py new file mode 100644 index 0000000000..d44a37422a --- /dev/null +++ b/test/hoctests/tests/test_mechfunc.py @@ -0,0 +1,167 @@ +# test pp.func(...) and sec(x).mech.func(...) 
+ +from neuron import h +from neuron.expect_hocerr import expect_err, set_quiet +from math import isclose + +set_quiet(False) + + +def fc(name, m): # callable + return getattr(name, m) + + +def varref(name, m): # variable reference + return getattr("_ref_" + name, m) + + +def model(): # 3 cables each with nseg=3 + cables = [h.Section(name="cable%d" % i) for i in range(3)] + mechs = [] + for c in cables: + c.nseg = 3 + c.insert("sdata") + c.insert("sdatats") + for seg in c: + mechs.append(h.SData(seg)) + mechs.append(h.SDataTS(seg)) + mechs.append(seg.sdata) + mechs.append(seg.sdatats) + return mechs # cable section stay in existence since mechs ref them + + +def test1(): + print("test1") + s = h.Section() # so we can delete later and verify mechs is still ok + s.nseg = 10 + mechs = model() + + def tst(): + for i, m in enumerate(mechs): + m.A(i, 2 * i, 3 * i) + for i, m in enumerate(mechs): + assert m.a == float(i) + assert m.b == float(2 * i) + assert m.c[1] == float(3 * i) + + tst() + h.finitialize() + tst() + del s + tst() + h.finitialize() + tst() + + for sec in h.allsec(): + h.delete_section(sec=sec) + for i in range(4): + expect_err("mechs[i].A(0, 0, 0)") + + del mechs, sec, i + locals() + + +def refs(mech): + sec = mech.segment().sec + seg = mech.segment() + Af = mech.A + aref = mech._ref_a + return sec, seg, mech, Af, aref + + +def mech_expect_invalid(mech, Af, aref): + expect_err("Af(0, 0, 0)") + expect_err("aref[0] = 1.0") + assert "died" in str(aref) + assert str(mech) in [ + "", + "", + ] + expect_err("print(mech.name())") + del mech, Af, aref + locals() + + +def test2(): + print("test2") + mechs = model() + sec, seg, mech, Af, aref = refs(mechs[-1]) + assert Af.name() == "sdatats.A" # covers NPyMechObj_name + expect_err("mech.Aexcept(5)") # covers catch + assert Af.mech() == mech # covers NPyMechFunc_mech + assert Af.__repr__() == "sdatats.A" # covers pymechfunc_repr + assert "A" in mech.__dict__ + # mech uninserted, should invalidate mechs[-1] + mechs[-1].segment().sec.uninsert(mechs[-1].name()) + mech_expect_invalid(mech, Af, aref) + del mechs, sec, seg, mech, Af, aref + locals() + + +def test3(): + print("test3") + mechs = model() + sec, seg, mech, Af, aref = refs(mechs[-1]) + # internal segment destroyed, should invalidate mechs[-1] + sec.nseg = 1 + mech_expect_invalid(mech, Af, aref) + # seg exists but refers to the internal segment containing x + assert seg.sec == sec + assert seg.x == 5.0 / 6.0 + assert str(seg) == "cable2(0.833333)" + del mechs, sec, seg, mech, Af, aref + locals() + + +def test4(): + print("test4") + mechs = model() + sec, seg, mech, Af, aref = refs(mechs[-1]) + # section deleted, should invalidate mechs[-1] + h.delete_section(sec=mechs[-1].segment().sec) + mech_expect_invalid(mech, Af, aref) + del mechs, sec, seg, mech, Af, aref + locals() + + +def test5(): + print("test5") + mechs = model() + for sec in h.allsec(): + for seg in sec: + for mech in seg: + for rv in mech: + assert rv.mech() == mech + assert rv.mech().segment() == seg + assert rv.mech().segment().sec == sec + + +def test6(): + print("test6") + mechs = model() + m = mechs[0] + expect_err("m.ft(1)") + m.table_ft(5) + assert m.ft(8) == 5.0 + + vx = h.Vector([-100, 100]) + vy = vx.c().mul(2) + m.table_ft(vy, vx) + assert isclose(m.ft(5), 10.0) + assert isclose(m.ft(10), 20.0) + assert isclose(m.foo(10), m.ft(10)) + m.a = 3 + assert isclose(m.bar(), 2 * m.a) + + del mechs, m + locals() + + +if __name__ == "__main__": + test1() + test2() + test3() + test4() + test5() + test6() + h.topology() 
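Note on the API exercised by test_mechfunc.py above: the tests call MOD-file FUNCTIONs directly on point-process and density-mechanism instances (pp.func(...) and sec(x).mech.func(...)) and verify that the handles become invalid when the mechanism is uninserted, the segment disappears, or the section is deleted. A minimal usage sketch of that call pattern follows; it assumes a hypothetical compiled density mechanism "mymech" that defines FUNCTION f() and a RANGE variable a (the names are illustrative only).

    from neuron import h

    s = h.Section(name="soma")
    s.insert("mymech")  # hypothetical mechanism defining FUNCTION f() and RANGE a
    seg = s(0.5)
    seg.mymech.a = 2.0       # set a RANGE variable on this particular instance
    val = seg.mymech.f(3.0)  # call the instance-level MOD FUNCTION, as test_mechfunc.py does with A()
    print(val)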
diff --git a/test/hoctests/tests/test_mode.py b/test/hoctests/tests/test_mode.py new file mode 100644 index 0000000000..476a2f6d66 --- /dev/null +++ b/test/hoctests/tests/test_mode.py @@ -0,0 +1,19 @@ +from neuron import h + + +"""Test to make sure mode for show can be changed with and without interviews""" + +# PlotShape +ps = h.PlotShape() + +for i in range(3): + ps.show(i) + if ps.show() != i: + raise RuntimeError("PlotShape error") + +try: + ps.show(3) + raise Exception("ps.show should only take 0, 1, or 2") +except RuntimeError: + # RuntimeError is expected because ps.show(3) should fail + ... diff --git a/test/hoctests/tests/test_neurondemo.py b/test/hoctests/tests/test_neurondemo.py index c6a89a47e5..b3c8d6bcbf 100644 --- a/test/hoctests/tests/test_neurondemo.py +++ b/test/hoctests/tests/test_neurondemo.py @@ -36,6 +36,7 @@ def run(cmd, input): # HOC: select demo, run, and print all lines on all Graphs input = r""" proc dodemo() { + usetable_hh = 0 // Compatible with -DNRN_ENABLE_CORENEURON=ON demo(%d) run() printf("\nZZZbegin\n") @@ -126,6 +127,48 @@ def neurondemo(extra, input): # we should have munched everything assert len(data) == 0 key = "demo%d" % i - chk(key, rich_data) + + if os.uname().sysname == "Darwin": + # Sometimes a Graph y value str differs in last digit by 1 + # Perhaps a locale issue? But float32->float64->str can differ + # between machines. For this reason, if a number str is not + # identical, demand the relative difference < 1e-5 (float32 accuracy) + err = 0 + try: + err = 0 + chk(key, rich_data) + except AssertionError: + err = 1 + if err: + from math import isclose + + reltol = 1e-5 + std = chk.d[key] + + for ig, gstd in enumerate(std): + gname = gstd[0] + for iline, line in enumerate(gstd[1]): + for coord in line: + vstd = [float(a) for a in line[coord]] + vd = [float(a) for a in rich_data[ig][1][iline][coord]] + if vstd != vd: + for i, sval in enumerate(vstd): + if not isclose(sval, vd[i], rel_tol=reltol): + print( + gname, + iline, + coord, + i, + ": %g %g" % (sval, vd[i]), + ) + assert isclose(sval, vd[i], rel_tol=reltol) + print( + gname, + iline, + coord, + " float32 not identical but within rel_tol=%g" % reltol, + ) + else: + chk(key, rich_data) chk.save() diff --git a/test/hoctests/tests/test_nrniv-launch.py b/test/hoctests/tests/test_nrniv-launch.py index 3056967851..c9407da81d 100644 --- a/test/hoctests/tests/test_nrniv-launch.py +++ b/test/hoctests/tests/test_nrniv-launch.py @@ -1,23 +1,39 @@ +import os +import shutil +import subprocess from sys import platform if platform == "win32": # skip the test. # Cannot get subprocess.run to feed stdin to nrniv quit() -import subprocess - -def srun(cmd, inp): - print(cmd, inp) - cp = subprocess.run( - cmd, shell=True, input=inp, capture_output=True, text=True, timeout=5 +def nrniv(args, input): + print(args, input) + # nrniv is a NEURON executable, so it will be linked against any sanitizer + # runtimes that are required => LD_PRELOAD is not needed. Delete LD_PRELOAD + # if it is present, because if dynamic Python is enabled then nrniv will + # run a bash subprocess, and bash + LD_PRELOAD=/path/to/libtsan.so crashes. 
+ env = os.environ.copy() + try: + del env["LD_PRELOAD"] + print("Unset LD_PRELOAD before running nrniv") + except KeyError: + pass + return subprocess.run( + [shutil.which("nrniv")] + args, + env=env, + shell=False, + input=input, + capture_output=True, + text=True, + timeout=5, ) - return cp.returncode, cp.stderr, cp.stdout -r = srun( - 'nrniv -isatty -c "a=5" -', - r""" +r = nrniv( + args=["-isatty", "-nobanner", "-nogui", "-c", "a=5", "-"], + input=r""" func square() { return $1*$1 } @@ -27,8 +43,11 @@ def srun(cmd, inp): """, ) -assert r[0] == 0 -print(r[2]) -assert "square(a)=25" in r[2] +print("status={}".format(r.returncode)) +print("stdout\n----\n{}----".format(r.stdout)) +print("stderr\n----\n{}----".format(r.stderr)) +assert r.returncode == 0 +assert "square(a)=25" in r.stdout if platform != "darwin": # Mac does not print the "oc>" prompt - assert "oc>quit()" in r[2] + assert "oc>quit()" in r.stdout +assert len(r.stderr) == 0 diff --git a/test/hoctests/tests/test_random.hoc b/test/hoctests/tests/test_random.hoc new file mode 100644 index 0000000000..61f08a0dab --- /dev/null +++ b/test/hoctests/tests/test_random.hoc @@ -0,0 +1,79 @@ +load_file("expect_err.hoc") + +objref rt, z +z = new List("NMODLRandom") +proc assert() { + if ($1 == 0) { + hoc_execerror("assert", "") + } +} + +// ARTIFICIAL_CELL syntax tests (same as POINT_PROCESS wrt RANDOM) +rt = new RanArt() + +print rt.ran1 +assert(z.count == 0) + +assert(rt.ran1.set_seq(5).set_ids(7,8,9).get_seq() == 5) +assert(rt.ran1.get_ids().x[2] == 9) +assert(z.count == 0) + +objref r +r = rt.ran1 +r.set_seq(25) +assert(rt.ran1.get_seq() == 25) +assert(z.count == 1) +assert(r.get_ids().x[1] == 8) +objref r +assert(z.count == 0) + +create cable +access cable +nseg=3 +insert rantst +finitialize(-70) + +r = cable.ran1_rantst +assert(cable.ran1_rantst.set_seq(10).get_seq() == 10) +assert(cable.ran1_rantst(.5).get_ids().eq(r.get_ids())) + +for (x,0) { + r = cable.ran1_rantst(x) + assert(ran1_rantst(x).get_ids().eq(r.get_ids())) +} +objref r +assert(z.count == 0) + +// test syntax in a cell type +begintemplate Cell +public cable, rt +create cable +objref rt +proc init() { + create cable + cable { + nseg = 3 + insert rantst + rt = new RanPP(.1) + } +} +endtemplate Cell + +objref cell +cell = new Cell() +cell.cable print ran2_rantst +cell.cable print ran2_rantst(.1) +expect_err("cell.cable print ran2_rantst(.1, .2)") +print cell.cable.ran2_rantst +print cell.cable.ran2_rantst(.1) +expect_err("print cell.cable.ran2_rantst(.1, .2)") +cell.cable.ran2_rantst(.1).set_seq(10).set_ids(8,9,10) +cell.cable.ran2_rantst(.5).set_seq(11).set_ids(1,2,3) +assert( cell.cable.ran2_rantst(.1).get_seq() == 10) + +objref rp +rp = cell.rt +objref cell // but rp is still a reference to unlocated point process. 
+expect_err("rp.ran1") + + diff --git a/test/hoctests/tests/test_random.json b/test/hoctests/tests/test_random.json new file mode 100644 index 0000000000..4ad254d50c --- /dev/null +++ b/test/hoctests/tests/test_random.json @@ -0,0 +1,175 @@ +{ + "results": [ + 0.17893146859041148, + 2.5858652899862125, + 0.5900787127623134, + 13.879620017511858, + 2.5285665260297696, + -3.8255380765067546, + 0.6195943787182694, + 0.8640101598892337, + 0.8705624195582786, + 0.5816326112923024, + 0.6824968245405592, + 12.935409682585064, + 5.5629342829617965, + 15.621557479479236, + 0.00018058368000653952, + 0.7241277485796884, + 0.5950188712228511, + 1.4151762421390146, + -1.0273020350279256, + 11.370265127306276, + 1.5457504368518737, + 7.40405624730305, + 0.7815047526309489, + 0.9767377004081529, + 0.9199947437923414, + 1.770347276557777, + 1.1348050752784042, + 10.21118748695329, + 0.8290698041860384, + 3.2620640633572284, + 0.7935756776031163, + 0.6215100738635496, + 13.0, + 1.0, + 9.0, + 1.0, + 9.0, + 1.0, + 9.0, + 1.0 + ], + "bbsavestate 1 to 2": [ + [ + -0.7247871714935129, + 0.7219008192457033, + 0.5391448928750727, + -0.22338622131375607, + -0.5209245315875433, + -0.20192660100822468, + -0.40971682383225255, + 0.40125116545729983, + -1.018708027424587, + -0.6435348201983898, + -1.110505571561422, + -0.5456431266293069, + -0.6431811174215588, + -0.2050222092130618, + -0.41871095517167045, + 2.1327022497045847, + 1.2475561990837372, + -0.8148916287356835, + 1.3637834172361567, + 0.44911001868255424, + -0.8038191925528606, + -1.0182609292625484, + 0.814562018050233, + -0.2001845683092376, + -0.7036837840067589, + -0.1749096487126645, + -0.1384874349183572, + -0.27802269131343627, + 0.5024084209351181, + 0.2857886341240531, + 1.1578619121493123, + 0.8320672365682753, + 0.8388016796407406, + -2.204403101188663, + -0.41677220453431346, + -0.3493109618344672, + 0.6394206556529147, + 0.433099968696616, + -1.6289203897217996, + -0.5961253470352493, + -0.028642516485346325 + ], + [ + -0.007237015674221396, + 1.0485038145975802, + 0.5666028312087761, + 1.5478221474628917, + -1.9262214185465736, + -1.1342009341579276, + 0.7839089952205353, + -2.1453640858801464, + 0.5459020601695509, + 0.5702794219336828, + -2.7987171853712787, + -0.5441341689538746, + 0.16498316711023667, + 0.5332911939974267, + 1.5636051898318526, + -0.6390453305292507, + -0.14494230468386957, + -0.7174754220603327, + 0.0539835666341803, + 0.9773045846743287, + 0.36026791233149735, + -1.1293229505809468, + -0.6829744883194595, + -0.6966328933631747, + -1.5001099642041043, + -0.07403611736759062, + -0.8287345152039473, + -1.7481478211594699, + -0.0019827309900973702, + -0.2831062597817745, + -1.3860153441895837, + -0.7725712206688502, + 0.9644474198597336, + 0.038712287163401804, + -0.49387615024354914, + -0.5149700079858756, + -0.06700819948825379, + -1.1356059525253281, + -0.4846497787175894, + -0.05689192371120626, + 0.31197352531127626 + ], + [ + 0.952070199416881, + 0.952070199416881, + 0.952070199416881, + 0.952070199416881, + 0.952070199416881, + 0.952070199416881, + 0.952070199416881, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.1554634134011021, + 1.4540666926704917, + 1.4540666926704917, + 1.4540666926704917, + 1.4540666926704917, + 1.4540666926704917, + 1.4540666926704917, + 1.4540666926704917, + 1.633567197773812, + 
1.633567197773812, + 1.633567197773812, + 1.633567197773812, + 1.7246756477971599, + 1.7246756477971599, + 1.7246756477971599, + 1.7246756477971599, + 1.7246756477971599, + 1.7246756477971599, + 1.7246756477971599, + 1.8967211458457505, + 1.8967211458457505, + 1.8967211458457505, + 1.8967211458457505 + ] + ] +} diff --git a/test/hoctests/tests/test_random.py b/test/hoctests/tests/test_random.py new file mode 100644 index 0000000000..90d60389dd --- /dev/null +++ b/test/hoctests/tests/test_random.py @@ -0,0 +1,178 @@ +from neuron import h +from neuron.expect_hocerr import expect_err, set_quiet +from neuron.tests.utils.checkresult import Chk +import os +import math + +pc = h.ParallelContext() + +dir_path = os.path.dirname(os.path.realpath(__file__)) +chk = Chk(os.path.join(dir_path, "test_random.json")) + +set_quiet(False) + +z = h.List("NMODLRandom") + +# ARTIFICIAL_CELL syntax tests +rt = h.RanArt() + +print(rt.ran1) +assert z.count() == 0 +assert rt.ran1.set_seq(5).set_ids(7, 8, 9).get_seq() == 5 +assert rt.ran1.get_ids().x[2] == 9 +assert z.count() == 0 + +x = rt.ran1 +x.set_seq(25) +assert rt.ran1.get_seq() == 25 +assert z.count() == 1 +assert x.get_ids().x[1] == 8 + +y = rt.ran1.set_seq +y(50) +assert x.get_seq() == 50 + +del x, y +assert z.count() == 0 + +x = rt.ran1 +expect_err("rt.ran1 = rt.ran2") # cannot assign +del rt +expect_err("x.get_seq()") +del x +assert z.count() == 0 + +# density mechanism tests +cable = h.Section(name="cable") +cable.nseg = 3 +cable.insert("rantst") +nr = [(seg.x, seg.rantst.ran1) for seg in cable] +for r in nr: + assert r[1].get_ids().eq(cable(r[0]).ran1_rantst.get_ids()) +del nr, r +assert z.count() == 0 + +rt = h.RanArt() +r1 = rt.ran1 +# wrap around at end +assert r1.set_seq(2**34 - 1).get_seq() == (2**34 - 1) +r1.uniform() +assert r1.get_seq() == 0 +assert r1.set_seq(2**34 + 1).get_seq() == 1 +assert r1.set_seq(-10).get_seq() == 0 # all neg setseq to 0 +assert r1.set_seq(2**40 - 1).get_seq() == 17179869183.0 # up to 2**40 wrap +assert r1.set_seq(2**40 + 1).get_seq() == 0 # all above 2**40 setseq to 0 + +# negexp(mean) has proper scale +r1.set_seq(0) +x = rt.negexp0() +r1.set_seq(0) +assert math.isclose(rt.negexp1(5), 5 * x) + +r1 = h.NMODLRandom() +expect_err("r1.uniform()") +del r1 + +# test the mod random_... 
functions +rt = h.RanArt() +cable = h.Section(name="cable") +cable.nseg = 3 +cable.insert("rantst") +mlist = [rt] +mlist.extend(seg.rantst for seg in cable) +for i, m in enumerate(mlist): + m.ran1.set_ids(i, 1, 0).set_seq(0) + m.ran2.set_ids(i, 2, 0).set_seq(0) + +results = [] +for m in mlist: + results.extend([m.uniform0(), m.negexp0(), m.normal0()]) + results.extend( + [ + m.uniform2(10, 20), + m.negexp1(5), + m.normal2(5, 8), + ] + ) + results.extend([m.ran1.uniform(), m.ran2.uniform()]) +for m in mlist: + results.extend([m.ran1.get_seq(), m.ran2.get_seq()]) + +assert z.count() == 0 + +chk("results", results, tol=1e-12) + +# test access to hoc cell class +h( + """ +begintemplate Cell +public cable, rt +create cable +objref rpp, rart +proc init() { + create cable + cable { + nseg = 3 + insert rantst + rpp = new RanPP(0.1) + } + rart = new RanArt() + rart.mean = 0.1 +} +endtemplate Cell +""" +) +cell = h.Cell() +cell.cable(0.1).ran2_rantst.set_seq(10).set_ids(8, 9, 10) +cell.cable(0.5).ran2_rantst.set_seq(11).set_ids(1, 2, 3) +assert cell.cable(0.1).rantst.ran2.get_seq() == 10 + +rp = cell.rpp +del cell +expect_err("rp.ran2") + + +def set_gids(cells): + gid = 0 + for cell in cells: + for port in [cell.cable(0.5)._ref_v, cell.rart]: + gid += 1 + pc.set_gid2node(gid, pc.id()) + pc.cell(gid, h.NetCon(port, None, sec=cell.cable)) + + +def test_bbsavestate(): + cell = h.Cell() + set_gids([cell]) ## BBSaveState requires that real cells have gids + mechs = [cell.cable(0.5).rantst, cell.rpp, cell.rart] + rec = [] + for i, m in enumerate(mechs): + m.ran2.set_ids(i, 2, 3) + rec.append(h.Vector().record(m._ref_x2, sec=cell.cable)) + + def run(tstop, init=True): + if init: + pc.set_maxstep(10) + h.finitialize(-65) + else: + h.frecord_init() + pc.psolve(tstop) + + # rerunning in two parts gives same result. + run(1) + bbss = h.BBSaveState() + bbss.save("bbss_random.txt") + + # second half is our standard + run(2, False) + chk("bbsavestate 1 to 2", [v.to_python() for v in rec], tol=1e-12) + + # savestate restore and redo second half. + bbss.restore("bbss_random.txt") + run(2, False) + chk("bbsavestate 1 to 2", [v.to_python() for v in rec], tol=1e-12) + + +test_bbsavestate() + +chk.save() diff --git a/test/hoctests/tests/test_setdata.py b/test/hoctests/tests/test_setdata.py new file mode 100644 index 0000000000..0e4e2400e1 --- /dev/null +++ b/test/hoctests/tests/test_setdata.py @@ -0,0 +1,96 @@ +from neuron import h +from neuron.expect_hocerr import expect_err, set_quiet + +set_quiet(False) + + +def test_setdata(sfx): # "" or "ts" + expect_err("h.A_sdata%s(5,6,7)" % sfx) # No data for A_sdata. + expect_err("h.setdata_sdata%s()" % sfx) # not enough args + expect_err("h.setdata_sdata%s(0.5)" % sfx) # Section access unspecified + + fk = h.k_sdata if sfx == "" else h.k_sdatats # does not use RANGE + assert fk(4) == 10.0 + + s = h.Section("s") + s.insert("sdata%s" % sfx) + s.nseg = 5 + + setdata = h.setdata_sdata if sfx == "" else h.setdata_sdatats + func = h.A_sdata if sfx == "" else h.A_sdatats + + for seg in s: + setdata(seg) + x = seg.x + func(x, 10 * x, 100 * x) + assert fk(3) == 6.0 + + for seg in s: + x = seg.x + mech = seg.sdata if sfx == "" else seg.sdatats + assert mech.a == x + assert mech.b == 10 * x + assert mech.c[1] == 100 * x + + # What happens if we setdata, call a function that sweeps over all + # instances, and then call an instance function. We expect the value + # in accordance with the last setdata. 
+ for seg in s: # set a value that does not get changed by finitialize + x = seg.x + mech = seg.sdata if sfx == "" else seg.sdatats + mech.c[2] = 1000 * x + fc = h.C_sdata if sfx == "" else h.C_sdatats + setdata(s(0.5)) + assert fc(2) == 1000 * 0.5 + h.finitialize() + assert fc(2) == 1000 * 0.5 + + del s, setdata, seg, mech, x + locals() + + +# what happens if the _extcall_prop becomes invalid +def test_prop_invalid(sfx): # "" or "ts" + s = h.Section("s2") + s.insert("sdata%s" % sfx) + s.nseg = 5 + setdata = h.setdata_sdata if sfx == "" else h.setdata_sdatats + func = h.A_sdata if sfx == "" else h.A_sdatats + fk = h.k_sdata if sfx == "" else h.k_sdatats # does not use RANGE + + def set(x, y): + seg = s(x) + setdata(seg) + func(y * 1, y * 10, y * 100) + mech = seg.sdata if sfx == "" else seg.sdatats + assert mech.c[1] == y * 100.0 + return seg + + set(0.5, 5) + seg = set(0.1, 1) + + s.nseg = 1 # setdata invalid since old s2(0.1) no longer exists. + mech = seg.sdata if sfx == "" else seg.sdatats + expect_err("h.A_sdata%s(2, 20, 200)" % sfx) # Assertion failed: m_mech_handle + assert mech.c[1] == 500.0 # Though seg.x == 0.1, the only point is 0.5 + assert fk(3) == 6.0 + carray = mech.c + assert carray[1] == 500.0 + seg = set(0.5, 5) + s.uninsert("sdata%s" % sfx) + expect_err("print(carray[1])") # c_sdata, the mechanism does not exist at s2(0.1) + expect_err("func(3, 30, 300)") # Assertion failed: m_mech_handle + assert fk(4) == 10.0 + h.delete_section(sec=s) + expect_err("print(carray[1])") # nrn.RangeVar can't access a deleted section + expect_err("func(4, 40, 400)") # Assertion failed: m_mech_handle + assert fk(5) == 15.0 + + del s, setdata, seg, mech, carray, fk + locals() + + +if __name__ == "__main__": + for sfx in ["ts", ""]: + test_setdata(sfx) + test_prop_invalid(sfx) diff --git a/test/hoctests/tests/test_thread_partition.py b/test/hoctests/tests/test_thread_partition.py new file mode 100644 index 0000000000..10b10aabdf --- /dev/null +++ b/test/hoctests/tests/test_thread_partition.py @@ -0,0 +1,98 @@ +from neuron import h +from neuron.expect_hocerr import expect_err, set_quiet + +set_quiet(False) +pc = h.ParallelContext() + + +class Cell: + def __init__(self, id): + self.id = id + self.secs = [h.Section(name="d_" + str(i), cell=self) for i in range(3)] + s = self.secs + for i in range(1, len(s)): + s[i].connect(s[i - 1](1)) + s[i].nseg = 11 + s[i].insert("hh") + + def __str__(self): + return "Cell_" + str(self.id) + + +def prroots(): + print("prroots") + sr = h.SectionList() + sr.allroots() + for s in sr: + print(s) + + +def prpart(): + for ith in range(pc.nthread()): + sl = pc.get_partition(ith) + for sec in sl: + print(ith, sec, sec.cell().id) + + +def assertpart(parts="default"): + if str(parts) == "default": # not round-robin but root order + roots = h.SectionList() + roots.allroots() + roots = [root for root in roots] + i = 0 + for ith in range(pc.nthread()): + sl = pc.get_partition(ith) + for sec in sl: + assert sec == roots[i] + i += 1 + else: # equal to the parts + assert len(parts) == pc.nthread() + for ith in range(pc.nthread()): + sl = pc.get_partition(ith) + a = [sec for sec in pc.get_partition(ith)] + b = [sec for sec in parts[ith]] + assert a == b + + +def test_default(): + assertpart("default") + + cells = [Cell(i) for i in range(5)] + assertpart("default") + + pc.nthread(3) + assertpart("default") + + pc.nthread(2) + assertpart("default") + + +def test_parts(): + cells = [Cell(i) for i in range(10)] + r = h.Random() + r.Random123(1, 0, 0) + nt = 3 + pc.nthread(nt) + 
r.discunif(0, nt - 1) + parts = [h.SectionList() for _ in range(nt)] + for cell in cells: + parts[int(r.repick())].append(cell.secs[0]) + for i in range(nt): + pc.partition(i, parts[i]) + assertpart(parts) + + def run(tstop): + pc.thread_ctime() # all threads 0 + pc.set_maxstep(10) + h.finitialize(-65) + pc.psolve(tstop) + + run(20) + print("ith ncell thread_ctime") + for ith in range(pc.nthread()): + print(ith, len([1 for _ in parts[ith]]), pc.thread_ctime(ith)) + + +if __name__ == "__main__": + test_default() + test_parts() diff --git a/test/hoctests/vardimtests/test2.py b/test/hoctests/vardimtests/test2.py index 0f20505b9e..7801faa702 100644 --- a/test/hoctests/vardimtests/test2.py +++ b/test/hoctests/vardimtests/test2.py @@ -34,3 +34,30 @@ h("""objref oarray[4][4]""") # cannot reach line we want because of earlier array check expect_err("h.oarray[2] = []") + + +# test assignment/evaluation of mod file array variable from python +s = h.Section() +s.nseg = 3 +s.insert("atst") +for seg in s: + ar = seg.atst.arrayrng + assert len(ar) == 4 + for i in range(len(ar)): + ar[i] = i + seg.x + seg.arrayrng_atst[i] = ar[i] +for seg in s: + ar = seg.atst.arrayrng + for i in range(len(ar)): + assert ar[i] == i + seg.x + assert seg.arrayrng_atst[i] == i + seg.x + +ar = s.arrayrng_atst +assert len(ar) == 4 +for i in range(len(ar)): + ar[i] = i +for i in range(len(ar)): + assert ar[i] == float(i) + +expect_err("print(ar[10])") +expect_err("ar[10] = 1") diff --git a/test/nmodl/test_random.py b/test/nmodl/test_random.py new file mode 100644 index 0000000000..75a6ca093a --- /dev/null +++ b/test/nmodl/test_random.py @@ -0,0 +1,129 @@ +from math import isclose + +from neuron import h + +pc = h.ParallelContext() + + +def model(): + pc.gid_clear() + for s in h.allsec(): + h.delete_section(sec=s) + s = h.Section() + s.L = 10 + s.diam = 10 + s.insert("hh") + ic = h.IClamp(s(0.5)) + ic.delay = 0.1 + ic.dur = 0.1 + ic.amp = 0.5 * 0 + syn = h.ExpSyn(s(0.5)) + nc = h.NetCon(None, syn) + nc.weight[0] = 0.001 + return {"s": s, "ic": ic, "syn": syn, "nc": nc} + + +def test_netstim_noise(): + cells = {gid: (h.NetStim()) for gid in range(pc.id(), 5, pc.nhost())} + for gid, cell in cells.items(): + pc.set_gid2node(gid, pc.id()) + pc.cell(gid, h.NetCon(cell, None)) + + cell.interval = gid + 1 + cell.number = 100 + cell.start = 0 + cell.noise = 1 + + # Initialize RANDOM variable ids and initial sequence. 
+ cell.ranvar.set_ids(gid, 2, 3).set_seq(0) + + spiketime = h.Vector() + spikegid = h.Vector() + pc.spike_record(-1, spiketime, spikegid) + + pc.set_maxstep(10) + tstop = 5 + + h.finitialize() + pc.psolve(tstop) + + spiketime_result = spiketime.c() + spikegid_result = spikegid.c() + + spikegid_ref = [ + 1.0, + 0.0, + 2.0, + 0.0, + 0.0, + 0.0, + 4.0, + 3.0, + 0.0, + 4.0, + 1.0, + 0.0, + 0.0, + 2.0, + 2.0, + ] + spiketime_ref = [ + 0.038647213491710304, + 0.08268113588796304, + 0.5931985927363619, + 0.7687313066056471, + 0.867367543646173, + 1.1822988033563793, + 1.3476448598895432, + 1.748395215773899, + 1.9382702939333631, + 2.3381219031177376, + 2.9858151911753863, + 3.3721447007688603, + 3.4714402733585277, + 4.130940076465841, + 4.406639959683753, + ] + + # check if gid and spike time matches + for i in range(int(spiketime_result.size())): + assert spikegid_ref[i] == spikegid_result[i] and isclose( + spiketime_ref[i], spiketime_result[i] + ) + + +def test_random(): + pc.gid_clear() + ncell = 10 + cells = [] + gids = range(pc.id(), ncell, pc.nhost()) + rng = [] + for gid in gids: + pc.set_gid2node(gid, pc.id()) + cell = h.NetStim() + + cell.ranvar.set_ids(gid, 2, 3).set_seq(0) + + r = cell.erand() + rng.append(r) + + rng_ref = [ + 0.08268113588796304, + 0.019323606745855152, + 0.19773286424545394, + 0.4370988039434747, + 0.26952897197790865, + 0.27785183823847076, + 1.4834024918566038, + 1.1439786359830195, + 0.13094521398166833, + 0.3743746759204156, + ] + + for x, y in zip(rng, rng_ref): + assert isclose(x, y) + + +if __name__ == "__main__": + test_netstim_noise() + test_random() diff --git a/test/pyinit/CMakeLists.txt b/test/pyinit/CMakeLists.txt new file mode 100644 index 0000000000..824ac03bde --- /dev/null +++ b/test/pyinit/CMakeLists.txt @@ -0,0 +1,331 @@ +# Make sure that exit codes are propagated as expected, and that the expected number of arguments is +# processed. Start out with checking that the right amount of code is executed, and that error codes +# are propagated. Here, we don't bother to duplicate for all explicit/implicit Python versions. +nrn_add_test_group( + NAME pyinit + MODFILE_PATTERNS NONE # no special, for now + SIM_DIRECTORY test/pyinit) +set(nrniv_args_common -notatty) +set(nrniv_args ${nrniv_args_common}) +# Passing a .py file without -python takes a different code path to passing a .py file *with* +# -python +nrn_add_test( + GROUP pyinit + NAME nrniv_script.py + COMMAND nrniv ${nrniv_args} do_nothing.py + SCRIPT_PATTERNS do_nothing.py) +set_property(TEST pyinit::nrniv_script.py PROPERTY PASS_REGULAR_EXPRESSION + "[\r\n]doing nothing[\r\n]") +# If the script returns an error code, nrniv should return that code too +nrn_add_test( + GROUP pyinit + NAME nrniv_script.py_error + COMMAND nrniv ${nrniv_args} assert_false.py + SCRIPT_PATTERNS assert_false.py) +set_property(TEST pyinit::nrniv_script.py_error PROPERTY WILL_FAIL ON) +# Passing two .py files *without* -python executes both of them, but that behaviour is not really +# documented and should perhaps be deprecated due to ambiguity about how to match python's semantics +# for sys.path. +nrn_add_test( + GROUP pyinit + NAME nrniv_two_scripts.py + COMMAND nrniv ${nrniv_args} do_nothing.py do_nothing.py + SCRIPT_PATTERNS do_nothing.py) +set_property(TEST pyinit::nrniv_two_scripts.py + PROPERTY PASS_REGULAR_EXPRESSION "[\r\n]doing nothing[\r\n]doing nothing[\r\n]") +# Unfortunately we can't easily add CTest tests that check exit codes *and* regex output. So we just +# run the tests twice. 
+foreach(check code output) + nrn_add_test( + GROUP pyinit + NAME nrniv_two_scripts.py_error_check_${check} + COMMAND nrniv ${nrniv_args} do_nothing.py assert_false.py + SCRIPT_PATTERNS do_nothing.py assert_false.py) + if(check STREQUAL code) + set(property WILL_FAIL ON) + else() + set(property PASS_REGULAR_EXPRESSION "[\r\n]doing nothing[\r\n]") + endif() + set_property(TEST pyinit::nrniv_two_scripts.py_error_check_${check} PROPERTY ${property}) +endforeach() +# When we pass -python then we have the option of passing a .py script file or a -c command; first +# test -c +nrn_add_test( + GROUP pyinit + NAME nrniv_python_command + COMMAND nrniv ${nrniv_args} -python -c "print('foo')") +set_property(TEST pyinit::nrniv_python_command PROPERTY PASS_REGULAR_EXPRESSION "[\r\n]foo[\r\n]") +nrn_add_test( + GROUP pyinit + NAME nrniv_python_command_error + COMMAND nrniv ${nrniv_args} -python -c "assert False") +set_property(TEST pyinit::nrniv_python_command_error PROPERTY WILL_FAIL ON) +# Then test -python foo.py +nrn_add_test( + GROUP pyinit + NAME nrniv_python_script.py + COMMAND nrniv ${nrniv_args} -python do_nothing.py + SCRIPT_PATTERNS do_nothing.py) +set_property(TEST pyinit::nrniv_python_script.py PROPERTY PASS_REGULAR_EXPRESSION + "[\r\n]doing nothing[\r\n]") +nrn_add_test( + GROUP pyinit + NAME nrniv_python_script.py_error + COMMAND nrniv ${nrniv_args} -python assert_false.py + SCRIPT_PATTERNS assert_false.py) +set_property(TEST pyinit::nrniv_python_script.py_error PROPERTY WILL_FAIL ON) +# With -python then NEURON has the same semantics as python, i.e only one file.py or -c "code" +# argument is processed and the remainder is just passed as arguments. So asserting False in a +# second -c should not cause a non-zero exit code. +nrn_add_test( + GROUP pyinit + NAME nrniv_python_command_twice + COMMAND nrniv ${nrniv_args} -python -c "print('hello')" -c "assert False") +set_property(TEST pyinit::nrniv_python_command_twice PROPERTY PASS_REGULAR_EXPRESSION + "[\r\n]hello[\r\n]") +nrn_add_test( + GROUP pyinit + NAME nrniv_python_script.py_twice + COMMAND nrniv ${nrniv_args} -python do_nothing.py assert_false.py + SCRIPT_PATTERNS do_nothing.py assert_false.py) +set_property(TEST pyinit::nrniv_python_script.py_twice PROPERTY PASS_REGULAR_EXPRESSION + "[\r\n]doing nothing[\r\n]") +string( + JOIN + "$ " + python_cmd + "import sys" + "actual = sys.argv[-2:]" + "print(actual)" + "assert actual == ['foo', 'bar']") +nrn_add_test( + GROUP pyinit + NAME nrniv_python_command_args + COMMAND nrniv ${nrniv_args} -python -c "${python_cmd}" foo bar) +nrn_add_test( + GROUP pyinit + NAME nrniv_python_script.py_args + COMMAND nrniv ${nrniv_args} -python assert_last_args_are_foo_and_bar.py foo bar + SCRIPT_PATTERNS assert_last_args_are_foo_and_bar.py) +# With -nopython then there should be a dummy PythonObject implementation, and Python-related +# settings should be ignored +set(PythonObject_cmd -c "objref x" -c "x=new PythonObject()") +nrn_add_test( + GROUP pyinit + NAME nrniv_nopython_PythonObject + COMMAND nrniv ${nrniv_args} -nopython ${PythonObject_cmd}) +nrn_add_test( + GROUP pyinit + NAME nrniv_nopython_PythonObject_pyexe + COMMAND nrniv ${nrniv_args} -nopython -pyexe /deep/thought ${PythonObject_cmd}) +nrn_add_test( + GROUP pyinit + NAME nrniv_nopython_PythonObject_pyenv + COMMAND NRN_PYTHONVERSION=life NRN_PYLIB=the-universe NRN_PYTHONEXE=and-everything nrniv + ${nrniv_args} -nopython ${PythonObject_cmd}) +# +# * nrniv -pyexe /path/to/python -python ... +# * NRN_PYTHONEXE=... nrniv -python ... 
+# * PATH=nothing/with/python/init nrniv -python ... +# * python something_importing_neuron.py +# * python -c "import neuron; other stuff" +# * nrniv script_using_multiprocessing.py +# * special -python -c "import neuron" [where /path/to/special = {venv_root}/some_name/special] +# +# plus variants of the above when a virtual environment is active, or an explicit path to a virtual +# environment python is given. +# +# these should assert that the variants launched with `python` and `nrniv` end up with consistent +# values of `sys.path`, `sys.[base_][exec_]prefix`, `sys.std{err,in,out}` encoding and Python +# version number. If we are building (with dynamic Python) for multiple Python versions, replicate +# the tests for all of them. TODO use NRN_PYTHON_EXTRA_FOR_TESTS. +foreach(val RANGE ${NRN_PYTHON_COUNT}) + # val = 0 .. NRN_PYTHON_COUNT-1 means explicitly use that Python; val = NRN_PYTHON_COUNT means + # don't specify and check we get the expected default. + set(nrniv_args ${nrniv_args_common}) + if(val EQUAL NRN_PYTHON_COUNT) + # Figure out which Python we expect nrniv -python to pick up in these tests. First, for dynamic + # Python: + # + # * we know we're not passing -pyexe here + # * we know(?) that NRN_PYTHONEXE is not set + # * the order of precedence is: python python3 pythonX0.Y0 .. pythonXn.Yn + # * disable the tests if that yields nothing + # + # python3.999 is to make sure we test skipping names in the search path that don't exist. + # Secondly, if dynamic Python is disabled, then we know it will use the default. + if(NRN_ENABLE_PYTHON_DYNAMIC) + set(search_names python python3 python3.999) + foreach(val2 RANGE ${NRN_PYTHON_ITERATION_LIMIT}) + list(GET NRN_PYTHON_VERSIONS ${val2} pyver) + list(APPEND search_names python${pyver}) + endforeach() + unset(pyexe) + foreach(candidate ${search_names}) + nrn_find_python(NAME "${candidate}" PREFIX nrnpy) + if(nrnpy_EXECUTABLE STREQUAL "nrnpy_EXECUTABLE-NOTFOUND") + # Not an error if a user's system doesn't have all the names in ${search_names} + continue() + endif() + set(nrnpy_VERSION "${nrnpy_VERSION_MAJOR}.${nrnpy_VERSION_MINOR}") + if(NOT nrnpy_VERSION IN_LIST NRN_PYTHON_VERSIONS) + # e.g. python or python3 points to a version we didn't build against + continue() + endif() + # Right, so the default should be nrnpy_EXECUTABLE, which is nrnpy_VERSION + set(pyexe "${nrnpy_EXECUTABLE}") + set(pyver "${nrnpy_VERSION}") + break() + endforeach() + else() + # non-dynamic Python, so nrniv -python will always use this. -pyexe in this case just steers + # the value of sys.executable, but we're not passing that here anyway. + set(pyexe "${NRN_DEFAULT_PYTHON_EXECUTABLE}") + set(pyver "${NRN_DEFAULT_PYTHON_VERSION}") + endif() + if(NOT DEFINED pyexe) + message( + WARNING "Couldn't figure out what Python version nrniv -python will choose by default") + # Skip these tests + continue() + endif() + set(pyexe_name "def") + else() + list(GET NRN_PYTHON_EXECUTABLES ${val} pyexe) + list(GET NRN_PYTHON_VERSIONS ${val} pyver) + list(APPEND nrniv_args -pyexe "${pyexe}") + set(pyexe_name "${pyver}") + endif() + set(nrnivpy nrniv_py${pyexe_name}) + # Now move on to checking things about which Python is actually used. First, make sure the version + # matches our expectation. 
+ string( + JOIN + "$ " + python_cmd + "import sys" + "actual = '{}.{}'.format(*sys.version_info[:2])" + "print(actual)" + "assert actual == '${pyver}'") + nrn_add_test( + GROUP pyinit + NAME ${nrnivpy}_python_command_version_check + COMMAND nrniv ${nrniv_args} -python -c "${python_cmd}") + # Now check that some other Python things match between `nrniv -python` and `${exe}` directly + foreach( + attr + # executable # seems tricky to get exact matches portably, maybe replace with a looser "they all + # look like python not nrniv/special" check + path + prefix + exec_prefix + base_prefix + base_exec_prefix + stderr.encoding + stdin.encoding + stdout.encoding) + # Make sure that sys.${attr} matches when we run `python a.py` and `nrniv -python a.py` + nrn_add_test( + GROUP pyinit + NAME ${nrnivpy}_python_check_sys_${attr} + PRECOMMAND "${pyexe}" dump_sys_attr.py ref.json ${attr} + COMMAND nrniv ${nrniv_args} -python check_sys_attr.py ref.json ${attr} + SCRIPT_PATTERNS check_sys_attr.py dump_sys_attr.py) + # Replicate this with nrnpython(...) inside a .hoc script too. This checks that the Python + # environment inside nrnpython(...) inside foo.hoc is the same as in foo.py -- which seems like + # a reasonable choice -- except for path, which has an explicit exception documented in (for + # now) hoc_moreinput -- there nrnpython(...) inside foo.hoc sees sys.path[0] == '' for backward + # compatibility reasons. + nrn_add_test( + GROUP pyinit + NAME ${nrnivpy}_nrnpython_check_sys_${attr} + PRECOMMAND "${pyexe}" dump_sys_attr.py override_sys_path_0_to_be_empty ref.json ${attr} + COMMAND nrniv ${nrniv_args} -c "strdef attr, fname" -c "attr=\"${attr}\"" -c + "fname=\"ref.json\"" check_sys_attr.hoc + SCRIPT_PATTERNS check_sys_attr.hoc dump_sys_attr.py) + endforeach() + # Make sure that we can import neuron + nrn_add_test( + GROUP pyinit + NAME ${nrnivpy}_python_command_import_neuron + COMMAND nrniv ${nrniv_args} -python -c "import neuron") + nrn_add_test( + GROUP pyinit + NAME py${pyexe_name}_command_import_neuron + COMMAND "${pyexe}" -c "import neuron" + PRELOAD_SANITIZER) + # Check using Python from HOC using nrnpython(...). The only reasonable approach here seems to be + # that sys.path should match the code calling nrnpython(...), i.e. + # + # * foo.hoc or foo.py: sys.path[0] should be the directory containing the script, with symlinks + # resolved + # * nrniv -c "hoc_code_including_nrnpython" or nrniv -python -c "from neuron import h; + # h.nrnpython(...)": sys.path[0] should be an empty string + # + nrn_add_test( + GROUP pyinit + NAME ${nrnivpy}_hoc_command_nrnpython + COMMAND nrniv ${nrniv_args} -c "nrnpython(\"import sys$ print(sys.path)\")") +endforeach() +if(NRN_PYTHON_EXTRA_FOR_TESTS) + # Run some tests using Python versions that are installed on the system, but which NEURON is *not* + # built with support for. + foreach(val RANGE ${NRN_PYTHON_EXTRA_FOR_TESTS_ITERATION_LIMIT}) + list(GET NRN_PYTHON_EXTRA_FOR_TESTS_EXECUTABLES ${val} pyexe) + list(GET NRN_PYTHON_EXTRA_FOR_TESTS_VERSIONS ${val} pyver) + # As above, we can't easily add CTest tests that check exit codes *and* regex output. So we just + # run the tests twice. + foreach(check code output) + # Should not be able to import NEURON + nrn_add_test( + GROUP pyinit + NAME python${pyver}_import_fail_check_${check} + COMMAND "${pyexe}" -c "import neuron" + PRELOAD_SANITIZER) + if(check STREQUAL code) + set(properties WILL_FAIL ON) + else() + # output + string(REPLACE "." "\\." 
pyver_escaped "${pyver}") + set(properties + PASS_REGULAR_EXPRESSION + "Python ${pyver_escaped} is not supported by this NEURON installation \\(supported:") + endif() + set_tests_properties(pyinit::python${pyver}_import_fail_check_${check} + PROPERTIES ${properties}) + # Telling nrniv to use this Python should fail if dynamic Python is enabled. TODO: figure out + # the desired behaviour for non-dynamic Python. + if(NRN_ENABLE_PYTHON_DYNAMIC) + nrn_add_test( + GROUP pyinit + NAME nrniv_py${pyver}_import_fail_check_${check} + COMMAND nrniv ${nrniv_args_common} -pyexe ${pyexe} -python -c "import neuron") + # Setting the full set of environment variables (NRN_PYLIB, NRN_PYTHONEXE and + # NRN_PYTHONVERSION) will prevent nrnpyenv.sh from being run, but we should still get an + # error if NRN_PYTHONVERSION is not valid for this NEURON. + execute_process( + COMMAND "${pyexe}" -c "import sysconfig; print(sysconfig.get_config_var('LIBDIR'))" + RESULT_VARIABLE code + OUTPUT_VARIABLE libdir + OUTPUT_STRIP_TRAILING_WHITESPACE) + set(pylib + "${libdir}/${CMAKE_SHARED_LIBRARY_PREFIX}python${pyver}${CMAKE_SHARED_LIBRARY_SUFFIX}") + if(code OR NOT EXISTS "${pylib}") + message( + WARNING + "Could not identify libpythonX.Y for NRN_PYTHON_EXTRA_FOR_TESTS test (tried ${pylib})" + ) + continue() + endif() + nrn_add_test( + GROUP pyinit + NAME nrniv_py${pyver}_forced_from_env_check_${check} + COMMAND NRN_PYTHONVERSION=${pyver} "NRN_PYLIB=${pylib}" "NRN_PYTHONEXE=${pyexe}" nrniv + -python -c "import neuron") + set_tests_properties( + pyinit::nrniv_py${pyver}_import_fail_check_${check} + pyinit::nrniv_py${pyver}_forced_from_env_check_${check} PROPERTIES ${properties}) + endif() + endforeach() + endforeach() +endif() diff --git a/test/pyinit/assert_false.py b/test/pyinit/assert_false.py new file mode 100644 index 0000000000..85f740c8ff --- /dev/null +++ b/test/pyinit/assert_false.py @@ -0,0 +1,2 @@ +print("About to assert False") +assert False diff --git a/test/pyinit/assert_last_args_are_foo_and_bar.py b/test/pyinit/assert_last_args_are_foo_and_bar.py new file mode 100644 index 0000000000..9666c727bc --- /dev/null +++ b/test/pyinit/assert_last_args_are_foo_and_bar.py @@ -0,0 +1,3 @@ +import sys + +assert sys.argv[-2:] == ["foo", "bar"] diff --git a/test/pyinit/check_sys_attr.hoc b/test/pyinit/check_sys_attr.hoc new file mode 100644 index 0000000000..534f7882f2 --- /dev/null +++ b/test/pyinit/check_sys_attr.hoc @@ -0,0 +1,13 @@ +{nrnpython("from functools import reduce")} +{nrnpython("import json")} +{nrnpython("import sys")} +strdef setattr, setfname +{sprint(setattr, "attr='%s'", attr)} +{sprint(setfname, "fname='%s'", fname)} +{nrnpython(setattr)} +{nrnpython(setfname)} +{nrnpython("data = reduce(getattr, [sys] + attr.split('.'))")} +{nrnpython("print('checking sys.{} = {} against reference file {}'.format(attr, data, fname))")} +{nrnpython("with open(fname) as ifile: ref_data = json.load(ifile)")} +{nrnpython("print('reference value is {}'.format(ref_data))")} +quit(!nrnpython("assert data == ref_data")) diff --git a/test/pyinit/check_sys_attr.py b/test/pyinit/check_sys_attr.py new file mode 100644 index 0000000000..5099f1c428 --- /dev/null +++ b/test/pyinit/check_sys_attr.py @@ -0,0 +1,11 @@ +from functools import reduce +import json +import sys + +fname, attr = sys.argv[-2:] +data = reduce(getattr, [sys] + attr.split(".")) +print("checking sys.{} = {} against reference file {}".format(attr, data, fname)) +with open(fname) as ifile: + ref_data = json.load(ifile) +print("reference value is
{}".format(ref_data)) +assert data == ref_data diff --git a/test/pyinit/do_nothing.py b/test/pyinit/do_nothing.py new file mode 100644 index 0000000000..4d16db430d --- /dev/null +++ b/test/pyinit/do_nothing.py @@ -0,0 +1 @@ +print("doing nothing") diff --git a/test/pyinit/dump_sys_attr.py b/test/pyinit/dump_sys_attr.py new file mode 100644 index 0000000000..13f3da67c3 --- /dev/null +++ b/test/pyinit/dump_sys_attr.py @@ -0,0 +1,11 @@ +from functools import reduce +import json +import sys + +fname, attr = sys.argv[-2:] +data = reduce(getattr, [sys] + attr.split(".")) +if attr == "path" and sys.argv[-3] == "override_sys_path_0_to_be_empty": + data[0] = "" +print("dumping sys.{} = {} to {}".format(attr, data, fname)) +with open(fname, "w") as ofile: + json.dump(data, ofile) diff --git a/test/pynrn/test_units.py b/test/pynrn/test_units.py deleted file mode 100644 index 41092ef5f5..0000000000 --- a/test/pynrn/test_units.py +++ /dev/null @@ -1,101 +0,0 @@ -from neuron import h - - -def switch_units(legacy): - try: - h.nrnunit_use_legacy(legacy) - except: - pass - - -def test_mod_legacy(): - s = h.Section() - ut = h.UnitsTest(s(0.5)) - h.ion_style("na_ion", 1, 2, 1, 1, 0, sec=s) - switch_units(1) - h.finitialize() - names = ["mole", "e", "faraday", "planck", "hbar", "gasconst"] - legacy_hex_values = [ - "0x1.fe18fef60659ap+78", - "0x1.7a4e7164efbbcp-63", - "0x1.78e54cccccccdp+16", - "0x1.b85f8c5445f02p-111", - "0x1.18779454e3d48p-113", - "0x1.0a10624dd2f1bp+3", - ] - for i, n in enumerate(names): - val = eval("ut." + n) - print("%s = %s (%s)" % (n, str(val.hex()), str(val))) - legacy_value = float.fromhex(legacy_hex_values[i]) - assert val == legacy_value - assert ut.avogadro == float.fromhex(legacy_hex_values[0]) - ghk_std = h.ghk(-50, 0.001, 10, 2) - erev_std = h.nernst(s(0.5).nai, s(0.5).nao, 1) - assert ut.ghk == ghk_std - assert ut.erev == erev_std - switch_units(0) - h.finitialize() - ghk_std = h.ghk(-50, 0.001, 10, 2) - erev_std = h.nernst(s(0.5).nai, s(0.5).nao, 1) - assert ut.mole != float.fromhex(legacy_hex_values[0]) - assert ut.e * ut.avogadro == ut.faraday - assert abs(ut.faraday - h.FARADAY) < 1e-10 - assert ut.gasconst == h.R - assert ut.k * ut.avogadro == ut.gasconst - assert abs(ut.planck - ut.hbar * 2.0 * h.PI) < 1e-49 - assert ut.avogadro == h.Avogadro_constant - assert ut.ghk == h.ghk(-50, 0.001, 10, 2) - assert ut.erev == h.nernst(s(0.5).nai, s(0.5).nao, 1) - - -def test_hoc_legacy(): - switch_units(1) # legacy - print("R = %s" % str(h.R)) - print("FARADAY = %s" % str(h.FARADAY)) - celsius = 6.3 - ghk = h.ghk(30, 0.01, 10, 1) # nernst requires a Section to get voltage - print("ghk = %s" % str(ghk)) - - assert h.R == 8.31441 - assert h.FARADAY == 96485.309 - assert ghk == -483.7914803097116 - - switch_units(0) # Modern - print("R = %s" % str(h.R)) - print("FARADAY = %s" % str(h.FARADAY)) - ghk = h.ghk(30, 0.01, 10, 1) - print("ghk = %s" % str(ghk)) - assert h.R == 8.31446261815324 - assert ghk == -483.8380971405879 - - -def test_env_legacy(): - import os, subprocess, sys - - for i in [0, 1]: - exe = os.environ.get("NRN_PYTHON_EXECUTABLE", sys.executable) - env = os.environ.copy() - env["NRNUNIT_USE_LEGACY"] = str(i) - try: - env[os.environ["NRN_SANITIZER_PRELOAD_VAR"]] = os.environ[ - "NRN_SANITIZER_PRELOAD_VAL" - ] - except: - pass - a = subprocess.check_output( - [ - exe, - "-c", - "from neuron import h; print(h.nrnunit_use_legacy())", - ], - env=env, - shell=False, - ) - a = int(float(a.decode().split()[0])) - assert a == i - - -if __name__ == "__main__": - test_mod_legacy() 
- test_hoc_legacy() - test_env_legacy() diff --git a/share/demo/release/mcna.mod b/test/pytest/mcna.mod old mode 100755 new mode 100644 similarity index 100% rename from share/demo/release/mcna.mod rename to test/pytest/mcna.mod diff --git a/test/pytest/test_nrntest_thread.json b/test/pytest/test_nrntest_thread.json new file mode 100644 index 0000000000..abf11443ea --- /dev/null +++ b/test/pytest/test_nrntest_thread.json @@ -0,0 +1,617 @@ +{ + "mcna": { + "Cell[0]": { + "ina": [ + -0.0014729768894164211, + -0.0014729768894164211, + -0.0022466404230051043, + -0.0033716528915311234, + -0.004665802645758657, + -0.006125902682920101, + -0.007751989788858758, + -0.009538042601053821, + -0.011473240419943817, + -0.01354346508291712, + -0.0157325108677103, + -0.01802302911054964, + -0.02039725508721644, + -0.02283755802107442, + -0.025326848213157328, + -0.027848869447458968, + -0.030388399914848506, + -0.03293138078412269, + -0.03546498810775662, + -0.03797766087910741, + -0.04045909566829222, + -0.04290021627983071, + -0.04529312523178993, + -0.04763104249869352, + -0.04990823584231545, + -0.052119946136303164, + -0.054262310339140536, + -0.05633228415727357, + -0.05832756594274856, + -0.06024652296770067, + -0.06208812089493223, + -0.06385185700579447, + -0.06553769754207914, + -0.06714601935800396, + -0.06867755595362493, + -0.07013334786547111, + -0.07151469731835476, + -0.07282312698959305, + -0.0740603426995179, + -0.0752281998170374, + -0.07632867315359483, + -0.077363830111055, + -0.07833580684712281, + -0.07924678722447406, + -0.08009898431572314, + -0.08089462424475835, + -0.08163593215510774, + -0.08232512010727784, + -0.08296437671896946, + -0.08355585837435985, + -0.0841016818409673, + -0.08460391814477024, + -0.08506458756607693, + -0.08548565563002045, + -0.08586902997640126, + -0.08621655800386809, + -0.08653002519308065, + -0.08681115402252573, + -0.08706160339905616, + -0.08728296853300385, + -0.0874767811948981, + -0.08764451029742255, + -0.08778756275229467, + -0.08790728455728188, + -0.08800496207360468, + -0.08808182345855674, + -0.08813904022232041, + -0.08817772888170948, + -0.08819895268695509, + -0.08820372340069767, + -0.08819300311108356, + -0.08816770606331578, + -0.08812870049619911, + -0.08807681047217454, + -0.08801281769107602, + -0.08793746327938595, + -0.08785144954813254, + -0.08775544171377833, + -0.08765006957751202, + -0.08753592915928894, + -0.08741358428378122, + -0.08728356811611229, + -0.08714638464586752, + -0.08700251011840972, + -0.0868523944129883, + -0.08669646236752636, + -0.0865351150503063, + -0.0863687309790586, + -0.08619766728819704, + -0.08602226084514124, + -0.08584282931682993, + -0.08565967218765874, + -0.08547307173018064, + -0.08528329392998621, + -0.0850905893662401, + -0.08489519404939158, + -0.08469733021760183, + -0.08449720709344362, + -0.08429502160242981, + -0.08409095905491834, + -0.08388519379292485, + -0.08367788980335068, + -0.08346920129910489, + -0.08325927326956531, + -0.08304824200178684, + -0.08283623557382472, + -0.08262337432149866, + -0.08240977127987992, + -0.08219553260073849, + -0.0819807579471422, + -0.08176554086635397, + -0.08154996914212757, + -0.0813341251274574, + -0.08111808605879268, + -0.08090192435268291, + -0.08068570788577802, + -0.08046950025906499, + -0.08025336104718163, + -0.0800373460336088, + -0.07982150743250349, + -0.07960589409789846, + -0.07939055172095864, + -0.07917552301594914, + -0.07896084789553742, + -0.07874656363602066, + -0.0785327050330376, + -0.07831930454829669, + 
-0.0781063924478236, + -0.07789399693220432, + -0.07768214425927585, + -0.07747085885969097, + -0.0772601634457617, + -0.07705007911396335, + -0.07684062544146072, + -0.076631820576998, + -0.07642368132647506, + -0.07621622323351547, + -0.0760094606553135, + -0.07580340683403311, + -0.07559807396401466, + -0.07539347325503246, + -0.07518961499183144, + -0.07498650859015882, + -0.07478416264949436, + -0.07458258500267123, + -0.07438178276256836, + -0.07418176236604523, + -0.07398252961527993, + -0.07378408971666221, + -0.07358644731738455, + -0.07338960653986598, + -0.07319357101413576, + -0.07299834390829656, + -0.07280392795717959, + -0.07261032548929851, + -0.07241753845220139, + -0.07222556843631571, + -0.07203441669737452, + -0.0718440841775078, + -0.07165457152507732, + -0.07146587911332952, + -0.07127800705793594, + -0.0710909552334868, + -0.07090472328899987, + -0.07071931066250264, + -0.07053471659474264, + -0.07035094014207757, + -0.07016798018859371, + -0.06998583545749824, + -0.06980450452182889, + -0.06962398581452069, + -0.06944427763786873, + -0.06926537817242216, + -0.06908728548534337, + -0.06890999753826428, + -0.06873351219466929, + -0.06855782722683332, + -0.06838294032234102, + -0.06820884909021245, + -0.06803555106665837, + -0.06786304372048708, + -0.0676913244581839, + -0.0675203906286825, + -0.06735023952784634, + -0.06718086840267791, + -0.06701227445527147, + -0.06684445484652486, + -0.0666774066996248, + -0.06651112710331897, + -0.06634561311498768, + -0.06618086176352726, + -0.06601687005205599, + -0.06585363496045363, + -0.06569115344774418, + -0.06552942245433134, + -0.06536843890409547, + -0.06520819970636033, + -0.06504870175773722, + -0.06488994194385442, + -0.06473191714097774, + -0.06457462421752995 + ] + }, + "Cell[9]": { + "ina": [ + -0.0014729768894164211, + -0.0014729768894164211, + -0.019174648087849844, + -0.04235808873219049, + -0.08023512559936206, + -0.12458081872272934, + -0.1708445388989667, + -0.21589347642713883, + -0.25763306016894516, + -0.29482965930710403, + -0.3269064870781198, + -0.35374935575540517, + -0.3755463014070531, + -0.39266599372220595, + -0.4055709003345621, + -0.4147583019945483, + -0.42072221435613744, + -0.4239303141862922, + -0.42481125539828357, + -0.42374894308068695, + -0.4210812988930837, + -0.41710179097459665, + -0.4120625472289518, + -0.40617826312428057, + -0.39963039168063325, + -0.3925712950538585, + -0.38512816792928295, + -0.3774066305854567, + -0.36949394692584964, + -0.3614618593040726, + -0.3533690541959858, + -0.34526328535950496, + -0.33718318732952857, + -0.32915981418268786, + -0.3212179380286283, + -0.3133771396964279, + -0.30565272129182797, + -0.2980564671646288, + -0.2905972766388642, + -0.28328168879898874, + -0.27611431679332105, + -0.2690982065600178, + -0.2622351326163468, + -0.25552584157376124, + -0.24897025233203074, + -0.24256762044175242, + -0.2363166728797019, + -0.2302157184291132, + -0.22426273797167243, + -0.2184554582563413, + -0.21279141209093275, + -0.20726798738695992, + -0.20188246706038016, + -0.19663206143635553, + -0.1915139345130368, + -0.18652522519740716, + -0.1816630644267555, + -0.17692458892513024, + -0.17230695220905928, + -0.16780733334585524, + -0.16342294387671355, + -0.15915103324207322, + -0.15498889298542645, + -0.1509338599615517, + -0.1469833187340146, + -0.14313470331311015, + -0.1393854983578601, + -0.13573323994313202, + -0.1321755159745026, + -0.12870996631840367, + -0.12533428270275562, + -0.12204620843320892, + -0.11884353796187012, + -0.11572411633865093, + 
-0.1126858385698669, + -0.10972664890421167, + -0.10684454006254768, + -0.1040375524249465, + -0.10130377318594673, + -0.09864133548698649, + -0.09604841753331744, + -0.09352324170136177, + -0.09106407364136634, + -0.08866922137930801, + -0.08633703442126008, + -0.08406590286282509, + -0.08185425650573981, + -0.07970056398334964, + -0.07760333189631469, + -0.07556110395963221, + -0.07357246016183508, + -0.07163601593703647, + -0.06975042135033922, + -0.0679143602969991, + -0.0661265497156274, + -0.06438573881563074, + -0.06269070831901438, + -0.06104026971661516, + -0.05943326453878117, + -0.057868563640474484, + -0.05634506650073784, + -0.054861700536439253, + -0.05341742043018448, + -0.05201120747226669, + -0.050642068916508104, + -0.04930903734983359, + -0.048011170075405125, + -0.04674754850913802, + -0.0455172775894106, + -0.044319485199774016, + -0.043153321604464195, + -0.042017958896512754, + -0.040912590458252296, + -0.039836430434007954, + -0.03878871321476648, + -0.037768692934612484, + -0.036775642978721226, + -0.03580885550269736, + -0.034867640963048986, + -0.03395132765858745, + -0.033059261282543835, + -0.032190804485194686, + -0.03134533644679053, + -0.030522252460582638, + -0.02972096352574507, + -0.028940895949991368, + -0.02818149096168674, + -0.027442204331259393, + -0.02672250600171652, + -0.026021879728073394, + -0.025339822725505747, + -0.02467584532603903, + -0.024029470643590137, + -0.02340023424718015, + -0.022787683842139302, + -0.02219137895912834, + -0.02161089065080258, + -0.02104580119594888, + -0.020495703810927413, + -0.0199602023682537, + -0.019438911122159196, + -0.018931454440971062, + -0.01843746654615516, + -0.017956591257868897, + -0.01748848174687343, + -0.0170328002926575, + -0.01658921804762812, + -0.016157414807226052, + -0.015737078785826728, + -0.015327906398290177, + -0.014929602047025984, + -0.014541877914442475, + -0.014164453760651364, + -0.013797056726302336, + -0.013439421140424299, + -0.013091288333152637, + -0.012752406453224672, + -0.012422530290127552, + -0.012101421100785678, + -0.011788846440677056, + -0.011484579999270383, + -0.011188401439677038, + -0.010900096242414547, + -0.010619455553180338, + -0.010346276034536857, + -0.010080359721411351, + -0.009821513880315862, + -0.009569550872195054, + -0.009324288018811576, + -0.009085547472580866, + -0.008853156089769231, + -0.008626945306971029, + -0.008406751020782765, + -0.008192413470593891, + -0.007983777124415878, + -0.007780690567672994, + -0.007583006394880148, + -0.007390581104134756, + -0.007203274994351458, + -0.007020952065170169, + -0.006843479919469591, + -0.006670729668419993, + -0.0065025758390106145, + -0.006338896283988653, + -0.006179572094148321, + -0.006024487512909897, + -0.00587352985313026, + -0.005726589416087723, + -0.005583559412585493, + -0.005444335886119337, + -0.005308817638056472, + -0.005176906154773949, + -0.005048505536706111, + -0.00492352242925192, + -0.004801865955494234, + -0.004683447650684243, + -0.004568181398445432, + -0.004455983368652725, + -0.00434677195694332, + -0.004240467725817082, + -0.004136993347285229 + ] + }, + "t": [ + 0.0, + 0.025, + 0.05, + 0.075, + 0.09999999999999999, + 0.12499999999999999, + 0.15, + 0.17500000000000002, + 0.20000000000000004, + 0.22500000000000006, + 0.25000000000000006, + 0.2750000000000001, + 0.3000000000000001, + 0.3250000000000001, + 0.35000000000000014, + 0.37500000000000017, + 0.4000000000000002, + 0.4250000000000002, + 0.45000000000000023, + 0.47500000000000026, + 0.5000000000000002, + 
0.5250000000000001, + 0.55, + 0.575, + 0.5999999999999999, + 0.6249999999999998, + 0.6499999999999997, + 0.6749999999999996, + 0.6999999999999995, + 0.7249999999999994, + 0.7499999999999993, + 0.7749999999999992, + 0.7999999999999992, + 0.8249999999999991, + 0.849999999999999, + 0.8749999999999989, + 0.8999999999999988, + 0.9249999999999987, + 0.9499999999999986, + 0.9749999999999985, + 0.9999999999999984, + 1.0249999999999984, + 1.0499999999999983, + 1.0749999999999982, + 1.099999999999998, + 1.124999999999998, + 1.149999999999998, + 1.1749999999999978, + 1.1999999999999977, + 1.2249999999999976, + 1.2499999999999976, + 1.2749999999999975, + 1.2999999999999974, + 1.3249999999999973, + 1.3499999999999972, + 1.3749999999999971, + 1.399999999999997, + 1.424999999999997, + 1.4499999999999968, + 1.4749999999999968, + 1.4999999999999967, + 1.5249999999999966, + 1.5499999999999965, + 1.5749999999999964, + 1.5999999999999963, + 1.6249999999999962, + 1.6499999999999961, + 1.674999999999996, + 1.699999999999996, + 1.7249999999999959, + 1.7499999999999958, + 1.7749999999999957, + 1.7999999999999956, + 1.8249999999999955, + 1.8499999999999954, + 1.8749999999999953, + 1.8999999999999952, + 1.9249999999999952, + 1.949999999999995, + 1.974999999999995, + 1.999999999999995, + 2.024999999999995, + 2.0499999999999954, + 2.0749999999999957, + 2.099999999999996, + 2.1249999999999964, + 2.149999999999997, + 2.174999999999997, + 2.1999999999999975, + 2.224999999999998, + 2.2499999999999982, + 2.2749999999999986, + 2.299999999999999, + 2.3249999999999993, + 2.3499999999999996, + 2.375, + 2.4000000000000004, + 2.4250000000000007, + 2.450000000000001, + 2.4750000000000014, + 2.5000000000000018, + 2.525000000000002, + 2.5500000000000025, + 2.575000000000003, + 2.600000000000003, + 2.6250000000000036, + 2.650000000000004, + 2.6750000000000043, + 2.7000000000000046, + 2.725000000000005, + 2.7500000000000053, + 2.7750000000000057, + 2.800000000000006, + 2.8250000000000064, + 2.8500000000000068, + 2.875000000000007, + 2.9000000000000075, + 2.925000000000008, + 2.950000000000008, + 2.9750000000000085, + 3.000000000000009, + 3.0250000000000092, + 3.0500000000000096, + 3.07500000000001, + 3.1000000000000103, + 3.1250000000000107, + 3.150000000000011, + 3.1750000000000114, + 3.2000000000000117, + 3.225000000000012, + 3.2500000000000124, + 3.275000000000013, + 3.300000000000013, + 3.3250000000000135, + 3.350000000000014, + 3.375000000000014, + 3.4000000000000146, + 3.425000000000015, + 3.4500000000000153, + 3.4750000000000156, + 3.500000000000016, + 3.5250000000000163, + 3.5500000000000167, + 3.575000000000017, + 3.6000000000000174, + 3.6250000000000178, + 3.650000000000018, + 3.6750000000000185, + 3.700000000000019, + 3.725000000000019, + 3.7500000000000195, + 3.77500000000002, + 3.8000000000000203, + 3.8250000000000206, + 3.850000000000021, + 3.8750000000000213, + 3.9000000000000217, + 3.925000000000022, + 3.9500000000000224, + 3.9750000000000227, + 4.000000000000023, + 4.0250000000000234, + 4.050000000000024, + 4.075000000000024, + 4.1000000000000245, + 4.125000000000025, + 4.150000000000025, + 4.175000000000026, + 4.200000000000026, + 4.225000000000026, + 4.250000000000027, + 4.275000000000027, + 4.300000000000027, + 4.325000000000028, + 4.350000000000028, + 4.375000000000028, + 4.400000000000029, + 4.425000000000029, + 4.4500000000000295, + 4.47500000000003, + 4.50000000000003, + 4.5250000000000306, + 4.550000000000031, + 4.575000000000031, + 4.600000000000032, + 4.625000000000032, + 4.650000000000032, + 
4.675000000000033, + 4.700000000000033, + 4.725000000000033, + 4.750000000000034, + 4.775000000000034, + 4.8000000000000345, + 4.825000000000035, + 4.850000000000035, + 4.8750000000000355, + 4.900000000000036, + 4.925000000000036, + 4.950000000000037, + 4.975000000000037, + 5.000000000000037 + ] + } +} diff --git a/test/pytest/test_nrntest_thread.py b/test/pytest/test_nrntest_thread.py new file mode 100644 index 0000000000..c86d5ca5fb --- /dev/null +++ b/test/pytest/test_nrntest_thread.py @@ -0,0 +1,116 @@ +""" +Tests that used to live in the thread/ subdirectory of the +https://github.com/neuronsimulator/nrntest repository +""" +import os +import pytest +from neuron import h +from neuron.tests.utils import ( + num_threads, + parallel_context, +) +from neuron.tests.utils.checkresult import Chk + + +@pytest.fixture(scope="module") +def chk(): + """Manage access to JSON reference data.""" + dir_path = os.path.dirname(os.path.realpath(__file__)) + checker = Chk(os.path.join(dir_path, "test_nrntest_thread.json")) + yield checker + # Save results to disk if they've changed; this is called after all tests + # using chk have executed + checker.save() + + +class Cell: + def __init__(self, id, ncell): + self.id = id + self.soma = h.Section(name="soma", cell=self) + self.soma.pt3dclear() + self.soma.pt3dadd(0, 0, 0, 1) + self.soma.pt3dadd(15, 0, 0, 1) + self.soma.L = self.soma.diam = 5.6419 + self.soma.insert("MCna") + for seg in self.soma: + seg.MCna.gnabar = 0.12 + seg.MCna.lp = 1.9 + seg.MCna.ml = 0.75 + seg.MCna.nm = 0.3 + self.sc = h.SEClamp(self.soma(0.5)) + self.sc.dur1 = 100 + self.sc.amp1 = (100 * id / ncell) - 50 + self.ina_vec = h.Vector() + self.ina_vec.record(self.soma(0.5)._ref_ina) + self.tv = h.Vector() + self.tv.record(h._ref_t) + + def __str__(self): + return "Cell[{:d}]".format(self.id) + + def data(self): + return {"ina": list(self.ina_vec), "t": list(self.tv)} + + +# TODO: fix coreneuron compilation and execution of this test, see +# https://github.com/neuronsimulator/nrn/issues/2397. +simulators = ["neuron"] + + +@pytest.mark.parametrize("simulator", simulators) +@pytest.mark.parametrize("threads", [1, 3]) +def test_mcna(chk, simulator, threads): + """ + Derived from nrntest/thread/mcna.hoc, which used to be run with neurondemo. + Old comment "test GLOBAL counter". + """ + tstop = 5 # ms + ncell = 10 + cells = [Cell(id, ncell) for id in range(ncell)] + with parallel_context() as pc, num_threads(pc, threads=threads): + pc.set_maxstep(10) + h.finitialize() + # the nrn_cur kernel gets called once in finitialize, and it calls the + # code that increments cnt1 twice + assert h.cnt1_MCna == 2 * ncell + assert h.cnt2_MCna == 3 + pc.psolve(tstop) + time_steps = round(tstop / h.dt) + assert h.cnt1_MCna == 2 * ncell * (time_steps + 1) # +1 b/c of finitialize + assert h.cnt2_MCna == 2 * ncell * time_steps + 3 + t_vector = None + model_data = {} + cell_names = set() + for n, cell in enumerate(cells): + cell_data = cell.data() + # The time vector should be identical for all cells + cell_times = cell_data.pop("t") + assert t_vector is None or t_vector == cell_times + t_vector = cell_times + # The other data should vary across cells; just check first/last + if n == 0 or n == ncell - 1: + model_data[str(cell)] = cell_data + cell_names.add(str(cell)) + model_data["t"] = t_vector + # Make sure the whole model is deleted. If this test breaks in future with + # mismatches in cnt1 between the first execution (e.g. 1 thread) and later + # executions (e.g. 
3 threads) then that might be because this has broken + # and the later executions include relics of the earlier ones. + del cell, cells + ref_data = chk.get("mcna", None) + if ref_data is None: # pragma: no cover + # bootstrapping + chk("mcna", model_data) + return + # Compare this run to the reference data. + assert model_data["t"] == ref_data["t"] + for cell_name in cell_names: + assert model_data[cell_name]["ina"] == pytest.approx( + ref_data[cell_name]["ina"], abs=1e-15, rel=5e-10 + ) + + +if __name__ == "__main__": + # python test_nrntest_thread.py will run all the tests in this file + # e.g. __file__ --> __file__ + "::test_mcna" would just run test_mcna + pytest.main([__file__]) diff --git a/test/pynrn/follower.mod b/test/pytest_coreneuron/follower.mod similarity index 100% rename from test/pynrn/follower.mod rename to test/pytest_coreneuron/follower.mod diff --git a/test/pynrn/gap.mod b/test/pytest_coreneuron/gap.mod similarity index 100% rename from test/pynrn/gap.mod rename to test/pytest_coreneuron/gap.mod diff --git a/test/pynrn/gui_pycallobject.py b/test/pytest_coreneuron/gui_pycallobject.py similarity index 100% rename from test/pynrn/gui_pycallobject.py rename to test/pytest_coreneuron/gui_pycallobject.py diff --git a/test/pytest_coreneuron/name_clashes.mod b/test/pytest_coreneuron/name_clashes.mod new file mode 100644 index 0000000000..c8af945068 --- /dev/null +++ b/test/pytest_coreneuron/name_clashes.mod @@ -0,0 +1,38 @@ +: This mechanism is intended to check for issues in .mod -> .cpp translation +NEURON { + SUFFIX NameClashes + RANGE cache, container, data_handle, detail, ends_with_underscore_, field_index, generic_data_handle, legacy, mechanism, model_sorted_token, neuron, nrn, std + THREADSAFE +} + +ASSIGNED { + cache + container + data_handle + detail + ends_with_underscore_ + field_index + generic_data_handle + legacy + mechanism + model_sorted_token + neuron + nrn + std +} + +BREAKPOINT { + cache = 1 + container = 1 + data_handle = 1 + detail = 1 + ends_with_underscore_ = 1 + field_index = 1 + generic_data_handle = 1 + legacy = 1 + mechanism = 1 + model_sorted_token = 1 + neuron = 1 + nrn = 1 + std = 1 +} diff --git a/test/pynrn/run_pytest.py b/test/pytest_coreneuron/run_pytest.py similarity index 100% rename from test/pynrn/run_pytest.py rename to test/pytest_coreneuron/run_pytest.py diff --git a/test/pynrn/test_a_neuronoptions.py b/test/pytest_coreneuron/test_a_neuronoptions.py similarity index 100% rename from test/pynrn/test_a_neuronoptions.py rename to test/pytest_coreneuron/test_a_neuronoptions.py diff --git a/test/pynrn/test_basic.py b/test/pytest_coreneuron/test_basic.py similarity index 95% rename from test/pynrn/test_basic.py rename to test/pytest_coreneuron/test_basic.py index 3349f89d49..5ec4fe01e1 100644 --- a/test/pynrn/test_basic.py +++ b/test/pytest_coreneuron/test_basic.py @@ -263,7 +263,13 @@ def test_deleted_sec(): # for valid section first. words = str(m).split() print("m is " + words[4] + "." + words[2]) - expect_err("m()") + if "name" != words[2]: + expect_err("m()") + + # Mere printing of an invalid object is not supposed to be an error. 
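+    # (the assertions below additionally check the string representations of these stale references)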
+ print(s) + print(seg) + print(mech) assert str(s) == "" assert str(seg) == "" @@ -384,8 +390,9 @@ def test_nosection(): def test_nrn_mallinfo(): - # figure out if ASan was enabled, see comment in unit_test.cpp - if "address" in config.arguments["NRN_SANITIZERS"]: + # figure out if ASan or TSan was enabled, see comment in unit_test.cpp + sanitizers = config.arguments["NRN_SANITIZERS"] + if "address" in sanitizers or "thread" in sanitizers: print("Skipping nrn_mallinfo checks because ASan was enabled") return assert h.nrn_mallinfo(0) > 0 @@ -395,7 +402,7 @@ def test_errorcode(): import os, sys, subprocess process = subprocess.run('nrniv -c "1/0"', shell=True) - assert process.returncode > 0 + assert process.returncode != 0 exe = os.environ.get("NRN_PYTHON_EXECUTABLE", sys.executable) env = os.environ.copy() @@ -408,7 +415,7 @@ def test_errorcode(): process = subprocess.run( [exe, "-c", "from neuron import h; h.sqrt(-1)"], env=env, shell=False ) - assert process.returncode > 0 + assert process.returncode != 0 def test_hocObj_error_in_construction(): @@ -448,8 +455,8 @@ def test_help(): assert h.Vector().to_python.__doc__.startswith( "Syntax:\n ``pythonlist = vec.to_python()" ) - assert h.Vector().__doc__.startswith("This class was imple") - assert h.Vector.__doc__.startswith("This class was imple") + assert h.Vector().__doc__.startswith("class neuron.hoc.HocObject") + assert h.Vector.__doc__.startswith("class neuron.hoc.HocObject") assert h.finitialize.__doc__.startswith("Syntax:\n ``h.finiti") assert h.__doc__.startswith("\n\nneuron.h\n====") diff --git a/test/pynrn/test_bbss.py b/test/pytest_coreneuron/test_bbss.py similarity index 100% rename from test/pynrn/test_bbss.py rename to test/pytest_coreneuron/test_bbss.py diff --git a/test/pynrn/test_fast_imem.py b/test/pytest_coreneuron/test_fast_imem.py similarity index 67% rename from test/pynrn/test_fast_imem.py rename to test/pytest_coreneuron/test_fast_imem.py index 4413635b86..6570cc9ea0 100644 --- a/test/pynrn/test_fast_imem.py +++ b/test/pytest_coreneuron/test_fast_imem.py @@ -5,8 +5,12 @@ import os from neuron import config, gui, h - -h.load_file("stdrun.hoc") # for h.cvode_active +from neuron.tests.utils import ( + cvode_enabled, + fast_imem, + parallel_context, + num_threads, +) class Cell: @@ -195,17 +199,14 @@ def run(tstop, ics, tolerance): def test_fastimem(): cells = [Cell(id, 10) for id in range(2)] # h.topology() - cvode = h.CVode() ics = h.List("IClamp") syns = h.List("ExpSyn") - cvode.use_fast_imem(1) - h.finitialize(-65) - run(1.0, ics, 1e-13) - total_syn_g(syns) - h.cvode_active(1) - run(1.0, ics, 1e-12) - cvode.use_fast_imem(0) - h.cvode_active(0) + with fast_imem(True): + h.finitialize(-65) + run(1.0, ics, 1e-13) + total_syn_g(syns) + with cvode_enabled(True): + run(1.0, ics, 1e-12) def coreneuron_available(): @@ -213,7 +214,6 @@ def coreneuron_available(): return False # But can it be loaded? 
cvode = h.CVode() - cvode.cache_efficient(1) pc = h.ParallelContext() h.finitialize() result = 0 @@ -228,59 +228,11 @@ def coreneuron_available(): except Exception as e: pass sys.stderr = original_stderr - cvode.cache_efficient(0) return result -def print_fast_imem(): - ix = h.Vector() - imem = h.Vector() - for sec in h.allsec(): - for seg in sec.allseg(): - if seg.x == 0.0 and sec.parentseg() is not None: - continue # don't count twice - ix.append(seg.node_index()) - imem.append(seg.i_membrane_) - si = ix.sortindex() - ix.index(ix.c(), si) - imem.index(imem.c(), si) - f = open("fastimem.nrn", "w") - f.write("%d\n" % int(ix.size())) - for i, x in enumerate(imem): - assert i == int(ix[i]) - f.write("%d %.20g\n" % (i, x)) - f.close() - - def test_fastimem_corenrn(): - pc = h.ParallelContext() ncell = 5 - cvode = h.CVode() - cvode.cache_efficient(0) - # If the gui has been imported (possibly by another test) then there is a - # thread asynchronously calling process_events -- make sure that doesn't - # happen partway through creating cells - with gui.disabled(): - cells = [Cell(id, 10) for id in range(ncell)] - cvode.use_fast_imem(1) - - # When nthread changes, or internal model data needs to be reallocated, - # pointers need to be updated. Use of i_membrane_ requires that the user - # update the pointers to i_membrane_. - imem = [] - - def imem_update(): - nonlocal imem - with gui.disabled(): - imem = [ - h.Vector().record(cell.ics[0], cell.secs[3](0.5)._ref_i_membrane_) - for cell in cells - ] - - imem_updater = h.PtrVector(1) - imem_updater.ptr_update_callback(imem_update) - imem_update() - tstop = 1.0 def init_v(): @@ -293,21 +245,7 @@ def init_v(): seg.v = -65.0 + r.uniform(0, 5) h.finitialize() - def run(tstop): - pc.set_maxstep(10) - with gui.disabled(): - init_v() - pc.psolve(tstop) - - # standard - run(tstop) - imem_std = [vec.c() for vec in imem] - max_abs_imem = [max(abs(x) for x in vec) for vec in imem_std] - if not all(x > 0 for x in max_abs_imem): - print(max_abs_imem, flush=True) - assert False - - def compare(name, rel_tol=0.0): + def compare(name, imem, imem_std, rel_tol=0.0): print("Comparing {}".format(name), flush=True) keep_going = True for i, (ref_vec, new_vec) in enumerate(zip(imem_std, imem)): @@ -334,69 +272,72 @@ def compare(name, rel_tol=0.0): new_vec.resize(0) assert keep_going - # null comparison with the side effect of clearing imem - compare("cache inefficient NEURON") - - cvode.cache_efficient(1) - for nth in [2, 1]: # leaves us in 1-threaded mode - pc.nthread(nth) - run(tstop) - compare("cache efficient NEURON with {} threads".format(nth)) - - # This leaves nthread=1, other values cause errors in the CoreNEURON tests below - if coreneuron_available(): - cvode.cache_efficient(1) # coreneuron_available() resets this - from neuron import coreneuron - - coreneuron.enable = True - coreneuron.verbose = 0 - coreneuron.gpu = strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false")) - tolerance = 5e-11 - run(tstop) - compare("CoreNEURON online mode", rel_tol=tolerance) - coreneuron.enable = False - - tvec = h.Vector().record(h._ref_t) - init_v() - while h.t < tstop - h.dt / 2: - dt_above = 1.1 * h.dt # comfortably above dt to avoid 0 step advance - coreneuron.enable = True - told = h.t - pc.psolve(h.t + dt_above) - assert h.t > told - coreneuron.enable = False - pc.psolve(h.t + dt_above) - compare("Checking i_membrane_ trajectories", rel_tol=tolerance) - - print( - "For file mode (offline) coreneuron comparison of i_membrane_ initialization", - flush=True, - ) - - 
init_v() - print_fast_imem() - - # The cells must have gids. - for i, cell in enumerate(cells): - pc.set_gid2node(i, pc.id()) - sec = cell.secs[0] - pc.cell(i, h.NetCon(sec(0.5)._ref_v, None, sec=sec)) - - # Write the data files - init_v() - pc.nrncore_write("./corenrn_data") - - # args needed for offline run of coreneuron - coreneuron.enable = True - coreneuron.file_mode = True - - arg = coreneuron.nrncore_arg(tstop) - coreneuron.enable = False - pc.gid_clear() - print(arg) + # If the gui has been imported (possibly by another test) then there is a + # thread asynchronously calling process_events -- make sure that doesn't + # happen partway through creating cells + with gui.disabled(), parallel_context() as pc, fast_imem(True): + cells = [Cell(id, 10) for id in range(ncell)] + # Set up recording of i_membrane_; now that this is data_handle-based there is no need to + # use a pointer-updating callback. + imem = [ + h.Vector().record(cell.ics[0], cell.secs[3](0.5)._ref_i_membrane_) + for cell in cells + ] + + def run(tstop): + pc.set_maxstep(10) + init_v() + pc.psolve(tstop) - del imem_updater, imem - cvode.use_fast_imem(0) + # standard run with 1 thread + with num_threads(pc, 1): + run(tstop) + # save that as the reference + imem_std = [vec.c() for vec in imem] + + # basic check that the reference is not obviously wrong + max_abs_imem = [max(abs(x) for x in vec) for vec in imem_std] + assert all(x > 0 for x in max_abs_imem) + + def cmp(name, **kwargs): + compare(name, imem, imem_std, **kwargs) + + # null comparison with the side effect of clearing imem + cmp("cache efficient NEURON with 1 thread") + + # compare with 2 threads + with num_threads(pc, 2): + run(tstop) + cmp("cache efficient NEURON with 2 threads") + + if coreneuron_available(): + from neuron import coreneuron + + enable_gpu = strtobool(os.environ.get("CORENRN_ENABLE_GPU", "false")) + with coreneuron(verbose=0, gpu=enable_gpu): + tolerance = 5e-11 + with coreneuron(enable=True): + run(tstop) + cmp("CoreNEURON online mode", rel_tol=tolerance) + + init_v() + while h.t < tstop - h.dt / 2: + dt_above = ( + 1.1 * h.dt + ) # comfortably above dt to avoid 0 step advance + with coreneuron(enable=True): + told = h.t + pc.psolve(h.t + dt_above) + assert h.t > told + pc.psolve(h.t + dt_above) + cmp("Checking i_membrane_ trajectories", rel_tol=tolerance) + # olupton 2023-06-19: removed some logic to dump a file of fast imem values from + # NEURON that could in principle be compared offline to a similar file produced by + # a patched version of CoreNEURON. + # See https://github.com/BlueBrain/CoreNeuron/pull/630. It seems that this test was + # never automated, and it is not straightforward to do so. + + del imem if __name__ == "__main__": diff --git a/test/pynrn/test_hoc_po.py b/test/pytest_coreneuron/test_hoc_po.py similarity index 93% rename from test/pynrn/test_hoc_po.py rename to test/pytest_coreneuron/test_hoc_po.py index abb1f6b8b9..c8ae8a3085 100644 --- a/test/pynrn/test_hoc_po.py +++ b/test/pytest_coreneuron/test_hoc_po.py @@ -1,10 +1,12 @@ # Test proper management of HOC PythonObject (no memory leaks) # Expanded for more testing and coverage of BBSaveState. - +from glob import iglob from neuron import h from neuron.units import ms, mV import numpy as np +import os +import shutil import subprocess @@ -165,45 +167,36 @@ def test_2(): # BBSaveState for mixed (hoc and python cells) Ring. 
# some helpers copied from ../parallel_tests/test_bas.py -def subprocess_run(cmd): - subprocess.run(cmd, shell=True).check_returncode() - - def rmfiles(): if pc.id() == 0: - subprocess_run("rm -r -f bbss_out") - subprocess_run("rm -r -f in") - subprocess_run("rm -r -f binbufout") - subprocess_run("rm -r -f binbufin") - subprocess_run("mkdir binbufout") - subprocess_run("rm -f allcell-bbss.dat") + shutil.rmtree("bbss_out", ignore_errors=True) + shutil.rmtree("in", ignore_errors=True) + shutil.rmtree("binbufout", ignore_errors=True) + shutil.rmtree("binbufin", ignore_errors=True) + os.mkdir("binbufout") + try: + os.unlink("allcell-bbss.dat") + except FileNotFoundError: + pass pc.barrier() def cp_out_to_in(): - out2in_sh = r""" -#!/usr/bin/env bash -out=bbss_out -rm -r -f in -mkdir in -cat $out/tmp > in/tmp -for f in $out/tmp.*.* ; do - i=`echo "$f" | sed 's/.*tmp\.\([0-9]*\)\..*/\1/'` - if test ! -f in/tmp.$i ; then - cnt=`ls $out/tmp.$i.* | wc -l` - echo $cnt > in/tmp.$i - cat $out/tmp.$i.* >> in/tmp.$i - fi -done -""" if pc.id() == 0: - import tempfile - - with tempfile.NamedTemporaryFile("w") as scriptfile: - scriptfile.write(out2in_sh) - scriptfile.flush() - subprocess.check_call(["/bin/bash", scriptfile.name]) - + shutil.rmtree("in", ignore_errors=True) + os.mkdir("in") + shutil.copyfile("bbss_out/tmp", "in/tmp") + for f in iglob("bbss_out/tmp.*.*"): + # Get A from bbss_out/tmp.A.B + i = os.path.basename(f).split(".", 2)[1] + if not os.path.isfile("in/tmp.{}".format(i)): + files = list(iglob("bbss_out/tmp.{}.*".format(i))) + cnt = len(files) + with open("in/tmp.{}".format(i), "w") as ofile: + ofile.write("{}\n".format(cnt)) + for fname in files: + with open(fname, "r") as ifile: + shutil.copyfileobj(ifile, ofile) pc.barrier() @@ -228,8 +221,7 @@ def prun(tstop, mode=None): bbss = h.BBSaveState() bbss.restore_test() elif mode == "restore_test_bin": - subprocess_run("mkdir binbufin") - subprocess_run("cp binbufout/* binbufin") + shutil.copytree("binbufout", "binbufin") bbss = h.BBSaveState() bbss.restore_test_bin() elif mode == "restore": diff --git a/test/pytest_coreneuron/test_inheritance.py b/test/pytest_coreneuron/test_inheritance.py new file mode 100644 index 0000000000..1fb524e32a --- /dev/null +++ b/test/pytest_coreneuron/test_inheritance.py @@ -0,0 +1,21 @@ +import neuron + + +def test_builtin_templates(): + assert isinstance(neuron.hoc.Vector, type), "Type instance expected for hoc.Vector" + assert isinstance(neuron.hoc.CVode, type), "Type instance expected for hoc.CVode" + assert isinstance(neuron.hoc.List, type), "Type instance expected for hoc.List" + assert isinstance(neuron.hoc.Deck, type), "Type instance expected for hoc.Deck" + + assert neuron.h.Vector is neuron.hoc.Vector, "Redirect to hoc.Vector failed" + assert neuron.h.Deck is neuron.hoc.Deck, "Redirect to hoc.Deck failed" + assert neuron.h.List is neuron.hoc.List, "Redirect to hoc.List failed" + + +def test_inheritance_builtin(): + v = neuron.h.Vector() + assert isinstance(v, neuron.hoc.HocObject), "hoc.HocObject should be parent." 
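+    # A wrapped HOC object such as h.Vector() is expected to be an instance of both its
+    # concrete hoc.Vector type and the hoc.HocObject base, but not of unrelated built-in
+    # templates such as hoc.Deck, as the remaining assertions check.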
+ assert isinstance(v, neuron.hoc.Vector), "Should be instance of its class" + assert not isinstance(v, neuron.hoc.Deck), "Should not be instance of another class" + assert type(v) is neuron.hoc.Vector, "Type should be class" + assert type(v) is not neuron.hoc.Deck, "Type should not be another class" diff --git a/test/pytest_coreneuron/test_loadbal.py b/test/pytest_coreneuron/test_loadbal.py new file mode 100644 index 0000000000..4af57e9708 --- /dev/null +++ b/test/pytest_coreneuron/test_loadbal.py @@ -0,0 +1,18 @@ +from neuron import h +import sys + + +def test_is_ion(): + mt = h.MechanismType(0) + + # check for non-ion type + mt.select("hh") + assert mt.is_ion() == False + + # check for ion type + mt.select("k_ion") + assert mt.is_ion() == True + + +if __name__ == "__main__": + test_is_ion() diff --git a/test/pytest_coreneuron/test_memory_usage.py b/test/pytest_coreneuron/test_memory_usage.py new file mode 100644 index 0000000000..893a2d4e93 --- /dev/null +++ b/test/pytest_coreneuron/test_memory_usage.py @@ -0,0 +1,9 @@ +from neuron import h + + +def test_memory_usage_hoc(): + assert h("print_local_memory_usage()") + + +def test_memory_usage(): + h.print_local_memory_usage() diff --git a/test/pynrn/test_multigid.py b/test/pytest_coreneuron/test_multigid.py similarity index 92% rename from test/pynrn/test_multigid.py rename to test/pytest_coreneuron/test_multigid.py index 6638192cf8..18e66efd53 100644 --- a/test/pynrn/test_multigid.py +++ b/test/pytest_coreneuron/test_multigid.py @@ -15,7 +15,6 @@ def coreneuron_available(): return False # But can it be loaded? cvode = h.CVode() - cvode.cache_efficient(1) pc = h.ParallelContext() h.finitialize() result = 0 @@ -30,7 +29,6 @@ def coreneuron_available(): except Exception as e: pass sys.stderr = original_stderr - cvode.cache_efficient(0) return result @@ -101,11 +99,9 @@ def test_multigid(): if cn_avail: coreneuron.enable = True coreneuron.verbose = 0 - h.CVode().cache_efficient(1) run(10.0) raster_eq(std, net.raster) coreneuron.enable = False - h.CVode().cache_efficient(0) print("test_multigid coreneuron success") s = None @@ -121,10 +117,8 @@ def test_multigid(): pc.set_gid2node(10003, pc.id()) pc.cell(10003, h.NetCon(s(0.5).hh._ref_m, None, sec=s), 0) coreneuron.enable = True - h.CVode().cache_efficient(1) expect_err("run(10)") coreneuron.enable = False - h.CVode().cache_efficient(0) pc.gid_clear() del s, net, std @@ -138,10 +132,8 @@ def test_nogid(): nc = h.NetCon(s(0.5)._ref_v, syn, sec=s) if cn_avail: coreneuron.enable = True - h.CVode().cache_efficient(1) expect_err("run(10)") coreneuron.enable = False - h.CVode().cache_efficient(0) pc.gid_clear() del nc, syn, s, net locals() diff --git a/test/pynrn/test_netpar.py b/test/pytest_coreneuron/test_netpar.py similarity index 100% rename from test/pynrn/test_netpar.py rename to test/pytest_coreneuron/test_netpar.py diff --git a/test/pynrn/test_nlayer.py b/test/pytest_coreneuron/test_nlayer.py similarity index 100% rename from test/pynrn/test_nlayer.py rename to test/pytest_coreneuron/test_nlayer.py diff --git a/test/pynrn/test_nrnste.py b/test/pytest_coreneuron/test_nrnste.py similarity index 100% rename from test/pynrn/test_nrnste.py rename to test/pytest_coreneuron/test_nrnste.py diff --git a/test/pynrn/test_nrntest_fast.json b/test/pytest_coreneuron/test_nrntest_fast.json similarity index 100% rename from test/pynrn/test_nrntest_fast.json rename to test/pytest_coreneuron/test_nrntest_fast.json diff --git a/test/pynrn/test_nrntest_fast.py b/test/pytest_coreneuron/test_nrntest_fast.py 
similarity index 100% rename from test/pynrn/test_nrntest_fast.py rename to test/pytest_coreneuron/test_nrntest_fast.py diff --git a/test/pynrn/test_partrans.py b/test/pytest_coreneuron/test_partrans.py similarity index 97% rename from test/pynrn/test_partrans.py rename to test/pytest_coreneuron/test_partrans.py index 8ee420565d..b4aa11e136 100644 --- a/test/pynrn/test_partrans.py +++ b/test/pytest_coreneuron/test_partrans.py @@ -341,10 +341,8 @@ def test_partrans(): transfer1() # following is a bit tricky and need some user help in the docs. - # cannot be cache_efficient if general sparse matrix solver in effect. cvode = h.CVode() assert cvode.use_mxb(0) == 0 - assert cvode.cache_efficient(1) == 1 pc.setup_transfer() h.finitialize(-65) @@ -364,7 +362,9 @@ def test_partrans(): teardown() del s - # There are single thread circumstances where target POINT_PROCESS is needed + # There used to be single thread circumstances where target POINT_PROCESS is needed + # With the new data_handle scheme, the pointer update that used to need the target + # POINT_PROCESS is no longer made. s = h.Section("dend") pc.set_gid2node(rank, rank) pc.cell(rank, h.NetCon(s(0.5)._ref_v, None, sec=s)) @@ -372,7 +372,7 @@ def test_partrans(): ic = h.IClamp(s(0.5)) pc.target_var(ic._ref_amp, rank) pc.setup_transfer() - expect_error(h.finitialize, (-65,)) + h.finitialize(-65) teardown() del ic, s diff --git a/test/pynrn/test_py2nrnstring.py b/test/pytest_coreneuron/test_py2nrnstring.py similarity index 100% rename from test/pynrn/test_py2nrnstring.py rename to test/pytest_coreneuron/test_py2nrnstring.py diff --git a/test/pynrn/test_pycallobject.py b/test/pytest_coreneuron/test_pycallobject.py similarity index 100% rename from test/pynrn/test_pycallobject.py rename to test/pytest_coreneuron/test_pycallobject.py diff --git a/test/pynrn/test_pyobj.py b/test/pytest_coreneuron/test_pyobj.py similarity index 62% rename from test/pynrn/test_pyobj.py rename to test/pytest_coreneuron/test_pyobj.py index cd4faff413..ad4f3332fa 100644 --- a/test/pynrn/test_pyobj.py +++ b/test/pytest_coreneuron/test_pyobj.py @@ -2,13 +2,20 @@ import pytest +def test_builtin(): + with pytest.raises(TypeError): + + class MyList(neuron.HocBaseObject, hoc_type=neuron.h.List): + pass + + def test_hocbase(): - class MyList(neuron.HocBaseObject, hoc_type=neuron.h.Vector): + class MyStim(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): pass - assert issubclass(MyList, neuron.hoc.HocObject) - assert issubclass(MyList, neuron.HocBaseObject) - assert MyList._hoc_type == neuron.h.Vector + assert issubclass(MyStim, neuron.hoc.HocObject) + assert issubclass(MyStim, neuron.HocBaseObject) + assert MyStim._hoc_type == neuron.h.NetStim def test_hoc_template_hclass(): @@ -64,25 +71,25 @@ def test_pyobj_constructor(): # Test that __new__ is required when __init__ is overridden with pytest.raises(TypeError): - class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List): - def __init__(self, first): + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): + def __init__(self, freq): super().__init__() self.append(first) - class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List): - def __new__(cls, first): + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): + def __new__(cls, freq): return super().__new__(cls) - def __init__(self, first): + def __init__(self, freq): super().__init__() - self.append(first) + self.interval = 1000 / freq - p = PyObj(neuron.h.List()) - assert p.count() == 1 + p = PyObj(4) + assert p.interval == 250 def test_pyobj_def(): - 
class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List): + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): def my_method(self, a): return a * 2 @@ -91,20 +98,29 @@ def my_method(self, a): def test_pyobj_overloading(): - class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List): - def append(self, i): - self.last_appended = i - return self.baseattr("append")(i) + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.PatternStim): + def play(self, i): + self.played = True + v = neuron.h.Vector([i]) + return self.baseattr("play")(v, v) + + p = PyObj() + p.play(2) + assert hasattr(p, "played") + + +@pytest.mark.xfail(reason="inf. recursion because baseattr finds Python attrs") +def test_bad_overload(): + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.PatternStim): + def not_on_base(self): + return p.baseattr("not_on_base")() p = PyObj() - p2 = PyObj() - assert p.append(p) == 1 - assert p.count() == 1 - assert p[0] == p + p.not_on_base() def test_pyobj_inheritance(): - class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.List): + class PyObj(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): pass class MyObj(PyObj): @@ -116,7 +132,7 @@ class MyObj2(PyObj): def __init__(self, arg): pass - class List(neuron.HocBaseObject, hoc_type=neuron.h.List): + class List(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): def __new__(cls, *args, **kwargs): super().__new__(cls) @@ -126,17 +142,17 @@ def __init__(self, *args): for arg in args: self.append(arg) - l = InitList(neuron.h.List(), neuron.h.List()) + l = InitList(neuron.h.NetStim(), neuron.h.NetStim()) def test_pyobj_composition(): - class A(neuron.HocBaseObject, hoc_type=neuron.h.List): + class A(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): pass - class B(neuron.HocBaseObject, hoc_type=neuron.h.List): + class B(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): pass - class C(neuron.HocBaseObject, hoc_type=neuron.h.Vector): + class C(neuron.HocBaseObject, hoc_type=neuron.h.ExpSyn): pass with pytest.raises(TypeError): @@ -147,7 +163,7 @@ class D(A, C): class E(A, B): pass - assert E._hoc_type == neuron.h.List + assert E._hoc_type == neuron.h.NetStim class PickleTest(neuron.HocBaseObject, hoc_type=neuron.h.NetStim): diff --git a/test/pynrn/test_swc.py b/test/pytest_coreneuron/test_swc.py similarity index 100% rename from test/pynrn/test_swc.py rename to test/pytest_coreneuron/test_swc.py diff --git a/test/pynrn/test_template_err.py b/test/pytest_coreneuron/test_template_err.py similarity index 100% rename from test/pynrn/test_template_err.py rename to test/pytest_coreneuron/test_template_err.py diff --git a/test/pynrn/test_vector_api.py b/test/pytest_coreneuron/test_vector_api.py similarity index 100% rename from test/pynrn/test_vector_api.py rename to test/pytest_coreneuron/test_vector_api.py diff --git a/test/pynrn/test_version_macros.py b/test/pytest_coreneuron/test_version_macros.py similarity index 97% rename from test/pynrn/test_version_macros.py rename to test/pytest_coreneuron/test_version_macros.py index f2374a91dd..545760f357 100644 --- a/test/pynrn/test_version_macros.py +++ b/test/pytest_coreneuron/test_version_macros.py @@ -20,7 +20,6 @@ def test_version_macros(): strtobool(os.environ.get("NRN_CORENEURON_ENABLE", "false")) ) coreneuron.verbose = True - h.CVode().cache_efficient(True) h.finitialize() pc.set_maxstep(10) pc.psolve(0.1) diff --git a/test/pynrn/test_zptrlist.py b/test/pytest_coreneuron/test_zptrlist.py similarity index 98% rename from test/pynrn/test_zptrlist.py rename to 
test/pytest_coreneuron/test_zptrlist.py index d00200b91f..3f5b11b4cf 100644 --- a/test/pynrn/test_zptrlist.py +++ b/test/pytest_coreneuron/test_zptrlist.py @@ -4,7 +4,6 @@ def test_random_play(): # for coverage of ptrlist changes #1815 cv = h.CVode() cv.active(0) - cv.cache_efficient(0) h.secondorder = 0 s = h.Section() s.L = 10 diff --git a/test/pynrn/unitstest.mod b/test/pytest_coreneuron/unitstest.mod similarity index 82% rename from test/pynrn/unitstest.mod rename to test/pytest_coreneuron/unitstest.mod index eb367faf04..2a6e09ad67 100644 --- a/test/pynrn/unitstest.mod +++ b/test/pytest_coreneuron/unitstest.mod @@ -1,6 +1,6 @@ NEURON { POINT_PROCESS UnitsTest - RANGE mole, e, faraday, planck, hbar, gasconst, avogadro, k + RANGE mole, e, faraday, planck, hbar, gasconst, gasconst_exact, avogadro, k RANGE erev, ghk USEION na READ ena WRITE ina } @@ -15,6 +15,7 @@ UNITS { h = (planck) (joule-sec) hb = (hbar) (joule-sec) R = (k-mole) (joule/degC) + Rexact = 8.313424 (joule/degC) boltzmann = (k) (joule/degC) (avogadro) = (mole) @@ -28,6 +29,7 @@ ASSIGNED { planck (joule-sec) hbar (joule-sec) gasconst (joule/degC) + gasconst_exact (joule/degC) avogadro (1) k (joule/degC) erev (mV) @@ -43,6 +45,7 @@ INITIAL { planck = h hbar = hb gasconst = R + gasconst_exact = Rexact avogadro = avo k = boltzmann erev = ena diff --git a/test/pynrn/version_macros.mod b/test/pytest_coreneuron/version_macros.mod similarity index 100% rename from test/pynrn/version_macros.mod rename to test/pytest_coreneuron/version_macros.mod diff --git a/test/ringtest/ring.hoc b/test/ringtest/ring.hoc index 8299b18742..1925a86100 100644 --- a/test/ringtest/ring.hoc +++ b/test/ringtest/ring.hoc @@ -107,7 +107,6 @@ spikerecord() // Simulation control ///////////////////// nthread = 2 -{cvode.cache_efficient(1)} // required for nrncore {pc.nthread(nthread, 0)} // lowest_level_load stores nthread groups of cells tstop = 100 diff --git a/test/rxd/3d/test_multigridding_allowed.py b/test/rxd/3d/test_multigridding_allowed.py index e6f868e032..45a0e87972 100644 --- a/test/rxd/3d/test_multigridding_allowed.py +++ b/test/rxd/3d/test_multigridding_allowed.py @@ -71,21 +71,21 @@ def should_work2(h, rxd): h.fadvance() -def test_no_overlap(neuron_instance): - h, rxd, data, save_path = neuron_instance +def test_no_overlap(neuron_nosave_instance): + h, rxd, save_path = neuron_nosave_instance should_work(h, rxd) -def test_neighbors_with_different_dx_fails(neuron_instance): - h, rxd, data, save_path = neuron_instance +def test_neighbors_with_different_dx_fails(neuron_nosave_instance): + h, rxd, save_path = neuron_nosave_instance expect_hocerr(should_not_work, (h, rxd)) -def test_overlapping_dx_fails(neuron_instance): - h, rxd, data, save_path = neuron_instance +def test_overlapping_dx_fails(neuron_nosave_instance): + h, rxd, save_path = neuron_nosave_instance expect_hocerr(should_not_work2, (h, rxd)) -def test_overlap_same_dx(neuron_instance): - h, rxd, data, save_path = neuron_instance +def test_overlap_same_dx(neuron_nosave_instance): + h, rxd, save_path = neuron_nosave_instance should_work2(h, rxd) diff --git a/test/rxd/3d/test_soma_outlines.py b/test/rxd/3d/test_soma_outlines.py index a35082022f..89607849d0 100644 --- a/test/rxd/3d/test_soma_outlines.py +++ b/test/rxd/3d/test_soma_outlines.py @@ -32,7 +32,7 @@ def __init__(self, shift=(0, 0, 0)): for i in range(sec.n3d()) ] sec.pt3dclear() - for (x, y, z, diam) in pts: + for x, y, z, diam in pts: sec.pt3dadd(x + sx, y + sy, z + sz, diam) yield (h, rxd, data, save_path, Cell) diff --git 
a/test/rxd/conftest.py b/test/rxd/conftest.py index 77d315f98d..1fbe8ee0cc 100644 --- a/test/rxd/conftest.py +++ b/test/rxd/conftest.py @@ -38,8 +38,6 @@ def neuron_nosave_instance(neuron_import): h.load_file("stdrun.hoc") h.load_file("import3d.hoc") - h.nrnunit_use_legacy(True) - # pytest fixtures at the function scope that require neuron_instance will go # out of scope after neuron_instance. So species, sections, etc. will go # out of scope after neuron_instance is torn down. @@ -67,11 +65,17 @@ def neuron_nosave_instance(neuron_import): s().__del__() gc.enable() rxd.region._all_regions = [] + rxd.rxd.node._states = numpy.array([]) + rxd.rxd.node._volumes = numpy.array([]) + rxd.rxd.node._surface_area = numpy.array([]) + rxd.rxd.node._diffs = numpy.array([]) + rxd.rxd.node._states = numpy.array([]) rxd.region._region_count = 0 rxd.region._c_region_lookup = None rxd.species._species_counts = 0 rxd.section1d._purge_cptrs() rxd.initializer.has_initialized = False + rxd.initializer.is_initializing = False rxd.rxd.free_conc_ptrs() rxd.rxd.free_curr_ptrs() rxd.rxd.rxd_include_node_flux1D(0, None, None, None) diff --git a/test/rxd/ecs/test_ecs_reinit.py b/test/rxd/ecs/test_ecs_reinit.py index 7b85fe48d5..5de8e36dfb 100644 --- a/test/rxd/ecs/test_ecs_reinit.py +++ b/test/rxd/ecs/test_ecs_reinit.py @@ -2,10 +2,10 @@ @pytest.fixture -def simple_model(neuron_instance): +def simple_model(neuron_nosave_instance): """A simple rxd model with species and extracellular regions.""" - h, rxd, data, save_path = neuron_instance + h, rxd, save_path = neuron_nosave_instance dend = h.Section(name="dend") dend.diam = 2 dend.nseg = 5 @@ -24,14 +24,14 @@ def simple_model(neuron_instance): paramB = rxd.Parameter([ecs], initial=0) decay = rxd.Rate(k, -0.1 * k) model = (dend, cyt, ecs, k, paramA, paramB, decay) - yield (neuron_instance, model) + yield (neuron_nosave_instance, model) def test_ecs_reinit(simple_model): """Test rxd.re_init updates extracellular node values from NEURON values""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, ecs, k, paramA, paramB, decay = model h.finitialize(-65) dend(0.2).ko = 0 @@ -44,8 +44,8 @@ def test_ecs_reinit_cvode(simple_model): """Test rxd.re_init updates extracellular node values from NEURON segments with CVode""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, ecs, k, paramA, paramB, decay = model h.CVode().active(True) h.finitialize(-65) diff --git a/test/rxd/test_currents.py b/test/rxd/test_currents.py index 4415fae8f0..32c052fbbe 100644 --- a/test/rxd/test_currents.py +++ b/test/rxd/test_currents.py @@ -1,5 +1,10 @@ import pytest from .testutils import compare_data, tol +from platform import platform + + +def applearm(): + return "macOS-" in platform() and "-arm64-" in platform() @pytest.fixture @@ -63,10 +68,7 @@ def test_currents(model_pump): neuron_instance, model = model_pump h, rxd, data, save_path = neuron_instance - # check changing the units after initialization - h.nrnunit_use_legacy(False) h.finitialize(-65) - h.nrnunit_use_legacy(True) h.continuerun(10) if not save_path: max_err = compare_data(data) @@ -78,11 +80,35 @@ def test_currents_cvode(model_pump): neuron_instance, model = model_pump h, rxd, data, save_path = neuron_instance - # check changing the units after initialization 
h.CVode().active(True) - h.nrnunit_use_legacy(False) h.finitialize(-65) - h.nrnunit_use_legacy(True) + h.continuerun(10) + if not save_path: + max_err = compare_data(data) + assert max_err < (1e-8 if applearm() else tol) + + +def test_currents_stucture_change(model_pump): + """Test currents generated by a Na/K-pump with change to structure""" + + neuron_instance, model = model_pump + h, rxd, data, save_path = neuron_instance + # check changing structure_change_cnt after initialization + h.finitialize(-65) + h.MechanismStandard("pas", 0) + h.continuerun(10) + if not save_path: + max_err = compare_data(data) + assert max_err < tol + + +def test_currents_model_change(model_pump): + """Test currents generated by a Na/K-pump with post-initialization changes to the model""" + neuron_instance, model = model_pump + h, rxd, data, save_path = neuron_instance + # check changing the model after initialization + h.finitialize(-65) + model[0].nseg = 11 h.continuerun(10) if not save_path: max_err = compare_data(data) diff --git a/test/rxd/test_nodelist.py b/test/rxd/test_nodelist.py new file mode 100644 index 0000000000..8011f76ff6 --- /dev/null +++ b/test/rxd/test_nodelist.py @@ -0,0 +1,90 @@ +def test_only_nodes(neuron_instance): + """Test to make sure node lists only contain nodes""" + + h, rxd, data, save_path = neuron_instance + + dend = h.Section("dend") + r = rxd.Region(h.allsec()) + hydrogen = rxd.Species(r, initial=1) + water = rxd.Species(r, initial=1) + + h.finitialize(-65) + + nodelist = hydrogen.nodes + + # test that should not work, so an append that succeeds is an error + + try: + nodelist.append(water.nodes) # append nodelist + raise Exception("should not get here") + except TypeError: + ... + + try: + nodelist.extend([1, 2, 3, water.nodes[0]]) # extend with non-nodes + raise Exception("should not get here") + except TypeError: + ... + + try: + nodelist[0] = 17 + raise Exception("should not get here") + except TypeError: + ... + + try: + nl = rxd.nodelist.NodeList( + [1, 2, 3, water.nodes[0]] + ) # create NodeList with non-nodes + raise Exception("should not get here") + except TypeError: + ... + + try: + nodelist.insert(1, "llama") # insert non-node into nodelist + raise Exception("should not get here") + except TypeError: + ... 
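For reference, the negative checks above (append/extend/insert of non-Node objects must raise TypeError) can be expressed equivalently with pytest.raises. A minimal standalone sketch, assuming NEURON's rxd module and pytest are importable; the function name is illustrative and not part of the patch:

    import pytest
    from neuron import h, rxd

    def check_nodelist_rejects_non_nodes():
        dend = h.Section(name="dend")  # gives h.allsec() something to return
        region = rxd.Region(h.allsec())
        hydrogen = rxd.Species(region, initial=1)
        water = rxd.Species(region, initial=1)
        h.finitialize(-65)
        nodelist = hydrogen.nodes
        with pytest.raises(TypeError):
            nodelist.append(water.nodes)  # a whole NodeList is not a Node
        with pytest.raises(TypeError):
            nodelist.extend([1, 2, 3, water.nodes[0]])  # mixed non-node items
        with pytest.raises(TypeError):
            nodelist.insert(1, "llama")  # arbitrary objects are rejected
        nodelist.append(water.nodes[0])  # appending a real Node is accepted
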
+ + # test that should work, so getting in the except is an error + try: + nodelist.append(water.nodes[0]) # append node + except TypeError: + raise Exception("should not get here") + + try: + original_length = len(nodelist) # extend nodes + nodelist.extend(item for item in water.nodes) + assert len(nodelist) == original_length + len(water.nodes) + except TypeError: + raise Exception("should not get here") + + try: + nodelist[0] = water.nodes[0] + except TypeError: + raise Exception("should not get here") + + try: + nl = rxd.nodelist.NodeList( + [water.nodes[0], water.nodes[0]] + ) # create nodelist with nodes + except TypeError: + raise Exception("should not get here") + + try: + nodelist.insert(1, water.nodes[0]) # insert node into nodelist + except TypeError: + raise Exception("should not get here") + + try: + nl = rxd.nodelist.NodeList([]) # create empty nodelist + except TypeError: + raise Exception("should not get here") + + try: + nl = rxd.nodelist.NodeList( + item for item in [water.nodes[0], water.nodes[0]] + ) # create nodelist with nodes generator + assert len(nl) == 2 + except TypeError: + raise Exception("should not get here") diff --git a/test/rxd/test_nodelist_include_flux.py b/test/rxd/test_nodelist_include_flux.py new file mode 100644 index 0000000000..292c576e0e --- /dev/null +++ b/test/rxd/test_nodelist_include_flux.py @@ -0,0 +1,24 @@ +def test_nodelist_include_flux(neuron_nosave_instance): + h, rxd, _ = neuron_nosave_instance + dend1 = h.Section("dend1") + diff = 1e-15 + cyt = rxd.Region(dend1.wholetree(), nrn_region="i") + ca1 = rxd.Species(cyt, name="ca1", charge=2, initial=0) + ca2 = rxd.Species(cyt, name="ca2", charge=2, initial=0) + + ca1.nodes(dend1(0.5)).include_flux(1e-16, units="mmol/ms") + ca2.nodes(dend1(0.5))[0].include_flux(1e-16, units="mmol/ms") + node1 = ca1.nodes(dend1(0.5))[0] + node2 = ca2.nodes(dend1(0.5))[0] + + h.finitialize(-65) + h.fadvance() + assert abs(node1.concentration - 1.2732395447351626e-10) < diff + assert abs(node2.concentration - 1.2732395447351626e-10) < diff + h.fadvance() + assert abs(node1.concentration - 2.546479089470325e-10) < diff + assert abs(node2.concentration - 2.546479089470325e-10) < diff + h.fadvance() + assert abs(node1.concentration - 3.819718634205488e-10) < diff + assert abs(node2.concentration - 3.819718634205488e-10) < diff + h.fadvance() diff --git a/test/rxd/test_pltvar.py b/test/rxd/test_pltvar.py new file mode 100644 index 0000000000..99e3a80791 --- /dev/null +++ b/test/rxd/test_pltvar.py @@ -0,0 +1,67 @@ +import pytest +import plotly +from neuron import units +from matplotlib import pyplot + + +def test_plt_variable(neuron_instance): + """Test to make sure species with multiple regions is not plotted""" + + h, rxd, _, _ = neuron_instance + + dend1 = h.Section("dend1") + dend2 = h.Section("dend2") + dend2.connect(dend1(1)) + + dend1.nseg = dend1.L = dend2.nseg = dend2.L = 11 + dend1.diam = dend2.diam = 2 * units.µm + + cyt = rxd.Region(dend1.wholetree(), nrn_region="i") + cyt2 = rxd.Region(dend2.wholetree(), nrn_region="i") + + ca = rxd.Species( + [cyt, cyt2], + name="ca", + charge=2, + initial=0 * units.mM, + d=1 * units.µm**2 / units.ms, + ) + + ca.nodes(dend1(0.5))[0].include_flux(1e-13, units="mmol/ms") + + h.finitialize(-65 * units.mV) + h.fadvance() + + ps = h.PlotShape(False) + + # Expecting an error for matplotlib + with pytest.raises(Exception, match="Please specify region for the species."): + ps.variable(ca) + ps.plot(pyplot) + + # Expecting an error for plotly + with pytest.raises(Exception, 
match="Please specify region for the species."): + ps.variable(ca) + ps.plot(plotly) + + cb = rxd.Species( + [cyt], + name="cb", + charge=2, + initial=0 * units.mM, + d=1 * units.µm**2 / units.ms, + ) + + # Scenarios that should work + ps.variable(ca[cyt]) + ps.plot(plotly) # No error expected here + + ps.variable(ca[cyt]) + ps.plot(pyplot) # No error expected here + + # Test plotting with only one region + ps.variable(cb) + ps.plot(plotly) # No Error expected here + + ps.variable(cb) + ps.plot(pyplot) # No Error expected here diff --git a/test/rxd/test_pure_diffusion.py b/test/rxd/test_pure_diffusion.py index 1e627d67ee..423a21e366 100644 --- a/test/rxd/test_pure_diffusion.py +++ b/test/rxd/test_pure_diffusion.py @@ -1,4 +1,9 @@ from .testutils import compare_data, tol +from platform import platform + + +def applearm(): + return "macOS-" in platform() and "-arm64-" in platform() def test_pure_diffusion(neuron_instance): @@ -52,4 +57,4 @@ def test_pure_diffusion_cvode(neuron_instance): h.continuerun(t) if not save_path: max_err = compare_data(data) - assert max_err < tol + assert max_err < (2e-10 if applearm() else tol) diff --git a/test/rxd/test_rangevarplot.py b/test/rxd/test_rangevarplot.py index b989ea6ea5..f60404c6a1 100644 --- a/test/rxd/test_rangevarplot.py +++ b/test/rxd/test_rangevarplot.py @@ -35,7 +35,7 @@ def test_rangevarplot(model_rangevarplot): dend, cyt, c, g = make_rvp() vec = h.Vector() g.to_vector(vec) - assert vec.to_python() == [0, 1, 1, 0, 0] + assert vec.to_python() == [1, 1, 1, 0, 0] def test_rangevarplot_no_species(model_rangevarplot): diff --git a/test/rxd/test_reinit.py b/test/rxd/test_reinit.py index f8363f27c8..c9a08175cb 100644 --- a/test/rxd/test_reinit.py +++ b/test/rxd/test_reinit.py @@ -2,10 +2,10 @@ @pytest.fixture -def simple_model(neuron_instance): +def simple_model(neuron_nosave_instance): """A simple rxd model with species and regions and reactions.""" - h, rxd, data, save_path = neuron_instance + h, rxd, save_path = neuron_nosave_instance dend = h.Section(name="dend") dend.diam = 2 dend.nseg = 5 @@ -16,14 +16,14 @@ def simple_model(neuron_instance): paramB = rxd.Parameter([cyt], initial=0) decay = rxd.Rate(k, -0.1 * k) model = (dend, cyt, k, paramA, paramB, decay) - yield (neuron_instance, model) + yield (neuron_nosave_instance, model) def test_reinit(simple_model): """Test rxd.re_init updates node values from NEURON values""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, k, paramA, paramB, decay = model h.finitialize(-65) dend(0.5).ki = 0 @@ -34,8 +34,8 @@ def test_reinit(simple_model): def test_reinit_cvode(simple_model): """Test rxd.re_init updates node values from NEURON values with CVode""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, k, paramA, paramB, decay = model h.finitialize(-65) h.CVode().active(True) @@ -47,8 +47,8 @@ def test_reinit_cvode(simple_model): def test_reinit_3d(simple_model): """Test rxd.re_init updates node values from NEURON values in 3D""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, k, paramA, paramB, decay = model rxd.set_solve_type(dimension=3) # check changing the units after initialization @@ -63,8 
+63,8 @@ def test_reinit_3d_cvode(simple_model): """Test rxd.re_init updates node values from NEURON values in 3D with CVode""" - neuron_instance, model = simple_model - h, rxd, data, save_path = neuron_instance + neuron_nosave_instance, model = simple_model + h, rxd, save_path = neuron_nosave_instance dend, cyt, k, paramA, paramB, decay = model rxd.set_solve_type(dimension=3) h.CVode().active(True) diff --git a/test/rxd/testdata b/test/rxd/testdata index be297655ab..30c76babbb 160000 --- a/test/rxd/testdata +++ b/test/rxd/testdata @@ -1 +1 @@ -Subproject commit be297655abf0f98be95a051576d43aad23cebbf0 +Subproject commit 30c76babbbc2cff005fd3f6f649c8b81da41ee58 diff --git a/test/rxd/testutils.py b/test/rxd/testutils.py index 7e1683337e..2c8ca2670c 100644 --- a/test/rxd/testutils.py +++ b/test/rxd/testutils.py @@ -63,9 +63,14 @@ def collect_data(h, rxd, data, save_path, num_record=10): data["data"] = [] data["record_count"] = 1 # remove previous record if h.t is the same - if data["record_count"] > 1 and h.t == data["data"][-len(local_data)]: - data["record_count"] -= 1 - del data["data"][-len(local_data) :] + if data["record_count"] > 1: + if len(local_data) > len(data["data"]): + # model changed -- reset data collection + data["data"] = [] + data["record_count"] = 1 + elif h.t == data["data"][-len(local_data)]: + data["record_count"] -= 1 + del data["data"][-len(local_data) :] # add new data record data["data"].extend(local_data) if data["record_count"] == 2: diff --git a/test/unit_tests/basic.cpp b/test/unit_tests/basic.cpp index d1c021b833..2ca0e4cad5 100644 --- a/test/unit_tests/basic.cpp +++ b/test/unit_tests/basic.cpp @@ -1,21 +1,24 @@ +#include <../../nrnconf.h> #include "code.h" #include "neuron.h" #include "ocfunc.h" #include "section.h" +#if HAVE_IV +#include "ivoc.h" +#endif #include SCENARIO("Test fast_imem calculation", "[Neuron][fast_imem]") { GIVEN("A section") { REQUIRE(hoc_oc("create s\n") == 0); - WHEN("fast_imem and cachevec is allocated") { + WHEN("fast_imem is allocated") { nrn_use_fast_imem = true; - use_cachevec = 1; nrn_fast_imem_alloc(); THEN("nrn_fast_imem should not be nullptr") { for (int it = 0; it < nrn_nthread; ++it) { - NrnThread* nt = &nrn_threads[it]; - REQUIRE(nt->_nrn_fast_imem != nullptr); + REQUIRE(nrn_threads[it].node_sav_d_storage()); + REQUIRE(nrn_threads[it].node_sav_rhs_storage()); } } } @@ -31,8 +34,9 @@ SCENARIO("Test fast_imem calculation", "[Neuron][fast_imem]") { } THEN("The current in this section is 0") { for (NrnThread* nt = nrn_threads; nt < nrn_threads + nrn_nthread; ++nt) { + auto const vec_sav_rhs = nt->node_sav_rhs_storage(); for (int i = 0; i < nt->end; ++i) { - REQUIRE(nt->_nrn_fast_imem->_nrn_sav_rhs[i] == 0.0); + REQUIRE(vec_sav_rhs[i] == 0.0); } } } @@ -47,9 +51,17 @@ TEST_CASE("Test return code of execerror", "[NEURON][execerror]") { REQUIRE(hoc_oc("execerror(\"test error\")") > 0); } +#if HAVE_IV +TEST_CASE("Test Oc::run(cmd)", "[NEURON]") { + Oc oc; + REQUIRE(oc.run("foo", 1) == 1); + REQUIRE(oc.run("foo", 0) == 1); +} +#endif + // AddressSanitizer seems to intercept the mallinfo[2]() system calls and return -// null values from them. -#ifndef NRN_ASAN_ENABLED +// null values from them. ThreadSanitizer seems to do the same. 
+#if !defined(NRN_ASAN_ENABLED) && !defined(NRN_TSAN_ENABLED) TEST_CASE("Test nrn_mallinfo returns non-zero", "[NEURON][nrn_mallinfo]") { SECTION("HOC") { REQUIRE( diff --git a/test/unit_tests/container/container.cpp b/test/unit_tests/container/container.cpp new file mode 100644 index 0000000000..4ebadf1624 --- /dev/null +++ b/test/unit_tests/container/container.cpp @@ -0,0 +1,293 @@ +#include "neuron/container/soa_container.hpp" +#include "neuron/container/view_utils.hpp" +#include "neuron/model_data.hpp" +#include "nrn_ansi.h" + +#include + +#include + +using namespace neuron::container; + +// Mock up a neuron::container::soa<...>-based data structure that includes features that are not +// currently tested in the real NEURON data structure code. + +namespace { +namespace field { +/** + * @brief Tag type that has zero-parameter array_dimension() and no num_variables(). + */ +struct A { + using type = float; + [[nodiscard]] int array_dimension() const { + return 42; + } +}; + +/** @brief Tag type for just one double per row. + */ +struct B { + using type = double; +}; + +/** @brief Tag type with multiple fields of differing array_dimension. + */ +struct C { + using type = double; + + size_t num_variables() const { + return 3; + } + + int array_dimension(int field_index) const { + return field_index + 1; + } +}; + +/** @brief Tag type for an optional field, intended to be Off. + */ +struct DOff { + static constexpr bool optional = true; + using type = double; +}; + +/** @brief Tag type for an optional field, intended to be On. + */ +struct DOn { + static constexpr bool optional = true; + using type = double; +}; + +} // namespace field +template +struct handle_interface: handle_base { + using base_type = handle_base; + using base_type::base_type; + /** + * @brief Return the above-diagonal element. + */ + [[nodiscard]] field::A::type& a() { + return this->template get(); + } +}; +struct storage: soa {}; +using owning_handle = handle_interface>; +} // namespace + +TEST_CASE("Tag type with array_dimension and without num_variables", "[Neuron][data_structures]") { + GIVEN("A standalone soa container") { + storage data; // Debian's GCC 10.2 doesn't like a {} before the ; + THEN("Can we create a handle to a row in it") { + REQUIRE_NOTHROW([&data]() { owning_handle instance{data}; }()); + } + } +} + +TEST_CASE("Multi-threaded calls to nrn_ensure_model_data_are_sorted()", + "[Neuron][data_structures]") { + GIVEN("An initialised model (albeit empty for the moment)") { + REQUIRE(hoc_oc("create s\nfinitialize(-65)\n") == 0); + REQUIRE(neuron::model().node_data().size() == 3); + THEN("Call nrn_ensure_model_data_are_sorted multiple times concurrently") { + // Calling nrn_ensure_model_data_are_sorted() in multiple threads should + // succeed in all of them, but the underlying sort operations should be + // serialised. + constexpr auto num_threads = 7; + std::vector threads; + // Accumulate the tokens returned by nrn_ensure_model_data_are_sorted. 
+ std::mutex token_mutex{}; + std::vector tokens; + // Make sure the data are not already sorted, otherwise we won't follow the complicated + // codepath + neuron::model().node_data().mark_as_unsorted(); + for (auto i = 0; i < num_threads; ++i) { + threads.emplace_back([&tokens, &token_mutex]() { + auto token = nrn_ensure_model_data_are_sorted(); + std::unique_lock _{token_mutex}; + tokens.push_back(token); + }); + } + // Wait for all the threads to end + for (auto& thread: threads) { + thread.join(); + } + REQUIRE(tokens.size() == num_threads); + } + REQUIRE(hoc_oc("delete_section()") == 0); + REQUIRE(neuron::model().node_data().size() == 0); + } +} + +TEST_CASE("soa::get_array_dims", "[Neuron][data_structures]") { + storage data; + + data.set_field_status(true); + data.set_field_status(false); + + auto c = field::C{}; + + for (size_t field_index = 0; field_index < c.num_variables(); ++field_index) { + CHECK(data.template get_array_dims()[field_index] == + c.array_dimension(field_index)); + CHECK(data.template get_array_dims(field_index) == + c.array_dimension(field_index)); + } + + CHECK(data.template get_array_dims(0) == 1ul); + CHECK(data.template get_array_dims(0) == 1ul); +} + +TEST_CASE("soa::get_num_variables", "[Neuron][data_structures]") { + storage data; + + data.set_field_status(true); + data.set_field_status(false); + + auto c = field::C{}; + + CHECK(data.get_num_variables() == c.num_variables()); + CHECK(data.get_num_variables() == 1ul); + CHECK(data.get_num_variables() == 1ul); +} + +TEST_CASE("defer delete storage pointer", "[Neuron][internal][data_structures]") { + REQUIRE(detail::defer_delete_storage != nullptr); + + auto usage_before = detail::compute_defer_delete_storage_size(); + { storage data; } + auto usage_after = detail::compute_defer_delete_storage_size(); + + CHECK(usage_after.size - usage_before.size > 0); + CHECK(usage_after.capacity > 0); + CHECK(usage_before.size <= usage_before.capacity); + CHECK(usage_after.size <= usage_after.capacity); +} + +template +std::size_t compute_row_size(const Storage& data) { + std::size_t local_size = 0ul; + auto tag = data.template get_tag(); + for (int field_index = 0; field_index < detail::get_num_variables(tag); ++field_index) { + local_size += data.template get_array_dims()[field_index] * sizeof(typename Tag::type); + } + + return local_size; +} + +TEST_CASE("container memory usage", "[Neuron][internal][data_structures]") { + storage data; + data.set_field_status(true); + data.set_field_status(false); + + std::size_t row_size = compute_row_size(data) + compute_row_size(data) + + compute_row_size(data) + compute_row_size(data); + + auto r1 = owning_handle{data}; + auto r2 = owning_handle{data}; + auto r3 = owning_handle{data}; + + auto n_rows = data.size(); + + auto usage = memory_usage(data); + + CHECK(usage.heavy_data.size == row_size * n_rows); + CHECK(usage.heavy_data.size <= usage.heavy_data.capacity); + + CHECK(usage.stable_identifiers.size % n_rows == 0); + CHECK(usage.stable_identifiers.size >= n_rows * sizeof(std::size_t*)); + CHECK(usage.stable_identifiers.size < n_rows * 4 * sizeof(std::size_t*)); + CHECK(usage.stable_identifiers.size <= usage.stable_identifiers.capacity); +} + +TEST_CASE("model memory usage", "[Neuron][internal][data_structures]") { + auto& model = neuron::model(); + + auto& nodes = model.node_data(); + auto node1 = neuron::container::Node::owning_handle{nodes}; + + auto& foo = model.add_mechanism(0, + "foo", + std::vector{{"a", 1}, + {"b", 2}, + {"c", 1}}); + auto foo1 = 
neuron::container::Mechanism::owning_handle{foo}; + auto foo2 = neuron::container::Mechanism::owning_handle{foo}; + + auto& bar = model.add_mechanism(1, + "bar", + std::vector{{"a", 1}}); + auto bar1 = neuron::container::Mechanism::owning_handle{bar}; + auto bar2 = neuron::container::Mechanism::owning_handle{bar}; + + auto usage = neuron::container::memory_usage(model); + CHECK(usage.nodes.heavy_data.size > 0); + CHECK(usage.nodes.heavy_data.size <= usage.nodes.heavy_data.capacity); + CHECK(usage.nodes.stable_identifiers.size > 0); + CHECK(usage.nodes.stable_identifiers.size <= usage.nodes.stable_identifiers.capacity); + + CHECK(usage.mechanisms.heavy_data.size > 0); + CHECK(usage.mechanisms.heavy_data.size <= usage.mechanisms.heavy_data.capacity); + CHECK(usage.mechanisms.stable_identifiers.size > 0); + CHECK(usage.mechanisms.stable_identifiers.size <= usage.mechanisms.stable_identifiers.capacity); +} + +TEST_CASE("cache::model memory_usage", "[Neuron][internal][data_structures]") { + auto& model = neuron::cache::model; + + // We can't manipulate `cache::Model`, hence there nothing to check other + // than the fact that it compiles and runs without throwing. + auto usage = neuron::container::memory_usage(model); +} + +TEST_CASE("format_memory", "[Neuron][internal]") { + size_t kb = 1e3; + size_t mb = 1e6; + size_t gb = 1e9; + size_t tb = 1e12; + + CHECK(neuron::container::format_memory(0) == " 0 "); + CHECK(neuron::container::format_memory(1) == " 1 "); + CHECK(neuron::container::format_memory(999) == " 999 "); + CHECK(neuron::container::format_memory(kb) == " 1.00 kB"); + CHECK(neuron::container::format_memory(999 * kb) == "999.00 kB"); + CHECK(neuron::container::format_memory(mb) == " 1.00 MB"); + CHECK(neuron::container::format_memory(gb) == " 1.00 GB"); + CHECK(neuron::container::format_memory(tb) == " 1.00 TB"); +} + +neuron::container::MemoryUsage dummy_memory_usage() { + auto model = + neuron::container::ModelMemoryUsage{neuron::container::StorageMemoryUsage{{1, 11}, {2, 12}}, + neuron::container::StorageMemoryUsage{{3, 13}, + {4, 14}}}; + auto cache_model = neuron::container::cache::ModelMemoryUsage{{5, 15}, {6, 16}}; + + auto stable_pointers = neuron::container::VectorMemoryUsage(7, 17); + auto stable_identifiers = neuron::container::VectorMemoryUsage(8, 18); + + auto memory_usage = MemoryUsage{model, cache_model, stable_pointers}; + + return memory_usage; +} + + +TEST_CASE("total memory usage", "[Neuron][internal][data_structures]") { + auto memory_usage = dummy_memory_usage(); + auto total = memory_usage.compute_total(); + CHECK(total.size == (7 * 8) / 2); + CHECK(total.capacity == total.size + 7 * 10); +} + +TEST_CASE("memory usage summary", "[Neuron][data_structures]") { + auto usage = dummy_memory_usage(); + auto summary = neuron::container::MemoryUsageSummary(usage); + auto total = usage.compute_total(); + + size_t summary_total = summary.required + summary.convenient + summary.oversized + + summary.leaked; + CHECK(summary.required <= total.size); + CHECK(summary.convenient <= total.size); + CHECK(summary.leaked <= total.size); + CHECK(summary.oversized == total.capacity - total.size); + CHECK(summary_total == total.capacity); +} diff --git a/test/unit_tests/container/generic_data_handle.cpp b/test/unit_tests/container/generic_data_handle.cpp new file mode 100644 index 0000000000..2a24ae5684 --- /dev/null +++ b/test/unit_tests/container/generic_data_handle.cpp @@ -0,0 +1,153 @@ +#include "neuron/container/generic_data_handle.hpp" +#include "neuron/container/node.hpp" 
+#include "neuron/model_data.hpp" + +#include + +#include +#include + +using namespace neuron::container; + +template +static std::string to_str(T const& x) { + std::ostringstream oss; + oss << x; + return oss.str(); +} + +TEST_CASE("generic_data_handle", "[Neuron][data_structures][generic_data_handle]") { + // Checks that apply to both typeless-null and double-typed null handles. + auto const check_double_or_typeless_null_handle = [](auto& handle) { + THEN("Check it does not claim to refer to a modern container") { + REQUIRE_FALSE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check it can be converted to a null data_handle") { + auto const typed_null = static_cast>(handle); + REQUIRE_FALSE(typed_null); + REQUIRE(typed_null == data_handle{}); + } + THEN("Check it can be made typeless-null by assigning nullptr") { + REQUIRE_NOTHROW(handle = nullptr); + AND_THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == "generic_data_handle{raw=nullptr type=typeless_null}"); + } + } + THEN("Check it can be made double*-null by assigning a null double*") { + REQUIRE_NOTHROW(handle = static_cast(nullptr)); + AND_THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == "generic_data_handle{raw=nullptr type=double*}"); + } + } + THEN("Check it can be assigned a literal int value") { + REQUIRE_NOTHROW(handle = 42); + AND_THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == "generic_data_handle{raw=0x2a type=int}"); + } + } + THEN("Check it can be assigned a literal double value") { + REQUIRE_NOTHROW(handle = 42.0); + AND_THEN("Check it has the expected string representation") { + // this is 42.0 interpreted as a 64-bit integer... + REQUIRE(to_str(handle) == + "generic_data_handle{raw=0x4045000000000000 type=double}"); + } + } + }; + auto const check_typeless_null = [&](auto& handle) { + check_double_or_typeless_null_handle(handle); + THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == "generic_data_handle{raw=nullptr type=typeless_null}"); + } + THEN("Check it can be converted to data_handle") { + REQUIRE_NOTHROW(static_cast>(handle)); + } + }; + GIVEN("A typeless null generic handle") { + generic_data_handle handle{}; + check_typeless_null(handle); + } + GIVEN("A typeless null generic handle constructed from nullptr") { + generic_data_handle handle{nullptr}; + check_typeless_null(handle); + } + GIVEN("A double*-typed null generic handle") { + // construct a double*-typed null handle + generic_data_handle null_handle{data_handle{}}; + check_double_or_typeless_null_handle(null_handle); + THEN("Check it cannot be converted to data_handle") { + REQUIRE_THROWS(static_cast>(null_handle)); + } + THEN("Check it has the expected string representation") { + REQUIRE(to_str(null_handle) == "generic_data_handle{raw=nullptr type=double*}"); + } + } + GIVEN("A handle wrapping a raw pointer (compatibility mode)") { + auto* const raw_ptr = reinterpret_cast(0xdeadbeefdeadbeef); + data_handle typed_handle{raw_ptr}; + generic_data_handle handle{typed_handle}; + THEN("Check it remembered the double type") { + REQUIRE(handle.type_name() == "double*"); + } + THEN("Check it can be converted back to data_handle") { + REQUIRE_NOTHROW(static_cast>(handle)); + } + THEN("Check it cannot be converted to data_handle") { + REQUIRE_THROWS(static_cast>(handle)); + } + THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == "generic_data_handle{raw=0xdeadbeefdeadbeef 
type=double*}"); + } + THEN("Check it does not claim to refer to a modern container") { + REQUIRE_FALSE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check we can't get another type out of it") { + REQUIRE_THROWS(handle.get()); + REQUIRE_THROWS(handle.literal_value()); + } + } + GIVEN("A generic_handle referring to an entry in an SOA container") { + auto& node_data = neuron::model().node_data(); + REQUIRE(node_data.size() == 0); + std::optional node{node_data}; + auto typed_handle = node->v_handle(); + THEN("Match typed_handle as true") { + REQUIRE(typed_handle); + } + generic_data_handle handle{typed_handle}; + THEN("Check it remembered the double type") { + REQUIRE(handle.type_name() == "double*"); + } + THEN("Check it can be converted back to data_handle") { + REQUIRE_NOTHROW(static_cast>(handle)); + } + THEN("Check it cannot be converted to data_handle") { + REQUIRE_THROWS(static_cast>(handle)); + } + THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == + "generic_data_handle{Node::field::Voltage row=0/1 type=double*}"); + } + THEN("Check that it knows it refers to a modern data structure") { + REQUIRE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check that we can't get another type out of it") { + REQUIRE_THROWS(handle.get()); + REQUIRE_THROWS(handle.get()); + } + THEN("Check we cannot obtain a literal value") { + REQUIRE_THROWS(handle.literal_value()); + REQUIRE_THROWS(handle.literal_value()); + } + WHEN("The row of the modern data structure is deleted") { + node.reset(); + THEN("Check it still reports referring to a modern data structure") { + REQUIRE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check it has the expected string representation") { + REQUIRE(to_str(handle) == + "generic_data_handle{Node::field::Voltage died/0 type=double*}"); + } + } + } +} diff --git a/test/unit_tests/container/mechanism.cpp b/test/unit_tests/container/mechanism.cpp new file mode 100644 index 0000000000..0ead73d2c2 --- /dev/null +++ b/test/unit_tests/container/mechanism.cpp @@ -0,0 +1,276 @@ +#include "neuron/container/mechanism.hpp" +#include "neuron/container/mechanism_data.hpp" +#include "neuron/container/soa_container.hpp" +#include "neuron/model_data.hpp" + +#include + +#include +#include +#include +#include +#include + +using namespace neuron::container::Mechanism; + +TEST_CASE("SOA-backed Mechanism data structure", "[Neuron][data_structures][mechanism]") { + GIVEN("A mechanism with two copies of the same tagged variable") { + // foo is scalar, bar is an array of dimension bar_values.size() + constexpr std::array bar_values{7.0, 0.7, 0.9}; + std::vector field_info{{"foo", 1}, {"bar", bar_values.size()}}; + auto const num_fields = field_info.size(); + auto const foo_index = 0; + auto const bar_index = 1; + // Have to register the storage with neuron::model(), otherwise pretty-printing of data + // handles won't work + auto const mech_type = 0; + neuron::model().delete_mechanism(mech_type); + auto& mech_data = neuron::model().add_mechanism(mech_type, "test_mechanism", field_info); + // the top-level mechanism data structure can be pretty-printed + THEN("The mechanism data structure can be pretty-printed") { + std::ostringstream oss; + oss << mech_data; + REQUIRE(oss.str() == "test_mechanism::storage{type=0, 2 fields}"); + } + REQUIRE(mech_data.get_tag().num_variables() == num_fields); + WHEN("A row is added") { + owning_handle mech_instance{mech_data}; + THEN("We cannot delete the mechanism type") { + 
REQUIRE_THROWS(neuron::model().delete_mechanism(mech_type)); + } + THEN("Values can be read and written") { + constexpr auto field0_value = 42.0; + mech_instance.fpfield(foo_index) = field0_value; + for (auto i = 0; i < bar_values.size(); ++i) { + mech_instance.fpfield(bar_index, i) = bar_values[i]; + } + REQUIRE_THROWS(mech_instance.fpfield(num_fields)); + REQUIRE_THROWS(mech_instance.fpfield(bar_index, bar_values.size())); + REQUIRE(mech_instance.fpfield(foo_index) == field0_value); + for (auto i = 0; i < bar_values.size(); ++i) { + REQUIRE(mech_instance.fpfield(bar_index, i) == bar_values[i]); + } + REQUIRE_THROWS(mech_instance.fpfield(num_fields)); + + REQUIRE(mech_instance.fpfield_dimension(foo_index) == 1); + REQUIRE(mech_instance.fpfield_dimension(bar_index) == bar_values.size()); + REQUIRE_THROWS(mech_instance.fpfield_dimension(num_fields)); + + auto bar_handle = mech_instance.fpfield_handle(bar_index); + REQUIRE(*bar_handle.next_array_element(2) == 0.9); + REQUIRE_THROWS(*bar_handle.next_array_element(bar_values.size())); + + + AND_THEN("Data handles give useful information when printed") { + auto const require_str = [&mech_instance](std::string_view ref, auto... args) { + auto const dh = mech_instance.fpfield_handle(args...); + std::ostringstream oss; + oss << dh; + REQUIRE(oss.str() == ref); + }; + // Slightly worried that the val=xxx formatting isn't portable, in which case we + // could generate the reference strings using the values above. + require_str("data_handle{cont=test_mechanism foo row=0/1 val=42}", + foo_index); + require_str("data_handle{cont=test_mechanism bar[0/3] row=0/1 val=7}", + bar_index, + 0); + require_str("data_handle{cont=test_mechanism bar[1/3] row=0/1 val=0.7}", + bar_index, + 1); + require_str("data_handle{cont=test_mechanism bar[2/3] row=0/1 val=0.9}", + bar_index, + 2); + // Also cover generic_data_handle printing + auto const gdh = neuron::container::generic_data_handle{ + mech_instance.fpfield_handle(foo_index)}; + std::ostringstream oss; + oss << gdh; + REQUIRE(oss.str() == + "generic_data_handle{cont=test_mechanism foo row=0/1 type=double*}"); + } + std::ostringstream actual; + actual << mech_instance; + REQUIRE(actual.str() == + "test_mechanism{row=0/1 fpfield[0]{ 42 } fpfield[1]{ 7 0.7 0.9 }}"); + } + } + WHEN("Many rows are added") { + std::vector reference_field0, reference_field1_0, reference_field1_1; + constexpr auto num_instances = 10; + std::generate_n(std::back_inserter(reference_field0), num_instances, [i = 0]() mutable { + auto const x = i++; + return x * x; + }); + std::generate_n(std::back_inserter(reference_field1_0), + num_instances, + [i = 0]() mutable { + auto const x = i++; + return x * x * x; + }); + std::generate_n(std::back_inserter(reference_field1_1), + num_instances, + [i = 0]() mutable { + auto const x = i++; + return x * x * x * x; + }); + std::vector mech_instances{}; + for (auto i = 0; i < num_instances; ++i) { + auto& mech = mech_instances.emplace_back(mech_data); + mech.fpfield(0) = reference_field0[i]; + mech.fpfield(1, 0) = reference_field1_0[i]; + mech.fpfield(1, 1) = reference_field1_1[i]; + } + REQUIRE(mech_data.empty() == mech_instances.empty()); + REQUIRE(mech_data.size() == mech_instances.size()); + enum struct StorageCheck { Skip, Match, NotMatch }; + const auto check_field_values = [&](StorageCheck storage_should_match) { + THEN("The correct values can be read back") { + std::vector current_field0, current_field1_0, current_field1_1; + std::transform(mech_instances.begin(), + mech_instances.end(), + 
std::back_inserter(current_field0), + [](auto const& mech) { return mech.fpfield(0); }); + std::transform(mech_instances.begin(), + mech_instances.end(), + std::back_inserter(current_field1_0), + [](auto const& mech) { return mech.fpfield(1, 0); }); + std::transform(mech_instances.begin(), + mech_instances.end(), + std::back_inserter(current_field1_1), + [](auto const& mech) { return mech.fpfield(1, 1); }); + REQUIRE(current_field0 == reference_field0); + REQUIRE(current_field1_0 == reference_field1_0); + REQUIRE(current_field1_1 == reference_field1_1); + auto const field_matches = + [&](auto const& reference, int index, int array_index = 0) { + for (auto i = 0; i < mech_data.size(); ++i) { + if (mech_data.fpfield(i, index, array_index) != reference[i]) { + return false; + } + } + return true; + }; + if (storage_should_match == StorageCheck::Match) { + AND_THEN("The underlying storage matches the reference values") { + REQUIRE(field_matches(reference_field0, 0)); + REQUIRE(field_matches(reference_field1_0, 1, 0)); + REQUIRE(field_matches(reference_field1_1, 1, 1)); + } + } else if (storage_should_match == StorageCheck::NotMatch) { + AND_THEN("The underlying storage no longer matches the reference values") { + REQUIRE_FALSE(field_matches(reference_field0, 0)); + REQUIRE_FALSE(field_matches(reference_field1_0, 1, 0)); + REQUIRE_FALSE(field_matches(reference_field1_1, 1, 1)); + } + } + } + }; + check_field_values(StorageCheck::Match); + AND_WHEN("The underlying storage is permuted") { + std::vector perm_vector(mech_instances.size()); + std::iota(perm_vector.begin(), perm_vector.end(), 0); + std::mt19937 g{42}; + std::shuffle(perm_vector.begin(), perm_vector.end(), g); + mech_data.apply_reverse_permutation(std::move(perm_vector)); + check_field_values(StorageCheck::Skip); + } + auto const apply_to_all = [&](auto callable) { + auto const impl = [](auto callable, auto&... 
vecs) { (callable(vecs), ...); }; + impl(std::move(callable), + mech_instances, + reference_field0, + reference_field1_0, + reference_field1_1); + }; + AND_WHEN("An element is deleted") { + apply_to_all([](auto& vec) { vec.erase(vec.begin()); }); + check_field_values(StorageCheck::Skip); + } + } + WHEN("We want to manipulate other mechanism data") { + THEN("We cannot readd the same type") { + REQUIRE_THROWS( + neuron::model().add_mechanism(mech_type, "test_mechanism_2", field_info)); + } + THEN("We cannot access a non-existing mechanism") { + auto const non_existing_mech_type = 313; + REQUIRE_THROWS(neuron::model().mechanism_data(non_existing_mech_type)); + } + THEN("We cannot get data for a null mechanism") { + auto const null_mech_type = 345; + neuron::model().add_mechanism(null_mech_type, "null_mechanism"); + neuron::model().delete_mechanism(null_mech_type); + REQUIRE_THROWS(neuron::model().mechanism_data(null_mech_type)); + } + } + } + GIVEN("A mechanism type that gets deleted") { + std::vector field_info{{"foo", 1}}; // one scalar variable + constexpr auto foo_index = 0; + constexpr auto foo_value = 29.0; + // Register the storage with neuron::model() because we want to cover codepaths in the + // pretty-printing + auto const mech_type = 0; + neuron::model().delete_mechanism(mech_type); + auto& mech_data = neuron::model().add_mechanism(mech_type, "test_mechanism", field_info); + REQUIRE(mech_data.get_tag().num_variables() == 1); + WHEN("A row is added and we take a handle to its value") { + std::optional instance{std::in_place, mech_data}; + instance->fpfield(foo_index) = foo_value; + auto foo_handle = instance->fpfield_handle(foo_index); + auto generic_foo = neuron::container::generic_data_handle{foo_handle}; + THEN("The handle yields the expected value") { + REQUIRE(*foo_handle == foo_value); + } + AND_WHEN("The row is deleted again") { + instance.reset(); + THEN("We can still print the handle") { + std::ostringstream oss; + oss << foo_handle; + REQUIRE(oss.str() == "data_handle{cont=test_mechanism foo died/0}"); + } + AND_WHEN("The mechanism type is also deleted") { + neuron::model().delete_mechanism(mech_type); + THEN("We can still print the handle, following an unusual codepath") { + { + std::ostringstream oss; + oss << foo_handle; + REQUIRE(oss.str() == "data_handle{cont=unknown died/unknown}"); + } + { + std::ostringstream oss; + oss << generic_foo; + REQUIRE(oss.str() == + "generic_data_handle{cont=unknown died/unknown type=double*}"); + } + } + } + } + } + } +} + +TEST_CASE("Model::is_valid_mechanism", "[Neuron][data_structures]") { + // Since `neuron::model` is a global we we've no clue what state it's in. + // Hence we delete what we want to use and remember that we have to assume + // there might be other mechanisms present that we shall ignore. + + auto& model = neuron::model(); + std::vector field_info{{"foo", 1}}; // just a dummy value. 
+ + model.delete_mechanism(0); + model.add_mechanism(0, "zero", field_info); + + model.delete_mechanism(1); + model.add_mechanism(1, "one", field_info); + + model.delete_mechanism(2); + model.add_mechanism(2, "two", field_info); + + model.delete_mechanism(1); + + CHECK(model.is_valid_mechanism(0)); + CHECK(!model.is_valid_mechanism(1)); + CHECK(model.is_valid_mechanism(2)); +} diff --git a/test/unit_tests/container/node.cpp b/test/unit_tests/container/node.cpp new file mode 100644 index 0000000000..263db0c51a --- /dev/null +++ b/test/unit_tests/container/node.cpp @@ -0,0 +1,551 @@ +#include "neuron/container/node.hpp" +#include "neuron/container/soa_container.hpp" +#include "section.h" + +#include + +#include +#include +#include +#include +#include + +static_assert(std::is_default_constructible_v); +static_assert(!std::is_copy_constructible_v); +static_assert(std::is_move_constructible_v); +static_assert(!std::is_copy_assignable_v); +static_assert(std::is_move_assignable_v); +static_assert(std::is_destructible_v); + +using namespace neuron::container; +using namespace neuron::container::Node; + +// We want to check that the tests pass for all of: +// - data_handle +// - data_handle -> T* -> data_handle +// - data_handle -> generic_data_handle -> data_handle +enum struct Transform { None, ViaRawPointer, ViaGenericDataHandle }; +template +data_handle transform(data_handle handle, Transform type) { + if (type == Transform::None) { + return handle; + } else if (type == Transform::ViaRawPointer) { + return data_handle{static_cast(handle)}; + } else { + assert(type == Transform::ViaGenericDataHandle); + return static_cast>(generic_data_handle{handle}); + } +} + +constexpr static double magic_voltage_value = 42.; + +template +static std::string to_str(T const& x) { + std::ostringstream oss; + oss << x; + return oss.str(); +} + +TEST_CASE("data_handle", "[Neuron][data_structures][data_handle]") { + GIVEN("A null handle") { + data_handle handle{}; + handle = transform(handle, + GENERATE(Transform::None, + Transform::ViaRawPointer, + Transform::ViaGenericDataHandle)); + THEN("Check it is really null") { + REQUIRE_FALSE(handle); + } + THEN("Check it compares equal to a different null pointer") { + data_handle const other_handle{}; + REQUIRE(handle == other_handle); + } + THEN("Check it prints the right value") { + REQUIRE(to_str(handle) == "data_handle{null}"); + } + THEN("Check it doesn't claim to be modern") { + REQUIRE_FALSE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check it decays to a null pointer") { + auto* foo_ptr = static_cast(handle); + REQUIRE(foo_ptr == nullptr); + } + } + GIVEN("A handle wrapping a raw pointer (compatibility mode)") { + std::vector foo(10); + std::iota(foo.begin(), foo.end(), magic_voltage_value); + + data_handle handle{foo.data()}; + handle = transform(handle, + GENERATE(Transform::None, + Transform::ViaRawPointer, + Transform::ViaGenericDataHandle)); + THEN("Check it is not null") { + REQUIRE(handle); + } + THEN("Check it does not compare equal to a null handle") { + data_handle null_handle{}; + REQUIRE(handle != null_handle); + } + THEN("Check it compares equal to a different handle wrapping the same raw pointer") { + data_handle other_handle{foo.data()}; + REQUIRE(handle == other_handle); + } + THEN("Check it yields the right value") { + REQUIRE(*handle == magic_voltage_value); + } + THEN("Check it doesn't claim to be modern") { + REQUIRE_FALSE(handle.refers_to_a_modern_data_structure()); + REQUIRE_FALSE(handle.refers_to( + 
neuron::model().node_data())); + } + THEN("Check it decays to the right raw pointer") { + auto* foo_ptr = static_cast(handle); + REQUIRE(foo_ptr == foo.data()); + } + THEN("Check it prints the right value") { + std::ostringstream expected; + expected << "data_handle{raw=" << foo.data() << '}'; + REQUIRE(to_str(handle) == expected.str()); + } + THEN("Check that we can store/retrieve in/from unordered_map") { + std::unordered_map, std::string> map; + map[handle] = "unordered_map"; + REQUIRE(map[handle] == "unordered_map"); + } + THEN("Check that next_array_element works") { + auto next = handle.next_array_element(5); + REQUIRE(next); + REQUIRE(*next == magic_voltage_value + 5); + } + } + GIVEN("A handle to a void pointer") { + auto foo = std::make_shared(magic_voltage_value); + + data_handle handle{foo.get()}; + handle = transform(handle, + GENERATE(Transform::None, + Transform::ViaRawPointer, + Transform::ViaGenericDataHandle)); + THEN("Check it is not null") { + REQUIRE(handle); + } + THEN("Check it doesn't claim to be modern") { + REQUIRE_FALSE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check it decays to the right raw pointer") { + auto* foo_ptr = static_cast(handle); + REQUIRE(foo_ptr == foo.get()); + } + THEN("Check it matches another data_handle to same pointer") { + const data_handle other_handle{foo.get()}; + REQUIRE(handle == other_handle); + } + THEN("Check it prints the right value") { + std::ostringstream expected; + expected << "data_handle{raw=" << foo.get() << '}'; + REQUIRE(to_str(handle) == expected.str()); + } + } + GIVEN("A handle referring to an entry in an SOA container") { + REQUIRE(neuron::model().node_data().size() == 0); + std::optional<::Node> node{std::in_place}; + node->v() = magic_voltage_value; + auto handle = node->v_handle(); + const auto handle_id = handle.identifier(); + handle = transform(handle, + GENERATE(Transform::None, + Transform::ViaRawPointer, + Transform::ViaGenericDataHandle)); + THEN("Check it is not null") { + REQUIRE(handle); + } + THEN("Check it actually refers_to voltage and not something else") { + REQUIRE(handle.refers_to( + neuron::model().node_data())); + REQUIRE_FALSE(handle.refers_to( + neuron::model().node_data())); + } + THEN("Check it does not compare equal to a null handle") { + data_handle null_handle{}; + REQUIRE(handle != null_handle); + } + THEN("Check it does not compare equal to a handle in legacy mode") { + double foo{}; + data_handle foo_handle{&foo}; + REQUIRE(handle != foo_handle); + } + THEN("Check it yields the right value") { + REQUIRE(*handle == magic_voltage_value); + const auto const_handle(handle); + REQUIRE(*const_handle == magic_voltage_value); + } + THEN("Check it claims to be modern") { + REQUIRE(handle.refers_to_a_modern_data_structure()); + } + THEN("Check it prints the right value") { + REQUIRE(to_str(handle) == "data_handle{Node::field::Voltage row=0/1 val=42}"); + } + THEN("Check that getting next_array_element throws, dimension is 1") { + REQUIRE_THROWS(handle.next_array_element()); + } + THEN("Check that we can store/retrieve in/from unordered_map") { + std::unordered_map, std::string> map; + map[handle] = "unordered_map_modern_dh"; + REQUIRE(map[handle] == "unordered_map_modern_dh"); + } + THEN("Make sure we get the current logical row number") { + REQUIRE(handle.current_row() == 0); + } + THEN("Check that deleting the (Node) object it refers to invalidates the handle") { + node.reset(); // delete the underlying Node object + REQUIRE_FALSE(handle); + // REQUIRE(handle == data_handle{}); + 
REQUIRE(handle.refers_to_a_modern_data_structure()); + REQUIRE(to_str(handle) == "data_handle{Node::field::Voltage died/0}"); + REQUIRE(handle.refers_to( + neuron::model().node_data())); + REQUIRE_THROWS(*handle); + const auto const_handle(handle); + REQUIRE_THROWS(*const_handle); + REQUIRE(handle.identifier() == handle_id); + } + THEN( + "Check that mutating the underlying container while holding a raw pointer has the " + "expected effect") { + auto* raw_ptr = static_cast(handle); + REQUIRE(raw_ptr); + REQUIRE(*raw_ptr == magic_voltage_value); + node.reset(); // delete the underlying Node object, handle is now invalid + REQUIRE_FALSE(handle); + REQUIRE(raw_ptr); // no magic here, we have a dangling pointer + data_handle new_handle{raw_ptr}; + REQUIRE(new_handle); // handle refers to no-longer-valid memory, but we can't detect + // that + REQUIRE(handle != new_handle); + REQUIRE_FALSE(new_handle.refers_to_a_modern_data_structure()); + // dereferencing raw_ptr is undefined behaviour + } + } +} + +namespace neuron::test { +std::vector get_node_voltages(std::vector<::Node> const& nodes) { + std::vector ret{}; + std::transform(nodes.begin(), nodes.end(), std::back_inserter(ret), [](auto const& node) { + return node.v(); + }); + return ret; +} +std::tuple, std::vector> get_nodes_and_reference_voltages( + std::size_t num_nodes = 10) { + std::vector reference_voltages{}; + std::generate_n(std::back_inserter(reference_voltages), num_nodes, [i = 0]() mutable { + auto x = i++; + return x * x; + }); + std::vector<::Node> nodes{}; + std::transform(reference_voltages.begin(), + reference_voltages.end(), + std::back_inserter(nodes), + [](auto v) { + ::Node node{}; + node.v() = v; + return node; + }); + return {std::move(nodes), std::move(reference_voltages)}; +} +} // namespace neuron::test + +TEST_CASE("SOA-backed Node structure", "[Neuron][data_structures][node]") { + REQUIRE(neuron::model().node_data().size() == 0); + GIVEN("A default-constructed node") { + ::Node node{}; + THEN("Check its SOA-backed members have their default values") { + REQUIRE(node.area() == field::Area{}.default_value()); + REQUIRE(node.v() == field::Voltage{}.default_value()); + } + THEN("Check we can get a non-owning handle to it") { + auto handle = node.non_owning_handle(); + AND_THEN("Check the handle yields the corect values") { + REQUIRE(handle.area() == field::Area{}.default_value()); + REQUIRE(handle.v() == field::Voltage{}.default_value()); + } + } + } + GIVEN("A series of nodes with increasing integer voltages") { + using neuron::test::get_node_voltages; + auto nodes_and_voltages = neuron::test::get_nodes_and_reference_voltages(); + auto& nodes = std::get<0>(nodes_and_voltages); + auto& reference_voltages = std::get<1>(nodes_and_voltages); + auto& node_data = neuron::model().node_data(); + // Flag this original order as "sorted" so that the tests that it is no + // longer sorted after permutation are meaningful. 
+ { + auto write_token = node_data.issue_frozen_token(); + node_data.mark_as_sorted(write_token); + } + auto const require_logical_match = [&]() { + THEN("Check the logical voltages still match") { + REQUIRE(get_node_voltages(nodes) == reference_voltages); + } + }; + auto const storage_match = [&]() { + for (auto i = 0; i < nodes.size(); ++i) { + if (node_data.get(i) != reference_voltages.at(i)) { + return false; + } + } + return true; + }; + auto const require_logical_and_storage_match = [&]() { + THEN("Check the logical voltages still match") { + REQUIRE(get_node_voltages(nodes) == reference_voltages); + AND_THEN("Check the underlying storage also matches") { + REQUIRE(storage_match()); + } + } + }; + auto const require_logical_match_and_storage_different = [&]() { + THEN("Check the logical voltages still match") { + REQUIRE(get_node_voltages(nodes) == reference_voltages); + AND_THEN("Check the underlying storage no longer matches") { + REQUIRE_FALSE(storage_match()); + } + } + }; + WHEN("Values are read back immediately") { + require_logical_and_storage_match(); + } + std::vector perm_vector(nodes.size()); + std::iota(perm_vector.begin(), perm_vector.end(), 0); + WHEN("The underlying storage is rotated") { + auto rotated = perm_vector; + std::rotate(rotated.begin(), std::next(rotated.begin()), rotated.end()); + auto const sorted_token = node_data.apply_reverse_permutation(std::move(rotated)); + require_logical_match_and_storage_different(); + } + WHEN("A unit reverse permutation is applied to the underlying storage") { + node_data.apply_reverse_permutation(std::move(perm_vector)); + require_logical_and_storage_match(); + // Should the data still be sorted here or not? Should + // apply_permutation bother checking if the permutation did + // anything? 
+ } + WHEN("A random permutation is applied to the underlying storage") { + std::mt19937 g{42}; + std::shuffle(perm_vector.begin(), perm_vector.end(), g); + auto const sorted_token = node_data.apply_reverse_permutation(std::move(perm_vector)); + // the permutation is random, so we don't know if voltage_storage + // will match reference_voltages or not + require_logical_match(); + } + auto const require_exception = [&](auto perm) { + THEN("An exception is thrown") { + REQUIRE_THROWS(node_data.apply_reverse_permutation(std::move(perm))); + AND_THEN("The container is still flagged as sorted") { + REQUIRE(node_data.is_sorted()); + } + } + }; + WHEN("A too-short permutation is applied to the underlying storage") { + std::vector bad_perm(nodes.size() - 1); + std::iota(bad_perm.begin(), bad_perm.end(), 0); + require_exception(std::move(bad_perm)); + } + WHEN("A permutation with a repeated entry is applied to the underlying storage") { + std::vector bad_perm(nodes.size()); + std::iota(bad_perm.begin(), bad_perm.end(), 0); + bad_perm[0] = 1; // repeated entry + require_exception(std::move(bad_perm)); + } + WHEN("A permutation with an invalid value is applied to the underlying storage") { + std::vector bad_perm(nodes.size()); + std::iota(bad_perm.begin(), bad_perm.end(), 0); + bad_perm[0] = std::numeric_limits::max(); // out of range + require_exception(std::move(bad_perm)); + } + WHEN("The last Node is removed") { + nodes.pop_back(); + reference_voltages.pop_back(); + require_logical_and_storage_match(); + } + WHEN("The first Node is removed") { + nodes.erase(nodes.begin()); + reference_voltages.erase(reference_voltages.begin()); + require_logical_match_and_storage_different(); + } + WHEN("The middle Node is removed") { + auto const index_to_remove = nodes.size() / 2; + nodes.erase(std::next(nodes.begin(), index_to_remove)); + reference_voltages.erase(std::next(reference_voltages.begin(), index_to_remove)); + require_logical_match_and_storage_different(); + } + WHEN("The dense storage is sorted and marked read-only") { + // A rough sketch of the concept here is that if we have a + // SOA-backed quantity, like the node voltages, then referring + // stably to those values requires something like + // data_handle. We might hold some complicated structure of + // those "in the interpreter", let's say + // std::list>, and want to flatten that into + // something simpler for use in the translated MOD file code -- + // let's say std::vector -- while the data remain "sorted". + { + // Label the current order as sorted and acquire a token that + // freezes it that way. The data should be sorted until the + // token goes out of scope. + auto frozen_token = node_data.issue_frozen_token(); + node_data.mark_as_sorted(frozen_token); + REQUIRE(node_data.is_sorted()); + THEN("New nodes cannot be created") { + // Underlying node data is read-only, cannot allocate new Nodes. 
+ REQUIRE_THROWS(::Node{}); + } + // The token enforces that values cannot move in memory, but it + // does not mean that they cannot be read from and written to + THEN("Values in existing nodes can be modified") { + auto& node = nodes.front(); + REQUIRE_NOTHROW(node.v()); + REQUIRE_NOTHROW(node.v() += 42.0); + } + THEN("The sorted-ness flag cannot be modified") { + REQUIRE_THROWS(node_data.mark_as_unsorted()); + AND_THEN("Attempts to do so fail") { + REQUIRE(node_data.is_sorted()); + } + } + THEN( + "The storage *can* be permuted if the sorted token is transferred back to the " + "container") { + node_data.apply_reverse_permutation(std::move(perm_vector), frozen_token); + } + THEN("The storage cannot be permuted when a 2nd sorted token is used") { + // Checking one of the permuting operations should be enough + REQUIRE_THROWS(node_data.apply_reverse_permutation(std::move(perm_vector))); + } + // In read-only mode we cannot delete Nodes either, but because + // we cannot throw from destructors it is not easy to test this + // in this context. There is a separate test for this below + // that is tagged with [tests_that_abort]. + } + // sorted_token out of scope, underlying data no longer read-only + THEN("After the token is discarded, new Nodes can be allocated") { + REQUIRE_NOTHROW(::Node{}); + } + } + } + REQUIRE(neuron::model().node_data().size() == 0); +} + +TEST_CASE("Fast membrane current storage", "[Neuron][data_structures][node][fast_imem]") { + REQUIRE(neuron::model().node_data().size() == 0); + + auto const set_fast_imem = [](bool new_value) { + nrn_use_fast_imem = new_value; + nrn_fast_imem_alloc(); + }; + auto const check_throws = [](auto& node) { + THEN("fast_imem fields cannot be accessed") { + CHECK_THROWS(node.sav_d()); + CHECK_THROWS(node.sav_rhs()); + CHECK_FALSE(node.sav_rhs_handle()); + } + }; + auto const check_default = [](auto& node) { + THEN("fast_imem fields have their default values") { + CHECK(node.sav_d() == 0.0); + CHECK(node.sav_rhs() == 0.0); + CHECK(*node.sav_rhs_handle() == 0.0); + } + }; + GIVEN("fast_imem calculation is disabled") { + set_fast_imem(false); + WHEN("A node is default-constructed") { + REQUIRE(neuron::model().node_data().size() == 0); + ::Node node{}; + check_throws(node); + auto handle = node.sav_rhs_handle(); + // The sav_rhs field is disabled, so the handle is a plain, completely null one. + CHECK(to_str(handle) == "data_handle{null}"); + CHECK(to_str(generic_data_handle{handle}) == + "generic_data_handle{raw=nullptr type=double*}"); + AND_WHEN("fast_imem calculation is enabled with a Node active") { + set_fast_imem(true); + check_default(node); + // The current implementation prefers simplicity to magic where possible, so handle + // will still be null. 
+ CHECK_FALSE(handle); + } + } + } + GIVEN("fast_imem calculation is enabled") { + set_fast_imem(true); + WHEN("A node is default-constructed") { + REQUIRE(neuron::model().node_data().size() == 0); + ::Node node{}; + check_default(node); + auto handle = node.sav_rhs_handle(); + *handle = 42; // non-default value + generic_data_handle generic{handle}; + CHECK(handle); + CHECK(to_str(handle) == + "data_handle{Node::field::FastIMemSavRHS row=0/1 val=42}"); + CHECK(to_str(generic) == + "generic_data_handle{Node::field::FastIMemSavRHS row=0/1 type=double*}"); + AND_WHEN("fast_imem calculation is disabled with a Node active") { + REQUIRE(neuron::model().node_data().size() == 1); + set_fast_imem(false); + check_throws(node); + // This handle used to be valid, but it is now invalid because the optional field it + // refers to has been disabled. + CHECK_FALSE(handle); + CHECK(to_str(handle) == "data_handle{cont=deleted row=0/unknown}"); + CHECK(to_str(generic) == + "generic_data_handle{cont=deleted row=0/unknown type=double*}"); + AND_WHEN("fast_imem calculation is re-enabled") { + set_fast_imem(true); + // non-default value written above has been lost + check_default(node); + // Implementation choice was to minimise magic, so the handles are still dead + CHECK_FALSE(handle); + CHECK(to_str(handle) == "data_handle{cont=deleted row=0/unknown}"); + CHECK(to_str(generic) == + "generic_data_handle{cont=deleted row=0/unknown type=double*}"); + } + } + } + WHEN("A series of Nodes are created with non-trivial fast_imem values") { + constexpr auto num_nodes = 10; + std::vector<::Node> nodes(num_nodes); + std::vector perm_vector(num_nodes); + for (auto i = 0; i < num_nodes; ++i) { + perm_vector[i] = i; + nodes[i].sav_d() = i * i; + nodes[i].sav_rhs() = i * i * i; + } + AND_WHEN("A random permutation is applied") { + std::mt19937 g{42}; + std::shuffle(perm_vector.begin(), perm_vector.end(), g); + auto& node_data = neuron::model().node_data(); + node_data.apply_reverse_permutation(std::move(perm_vector)); + THEN("The logical values should still match") { + for (auto i = 0; i < num_nodes; ++i) { + REQUIRE(nodes[i].sav_d() == i * i); + REQUIRE(nodes[i].sav_rhs() == i * i * i); + } + } + } + } + } +} + +// Tests that cover code paths reaching std::terminate. "[.]" means they will not run by default, +// [tests_that_abort] means we have a tag to run them with. +TEST_CASE("Deleting a row from a frozen SoA container causes a fatal error", + "[.][tests_that_abort]") { + auto& node_data = neuron::model().node_data(); // SoA data store + std::optional<::Node> node{std::in_place}; // take ownership of a row in node_data + REQUIRE(node_data.size() == 1); // quick sanity check + auto const frozen_token = node_data.issue_frozen_token(); // mark node_data frozen + node.reset(); // Node destructor will trigger a call to std::terminate. 
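+    // Usage note: because of the "[.]" tag Catch2 hides this case from the default
+    // run; it only executes when selected explicitly by tag, e.g.
+    //   <unit-test-binary> "[tests_that_abort]"
+    // (the binary name here is a placeholder, not something defined in this change).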
+}
diff --git a/test/unit_tests/hoc_python/test_StringFunctions.py b/test/unit_tests/hoc_python/test_StringFunctions.py
new file mode 100644
index 0000000000..ecea51955d
--- /dev/null
+++ b/test/unit_tests/hoc_python/test_StringFunctions.py
@@ -0,0 +1,182 @@
+from neuron import h
+from neuron.expect_hocerr import expect_err
+
+sf = h.StringFunctions()
+
+
+def test_substr():
+    assert sf.substr("foobarfoo", "bar") == 3
+    assert sf.substr("foobarfoo", "abc") == -1
+    assert sf.substr("foobarfoo", "foo") == 0
+
+
+def test_len():
+    assert sf.len("foobarfoo") == 9
+    assert sf.len("") == 0
+
+
+def test_head():
+    pattern = "f.o"
+    text = "foobarshi"
+    head = h.ref("")
+    assert sf.head(text, pattern, head) == 0
+    assert head[0] == ""
+
+    pattern = "b.*$"
+    text = "foobarshi"
+    head = h.ref("")
+    assert sf.head(text, pattern, head) == 3
+    assert head[0] == "foo"
+
+    pattern = "abc"
+    text = "foobarshi"
+    head = h.ref("")
+    assert sf.head(text, pattern, head) == -1
+    assert head[0] == ""
+
+
+def test_tail():
+    pattern = "s.i$"
+    text = "foobarshi"
+    tail = h.ref("")
+    assert sf.tail(text, pattern, tail) == 9
+    assert tail[0] == ""
+
+    pattern = "^.*r"
+    text = "foobarshi"
+    tail = h.ref("")
+    assert sf.tail(text, pattern, tail) == 6
+    assert tail[0] == "shi"
+
+    pattern = "abc"
+    text = "foobarshi"
+    tail = h.ref("")
+    assert sf.tail(text, pattern, tail) == -1
+    assert tail[0] == ""
+
+
+def test_rtrim():
+    text = "bar\t; \t\n"
+    out = h.ref("")
+    sf.rtrim(text, out)
+    assert out[0] == "bar\t;"
+
+    sf.rtrim(text, out, " \t\n\f\v\r;")
+    assert out[0] == "bar"
+
+
+def test_ltrim():
+    text = " \t \n# foo"
+    out = h.ref("")
+    sf.ltrim(text, out)
+    assert out[0] == "# foo"
+
+    sf.ltrim(text, out, " \t\n\f\r\v#")
+    assert out[0] == "foo"
+
+
+def test_right():
+    s = h.ref("foobarshi")
+    sf.right(s, 6)
+    assert s[0] == "shi"
+    s = h.ref("foobarshi")
+    sf.right(s, 0)
+    assert s[0] == "foobarshi"
+    # Out of range
+    # s = h.ref("foobarshi")
+    # sf.right(s, 10)
+    # assert(s[0] == "foobarshi")
+
+
+def test_left():
+    s = h.ref("foobarshi")
+    sf.left(s, 3)
+    assert s[0] == "foo"
+    s = h.ref("foobarshi")
+    sf.left(s, 0)
+    assert s[0] == ""
+    # Out of range
+    # s = h.ref("foobarshi")
+    # sf.left(s, 10)
+    # assert(s[0] == "foo")
+
+
+def test_is_name():
+    assert sf.is_name("xvalue")
+    assert not sf.is_name("xfoo")
+
+
+def test_alias():
+    v = h.Vector()
+    sf.alias(v, "xv", h.xvalue)
+    assert v.xv == h.xvalue
+    h("""double x[2]""")
+    sf.alias(v, "xy", h._ref_x[0])
+    v.xy = 3.14
+    assert h.x[0] == v.xy
+
+
+def test_alias_list():
+    v = h.Vector()
+    expect_err("sf.alias_list(v)")  # no hoc String template
+    # after an expect error, must manually delete
+    del v
+    assert len(locals()) == 0  # sonarcloud says return value must be used
+
+    v = h.Vector()
+    h.load_file("stdrun.hoc")
+    assert len(sf.alias_list(v)) == 0
+    sf.alias(v, "xv1", h.xvalue)  # Add alias
+    assert len(sf.alias_list(v)) == 1
+    sf.alias(v, "xv2", h.xvalue)  # Add alias
+    assert len(sf.alias_list(v)) == 2
+    sf.alias(v, "xv1")  # Remove 1 alias
+    assert len(sf.alias_list(v)) == 1
+    sf.alias(v)  # Remove all
+    assert len(sf.alias_list(v)) == 0
+
+
+def test_references():
+    # This function prints the number of references
+    sf.references(None)
+    v = h.Vector()
+
+    # different ways a hoc object can be referenced
+    h.hoc_obj_[0] = v
+    l = h.List()
+    l.append(v)
+    h(
+        """
+objref o
+begintemplate Foo
+public o, o2
+objref o, o2[2]
+proc init() {
+    o = $o1
+    o2[0] = o
+}
+endtemplate Foo
+    """
+    )
+    foo = h.Foo(v)
+    h.o = v
+    box = h.VBox()
+    box.ref(v)
+
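+    # At this point v is also reachable from hoc_obj_[0], the List l, Foo's o
+    # and o2[0], the top-level objref o, and the VBox, in addition to the
+    # Python name v itself.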
sf.references(foo.o) + box.ref(None) # without this, then assert error when box is destroyed + + +def test_is_point_process(): + sf = h.StringFunctions() # no destructor (removed a non-coverable line) + s = h.Section() + assert not sf.is_artificial(h.List()) + assert sf.is_point_process(h.IClamp(s(0.5))) + assert sf.is_point_process(h.NetStim()) + + +def test_is_artificial(): + s = h.Section() + assert not sf.is_artificial(h.List()) + assert not sf.is_artificial(h.IClamp(s(0.5))) + assert sf.is_artificial(h.NetStim()) diff --git a/test/unit_tests/iovec.cpp b/test/unit_tests/iovec.cpp new file mode 100644 index 0000000000..946ad7c7fa --- /dev/null +++ b/test/unit_tests/iovec.cpp @@ -0,0 +1,45 @@ +#include +#include + +#include "oc_ansi.h" + +#include + +// This function is the one that is used in all nrn-modeldb-ci +// Keep as is +int cmpdfn(double a, double b) { + return ((a) <= (b)) ? (((a) == (b)) ? 0 : -1) : 1; +} + +TEST_CASE("Test nrn_mlh_gsort output", "[nrn_gsort]") { + std::vector input{1.2, -2.5, 5.1}; + + { + std::vector indices(input.size()); + // all values from 0 to size - 1 + std::iota(indices.begin(), indices.end(), 0); + + // for comparison + auto sorted_input = input; + std::sort(sorted_input.begin(), sorted_input.end()); + + SECTION("Test sorting") { + nrn_mlh_gsort(input.data(), indices.data(), input.size(), cmpdfn); + for (auto i = 0; i < input.size(); ++i) { + REQUIRE(sorted_input[i] == input[indices[i]]); + } + } + } + + { + std::vector indices{2, 1, 1}; + std::vector expected_result{1, 1, 2}; // as -2,5 < 5.1 + + SECTION("Test sorting with repeated indices") { + nrn_mlh_gsort(input.data(), indices.data(), input.size(), cmpdfn); + for (auto i = 0; i < input.size(); ++i) { + REQUIRE(indices[i] == expected_result[i]); + } + } + } +} diff --git a/test/unit_tests/matrix.cpp b/test/unit_tests/matrix.cpp new file mode 100644 index 0000000000..52f2b88abd --- /dev/null +++ b/test/unit_tests/matrix.cpp @@ -0,0 +1,414 @@ +#include +#include +#include +#include "ivocvect.h" + +#include +using namespace Catch::literals; + +template +class ApproxOrOpposite: public Catch::MatcherBase> { + std::vector vec; + + public: + ApproxOrOpposite(std::vector vec) + : vec(vec) {} + + bool match(std::vector const& in) const override { + if (in.size() != vec.size()) { + return false; + } + bool matched = true; + for (int i = 0; i < in.size(); ++i) { + if (in[i] != approx(vec[i])) { + matched = false; + break; + } + } + if (matched) { + return true; + } + matched = true; + for (int i = 0; i < in.size(); ++i) { + if (in[i] != approx(-1 * vec[i])) { + matched = false; + break; + } + } + + return matched; + } + + std::string describe() const override { + std::ostringstream ss; + ss << "is not approx or opposite approx of " << Catch::Detail::stringify(vec); + return ss.str(); + } + template ::value>::type> + ApproxOrOpposite& epsilon(T const& newEpsilon) { + approx.epsilon(newEpsilon); + return *this; + } + template ::value>::type> + ApproxOrOpposite& margin(T const& newMargin) { + approx.margin(newMargin); + return *this; + } + template ::value>::type> + ApproxOrOpposite& scale(T const& newScale) { + approx.scale(newScale); + return *this; + } + + mutable Catch::Detail::Approx approx = Catch::Detail::Approx::custom(); +}; + +bool compareMatrix(OcMatrix& m, const std::vector>& ref) { + REQUIRE(m.nrow() == ref.size()); + for (int i = 0; i < m.nrow(); ++i) { + REQUIRE(m.ncol() == ref[i].size()); + for (int j = 0; j < m.ncol(); ++j) { + REQUIRE(m.getval(i, j) == 
Catch::Detail::Approx(ref[i][j]).margin(1e-10)); + } + } + return true; +} + +SCENARIO("A Matrix", "[neuron_ivoc][OcMatrix]") { + GIVEN("A 3x3 Full matrix") { + OcFullMatrix m{3, 3}; + REQUIRE(m.nrow() == 3); + REQUIRE(m.ncol() == 3); + { + m.ident(); + REQUIRE(compareMatrix(m, {{1., 0., 0.}, {0., 1., 0}, {0., 0., 1.}})); + } + { + double* value = m.mep(0, 0); + REQUIRE(*value == 1); + *value = 3; + REQUIRE(m.getval(0, 0) == 3); + } + m.resize(4, 3); + { + m.setrow(3, 2.0); + REQUIRE(compareMatrix(m, {{3., 0., 0.}, {0., 1., 0.}, {0., 0., 1.}, {2., 2., 2.}})); + } + { + std::vector x, y; + m.nonzeros(x, y); + std::vector res_x = {0, 1, 2, 3, 3, 3}; + std::vector res_y = {0, 1, 2, 0, 1, 2}; + REQUIRE(x == res_x); + REQUIRE(y == res_y); + } + { + m.setcol(1, 4.0); + REQUIRE(compareMatrix(m, {{3., 4., 0.}, {0., 4., 0.}, {0., 4., 1.}, {2., 4., 2.}})); + } + { + m.setdiag(0, 5.0); + REQUIRE(compareMatrix(m, {{5., 4., 0.}, {0., 5., 0.}, {0., 4., 5.}, {2., 4., 2.}})); + } + { + m.setdiag(1, 6.0); + REQUIRE(compareMatrix(m, {{5., 6., 0.}, {0., 5., 6.}, {0., 4., 5.}, {2., 4., 2.}})); + } + { + m.setdiag(-1, 7.0); + REQUIRE(compareMatrix(m, {{5., 6., 0.}, {7., 5., 6.}, {0., 7., 5.}, {2., 4., 7.}})); + } + + { + OcFullMatrix n(4, 3); + n.ident(); + m.add(&n, &m); + REQUIRE(compareMatrix(m, {{6., 6., 0.}, {7., 6., 6.}, {0., 7., 6.}, {2., 4., 7.}})); + } + { + OcFullMatrix n(4, 3); + m.bcopy(&n, 1, 1, 3, 2, 0, 0); + REQUIRE(compareMatrix(n, {{6., 6., 0.}, {7., 6., 0.}, {4., 7., 0.}, {0., 0., 0.}})); + } + { + OcFullMatrix n(4, 3); + m.transpose(&n); + REQUIRE(compareMatrix(n, {{6., 7., 0., 2.}, {6., 6., 7., 4.}, {0., 6., 6., 7.}})); + } + { + IvocVect v(3); + m.getrow(2, &v); + REQUIRE_THAT(v.vec(), Catch::Matchers::Approx(std::vector({0., 7., 6.}))); + m.setrow(0, &v); + REQUIRE(compareMatrix(m, {{0., 7., 6.}, {7., 6., 6.}, {0., 7., 6.}, {2., 4., 7.}})); + } + { + IvocVect v(4); + m.getcol(2, &v); + REQUIRE_THAT(v.vec(), Catch::Matchers::Approx(std::vector({6., 6., 6., 7.}))); + m.setcol(1, &v); + REQUIRE(compareMatrix(m, {{0., 6., 6.}, {7., 6., 6.}, {0., 6., 6.}, {2., 7., 7.}})); + } + { + m.resize(3, 3); + REQUIRE(compareMatrix(m, {{0., 6., 6.}, {7., 6., 6.}, {0., 6., 6.}})); + } + { + OcFullMatrix n(4, 3); + m.exp(&n); + REQUIRE(n(0, 0) == Catch::Detail::Approx(442925.)); + REQUIRE(compareMatrix(n, + {{442925., 938481., 938481.}, + {651970., 1381407., 1381407.}, + {442926., 938481., 938482.}})); + } + { + m.pow(2, &m); + REQUIRE(compareMatrix(m, {{42., 72., 72.}, {42., 114., 114.}, {42., 72., 72.}})); + } + { + int e{}; + double det = m.det(&e); + REQUIRE(det == 0.); + REQUIRE(e == 0); + } + *m.mep(2, 0) = 1; + *m.mep(2, 2) = 2; + { + int e{}; + double det = m.det(&e); + REQUIRE(det == -1.2348_a); + REQUIRE(e == 5); + } + { + OcFullMatrix n(4, 3); + m.inverse(&n); + n.resize(3, 3); // ??? 
+ REQUIRE(compareMatrix(n, + {{0.064625850, -0.040816326, 0.}, + {-0.00024295432, -0.0000971817, 0.01428571}, + {-0.023566569, 0.0239067055, -0.014285714}})); + n.zero(); + REQUIRE(compareMatrix(n, {{0., 0., 0.}, {0., 0., 0.}, {0., 0., 0.}})); + } + { + IvocVect v(3); + m.getdiag(1, &v); + REQUIRE_THAT(v.vec(), Catch::Matchers::Approx(std::vector({72., 114., 0.}))); + v.vec() = {0., 72., 114.}; + m.setdiag(-1, &v); + REQUIRE(compareMatrix(m, {{42., 72., 72.}, {72., 114., 114.}, {1., 114., 2.}})); + } + { + IvocVect v(3); + m.getdiag(-2, &v); + REQUIRE(v.vec()[2] == Catch::Detail::Approx(1.0)); + v.vec() = {1., 0., 0.}; + m.setdiag(2, &v); + REQUIRE(compareMatrix(m, {{42., 72., 1.}, {72., 114., 114.}, {1., 114., 2.}})); + } + { + IvocVect v(3); + v.vec() = {1, 1, 1}; + IvocVect vout(3); + m.mulv(&v, &vout); + REQUIRE_THAT(vout.vec(), + Catch::Matchers::Approx(std::vector({115., 300., 117.}))); + } + { + OcFullMatrix n(3, 3); + m.copy(&n); + REQUIRE(compareMatrix(n, {{42., 72., 1.}, {72., 114., 114.}, {1., 114., 2.}})); + OcFullMatrix o(3, 3); + m.mulm(&n, &o); + REQUIRE(compareMatrix( + o, {{6949., 11346., 8252.}, {11346., 31176., 13296.}, {8252., 13296., 13001.}})); + } + { + OcFullMatrix n(3, 3); + m.muls(2, &n); + REQUIRE(compareMatrix(n, {{84., 144., 2.}, {144., 228., 228.}, {2., 228., 4.}})); + } + { + IvocVect v(3); + v.vec() = {1, 1, 1}; + IvocVect vout(3); + m.solv(&v, &vout, false); + REQUIRE_THAT(vout.vec(), + Catch::Matchers::Approx( + std::vector({0.0088700, 0.0087927, -0.00562299}))); + m.solv(&v, &vout, true); + REQUIRE_THAT(vout.vec(), + Catch::Matchers::Approx( + std::vector({0.0088700, 0.0087927, -0.00562299}))); + } + { + IvocVect v(3); + OcFullMatrix n(3, 3); + v.vec() = {1, 2, 3}; + m.setrow(0, &v); + v.vec() = {2, 1, 4}; + m.setrow(1, &v); + v.vec() = {3, 4, 1}; + m.setrow(2, &v); + m.symmeigen(&n, &v); + REQUIRE_THAT(v.vec(), + Catch::Matchers::Approx( + std::vector({7.074673, -0.88679, -3.18788}))); + n.getcol(0, &v); + REQUIRE_THAT(v.vec(), ApproxOrOpposite({0.50578, 0.5843738, 0.634577})); + n.getcol(1, &v); + REQUIRE_THAT(v.vec(), ApproxOrOpposite({-0.8240377, 0.544925, 0.154978})); + n.getcol(2, &v); + REQUIRE_THAT(v.vec(), ApproxOrOpposite({-0.255231, -0.601301, 0.7571611})); + } + { + m.resize(2, 2); + OcFullMatrix u(2, 2); + OcFullMatrix v(2, 2); + IvocVect d(2); + m.svd1(&u, &v, &d); + REQUIRE_THAT(d.vec(), Catch::Matchers::Approx(std::vector({3., 1.}))); + // For comparison of u and v and problems with signs, see: + // https://www.educative.io/blog/sign-ambiguity-in-singular-value-decomposition + IvocVect c(4); + c.vec() = {u(0, 0), u(0, 1), v(0, 0), v(0, 1)}; + CHECK_THAT(c.vec(), ApproxOrOpposite({0.70710, 0.70710, 0.70710, 0.70710})); + c.vec() = {u(1, 0), u(1, 1), v(1, 0), v(1, 1)}; + CHECK_THAT(c.vec(), ApproxOrOpposite({0.70710, -0.70710, -0.70710, 0.70710})); + } + { + m.resize(2, 3); + { + IvocVect s(3); + s.vec() = {3., 2., 2.}; + m.setrow(0, &s); + s.vec() = {2., 3., -2.}; + m.setrow(1, &s); + } + OcFullMatrix u(2, 2); + OcFullMatrix v(3, 3); + IvocVect d(2); + m.svd1(&u, &v, &d); + REQUIRE_THAT(d.vec(), Catch::Matchers::Approx(std::vector({5., 3.}))); + // For comparison of u and v and problems with signs, see: + // https://www.educative.io/blog/sign-ambiguity-in-singular-value-decomposition + IvocVect c(5); + c.vec() = {u(0, 0), u(0, 1), v(0, 0), v(0, 1), v(0, 2)}; + CHECK_THAT(c.vec(), + ApproxOrOpposite({0.70710, 0.70710, 0.70710, 0.70710, 0.}).margin(1e-10)); + c.vec() = {u(1, 0), u(1, 1), v(1, 0), v(1, 1), v(1, 2)}; + CHECK_THAT(c.vec(), + 
ApproxOrOpposite({0.70710, -0.70710, 0.235702, -0.235702, 0.942809})); + c.vec() = {0., 0., v(2, 0), v(2, 1), v(2, 2)}; + CHECK_THAT(c.vec(), ApproxOrOpposite({0., 0., 0.66666, -0.66666, -0.3333333})); + } + { // Try with vectors too short + IvocVect s(2); + s.vec() = {1., 2.}; + m.setrow(0, &s); + REQUIRE(compareMatrix(m, {{1., 2., 2.}, {2., 3., -2.}})); + m.setcol(0, &s); + REQUIRE(compareMatrix(m, {{1., 2., 2.}, {2., 3., -2.}})); + IvocVect d(1); + d.vec() = {1.}; + m.setdiag(0, &d); + REQUIRE(compareMatrix(m, {{1., 2., 2.}, {2., 3., -2.}})); + } + } + GIVEN("A 3x3 Sparse matrix") { + OcSparseMatrix m{3, 3}; + REQUIRE(m.nrow() == 3); + REQUIRE(m.ncol() == 3); + { + m.ident(); + REQUIRE(compareMatrix(m, {{1., 0., 0.}, {0., 1., 0}, {0., 0., 1.}})); + REQUIRE(m.sprowlen(1) == 1); + } + { + std::vector x, y, result = {0, 1, 2}; + m.nonzeros(x, y); + REQUIRE(x == result); + REQUIRE(y == result); + } + { + double* pmep = m.mep(1, 1); + REQUIRE(*pmep == 1); + pmep = m.mep(1, 0); + REQUIRE(*pmep == 0); + } + { + int col{}; + double value = m.spgetrowval(2, 0, &col); + REQUIRE(col == 2); + REQUIRE(value == Catch::Detail::Approx(1.0)); + } + { // m.zero() don't erase the matrix but only replace existing values by 0. + m.zero(); + REQUIRE(m.sprowlen(2) == 1); + REQUIRE(compareMatrix(m, {{0., 0., 0.}, {0., 0., 0}, {0., 0., 0.}})); + } + { + m.setrow(1, 2); + REQUIRE(compareMatrix(m, {{0., 0., 0.}, {2., 2., 2.}, {0., 0., 0.}})); + } + { + m.setcol(0, 3); + REQUIRE(compareMatrix(m, {{3., 0., 0.}, {3., 2., 2.}, {3., 0., 0.}})); + } + { + m.setdiag(0, 1); + REQUIRE(compareMatrix(m, {{1., 0., 0.}, {3., 1., 2.}, {3., 0., 1.}})); + } + { + m.setdiag(-1, 4); + REQUIRE(compareMatrix(m, {{1., 0., 0.}, {4., 1., 2.}, {3., 4., 1.}})); + } + { + m.setdiag(2, 5); + REQUIRE(compareMatrix(m, {{1., 0., 5.}, {4., 1., 2.}, {3., 4., 1.}})); + } + REQUIRE(m.sprowlen(1) == 3); + + { + IvocVect v(3); + v.vec() = {1, 2, 3}; + m.setrow(0, &v); + REQUIRE(compareMatrix(m, {{1., 2., 3.}, {4., 1., 2.}, {3., 4., 1.}})); + } + { + IvocVect v(3); + v.vec() = {1, 2, 3}; + m.setcol(0, &v); + REQUIRE(compareMatrix(m, {{1., 2., 3.}, {2., 1., 2.}, {3., 4., 1.}})); + } + { + IvocVect v(3); + v.vec() = {1, 2, 3}; + m.setdiag(0, &v); + REQUIRE(compareMatrix(m, {{1., 2., 3.}, {2., 2., 2.}, {3., 4., 3.}})); + } + { + IvocVect v(3); + v.vec() = {0., 1., 2.}; + m.setdiag(-1, &v); + REQUIRE(compareMatrix(m, {{1., 2., 3.}, {1., 2., 2.}, {3., 2., 3.}})); + } + { + IvocVect v(3); + v.vec() = {1, 2, 3}; + IvocVect out(3); + m.mulv(&v, &out); + REQUIRE_THAT(out.vec(), Catch::Matchers::Approx(std::vector({14., 11., 16.}))); + } + { + IvocVect v(3); + v.vec() = {1, 1, 1}; + IvocVect vout(3); + m.solv(&v, &vout, false); + REQUIRE_THAT(vout.vec(), Catch::Matchers::Approx(std::vector({0., 0.5, 0.}))); + m.solv(&v, &vout, true); + REQUIRE_THAT(vout.vec(), Catch::Matchers::Approx(std::vector({0., 0.5, 0.}))); + } + } +} diff --git a/test/unit_tests/utils/enumerate.cpp b/test/unit_tests/utils/enumerate.cpp new file mode 100644 index 0000000000..2ef2dd00bf --- /dev/null +++ b/test/unit_tests/utils/enumerate.cpp @@ -0,0 +1,85 @@ +#include + +#include "utils/enumerate.h" + +#include + + +TEST_CASE("apply_to_first", "[Neuron]") { + std::vector x{1.0, 2.0, 2.0, 3.0}; + + apply_to_first(x, 2.0, [](auto it) { *it = 5.0; }); + REQUIRE(x == std::vector({1.0, 5.0, 2.0, 3.0})); +} + +TEST_CASE("erase_first", "[Neuron]") { + std::vector x{1.0, 2.0, 2.0, 3.0}; + + erase_first(x, 2.0); + REQUIRE(x == std::vector({1.0, 2.0, 3.0})); +} + +TEST_CASE("reverse", "[Neuron]") 
{
+    std::vector x{1.0, 2.0, 3.0};
+
+    for (auto& i: reverse(x)) {
+        i *= -1.0;
+    }
+    REQUIRE(x == std::vector({-1.0, -2.0, -3.0}));
+}
+
+TEST_CASE("reverse; no-copy", "[Neuron]") {
+    std::vector x{1.0, 2.0, 3.0};
+
+    auto reverse_iterable = reverse(x);
+
+    for (auto& xx: x) {
+        xx *= -1.0;
+    }
+
+    for (const auto& xx: reverse_iterable) {
+        REQUIRE(xx < 0.0);
+    }
+}
+
+TEST_CASE("range", "[Neuron]") {
+    std::vector x{1.0, 2.0, 3.0};
+
+    std::vector<std::size_t> v{};
+    for (std::size_t i: range(x)) {
+        v.push_back(i);
+    }
+    REQUIRE(v == std::vector<std::size_t>{0, 1, 2});
+}
+
+TEST_CASE("enumerate", "[Neuron]") {
+    std::vector x{1.0, 2.0, 3.0};
+
+    int j = 0;
+    for (auto&& [i, elem]: enumerate(x)) {
+        if (i == 0)
+            REQUIRE(elem == 1.0);
+        if (i == 1)
+            REQUIRE(elem == 2.0);
+        if (i == 2)
+            REQUIRE(elem == 3.0);
+        REQUIRE(i == j);
+        ++j;
+    }
+}
+
+TEST_CASE("renumerate", "[Neuron]") {
+    std::vector x{1.0, 2.0, 3.0};
+
+    int j = x.size() - 1;
+    for (auto&& [i, elem]: renumerate(x)) {
+        if (i == 0)
+            REQUIRE(elem == 1.0);
+        if (i == 1)
+            REQUIRE(elem == 2.0);
+        if (i == 2)
+            REQUIRE(elem == 3.0);
+        REQUIRE(i == j);
+        --j;
+    }
+}
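Editorial note: for readers unfamiliar with the enumerate/renumerate helpers exercised by the tests above, the stand-alone sketch below illustrates the index/value iteration pattern they rely on. It is an assumption-laden illustration, not the implementation in utils/enumerate.h; the helper name enumerate_sketch and the use of std::reference_wrapper are illustrative choices only.

    #include <cstddef>
    #include <functional>
    #include <iostream>
    #include <utility>
    #include <vector>

    // Build an eager (index, reference) view of a container. This mirrors what the
    // "enumerate" test checks: indices count up from zero and the second element
    // refers back to the container's own storage.
    template <typename Container>
    auto enumerate_sketch(Container& c) {
        using value_type = typename Container::value_type;
        std::vector<std::pair<std::size_t, std::reference_wrapper<value_type>>> out;
        out.reserve(c.size());
        std::size_t i{};
        for (auto& elem: c) {
            out.emplace_back(i++, std::ref(elem));
        }
        return out;
    }

    int main() {
        std::vector<double> x{1.0, 2.0, 3.0};
        for (auto&& [i, elem]: enumerate_sketch(x)) {
            elem.get() *= 10.0;  // the reference points back into x
            std::cout << i << " -> " << elem.get() << '\n';
        }
        // x is now {10.0, 20.0, 30.0}
    }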