Commit b4b1db00 by tzh

8.1

parents

Too many changes to show.

To preserve performance only 1000 of 1000+ files are displayed.

# Azure Pipelines: configure, build, install, and test OpenMM on Windows.
jobs:
# Configure, build, install, and test job
- job: 'windows_build'
  displayName: 'Windows VS2017'
  pool:
    vmImage: 'vs2017-win2016'
  timeoutInMinutes: 360
  variables:
    llvm.version: '7.0.1'
    mkl.version: '2019.1'
    python.version: '3.6'
    cmake.build.type: 'Release'
  steps:
  # Install Chocolatey (https://chocolatey.org/install#install-with-powershellexe)
  - powershell: |
      Set-ExecutionPolicy Bypass -Scope Process -Force
      iex ((New-Object System.Net.WebClient).DownloadString('https://chocolatey.org/install.ps1'))
      Write-Host "##vso[task.setvariable variable=PATH]$env:PATH"
      choco --version
    displayName: "Install Chocolatey"
  # Install Miniconda plus doc/build tools, then prepend it to PATH/LIB.
  # NOTE(review): the quoted echo '##vso[...]' lines emit literal quotes under
  # cmd.exe; verify the agent still parses the logging command — confirm.
  - script: |
      choco install -y miniconda3
      choco install -y doxygen.install
      choco install -y graphviz
      choco install -y 7zip.install
      choco install -y wget
      set PATH=C:\tools\miniconda3\Scripts;C:\tools\miniconda3;C:\tools\miniconda3\Library\bin;%PATH%
      echo '##vso[task.setvariable variable=PATH]%PATH%'
      set LIB=C:\tools\miniconda3\Library\lib;%LIB%
      echo '##vso[task.setvariable variable=LIB]%LIB%'
      conda --version
    displayName: "Install Miniconda"
  # Configure Miniconda
  - script: |
      conda config --set always_yes yes
      conda info
    displayName: "Configure Miniconda"
  # Create conda environment
  # Note: conda activate doesn't work here, because it creates a new shell!
  - script: |
      conda install cmake ^
      cython ^
      ninja ^
      numpy ^
      swig ^
      pytest ^
      pytest-xdist ^
      python=$(python.version)
      conda list
    displayName: "Install conda packages"
  # Download OpenCL Headers and build the ICD loader.
  # These scripts run under cmd.exe, so output is discarded with "> nul"
  # ("$null" is PowerShell syntax and would create a file literally named $null).
  - script: |
      setlocal EnableDelayedExpansion
      call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
      mkdir opencl
      cd opencl
      wget https://www.khronos.org/registry/cl/specs/opencl-icd-1.2.11.0.tgz -O opencl-icd-1.2.11.0.tgz
      7z x opencl-icd-1.2.11.0.tgz > nul
      7z x opencl-icd-1.2.11.0.tar > nul
      robocopy .\icd . /E /MOVE
      mkdir inc\CL > nul
      wget https://github.com/KhronosGroup/OpenCL-Headers/archive/master.zip
      7z x master.zip
      move .\OpenCL-Headers-master\CL\*.h .\inc\CL\
      mkdir lib > nul
      cd lib
      cmake -G Ninja .. ^
      -DCMAKE_CXX_COMPILER=cl.exe ^
      -DCMAKE_C_COMPILER=cl.exe
      cmake --build . ^
      -- -j %NUMBER_OF_PROCESSORS%
    displayName: "Download and install OpenCL"
    workingDirectory: $(Pipeline.Workspace)
  # Configure
  - script: |
      setlocal EnableDelayedExpansion
      call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
      mkdir build & cd build
      cmake -G Ninja ^
      -DOPENCL_INCLUDE_DIR=$(Pipeline.Workspace)/opencl/inc ^
      -DOPENCL_LIBRARY=$(Pipeline.Workspace)/opencl/lib/OpenCL.lib ^
      -DCMAKE_BUILD_TYPE=$(cmake.build.type) ^
      -DCMAKE_CXX_COMPILER=cl.exe ^
      -DCMAKE_C_COMPILER=cl.exe ^
      -DCMAKE_INSTALL_PREFIX=../install ^
      -DOPENMM_BUILD_EXAMPLES=OFF ^
      -DOPENMM_BUILD_OPENCL_TESTS=OFF ^
      $(Build.SourcesDirectory)
    displayName: "Configure OpenMM with CMake"
    workingDirectory: $(Build.BinariesDirectory)
  # Build
  - script: |
      call "C:\Program Files (x86)\Microsoft Visual Studio\2017\Enterprise\VC\Auxiliary\Build\vcvarsall.bat" x64
      set SWIG_LIB=C:\tools\miniconda3\Library\bin\Lib
      cmake --build . ^
      --config $(cmake.build.type) ^
      -- -j %NUMBER_OF_PROCESSORS%
      cmake --build . --target install
      cmake --build . --target PythonInstall
    displayName: "Build OpenMM"
    workingDirectory: $(Build.BinariesDirectory)/build
  # Test. PYTHONPATH points at the Miniconda installed above on C:\ (the
  # original D:\ drive letter did not match any install step in this job).
  - script: |
      python $(Build.SourcesDirectory)\devtools\run-ctest.py --job-duration 50 --parallel %NUMBER_OF_PROCESSORS%
      cd python\tests
      python --version
      set PYTHONPATH=C:\tools\miniconda3\Lib\site-packages
      dir %PYTHONPATH%
      py.test -v -n %NUMBER_OF_PROCESSORS%
    workingDirectory: $(Build.BinariesDirectory)/build
    displayName: "Run OpenMM tests"
# Python bytecode caches
__pycache__
# Build output directories (build, build2, ...)
build
build?
# macOS
.DS_Store
language: python
# apt packages shared by the Linux jobs (docs + FFTW for the CPU platform).
addons:
apt:
packages:
- doxygen
- python-numpy
- python-scipy
- libfftw3-dev
env:
global:
# Directory of ccache compiler shims; jobs set CC/CXX to paths under it.
- CCACHE=$HOME/ccache/lib/ccache/bin
# Build matrix. Each entry sets OPENCL/CUDA switches consumed by
# before_install, plus the CMAKE_FLAGS passed verbatim to cmake in `script`.
jobs:
  include:
    # OpenCL-only build, run against the AMD APP SDK CPU ICD.
    # (Fixed: -DOPENMM_BUILD_OPENCL_LIB was missing its -D prefix, so CMake
    # received it as a bogus positional argument instead of a cache entry.)
    - sudo: required
      dist: xenial
      name: "CPU OpenCL"
      env: OPENCL=true
        CUDA=false
        CC=$CCACHE/gcc
        CXX=$CCACHE/g++
        CMAKE_FLAGS="
        -DOPENMM_BUILD_OPENCL_LIB=ON
        -DOPENMM_BUILD_OPENCL_TESTS=ON
        -DOPENMM_BUILD_STATIC_LIB=OFF
        -DOPENMM_BUILD_CPU_LIB=OFF
        -DOPENMM_BUILD_REFERENCE_TESTS=OFF
        -DOPENMM_BUILD_SERIALIZATION_TESTS=OFF
        -DOPENMM_BUILD_PME_PLUGIN=OFF
        -DOPENMM_BUILD_AMOEBA_PLUGIN=OFF
        -DOPENMM_BUILD_PYTHON_WRAPPERS=OFF
        -DOPENMM_BUILD_C_AND_FORTRAN_WRAPPERS=OFF
        -DOPENMM_BUILD_EXAMPLES=OFF
        -DOPENCL_INCLUDE_DIR=$HOME/AMDAPPSDK/include
        -DOPENCL_LIBRARY=$HOME/AMDAPPSDK/lib/x86_64/libOpenCL.so"
      addons: {apt: {packages: []}}
    # Compile-only check of the CUDA platform (no GPU available to run tests).
    - sudo: required
      dist: xenial
      name: "CUDA Compile"
      env: CUDA=true
        OPENCL=false
        CUDA_VERSION="7.5-18"
        CC=$CCACHE/gcc
        CXX=$CCACHE/g++
        CMAKE_FLAGS="
        -DOPENMM_BUILD_CUDA_TESTS=OFF
        -DOPENMM_BUILD_OPENCL_TESTS=OFF
        -DOPENMM_BUILD_PYTHON_WRAPPERS=OFF
        -DOPENMM_BUILD_REFERENCE_TESTS=OFF
        -DOPENMM_BUILD_SERIALIZATION_TESTS=OFF
        -DOPENMM_BUILD_C_AND_FORTRAN_WRAPPERS=OFF
        -DOPENMM_BUILD_EXAMPLES=OFF
        -DOPENCL_LIBRARY=/usr/local/cuda-7.5/lib64/libOpenCL.so
        -DCUDA_CUDART_LIBRARY=/usr/local/cuda-7.5/lib64/libcudart.so
        -DCUDA_NVCC_EXECUTABLE=/usr/local/cuda-7.5/bin/nvcc
        -DCUDA_SDK_ROOT_DIR=/usr/local/cuda-7.5/
        -DCUDA_TOOLKIT_INCLUDE=/usr/local/cuda-7.5/include
        -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-7.5/"
      addons: {apt: {packages: []}}
    # macOS build (objective-c is Travis's generic macOS language).
    - language: objective-c
      os: osx
      osx_image: xcode9.3
      name: "Mac OS"
      env: OPENCL=false
        CUDA=false
        CMAKE_FLAGS="
        -DOPENMM_BUILD_OPENCL_TESTS=OFF"
      addons: {apt: {packages: []}}
    # Static-library build.
    - sudo: false
      dist: xenial
      python: "3.6"
      name: "Static Lib"
      env: OPENCL=false
        CUDA=false
        CC=$CCACHE/clang
        CXX=$CCACHE/clang++
        CMAKE_FLAGS="-DOPENMM_BUILD_STATIC_LIB=ON"
    # Python 3.6 build; also generates and deploys the API docs.
    - sudo: false
      dist: xenial
      python: "3.6"
      name: "Python 3.6"
      env: OPENCL=false
        CUDA=false
        CC=$CCACHE/clang
        CXX=$CCACHE/clang++
        DOCS_DEPLOY=true
        CMAKE_FLAGS="-DOPENMM_GENERATE_API_DOCS=ON"
    # Python 3.8 build.
    - sudo: false
      dist: xenial
      python: "3.8"
      name: "Python 3.8"
      env: OPENCL=false
        CUDA=false
        CC=$CCACHE/gcc
        CXX=$CCACHE/g++
        CMAKE_FLAGS=""
    # Alternative architectures.
    - sudo: required
      dist: bionic
      python: "3.8"
      name: "PPC"
      arch: ppc64le
      env: OPENCL=false
        CUDA=false
        CMAKE_FLAGS=""
    - sudo: required
      dist: bionic
      python: "3.8"
      name: "ARM"
      arch: arm64
      env: OPENCL=false
        CUDA=false
        CMAKE_FLAGS=""
before_install:
# Record the start time; passed to devtools/run-ctest.py via --start-time below.
- START_TIME=$(date +%s)
# Fetch a prebuilt ccache; jobs point CC/CXX at the shims it unpacks.
- wget http://anaconda.org/omnia/ccache/3.2.4/download/${TRAVIS_OS_NAME}-64/ccache-3.2.4-0.tar.bz2
- mkdir -p $HOME/ccache && tar xf ccache-3.2.4-0.tar.bz2 -C $HOME/ccache
# macOS: install build/test dependencies with Homebrew and a Python 3 virtualenv.
- if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
brew cask uninstall oclint;
brew install fftw;
brew upgrade python;
brew install -y https://raw.githubusercontent.com/Homebrew/homebrew-core/5b680fb58fedfb00cd07a7f69f5a621bb9240f3b/Formula/doxygen.rb;
pip3 install virtualenv;
virtualenv -p python3 ~/venv;
source ~/venv/bin/activate;
sudo pip install -U pytest numpy --ignore-installed six;
fi
# The cmake version installed by apt on ARM and PPC is very old,
# so download a newer version.
- if [[ "${TRAVIS_CPU_ARCH}" == "ppc64le" ]]; then
sudo apt-get install libuv1 rhash libstdc++6;
wget https://anaconda.org/conda-forge/cmake/3.17.0/download/linux-ppc64le/cmake-3.17.0-hfb1cb51_0.tar.bz2;
mkdir $HOME/cmake;
tar -xjvf cmake-3.17.0-hfb1cb51_0.tar.bz2 -C $HOME/cmake;
export PATH=$HOME/cmake/bin:$PATH;
fi
- if [[ "${TRAVIS_CPU_ARCH}" == "arm64" ]]; then
sudo apt-get install libuv1 rhash libstdc++6;
wget https://anaconda.org/conda-forge/cmake/3.17.0/download/linux-aarch64/cmake-3.17.0-h28c56e5_0.tar.bz2;
mkdir $HOME/cmake;
tar -xjvf cmake-3.17.0-h28c56e5_0.tar.bz2 -C $HOME/cmake;
export PATH=$HOME/cmake/bin:$PATH;
fi
# Install the AMD APP SDK and register its OpenCL ICD so clinfo/tests can find it.
- if [[ "$OPENCL" == "true" ]]; then
wget http://s3.amazonaws.com/omnia-ci/AMD-APP-SDKInstaller-v3.0.130.135-GA-linux64.tar.bz2;
tar -xjf AMD-APP-SDK*.tar.bz2;
AMDAPPSDK=${HOME}/AMDAPPSDK;
export OPENCL_VENDOR_PATH=${AMDAPPSDK}/etc/OpenCL/vendors;
mkdir -p ${OPENCL_VENDOR_PATH};
sh AMD-APP-SDK*.sh --tar -xf -C ${AMDAPPSDK};
echo libamdocl64.so > ${OPENCL_VENDOR_PATH}/amdocl64.icd;
export LD_LIBRARY_PATH=${AMDAPPSDK}/lib/x86_64:${LD_LIBRARY_PATH};
chmod +x ${AMDAPPSDK}/bin/x86_64/clinfo;
${AMDAPPSDK}/bin/x86_64/clinfo;
sudo apt-get install -y libgl1-mesa-dev;
fi
# Install packages needed for Python: SWIG, Cython, and Gromacs (used by some tests).
# We do this differently on different platforms. Possibly some of this could be unified.
- if [[ "$OPENCL" == "false" && "$CUDA" == "false" && "$TRAVIS_OS_NAME" == "linux" && "${TRAVIS_CPU_ARCH}" != "ppc64le" && "${TRAVIS_CPU_ARCH}" != "arm64" ]]; then
wget http://anaconda.org/omnia/swig/3.0.7/download/linux-64/swig-3.0.7-0.tar.bz2;
mkdir $HOME/swig;
tar -xjvf swig-3.0.7-0.tar.bz2 -C $HOME/swig;
export PATH=$HOME/swig/bin:$PATH;
export SWIG_LIB=$HOME/swig/share/swig/3.0.7;
pip install cython;
sudo apt-get install gromacs;
fi
- if [[ "${TRAVIS_CPU_ARCH}" == "ppc64le" || "${TRAVIS_CPU_ARCH}" == "arm64" ]]; then
sudo apt-get install swig;
pip install cython;
fi
- if [[ "$OPENCL" == "false" && "$CUDA" == "false" && "$TRAVIS_OS_NAME" == "osx" ]]; then
wget http://anaconda.org/omnia/swig/3.0.7/download/osx-64/swig-3.0.7-0.tar.bz2;
mkdir $HOME/swig;
tar -xjvf swig-3.0.7-0.tar.bz2 -C $HOME/swig;
export PATH=$HOME/swig/bin:$PATH;
export SWIG_LIB=$HOME/swig/share/swig/3.0.7;
sudo pip install cython;
fi
# Install the CUDA toolkit packages needed to compile (not run) the CUDA platform.
- if [[ "$CUDA" == "true" ]]; then
wget "http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1404/x86_64/cuda-repo-ubuntu1404_${CUDA_VERSION}_amd64.deb";
sudo dpkg -i cuda-repo-ubuntu1404_${CUDA_VERSION}_amd64.deb;
sudo apt-get update -qq;
export CUDA_APT=${CUDA_VERSION%-*};
export CUDA_APT=${CUDA_APT/./-};
sudo apt-get install -y cuda-drivers cuda-core-${CUDA_APT} cuda-cudart-dev-${CUDA_APT} cuda-cufft-dev-${CUDA_APT};
sudo apt-get clean;
export CUDA_HOME=/usr/local/cuda-${CUDA_VERSION%%-*};
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH};
export PATH=${CUDA_HOME}/bin:${PATH};
sudo apt-get install -y libgl1-mesa-dev;
fi
script:
# Configure with the per-job CMAKE_FLAGS, build, and install into $HOME/OpenMM.
- cmake . $CMAKE_FLAGS -DCMAKE_INSTALL_PREFIX=$HOME/OpenMM
- make -j2 install
# When OpenCL is enabled, run the device-query binary to confirm an ICD is visible.
- if [[ "$OPENCL" == "true" ]]; then ./TestOpenCLDeviceQuery; fi
# CPU-only jobs additionally build/install the Python wrappers and run pytest.
- if [[ "$OPENCL" == "false" && "$CUDA" == "false" ]]; then
if [[ "$TRAVIS_OS_NAME" == "osx" ]]; then
sudo make PythonInstall;
else
make PythonInstall;
fi;
python -m openmm.testInstallation;
(cd python/tests && py.test -v);
fi
# Run the tests, and rerun any failing tests.
- python devtools/run-ctest.py --start-time $START_TIME
# Build and stage the API documentation for the S3 deploy step below.
- if [[ ! -z "${DOCS_DEPLOY}" && "${DOCS_DEPLOY}" = "true" ]]; then
pip install sphinx==2.3.1 sphinxcontrib-bibtex sphinxcontrib-lunrsearch sphinxcontrib-autodoc_doxygen;
make sphinxhtml;
make sphinxpdf;
make C++ApiDocs PythonApiDocs;
mkdir -p api-docs;
mv sphinx-docs/userguide/html api-docs/userguide;
mv sphinx-docs/developerguide/html api-docs/developerguide;
mv api-python api-docs;
mv api-c++ api-docs;
fi
# Upload the staged api-docs/ to S3 (docs.openmm.org/development) from the
# master branch when the job sets DOCS_DEPLOY=true.
deploy:
- provider: s3
access_key_id:
# Travis-encrypted credential (decrypted only on trusted builds); do not edit.
secure: "OEY0sp5FlM4kixFNVAktN6YHwKm5ieMswWCHj3MU+rWsAeGCULl/0kyKTfwCPknVlQv+SXBaPP3I4m1fv9FwHt0bbwy5EfmO4crrW8cE4ofq4vnwHi9UG77oEKKRrbxFUZD1y7ywI2W9SyVI6qfggZlJowRy9GV9Lin5vGzhqsw="
secret_access_key:
# Travis-encrypted credential; do not edit.
secure: "P7DOYn77bH5Gg1obIwCxanhH0Kgh22Pv1pCGvmI6gHXOE1dxf5pnCSQGFKO6g1K6eaN5TbTjh+BmMXmxgkqByvQ4uZtkTGlPq3HI9YeRjZE2H7bRpIYjXXRwA1RMOA3ofLDw1FXNmwMo8BtRIl4jljR5Iw5rytUZmLlk3zgtcr4="
bucket: "docs.openmm.org"
skip_cleanup: true
region: us-west-1
local_dir: api-docs/
upload_dir: development
on:
branch: master
condition: '! -z "${DOCS_DEPLOY}" && "${DOCS_DEPLOY}" = "true"'
# Persist ccache's store between builds.
cache:
directories:
- $HOME/.ccache
This diff is collapsed. Click to expand it.
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age,
body size, disability, ethnicity, gender identity and expression, level of
experience, nationality, personal appearance, race, religion, or sexual
identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
Moreover, project maintainers will strive to offer feedback and advice to
ensure quality and consistency of contributions to the code. Contributions
from outside the group of project maintainers are strongly welcomed but the
final decision as to whether commits are merged into the codebase rests with
the team of project maintainers.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an
appointed representative at an online or offline event. Representation of a
project may be further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at 'peastman@stanford.edu'. The project team will
review and investigate all complaints, and will respond in a way that it deems
appropriate to the circumstances. The project team is obligated to maintain
confidentiality with regard to the reporter of an incident. Further details of
specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage],
version 1.4, available at
[http://contributor-covenant.org/version/1/4][version]
[homepage]: http://contributor-covenant.org
[version]: http://contributor-covenant.org/version/1/4/
## How to Contribute to OpenMM Development
We welcome anyone who wants to contribute to the project, whether by adding a feature,
fixing a bug, or improving documentation. The process is quite simple.
First, it is always best to begin by opening an issue on Github that describes the change you
want to make. This gives everyone a chance to discuss it before you put in a lot of work.
For bug fixes, we will confirm that the behavior is actually a bug and that the proposed fix
is correct. For new features, we will decide whether the proposed feature is something we
want and discuss possible designs for it.
Once everyone is in agreement, the next step is to
[create a pull request](https://help.github.com/en/articles/about-pull-requests) with the code changes.
For larger features, feel free to create the pull request even before the implementation is
finished so as to get early feedback on the code. When doing this, put the letters "WIP" at
the start of the title of the pull request to indicate it is still a work in progress.
For new features, consult the [New Feature Checklist](https://github.com/openmm/openmm/wiki/Checklist-for-Adding-a-New-Feature),
which lists various items that need to be included before the feature can be merged (documentation,
tests, serialization, support for all APIs, etc.). Not every item is necessarily applicable to
every new feature, but usually at least some of them are.
The core developers will review the pull request and may suggest changes. Simply push the
changes to the branch that is being pulled from, and they will automatically be added to the
pull request. In addition, the full test suite is automatically run on every pull request,
and rerun every time a change is added. Once the tests are passing and everyone is satisfied
with the code, the pull request will be merged. Congratulations on a successful contribution!
## CTest/CDash dashboard configuration for OpenMM.
## This file should be placed in the root directory of your project.
## Then modify the CMakeLists.txt file in the root directory of your
## project to incorporate the testing dashboard.
## # The following are required to use Dart and the CDash dashboard
## ENABLE_TESTING()
## INCLUDE(CTest)
set(CTEST_PROJECT_NAME "OpenMM")
set(CTEST_NIGHTLY_START_TIME "00:00:00 EST")
# Test results are submitted over HTTP to the CDash instance below.
set(CTEST_DROP_METHOD "http")
set(CTEST_DROP_SITE "simdash.stanford.edu")
set(CTEST_DROP_LOCATION "/submit.php?project=OpenMM")
set(CTEST_DROP_SITE_CDASH TRUE)
[![GH Actions Status](https://github.com/openmm/openmm/workflows/CI/badge.svg)](https://github.com/openmm/openmm/actions?query=branch%3Amaster+workflow%3ACI)
[![Conda](https://img.shields.io/conda/v/conda-forge/openmm.svg)](https://anaconda.org/conda-forge/openmm)
[![Anaconda Cloud Badge](https://anaconda.org/conda-forge/openmm/badges/downloads.svg)](https://anaconda.org/conda-forge/openmm)
## OpenMM: A High Performance Molecular Dynamics Library
Introduction
------------
[OpenMM](http://openmm.org) is a toolkit for molecular simulation. It can be used either as a stand-alone application for running simulations, or as a library you call from your own code. It
provides a combination of extreme flexibility (through custom forces and integrators), openness, and high performance (especially on recent GPUs) that make it truly unique among simulation codes.
Getting Help
------------
Need Help? Check out the [documentation](http://docs.openmm.org/) and [discussion forums](https://simtk.org/forums/viewforum.php?f=161).
## How to Get Support for OpenMM
There are two main venues for getting support for OpenMM: the [discussion forum](https://simtk.org/forums/viewforum.php?f=161)
and the [Github repository](https://github.com/openmm/openmm). There is some overlap
between the two, but generally speaking the forum is for user oriented issues while the
repository is for developer oriented issues. If you have a question about how to use OpenMM
(including writing programs that access it through its public API), post on the forum. If
you want to suggest a change to the code, or if you think you have found a bug,
open an issue on Github. The core developers monitor both, so don't worry if you aren't
sure which one is most appropriate for your question. We will see it either way.
You also may want to consult the [documentation](http://docs.openmm.org/). It is quite
thorough, and you may be able to find the answer to your question.
\ No newline at end of file
# AppVeyor CI: Windows build with NMake, cclash compiler caching, and an
# OpenCL ICD loader built from the Khronos sources.
os: Visual Studio 2019
platform: x64
configuration: Release
shallow_clone: true

install:
  # Setup shell for VS2019, x64 (the vcvarsall path below is the 2019 one).
  - call "C:\Program Files (x86)\Microsoft Visual Studio\2019\Community\VC\Auxiliary\Build\vcvarsall.bat" amd64
  # Set path to python, git-bash tools.
  - "set PATH=C:\\Python35-x64;C:\\Python35-x64\\Scripts;%PATH%"
  - "set PATH=C:\\Program Files (x86)\\Git\\bin;%PATH%"
  - pip install pytest
  - pip install numpy
  - pip install cython
  # Use cclash for compiler caching (experimental)
  - ps: wget https://github.com/inorton/cclash/releases/download/0.3.14/cclash-0.3.14.zip -OutFile cclash-0.3.14.zip
  - ps: 7z x cclash-0.3.14.zip
  - "set PATH=%APPVEYOR_BUILD_FOLDER%\\cclash-0.3.14;%PATH%"
  - "set CCLASH_DIR=C:\\ProgramData\\cclash"
  # Download and install some OpenMM build dependencies (doxygen, swig).
  # cmd's null device is "nul" — "> null" would create a file named null.
  - choco install -y doxygen.install swig > nul
  # Download OpenCL Headers and build the ICD loader
  - ps: "[System.Net.ServicePointManager]::SecurityProtocol = [System.Net.SecurityProtocolType]::Tls12;"
  - ps: $opencl_registry = "https://www.khronos.org/registry/cl"
  - ps: $opencl_github = "KhronosGroup/OpenCL-Headers"
  - ps: mkdir C:/opencl > $null
  - ps: cd C:/opencl
  - ps: wget $opencl_registry/specs/opencl-icd-1.2.11.0.tgz -OutFile opencl-icd-1.2.11.0.tgz
  - ps: 7z x opencl-icd-1.2.11.0.tgz > $null
  - ps: 7z x opencl-icd-1.2.11.0.tar > $null
  - ps: mv .\icd\* .
  - ps: mkdir inc/CL > $null
  # Scrape the header file list from GitHub and fetch each raw header.
  - ps: wget https://github.com/$opencl_github/tree/master/CL -UseBasicParsing | select -ExpandProperty links | where {$_.href -like "*.h*"} | select -ExpandProperty title | foreach{ wget https://raw.githubusercontent.com/$opencl_github/master/CL/$_ -OutFile inc/CL/$_ -UseBasicParsing}
  - ps: mkdir lib > $null
  - ps: cd lib
  - cmake -G "NMake Makefiles" ..
  - nmake
  - cd %APPVEYOR_BUILD_FOLDER%

build_script:
  # -LA lists all cached variables in the configure log for debugging.
  - ps: $env:CMAKE_FLAGS =
      "-DOPENMM_BUILD_PME_PLUGIN=ON
      -DOPENCL_INCLUDE_DIR=C:/opencl/inc
      -DOPENCL_LIBRARY=C:/opencl/lib/OpenCL.lib
      -DOPENMM_BUILD_EXAMPLES=OFF
      -DOPENMM_BUILD_OPENCL_TESTS=OFF
      -DCMAKE_BUILD_TYPE=Release
      -LA"
  - mkdir build
  - cd build
  - cmake -G "NMake Makefiles" %CMAKE_FLAGS% -DCMAKE_CXX_FLAGS_RELEASE="/MD /Od /Ob0 /D NDEBUG" ..
  - cmake --build . --target install
  - cmake --build . --target PythonInstall

test_script:
  - python %APPVEYOR_BUILD_FOLDER%\devtools\run-ctest.py
  - cd python\tests
  - py.test -v

# Persist the cclash store between builds; invalidated when appveyor.yml changes.
cache:
  - C:\ProgramData\cclash -> appveyor.yml
# Embed every kernel file under ${KERNEL_SOURCE_DIR}/kernels as a static C++
# string constant on class ${KERNEL_SOURCE_CLASS}, generating ${KERNELS_CPP}
# and ${KERNELS_H} from their .cpp.in/.h.in templates.
FILE(GLOB KERNEL_FILES ${KERNEL_SOURCE_DIR}/kernels/*.${KERNEL_FILE_EXTENSION})
SET(KERNEL_FILE_DECLARATIONS)
CONFIGURE_FILE(${KERNEL_SOURCE_DIR}/${KERNEL_SOURCE_CLASS}.cpp.in ${KERNELS_CPP})
# Determine file extension length
STRING(LENGTH ${KERNEL_FILE_EXTENSION} extension_length)
# add one space for the dot
MATH(EXPR extension_length ${extension_length}+1)
FOREACH(file ${KERNEL_FILES})
    # Load the file contents and process it.
    FILE(STRINGS ${file} file_content NEWLINE_CONSUME)
    # Replace all backslashes by double backslashes as they are being put in a C string.
    # Be careful not to replace the backslash before a semicolon as that is the CMAKE
    # internal escaping of a semicolon to prevent it from acting as a list separator.
    STRING(REGEX REPLACE "\\\\([^;])" "\\\\\\\\\\1" file_content "${file_content}")
    # Escape double quotes as being put in a C string.
    STRING(REPLACE "\"" "\\\"" file_content "${file_content}")
    # Split in separate C strings for each line.
    STRING(REPLACE "\n" "\\n\"\n\"" file_content "${file_content}")
    # Determine a name for the variable that will contain this file's contents:
    # the kernel's file name with its extension (and dot) stripped.
    FILE(RELATIVE_PATH filename ${KERNEL_SOURCE_DIR}/kernels ${file})
    STRING(LENGTH ${filename} filename_length)
    MATH(EXPR filename_length ${filename_length}-${extension_length})
    STRING(SUBSTRING ${filename} 0 ${filename_length} variable_name)
    # Record the variable declaration and definition.
    SET(KERNEL_FILE_DECLARATIONS ${KERNEL_FILE_DECLARATIONS}static\ const\ std::string\ ${variable_name};\n)
    FILE(APPEND ${KERNELS_CPP} const\ string\ ${KERNEL_SOURCE_CLASS}::${variable_name}\ =\ \"${file_content}\"\;\n)
ENDFOREACH(file)
CONFIGURE_FILE(${KERNEL_SOURCE_DIR}/${KERNEL_SOURCE_CLASS}.h.in ${KERNELS_H})
### OPENCL_INCLUDE_DIR ###
# find_path caches its first hit, so these calls form a priority chain:
# later calls are no-ops once OPENCL_INCLUDE_DIR is set.
# Try OPENCL_DIR variable before looking elsewhere
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS $ENV{OPENCL_DIR}
    PATH_SUFFIXES "include"
    NO_DEFAULT_PATH
)
# Next look in environment variables set by OpenCL SDK installations
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS
        $ENV{CUDA_PATH}
        $ENV{AMDAPPSDKROOT}
    PATH_SUFFIXES "include"
    NO_DEFAULT_PATH
)
# On Macs, look inside the platform SDK. The header sits directly in the
# framework's Headers directory, so search for the bare file name (the
# original listed "opencl.h" twice; the duplicate was redundant).
if(DEFINED CMAKE_OSX_SYSROOT)
    find_path(OPENCL_INCLUDE_DIR
        NAMES opencl.h
        PATHS
            "${CMAKE_OSX_SYSROOT}/System/Library/Frameworks/OpenCL.framework/Headers"
        NO_DEFAULT_PATH
    )
endif(DEFINED CMAKE_OSX_SYSROOT)
# As a last resort, look in default system areas followed by other possible locations
find_path(OPENCL_INCLUDE_DIR
    NAMES OpenCL/opencl.h CL/opencl.h
    PATHS
        "C:/CUDA"
        "/usr/local/cuda"
        "/usr/local/streamsdk"
        "/usr"
        "${CUDA_TOOLKIT_ROOT_DIR}"
    PATH_SUFFIXES "include"
)
### OPENCL_LIBRARY ###
# Choose the library sub-directories to probe for this platform/ABI.
if("${CMAKE_SYSTEM_NAME}" MATCHES "Linux")
    if("${CMAKE_SYSTEM_PROCESSOR}" STREQUAL "x86_64")
        set(path_suffixes "lib/x86_64")
    else()
        set(path_suffixes "lib/x86")
    endif()
elseif(MSVC)
    if(CMAKE_CL_64)
        set(path_suffixes "lib/x64" "lib/x86_64")
    else()
        set(path_suffixes "lib/Win32" "lib/x86")
    endif()
else()
    set(path_suffixes "lib")
endif()
# find_library caches its first hit, so these calls act as a priority chain.
# 1) Explicit OPENCL_DIR / OPENCL_LIB_SEARCH_PATH hints.
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        $ENV{OPENCL_DIR}
        ${OPENCL_LIB_SEARCH_PATH}
    PATH_SUFFIXES ${path_suffixes}
    NO_DEFAULT_PATH
)
# 2) Environment variables set by OpenCL SDK installers.
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        $ENV{CUDA_PATH}
        $ENV{AMDAPPSDKROOT}
    PATH_SUFFIXES ${path_suffixes}
    NO_DEFAULT_PATH
)
# 3) Default system areas plus a few well-known install locations.
find_library(OPENCL_LIBRARY
    NAMES OpenCL
    PATHS
        "C:/CUDA"
        "/usr/local/cuda"
        "/usr/local/streamsdk"
        "/usr"
        "${CUDA_TOOLKIT_ROOT_DIR}"
    PATH_SUFFIXES ${path_suffixes} "lib"
)
# Set OPENCL_FOUND and report, then publish OPENCL_LIBRARIES and adjust
# cache-variable visibility accordingly.
find_package_handle_standard_args(OpenCL DEFAULT_MSG OPENCL_LIBRARY OPENCL_INCLUDE_DIR)
if(OPENCL_FOUND)
    set(OPENCL_LIBRARIES ${OPENCL_LIBRARY})
    mark_as_advanced(CLEAR OPENCL_INCLUDE_DIR OPENCL_LIBRARY)
else()
    set(OPENCL_LIBRARIES)
    mark_as_advanced(OPENCL_INCLUDE_DIR OPENCL_LIBRARY)
endif()
# This is from Solar CMake (https://github.com/axr/solar-cmake).
#
# Copyright (c) 2012 Petroules Corporation. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
# IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT,
# INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
# OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED
# OF THE POSSIBILITY OF SUCH DAMAGE.
# Based on the Qt 5 processor detection code, so should be very accurate
# https://github.com/qt/qtbase/blob/9a6a847/src/corelib/global/qprocessordetection.h
# Currently handles arm / aarch64 (v5, v6, v7, v8), x86 (32/64), ia64, and ppc (32/64)
# Regarding POWER/PowerPC, just as is noted in the Qt source,
# "There are many more known variants/revisions that we do not handle/detect."
# C snippet for compile-time architecture detection. Every branch ends in an
# #error whose message contains "cmake_ARCH <name>"; the caller writes this to
# arch.c, compiles it (it always fails), and extracts the architecture from
# the compiler's error output. The body is a quoted CMake string, so escaping
# (\\ for backslash, \" for quote) is CMake-level, not C-level.
set(archdetect_c_code "
#define _STR(x) #x
#define STR(x) _STR(x)
#if defined(__arm__) || defined(__TARGET_ARCH_ARM) || defined(_M_ARM) || \\
defined(_M_ARM64) || defined(__aarch64__) || defined(__ARM64__)
#if defined(__ARM_ARCH) && __ARM_ARCH > 1
#pragma message \"cmake_ARCH armv\" STR(__ARM_ARCH)
#error
#elif defined(__TARGET_ARCH_ARM) && __TARGET_ARCH_ARM > 1
#pragma message \"cmake_ARCH armv\" STR(__TARGET_ARCH_ARM)
#error
#elif defined(_M_ARM) && _M_ARM > 1
#error cmake_ARCH arm ## __M_ARM
#elif defined(__ARM64_ARCH_8__) \\
|| defined(__aarch64__) \\
|| defined(__ARMv8__) \\
|| defined(__ARMv8_A__) \\
|| defined(_M_ARM64)
#error cmake_ARCH armv8
#elif defined(__ARM_ARCH_7__) \\
|| defined(__ARM_ARCH_7A__) \\
|| defined(__ARM_ARCH_7R__) \\
|| defined(__ARM_ARCH_7M__) \\
|| defined(__ARM_ARCH_7S__) \\
|| defined(_ARM_ARCH_7) \\
|| defined(__CORE_CORTEXA__)
#error cmake_ARCH armv7
#elif defined(__ARM_ARCH_6__) \\
|| defined(__ARM_ARCH_6J__) \\
|| defined(__ARM_ARCH_6T2__) \\
|| defined(__ARM_ARCH_6Z__) \\
|| defined(__ARM_ARCH_6K__) \\
|| defined(__ARM_ARCH_6ZK__) \\
|| defined(__ARM_ARCH_6M__)
#error cmake_ARCH armv6
#elif defined(__ARM_ARCH_5TEJ__) \\
|| defined(__ARM_ARCH_5TE__)
#error cmake_ARCH armv5
#else
#error cmake_ARCH arm
#endif
#elif defined(__i386) || defined(__i386__) || defined(_M_IX86)
#error cmake_ARCH i386
#elif defined(__x86_64) || defined(__x86_64__) || defined(__amd64) || defined(_M_X64)
#error cmake_ARCH x86_64
#elif defined(__ia64) || defined(__ia64__) || defined(_M_IA64)
#error cmake_ARCH ia64
#elif defined(__ppc__) || defined(__ppc) || defined(__powerpc__) \\
|| defined(_ARCH_COM) || defined(_ARCH_PWR) || defined(_ARCH_PPC) \\
|| defined(_M_MPPC) || defined(_M_PPC)
#if defined(__ppc64__) || defined(__powerpc64__) || defined(__64BIT__)
#error cmake_ARCH ppc64
#else
#error cmake_ARCH ppc
#endif
#endif
#error cmake_ARCH unknown
")
# Set ppc_support to TRUE before including this file or ppc and ppc64
# will be treated as invalid architectures since they are no longer supported by Apple
#
# target_architecture(<output_var>)
# Detects the target architecture and stores the result in <output_var> in the
# caller's scope. The result is a single name (e.g. x86_64, armv7, ppc64) or,
# on OS X when CMAKE_OSX_ARCHITECTURES lists several values, a list of names
# in the normalized order ppc, arm64, i386, x86_64, ppc64.
function(target_architecture output_var)
if(APPLE AND CMAKE_OSX_ARCHITECTURES)
# On OS X we use CMAKE_OSX_ARCHITECTURES *if* it was set
# First let's normalize the order of the values
# Note that it's not possible to compile PowerPC applications if you are using
# the OS X SDK version 10.6 or later - you'll need 10.4/10.5 for that, so we
# disable it by default
# See this page for more information:
# http://stackoverflow.com/questions/5333490/how-can-we-restore-ppc-ppc64-as-well-as-full-10-4-10-5-sdk-support-to-xcode-4
# Architecture defaults to i386 or ppc on OS X 10.5 and earlier, depending on the CPU type detected at runtime.
# On OS X 10.6+ the default is x86_64 if the CPU supports it, i386 otherwise.
# First pass: record which of the known arch names appear (ppc/ppc64 are only
# accepted if the caller opted in via ppc_support); anything else is fatal.
foreach(osx_arch ${CMAKE_OSX_ARCHITECTURES})
if("${osx_arch}" STREQUAL "ppc" AND ppc_support)
set(osx_arch_ppc TRUE)
elseif("${osx_arch}" STREQUAL "arm64")
set(osx_arch_arm64 TRUE)
elseif("${osx_arch}" STREQUAL "i386")
set(osx_arch_i386 TRUE)
elseif("${osx_arch}" STREQUAL "x86_64")
set(osx_arch_x86_64 TRUE)
elseif("${osx_arch}" STREQUAL "ppc64" AND ppc_support)
set(osx_arch_ppc64 TRUE)
else()
message(FATAL_ERROR "Invalid OS X arch name: ${osx_arch}")
endif()
endforeach()
# Now add all the architectures in our normalized order
if(osx_arch_ppc)
list(APPEND ARCH ppc)
endif()
if(osx_arch_arm64)
list(APPEND ARCH arm64)
endif()
if(osx_arch_i386)
list(APPEND ARCH i386)
endif()
if(osx_arch_x86_64)
list(APPEND ARCH x86_64)
endif()
if(osx_arch_ppc64)
list(APPEND ARCH ppc64)
endif()
else()
file(WRITE "${CMAKE_BINARY_DIR}/arch.c" "${archdetect_c_code}")
enable_language(C)
# Detect the architecture in a rather creative way...
# This compiles a small C program which is a series of ifdefs that selects a
# particular #error preprocessor directive whose message string contains the
# target architecture. The program will always fail to compile (both because
# file is not a valid C program, and obviously because of the presence of the
# #error preprocessor directives... but by exploiting the preprocessor in this
# way, we can detect the correct target architecture even when cross-compiling,
# since the program itself never needs to be run (only the compiler/preprocessor)
try_run(
run_result_unused
compile_result_unused
"${CMAKE_BINARY_DIR}"
"${CMAKE_BINARY_DIR}/arch.c"
COMPILE_OUTPUT_VARIABLE ARCH
CMAKE_FLAGS CMAKE_OSX_ARCHITECTURES=${CMAKE_OSX_ARCHITECTURES}
)
# Parse the architecture name from the compiler output
string(REGEX MATCH "cmake_ARCH ([a-zA-Z0-9_]+)" ARCH "${ARCH}")
# Get rid of the value marker leaving just the architecture name
string(REPLACE "cmake_ARCH " "" ARCH "${ARCH}")
# If we are compiling with an unknown architecture this variable should
# already be set to "unknown" but in the case that it's empty (i.e. due
# to a typo in the code), then set it to unknown
if (NOT ARCH)
set(ARCH unknown)
endif()
endif()
set(${output_var} "${ARCH}" PARENT_SCOPE)
endfunction()
# Conda Forge releases for OpenMM
## Final releases
Create a new version tag on `openmm/openmm` and publish a new GitHub release. The Conda Forge bots would auto submit a PR with the new version within a couple hours _if_ we were using the `source.url` field pointing to a GH release. Since we build off the full repo, I am not sure the automation will work, so we need to switch to manual mode.
1. If you haven't yet, fork `conda-forge/openmm-feedstock`.
2. On your `conda-forge/openmm-feedstock` **fork**, create a new branch off most recent upstream branch, be it `master` or `rc`.
3. Edit `meta.yaml`:
- [ ] Update `package.version`.
- [ ] Reset `build.number` to 0.
- [ ] Make sure `source.git_rev` points to the release tag you want to publish. This could be a git commit too, but a tag is preferred.
4. Commit and push to **your fork**. Do NOT push to `upstream`.
5. Open a new PR on `conda-forge/openmm-feedstock`. Make sure you are targeting `conda-forge/openmm-feedstock`'s `master`, from **your fork**.
6. Review the checklist and open the PR.
7. In the opened PR, post a comment with `@conda-forge-admin, please rerender`.
8. Wait for all green, reviews and then merge. Always make sure you are merging to `master`.
9. Once merged, check the CI status on the `master` branch.
- It should be green. If it's red, a network error might have happened and you need to _re-run_ the failing job (a link will appear next to it, if you click on the Details menu).
- If a CI provider was not triggered (for whatever reason), `master` might need a little _push_ (no pun intended). An empty commit to `master` will do:
```
# make sure conda-forge/openmm-feedstock is configured as `upstream`
git remote -v
git checkout master
git fetch upstream master
git merge upstream/master
git commit --allow-empty -m "Trigger CI"
git push upstream master
```
## Release candidates
> Technically, once you publish an RC tag on GitHub Releases, the bots will pick it up, but we haven't tested this yet.
Manual instructions:
1. If you haven't yet, fork `conda-forge/openmm-feedstock`.
2. Create a new branch from the most recent upstream branch, be it `master` or `rc`.
3. Edit `meta.yaml`:
- [ ] Update `package.version`. It should be the new version number plus `rcX`, `X` being a number. Check [CFEP05](https://github.com/conda-forge/cfep/blob/master/cfep-05.md) in case of doubt.
- [ ] Reset `build.number` to 0.
- [ ] Make sure `source.git_rev` points to the release tag you want to publish. This could be a git commit too, but a tag is preferred.
4. Edit `conda_build_config.yaml`. This is a KEY difference: RC releases are published to a different label!
- [ ] `channel_targets` should be set to `- conda-forge openmm_rc`:
```yaml
channel_targets:
- conda-forge openmm_rc
```
5. Commit and push to **your fork**. Do NOT push to `upstream`.
6. Open a new PR on `conda-forge/openmm-feedstock`. Make sure you are targeting `conda-forge/openmm-feedstock`'s `rc`, from **your fork**. Again, we are **targeting** the `rc` branch, NOT master. This is a KEY difference. RC candidates stay on `rc`.
7. Review the checklist and open the PR.
8. In the opened PR, post a comment with `@conda-forge-admin, please rerender`.
9. Wait for all green, reviews and then merge. Always make sure you are merging to `rc`.
10. Once merged, check the CI status on the `rc` branch.
- It should be green. If it's red, a network error might have happened and you need to _re-run_ the failing job (a link will appear next to it, if you click on the Details menu).
- If a CI provider was not triggered (for whatever reason), `rc` might need a little _push_ (no pun intended). An empty commit to `rc` will do:
```
# make sure conda-forge/openmm-feedstock is configured as `upstream`
git remote -v
git checkout rc
git fetch upstream rc
git merge upstream/rc
git commit --allow-empty -m "Trigger CI"
git push upstream rc
```
## Development Releases
These releases are done "on demand" either if users request them or if we feel there are some important or useful code changes we want people to use or try out.
An advantage these releases have over the nightly omnia builds is that they are ABI compatible with the conda-forge ecosystem.
They are uploaded to conda-forge in a way that prevents users from accidentally installing them.
See [CFEP-05](https://github.com/conda-forge/cfep/blob/main/cfep-05.md) and [PR# 74](https://github.com/conda-forge/openmm-feedstock/pull/74) for more details.
1. If you haven't yet, fork `conda-forge/openmm-feedstock`.
1. Create a new branch from the upstream `dev` branch.
1. Edit `meta.yaml`:
- [ ] Update `package.version`. If there has not been a new openmm release since the last dev release, bump the dev build number. Example: If current release of openmm is `7.7.0` and the package version is currently `7.7.0dev0` change it to `7.7.0dev1`. Another example, if the current release of openmm is `7.7.1` and the package version is currently `7.7.0dev2` then change it to `7.7.1dev0`.
- [ ] Update the `source.git_rev` to the current HEAD commit of openmm.
1. Commit and push to **your fork**. Do NOT push to `upstream`.
1. Open a new PR on `conda-forge/openmm-feedstock`. Make sure you are targeting `conda-forge/openmm-feedstock`'s `dev`, from **your fork**. Again, we are **targeting** the `dev` branch, NOT master. This is a KEY difference.
1. Review the checklist and open the PR.
1. In the opened PR, post a comment with `@conda-forge-admin, please rerender`.
1. Wait for all green, reviews and then merge. Always make sure you are merging to `dev`.
1. Once merged, check the CI status on the `dev` branch.
- It should be green. If it's red, a network error might have happened and you need to _re-run_ the failing job (a link will appear next to it, if you click on the Details menu).
- If a CI provider was not triggered (for whatever reason), `dev` might need a little _push_ (no pun intended). An empty commit to `dev` will do:
```
# make sure conda-forge/openmm-feedstock is configured as `upstream`
git remote -v
git checkout dev
git fetch upstream dev
git merge upstream/dev
git commit --allow-empty -m "Trigger CI"
git push upstream dev
```
## Hosted Environments
Anaconda.org supports hosting environments (as defined in a yaml) in the Anaconda cloud.
This makes it easier for end users to create a specified conda environment as they do not need a local copy of the yaml file.
This is especially useful when distributing an environment that uses lots of channels and/or labels.
To upload an environment:
1. Create the yaml file
1. Test that the yaml file works e.g. `mamba env create --file openmm-8-beta.yaml`
1. Upload the environment e.g. `anaconda upload -u openmm hosted-envs/openmm-8-beta.yaml`
Now users can create an environment with `mamba env create openmm/openmm-8-beta`
// Jenkins declarative pipeline for OpenMM CI: builds and tests the code inside
// Docker images, with one parallel stage per platform (CUDA, OpenCL, CPU).
pipeline {
// No global agent: each stage declares its own Docker agent below.
agent none
stages {
stage("Build and test") {
parallel {
stage("Build and test CUDA platform") {
agent {
docker {
image "swails/openmm-all:latest"
// Needs a node that has both Docker and an Nvidia GPU.
label "cuda && docker"
args '--gpus all'
alwaysPull true
}
}
steps {
// Reset the workspace to a pristine checkout before building.
sh "git clean -fxd && git checkout ."
sh "devtools/ci/jenkins/install.sh"
sh "devtools/ci/jenkins/test.sh -R 'TestCuda' --parallel 2"
}
}
stage("Build and test OpenCL platform") {
agent {
docker {
image "swails/openmm-all:latest"
// NOTE(review): also requires the cuda label — presumably the Nvidia
// driver on those nodes supplies the OpenCL runtime; confirm.
label "cuda && docker"
args '--gpus all'
alwaysPull true
}
}
steps {
sh "git clean -fxd && git checkout ."
sh "devtools/ci/jenkins/install.sh"
sh "devtools/ci/jenkins/test.sh -R 'TestOpenCL' --parallel 2"
}
}
stage("Build/test CPU platforms") {
agent {
docker {
image "swails/openmm-cpu:latest"
// CPU-only: any Docker-capable node will do.
label "docker"
alwaysPull true
}
}
steps {
sh "git clean -fxd && git checkout ."
sh "devtools/ci/jenkins/install_and_test_cpu.sh"
}
}
}
}
}
}
# Conda environment for building OpenMM: cmake/ccache toolchain, Python host
# dependencies (cython, swig, numpy), doxygen 1.9.1 for API docs, pytest stack
# for running the test suite.
name: build
channels:
- conda-forge
- bioconda
dependencies:
# build
- cmake
- ccache
# host
- python
- cython
- swig
- numpy
- doxygen 1.9.1
# test
- pytest
- pytest-xdist
- pytest-timeout
# Conda build environment variant pinned to doxygen 1.8.14, with gromacs added
# for the GROMACS-interoperability tests.
name: build
channels:
- conda-forge
- bioconda
dependencies:
# build
- cmake
- ccache
# host
- python
- cython
- swig
- numpy
- doxygen 1.8.14
# test
- pytest
- pytest-xdist
- pytest-timeout
- gromacs
# Linux build environment: adds make, a pinned glibc sysroot (2.17) for wide
# binary compatibility, and ocl-icd-system so the system OpenCL ICD loader is
# used at build time.
name: build
channels:
- conda-forge
- bioconda
dependencies:
# build
- cmake
- make
- ccache
- sysroot_linux-64 2.17
# host
- python
- cython
- swig
- numpy
- ocl-icd-system
- doxygen 1.8.14
# test
- pytest
- pytest-xdist
- pytest-timeout
- gromacs
# Linux build environment variant using PyPy instead of CPython as the host
# Python implementation; otherwise identical to the Linux environment above.
name: build
channels:
- conda-forge
- bioconda
dependencies:
# build
- cmake
- make
- ccache
- sysroot_linux-64 2.17
# host
- pypy
- cython
- swig
- numpy
- ocl-icd-system
- doxygen 1.8.14
# test
- pytest
- pytest-xdist
- pytest-timeout
- gromacs
# Windows build environment: jom (parallel nmake) and m2-coreutils for the
# build, plus the Khronos OpenCL ICD loader as the OpenCL link target.
name: build
channels:
- conda-forge
- defaults
dependencies:
# build
- jom
- cmake
- ccache
- m2-coreutils
# host
- python
- cython
- swig
- numpy
- doxygen 1.8.14
- khronos-opencl-icd-loader
# test
- pytest
- pytest-xdist
- pytest-timeout
# Documentation build environment: doxygen for the C++ API reference and the
# sphinx/breathe/bibtex stack for the user and Python API guides.
name: build
channels:
- conda-forge
dependencies:
# build
- cmake
- ccache
# host
- python
- pip
- numpy
- cython
- swig
- doxygen 1.8.14
- sphinx==4.0.2
- sphinxcontrib-bibtex
- breathe>=4.30,<5.0
# This script installs AMD's SDK 3.0 to provide their OpenCL implementation
# * Installation path will be ${GITHUB_WORKSPACE}/AMDAPPSDK
set -euxo pipefail
# Fetch the archived SDK tarball from the omnia-ci S3 mirror, with retries.
wget -q --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 5 \
http://s3.amazonaws.com/omnia-ci/AMD-APP-SDKInstaller-v3.0.130.135-GA-linux64.tar.bz2
tar -xjf AMD-APP-SDK*.tar.bz2
AMDAPPSDK=${GITHUB_WORKSPACE}/AMDAPPSDK
export OPENCL_VENDOR_PATH=${AMDAPPSDK}/etc/OpenCL/vendors
# mkdir -p also creates ${AMDAPPSDK} itself, which the extraction below needs.
mkdir -p ${OPENCL_VENDOR_PATH}
# Run the self-extracting installer in "just untar" mode into the SDK dir.
sh AMD-APP-SDK*.sh --tar -xf -C ${AMDAPPSDK}
# Register the AMD ICD so OpenCL loaders can find libamdocl64.so.
echo libamdocl64.so > ${OPENCL_VENDOR_PATH}/amdocl64.icd
export LD_LIBRARY_PATH=${AMDAPPSDK}/lib/x86_64:${LD_LIBRARY_PATH:-}
chmod +x ${AMDAPPSDK}/bin/x86_64/clinfo
# Smoke test: list the OpenCL platforms the SDK exposes.
${AMDAPPSDK}/bin/x86_64/clinfo
sudo apt-get update
sudo apt-get install -y libgl1-mesa-dev
# Persist the environment for subsequent GitHub Actions steps.
echo "OPENCL_VENDOR_PATH=${OPENCL_VENDOR_PATH}" >> ${GITHUB_ENV}
echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> ${GITHUB_ENV}
\ No newline at end of file
:: This script installs CUDA on Windows.
:: It downloads the offline installer from the Nvidia servers
:: and the relevant patches, if applicable.
:: It uses the default installation path, which is exported as CUDA_PATH
:: For CMake compatibility, CUDA_TOOLKIT_ROOT_DIR is also exported
:: It expects a %CUDA_VERSION% environment variable, set to major.minor (e.g. 10.0)
:: We define a default subset of components to be installed for faster installation times
:: and reduced storage usage (CI is limited to 10GB). Full list of components is available at
:: https://docs.nvidia.com/cuda/archive/%CUDA_VERSION%/cuda-installation-guide-microsoft-windows/index.html
:: Build the space-separated component list common to every supported version.
set "VAR=nvcc_%CUDA_VERSION% cuobjdump_%CUDA_VERSION% nvprune_%CUDA_VERSION% cupti_%CUDA_VERSION%"
set "VAR=%VAR% memcheck_%CUDA_VERSION% nvdisasm_%CUDA_VERSION% nvprof_%CUDA_VERSION% cublas_%CUDA_VERSION%"
set "VAR=%VAR% cublas_dev_%CUDA_VERSION% cudart_%CUDA_VERSION% cufft_%CUDA_VERSION% cufft_dev_%CUDA_VERSION%"
set "VAR=%VAR% curand_%CUDA_VERSION% curand_dev_%CUDA_VERSION% cusolver_%CUDA_VERSION% cusolver_dev_%CUDA_VERSION%"
set "VAR=%VAR% cusparse_%CUDA_VERSION% cusparse_dev_%CUDA_VERSION% npp_%CUDA_VERSION% npp_dev_%CUDA_VERSION%"
set "VAR=%VAR% nvrtc_%CUDA_VERSION% nvrtc_dev_%CUDA_VERSION% nvml_dev_%CUDA_VERSION%"
set "VAR=%VAR% visual_studio_integration_%CUDA_VERSION%"
set "CUDA_COMPONENTS=%VAR%"
:: Dispatch to the per-version URL/checksum definitions below.
if "%CUDA_VERSION%" == "9.2" goto cuda92
if "%CUDA_VERSION%" == "10.0" goto cuda100
if "%CUDA_VERSION%" == "10.1" goto cuda101
if "%CUDA_VERSION%" == "10.2" goto cuda102
if "%CUDA_VERSION%" == "11.0" goto cuda110
if "%CUDA_VERSION%" == "11.1" goto cuda111
if "%CUDA_VERSION%" == "11.2" goto cuda112
echo CUDA '%CUDA_VERSION%' is not supported
exit /b 1
:: Define URLs per version
:: Each label sets the installer URL and its MD5 checksum; some versions also
:: have a patch installer, and pre-11 versions additionally install nvgraph.
:cuda92
set "CUDA_NETWORK_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/network_installers2/cuda_9.2.148_win10_network"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=2bf9ae67016867b68f361bf50d2b9e7b"
set "CUDA_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/local_installers2/cuda_9.2.148_win10"
set "CUDA_INSTALLER_CHECKSUM=f6c170a7452098461070dbba3e6e58f1"
set "CUDA_PATCH_URL=https://developer.nvidia.com/compute/cuda/9.2/Prod2/patches/1/cuda_9.2.148.1_windows"
set "CUDA_PATCH_CHECKSUM=09e20653f1346d2461a9f8f1a7178ba2"
set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
goto cuda_common
:cuda100
set "CUDA_NETWORK_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/10.0/Prod/network_installers/cuda_10.0.130_win10_network"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=3312deac9c939bd78d0e7555606c22fc"
set "CUDA_INSTALLER_URL=https://developer.nvidia.com/compute/cuda/10.0/Prod/local_installers/cuda_10.0.130_411.31_win10"
set "CUDA_INSTALLER_CHECKSUM=90fafdfe2167ac25432db95391ca954e"
set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
goto cuda_common
:cuda101
set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.1/Prod/network_installers/cuda_10.1.243_win10_network.exe"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=fae0c958440511576691b825d4599e93"
set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.1/Prod/local_installers/cuda_10.1.243_426.00_win10.exe"
set "CUDA_INSTALLER_CHECKSUM=b54cf32683f93e787321dcc2e692ff69"
set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
goto cuda_common
:cuda102
set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/network_installers/cuda_10.2.89_win10_network.exe"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=60e0f16845d731b690179606f385041e"
set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/local_installers/cuda_10.2.89_441.22_win10.exe"
set "CUDA_INSTALLER_CHECKSUM=d9f5b9f24c3d3fc456a3c789f9b43419"
set "CUDA_PATCH_URL=http://developer.download.nvidia.com/compute/cuda/10.2/Prod/patches/1/cuda_10.2.1_win10.exe"
set "CUDA_PATCH_CHECKSUM=9d751ae129963deb7202f1d85149c69d"
set "CUDA_COMPONENTS=%CUDA_COMPONENTS% nvgraph_%CUDA_VERSION% nvgraph_dev_%CUDA_VERSION%"
goto cuda_common
:cuda110
set "CUDA_NETWORK_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/11.0.3/network_installers/cuda_11.0.3_win10_network.exe"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=1b88bf7bb8e50207bbb53ed2033f93f3"
set "CUDA_INSTALLER_URL=http://developer.download.nvidia.com/compute/cuda/11.0.3/local_installers/cuda_11.0.3_451.82_win10.exe"
set "CUDA_INSTALLER_CHECKSUM=80ae0fdbe04759123f3cab81f2aadabd"
goto cuda_common
:cuda111
set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.1.1/network_installers/cuda_11.1.1_win10_network.exe"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=7e36e50ee486a84612adfd85500a9971"
set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.1.1/local_installers/cuda_11.1.1_456.81_win10.exe"
set "CUDA_INSTALLER_CHECKSUM=a89dfad35fc1adf02a848a9c06cfff15"
goto cuda_common
:cuda112
set "CUDA_NETWORK_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.0/network_installers/cuda_11.2.0_win10_network.exe"
set "CUDA_NETWORK_INSTALLER_CHECKSUM=ab02a25eed1201cc3e414be943a242df"
set "CUDA_INSTALLER_URL=https://developer.download.nvidia.com/compute/cuda/11.2.0/local_installers/cuda_11.2.0_460.89_win10.exe"
set "CUDA_INSTALLER_CHECKSUM=92f38c37ce9c6c11d27c10701b040256"
goto cuda_common
:: The actual installation logic
:: Download the installer, verify its MD5, run it silently with the selected
:: components, apply the optional patch, then sanity-check and export paths.
:cuda_common
::We expect this CUDA_PATH
set "CUDA_PATH=C:\Program Files\NVIDIA GPU Computing Toolkit\CUDA\v%CUDA_VERSION%"
echo Downloading CUDA version %CUDA_VERSION% installer from %CUDA_INSTALLER_URL%
echo Expected MD5: %CUDA_INSTALLER_CHECKSUM%
:: Download installer
curl --retry 3 -k -L %CUDA_INSTALLER_URL% --output cuda_installer.exe
if errorlevel 1 (
echo Problem downloading installer...
exit /b 1
)
:: Check md5
openssl md5 cuda_installer.exe | findstr %CUDA_INSTALLER_CHECKSUM%
if errorlevel 1 (
echo Checksum does not match!
exit /b 1
)
:: Run installer
:: start /wait blocks until the silent (-s) installer finishes.
start /wait cuda_installer.exe -s %CUDA_COMPONENTS%
if errorlevel 1 (
echo Problem installing CUDA toolkit...
exit /b 1
)
del cuda_installer.exe
:: If patches are needed, download and apply
:: (CUDA_PATCH_URL expands to an empty string for versions that set no patch)
if not "%CUDA_PATCH_URL%"=="" (
echo This version requires an additional patch
curl --retry 3 -k -L %CUDA_PATCH_URL% --output cuda_patch.exe
if errorlevel 1 (
echo Problem downloading patch installer...
exit /b 1
)
openssl md5 cuda_patch.exe | findstr %CUDA_PATCH_CHECKSUM%
if errorlevel 1 (
echo Checksum does not match!
exit /b 1
)
start /wait cuda_patch.exe -s
if errorlevel 1 (
echo Problem running patch installer...
exit /b 1
)
del cuda_patch.exe
)
:: This should exist by now!
if not exist "%CUDA_PATH%\bin\nvcc.exe" (
echo CUDA toolkit installation failed!
exit /b 1
)
:: Export for later GitHub Actions steps; CMake wants forward slashes.
echo CUDA_PATH=%CUDA_PATH% >> %GITHUB_ENV%
echo CUDA_TOOLKIT_ROOT_DIR=%CUDA_PATH:\=/% >> %GITHUB_ENV%
:: Notes about nvcuda.dll
:: ----------------------
:: We should also provide the drivers (nvcuda.dll), but the installer will not
:: proceed without a physical Nvidia card attached (not the case in the CI).
:: Expanding `<installer.exe>\Display.Driver\nvcuda.64.dl_` to `C:\Windows\System32`
:: does not work anymore (.dl_ files are not PE-COFF according to Dependencies.exe).
:: Forcing this results in a DLL error 193. Basically, there's no way to provide
:: ncvuda.dll in a GPU-less machine without breaking the EULA (aka zipping nvcuda.dll
:: from a working installation).
\ No newline at end of file
# This script install CUDA on Ubuntu-based systemws
# It uses the Nvidia repos for Ubuntu 18.04, which as of Dec 2020
# includes packages for CUDA 10.0, 10.1, 10.2, 11.0, 11.1, 11.2
# Future versions might require an updated repo (maybe Ubuntu 20)
# It expects a $CUDA_VERSION environment variable set to major.minor (e.g. 10.0)
set -euxo pipefail
# Enable retrying
echo 'APT::Acquire::Retries "5";' | sudo tee /etc/apt/apt.conf.d/80-retries
# Pin the Nvidia repo so its packages take precedence, then register the repo.
sudo wget --retry-connrefused --waitretry=1 --read-timeout=20 --timeout=15 --tries 5 \
-O /etc/apt/preferences.d/cuda-repository-pin-600 \
https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/cuda-ubuntu1804.pin
sudo apt-key adv --fetch-keys https://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/3bf863cc.pub
sudo add-apt-repository "deb http://developer.download.nvidia.com/compute/cuda/repos/ubuntu1804/x86_64/ /"
sudo apt-get update -qq
# Apt package names use major-minor (e.g. 10-2), not major.minor.
CUDA_APT=${CUDA_VERSION/./-}
## cufft changed package names in CUDA 11
if [[ ${CUDA_VERSION} == 10.* ]]; then CUFFT="cuda-cufft"; else CUFFT="libcufft"; fi
sudo apt-get install -y \
libgl1-mesa-dev cuda-compiler-${CUDA_APT} \
cuda-drivers cuda-driver-dev-${CUDA_APT} \
cuda-cudart-${CUDA_APT} cuda-cudart-dev-${CUDA_APT} \
${CUFFT}-${CUDA_APT} ${CUFFT}-dev-${CUDA_APT} \
cuda-nvrtc-${CUDA_APT} cuda-nvrtc-dev-${CUDA_APT} \
cuda-nvprof-${CUDA_APT}
sudo apt-get clean
# Export toolkit paths for this shell and persist them for later CI steps.
export CUDA_HOME=/usr/local/cuda-${CUDA_VERSION}
export LD_LIBRARY_PATH=${CUDA_HOME}/lib64:${LD_LIBRARY_PATH:-}
export PATH=${CUDA_HOME}/bin:${PATH}
echo "CUDA_HOME=${CUDA_HOME}" >> ${GITHUB_ENV}
echo "LD_LIBRARY_PATH=${LD_LIBRARY_PATH}" >> ${GITHUB_ENV}
echo "PATH=${PATH}" >> ${GITHUB_ENV}
\ No newline at end of file
# Install an older MacOS SDK
# This should guarantee OpenMM builds with extended compatibility across MacOS versions
# Adapted from conda-forge-ci-setup scripts:
# * https://github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/dde296e/recipe/run_conda_forge_build_setup_osx
# * https://github.com/conda-forge/conda-forge-ci-setup-feedstock/blob/dde296e/recipe/download_osx_sdk.sh
#
# Some possible updates might involve upgrading the download link to future MacOS releases (10.15 to something else),
# depending on the version provided by the CI
OSX_SDK_DIR="$(xcode-select -p)/Platforms/MacOSX.platform/Developer/SDKs"
export MACOSX_DEPLOYMENT_TARGET=10.9
export MACOSX_SDK_VERSION=10.9
export CMAKE_OSX_SYSROOT="${OSX_SDK_DIR}/MacOSX${MACOSX_SDK_VERSION}.sdk"
# BUGFIX: the test used to read `${CMAKE_OSX_SYSROOT}}` (stray trailing brace),
# so the directory check never matched and the SDK was downloaded on every run.
if [[ ! -d ${CMAKE_OSX_SYSROOT} ]]; then
echo "Downloading ${MACOSX_SDK_VERSION} sdk"
curl -L -O --connect-timeout 5 --max-time 10 --retry 5 --retry-delay 0 --retry-max-time 40 --retry-connrefused --retry-all-errors \
https://github.com/phracker/MacOSX-SDKs/releases/download/10.15/MacOSX${MACOSX_SDK_VERSION}.sdk.tar.xz
tar -xf MacOSX${MACOSX_SDK_VERSION}.sdk.tar.xz -C "$(dirname ${CMAKE_OSX_SYSROOT})"
fi
if [[ "$MACOSX_DEPLOYMENT_TARGET" == 10.* ]]; then
# set minimum sdk version to our target
plutil -replace MinimumSDKVersion -string ${MACOSX_SDK_VERSION} $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist
plutil -replace DTSDKName -string macosx${MACOSX_SDK_VERSION}internal $(xcode-select -p)/Platforms/MacOSX.platform/Info.plist
fi
# Persist the settings for later GitHub Actions steps.
# BUGFIX: removed a duplicate line that first wrote CMAKE_OSX_SYSROOT with the
# value of MACOSX_DEPLOYMENT_TARGET; only the correct SDK path is written now.
echo "MACOSX_DEPLOYMENT_TARGET=${MACOSX_DEPLOYMENT_TARGET}" >> ${GITHUB_ENV}
echo "CMAKE_OSX_SYSROOT=${CMAKE_OSX_SYSROOT}" >> ${GITHUB_ENV}
\ No newline at end of file
source /opt/conda/etc/profile.d/conda.sh
set -eo pipefail
WORKSPACE="$HOME/workspace"
# This endgroup closes the open tag in CI.yml
echo "::endgroup::"
echo "::group::Prepare build environment..."
extra_conda_packages=""
# Compilers come either from a RH devtoolset (installed via yum and enabled
# with its `enable` script) or from conda packages added to the env file.
if [[ ${COMPILERS} == devtoolset* ]]; then
sudo yum install -y centos-release-scl
sudo yum install -y ${COMPILERS}
source /opt/rh/${COMPILERS}/enable
else
extra_conda_packages="${COMPILERS}"
fi
# Patch environment file
# - blank out any gromacs line
# - pin the requested Python version (${PYTHON_VER}.*)
sed -E -e "s/.*gromacs.*//" \
-e "s/^- python$/- python ${PYTHON_VER}.*/" \
${WORKSPACE}/devtools/ci/gh-actions/conda-envs/build-ubuntu-latest.yml > conda-env.yml
# Append any compiler packages to the patched environment file.
for package in $extra_conda_packages; do
if [[ -n ${package// } ]]; then
echo "- ${package}" >> conda-env.yml
fi
done
conda env create -n build -f conda-env.yml
# NOTE(review): activation failure is deliberately ignored (|| true) — confirm
# the following steps really tolerate an unactivated environment.
conda activate build || true
echo "::endgroup::"
echo "::group::Prepare ccache..."
export CCACHE_BASEDIR=${WORKSPACE}
export CCACHE_DIR=${WORKSPACE}/.ccache
export CCACHE_COMPRESS=true
export CCACHE_COMPRESSLEVEL=6
export CCACHE_MAXSIZE=400M
ccache -p
ccache -z
echo "::endgroup::"
echo "::group::Configure with CMake..."
# If a CUDA toolkit is present in the image, point the build at its stubs so
# linking works without a physical GPU.
if [[ -d /usr/local/cuda ]]; then
export CUDA_PATH="/usr/local/cuda"
export CUDA_LIB_PATH="${CUDA_PATH}/lib64/stubs"
export LD_LIBRARY_PATH="${CUDA_PATH}/lib64/stubs:${LD_LIBRARY_PATH:-}"
export PATH="${CUDA_PATH}/bin:${PATH}"
fi
# Configure out-of-source in a fresh build/ directory, installing into the
# active conda prefix and compiling through ccache. GPU tests are disabled
# because no GPU is available inside the emulated container.
rm -rf build || true
mkdir -p build
cd build
cmake ${WORKSPACE} \
-DCMAKE_INSTALL_PREFIX=${CONDA_PREFIX} \
-DCMAKE_PREFIX_PATH=${CONDA_PREFIX} \
-DCMAKE_C_COMPILER_LAUNCHER=ccache \
-DCMAKE_CXX_COMPILER_LAUNCHER=ccache \
-DOPENMM_BUILD_CUDA_TESTS=OFF \
-DOPENMM_BUILD_OPENCL_TESTS=OFF
echo "::endgroup::"
# Build
echo "::group::Build with make..."
make -j2 install PythonInstall
echo "::endgroup::"
echo "::group::Check ccache performance..."
ccache -s
echo "::endgroup::"
# Core tests
echo "::group::Run core tests..."
python ${WORKSPACE}/devtools/run-ctest.py --parallel 2 --timeout 1500 --job-duration 360 --attempts 3
# Verify the expected libraries and plugins were actually installed.
test -f ${CONDA_PREFIX}/lib/libOpenMM.so
test -f ${CONDA_PREFIX}/lib/plugins/libOpenMMCPU.so
test -f ${CONDA_PREFIX}/lib/plugins/libOpenMMPME.so
# GPU plugins are only expected when a CUDA version was requested.
if [[ ! -z ${CUDA_VER} ]]; then
test -f ${CONDA_PREFIX}/lib/plugins/libOpenMMCUDA.so
test -f ${CONDA_PREFIX}/lib/plugins/libOpenMMOpenCL.so
fi
echo "::endgroup::"
# Python tests
echo "::group::Run Python tests..."
python -m openmm.testInstallation
python -c "import openmm as mm; print('---Loaded---', *mm.pluginLoadedLibNames, '---Failed---', *mm.Platform.getPluginLoadFailures(), sep='\n')"
cd python/tests
# Gromacs is not available on condaforge for PPC/ARM
# Membrane an MTS Langevin Integrator tests timeout (>6h!), possibly due to the emulation slowdown
python -m pytest -v -k "not gromacs and not membrane and not MTSLangevinIntegrator" -n 2
echo "::endgroup::"
echo "We are done!"
# Marker file checked by the caller to confirm every step completed.
touch "${WORKSPACE}/docker_steps_run_successfully"
#!/bin/bash
# This is an example script on how to debug locally with Docker!
# If it does not work, it might be out of date. In that case,
# check the steps used in /.github/workflows/CI.yml
set -euxo pipefail
# This is the image for PowerPC + CUDA
export DOCKER_IMAGE="quay.io/condaforge/linux-anvil-ppc64le-cuda:10.2"
# # Use this other one for ARM debugging
# export DOCKER_IMAGE="quay.io/condaforge/linux-anvil-aarch64"
# With Conda Forge compilers (GCC9)
export COMPILERS="compilers"
# # With RH devtoolset (GCC7)
# export COMPILERS="devtoolset-7"
# Choose your Python version
export PYTHON_VER="3.9"
# Number of CPUs to use
export CPU_COUNT=2
echo "Preparing Docker..."
# Register QEMU binfmt handlers so foreign-arch (ppc64le/aarch64) containers
# can run on the local host, then verify the registration.
docker run --rm --privileged multiarch/qemu-user-static:register --reset --credential yes
ls /proc/sys/fs/binfmt_misc/
docker info
# In order for the conda-build process in the container to write to the mounted
# volumes, we need to run with the same id as the host machine, which is
# normally the owner of the mounted volumes, or at least has write permission
export HOST_USER_ID=$(id -u)
# Check if docker-machine is being used (normally on OSX) and get the uid from
# the VM
if hash docker-machine 2> /dev/null && docker-machine active > /dev/null; then
export HOST_USER_ID=$(docker-machine ssh $(docker-machine active) id -u)
fi
# Start an interactive container with the current directory mounted at
# /home/conda/workspace and the configuration exported above passed through.
docker run \
-it \
-v "$(pwd)":/home/conda/workspace:rw,z \
-e HOST_USER_ID \
-e CPU_COUNT \
-e PYTHON_VER \
-e COMPILERS \
${DOCKER_IMAGE} \
bash
# Once you are inside the Docker session, you can use this to reproduce the CI steps:
#
# bash /home/conda/workspace/devtools/ci/gh-actions/scripts/run_steps_inside_docker_image.sh
\ No newline at end of file
#!/bin/bash -ex
# This script is executed via the line:
# source devtools/ci/jenkins/install.sh
# in a bash shell with the -lex options turned on
# Log the toolchain versions for easier CI debugging.
echo "Using the following SWIG (`which swig`) version:"
swig -version
echo "Using cmake (`which cmake`) version":
cmake --version
echo "Using g++ (`which g++`) version:"
g++ --version
# Only enable the CUDA build when the caller exported OPENMM_CUDA_COMPILER.
if [ ! -z "$OPENMM_CUDA_COMPILER" ]; then
echo "Using nvcc ($OPENMM_CUDA_COMPILER) version:"
$OPENMM_CUDA_COMPILER --version
CUDA_ARGS="-DCUDA_TOOLKIT_ROOT_DIR=${CUDA_HOME} -DOPENMM_BUILD_CUDA_LIB=true"
fi
# In-source configure; callers may inject extra options via EXTRA_CMAKE_ARGS.
cmake -DCMAKE_INSTALL_PREFIX="`pwd`/install" -DCMAKE_CXX_COMPILER=g++ -DCMAKE_C_COMPILER=gcc \
-DSWIG_EXECUTABLE=`which swig` $CUDA_ARGS $EXTRA_CMAKE_ARGS .
make -j6 install
#!/bin/bash -ex
# Build OpenMM with the GPU platforms disabled and run the CPU/Reference
# C++ test suites, then build and test the Python layer.
EXTRA_CMAKE_ARGS="-DOPENMM_BUILD_CUDA_LIB=false -DOPENMM_BUILD_OPENCL_LIB=false"
. devtools/ci/jenkins/install.sh
python devtools/run-ctest.py --job-duration=120 --timeout 300 --in-order -R 'Test(Cpu|Reference)' --parallel 4
# Build & test Python
make PythonInstall
python -m openmm.testInstallation
cd python/tests && py.test -v
#!/bin/bash -ex
# Thin wrapper around run-ctest.py; extra arguments (e.g. -R <pattern>,
# --parallel N) are forwarded via $*.
python devtools/run-ctest.py --job-duration=120 --timeout 300 --in-order $*
\ No newline at end of file
import sys
import xml.etree.ElementTree as etree

# Per-atom-class GBSA OBC parameters: class name -> (radius in nm, OBC scale factor).
# NOTE(review): values appear to be Amber atom classes — confirm against the
# force fields this script is run on.
gbvalues = {'CT':(0.19,0.72),
            'CX':(0.19,0.72),
            'CI':(0.19,0.72),
            'C':(0.1875,0.72),
            'CA':(0.1875,0.72),
            'CM':(0.1875,0.72),
            'CS':(0.1875,0.72),
            'C4':(0.1875,0.72),
            'CC':(0.1875,0.72),
            'CV':(0.1875,0.72),
            'CW':(0.1875,0.72),
            'CR':(0.1875,0.72),
            'CB':(0.1875,0.72),
            'C*':(0.1875,0.72),
            'CN':(0.1875,0.72),
            'CK':(0.1875,0.72),
            'CP':(0.1875,0.72),
            'C5':(0.1875,0.72),
            'CQ':(0.1875,0.72),
            'N':(0.1706,0.79),
            'NA':(0.1706,0.79),
            'NB':(0.1706,0.79),
            'NC':(0.1706,0.79),
            'N*':(0.1706,0.79),
            'N2':(0.1706,0.79),
            'N3':(0.1625,0.79),
            'OW':(0.1535,0.85),
            'OH':(0.1535,0.85),
            'OS':(0.1535,0.85),
            'O':(0.148,0.85),
            'O2':(0.148,0.85),
            'S':(0.1775,0.96),
            'SH':(0.1775,0.96),
            'H':(0.115,0.85),
            'HW':(0.105,0.85),
            'HO':(0.105,0.85),
            'HS':(0.125,0.85),
            'HA':(0.125,0.85),
            'HC':(0.125,0.85),
            'H0':(0.125,0.85),
            'H1':(0.125,0.85),
            'H2':(0.125,0.85),
            'H3':(0.125,0.85),
            'HP':(0.125,0.85),
            'H4':(0.125,0.85),
            'H5':(0.125,0.85)}


def gbsa_lines(root):
    """Build the lines of a <ForceField> document containing a <GBSAOBCForce>.

    Parameters
    ----------
    root : xml.etree.ElementTree.Element
        Root of a force-field XML document with <AtomTypes>/<Type> (having
        ``name`` and ``class`` attributes) and <NonbondedForce>/<Atom>
        (having ``type`` and ``charge`` attributes).

    Returns
    -------
    list of str
        Output lines. Atoms whose type's class has no entry in ``gbvalues``
        are silently skipped, matching the original script's behavior.
    """
    # Map each atom type name to its atom class.
    type_to_class = {t.attrib['name']: t.attrib['class']
                     for t in root.find('AtomTypes').findall('Type')}
    lines = ['<ForceField>', ' <GBSAOBCForce>']
    for atom in root.find('NonbondedForce').findall('Atom'):
        atom_type = atom.attrib['type']
        # .get() returns None for unknown types; None is never in gbvalues,
        # so unknown types are skipped just like unknown classes.
        atom_class = type_to_class.get(atom_type)
        if atom_class in gbvalues:
            radius, scale = gbvalues[atom_class]
            lines.append(' <Atom type="%s" charge="%s" radius="%g" scale="%g"/>'
                         % (atom_type, atom.attrib['charge'], radius, scale))
    lines.append(' </GBSAOBCForce>')
    lines.append('</ForceField>')
    return lines


def main():
    """Read the force-field XML named on the command line and print the GBSA file."""
    tree = etree.parse(sys.argv[1])
    for line in gbsa_lines(tree.getroot()):
        print(line)


# Guarded so the module can be imported (e.g. by tests) without requiring argv.
if __name__ == '__main__':
    main()
This source diff could not be displayed because it is too large. You can view the blob instead.
# Hosted conda environment for the OpenMM 8 beta on Linux, pulling the beta
# packages from the openmm_rc / openmm-torch_rc labels on conda-forge.
name: openmm-8-beta-linux
channels:
- conda-forge/label/openmm_rc
- conda-forge/label/openmm-torch_rc
- conda-forge
dependencies:
- openmm==8.0.0beta
- openmm-ml==1.0beta
- openmm-torch==1.0beta1
- nnpops==0.2
- torchani==2.2.2
- pytest
# Hosted conda environment for the OpenMM 8 beta on macOS; same as the Linux
# environment but without nnpops.
name: openmm-8-beta-mac
channels:
- conda-forge/label/openmm_rc
- conda-forge/label/openmm-torch_rc
- conda-forge
dependencies:
- openmm==8.0.0beta
- openmm-ml==1.0beta
- openmm-torch==1.0beta1
- torchani==2.2.2
- pytest
# Hosted conda environment for the OpenMM 8.0.0rc1 release candidate on Linux.
name: openmm-8-rc1-linux
channels:
- conda-forge/label/openmm_rc
- conda-forge/label/openmm-torch_rc
- conda-forge
dependencies:
- openmm==8.0.0rc1
- openmm-ml==1.0rc1
- openmm-torch==1.0rc1
- nnpops==0.3
- torchani==2.2.2
- pytest
# Hosted conda environment for the OpenMM 8.0.0rc1 release candidate on macOS;
# same as the Linux environment but without nnpops.
name: openmm-8-rc1-mac
channels:
- conda-forge/label/openmm_rc
- conda-forge/label/openmm-torch_rc
- conda-forge
dependencies:
- openmm==8.0.0rc1
- openmm-ml==1.0rc1
- openmm-torch==1.0rc1
- torchani==2.2.2
- pytest
# Packaging OpenMM into ZIP installers
Set your environment variable `TAG` to the git tag for the release:
```bash
# OpenMM 7.1.1
export TAG="c1a64aa"
```
## Source
Start the docker container:
```bash
docker run -i -t --rm -e TAG -v `pwd`:/io jchodera/omnia-build-box:cuda80-amd30-clang38 bash
```
Patch the docker container for missing LaTeX files:
```
tlmgr install fncychap tabulary capt-of eqparbox environ trimspaces
```
Build the installer inside the docker container:
```bash
# Clone the OpenMM beta or release candidate tag $TAG
git clone https://github.com/pandegroup/openmm.git
cd openmm; git checkout $TAG; cd ..
# Build and package
source openmm/devtools/packaging/scripts/source/prepare.sh
source openmm/devtools/packaging/scripts/source/build.sh
source openmm/devtools/packaging/scripts/source/package.sh
# Recover the packages to host directory
cp packaging/compressed/* /io
```
## Linux
Start the docker container:
```bash
docker run -i -t --rm -e TAG -v `pwd`:/io jchodera/omnia-build-box:cuda80-amd30-clang38 bash
```
Patch the docker container for missing LaTeX files:
```
tlmgr install fncychap tabulary capt-of eqparbox environ trimspaces
```
Build the installer inside the docker container:
```bash
# Clone the OpenMM beta or release candidate tag $TAG
git clone https://github.com/pandegroup/openmm.git
cd openmm; git checkout $TAG; cd ..
# Build and package
source openmm/devtools/packaging/scripts/linux/prepare.sh
source openmm/devtools/packaging/scripts/linux/build.sh
source openmm/devtools/packaging/scripts/linux/package.sh
# Recover the packages to host directory
cp packaging/compressed/* /io
```
## OS X
On an `osx` machine with Xcode and the OS X 10.9 frameworks installed:
```bash
# Clone the OpenMM beta or release candidate tag $TAG
git clone https://github.com/pandegroup/openmm.git
cd openmm; git checkout $TAG; cd ..
# Build and package
source openmm/devtools/packaging/scripts/osx/prepare.sh
source openmm/devtools/packaging/scripts/osx/build.sh
source openmm/devtools/packaging/scripts/osx/package.sh
```
#!/bin/sh
# Interactive OpenMM installer: copies docs/include/lib/licenses into a
# user-chosen prefix, then builds and installs the bundled Python wrappers.
cd "$(dirname "$0")" || exit 1
# Ask the user for the install location and Python executable.
defaultInstallDir=/usr/local/openmm
printf "Enter install location (default=%s): " "${defaultInstallDir}"
# -r keeps backslashes in user input literal.
read -r installDir
if [ -z "${installDir}" ]
then
    installDir=${defaultInstallDir}
fi
defaultPythonBin=$(which python)
printf "Enter path to Python executable"
if [ -n "${defaultPythonBin}" ]
then
    printf " (default=%s)" "${defaultPythonBin}"
fi
printf ": "
read -r pythonBin
if [ -z "${pythonBin}" ]
then
    pythonBin=${defaultPythonBin}
fi
# Make sure it's a supported Python version.
pythonOk=$("${pythonBin}" -c "import sys; v=sys.version_info; print((v[0]==2 and v[1]>6) or v[0]>2)")
if [ "${pythonOk}" != "True" ]
then
    echo "Unsupported Python version. Only versions 2.7 and higher are supported."
    exit 1
fi
# Copy the files into place.  Create the prefix first: cp -R into a
# nonexistent directory would otherwise make the prefix a copy of docs/
# itself instead of placing docs/ inside it.
mkdir -p "${installDir}"
cp -R docs "${installDir}"
cp -R include "${installDir}"
cp -R lib "${installDir}"
cp -R licenses "${installDir}"
# Run the Python installer.
cd python || exit 1
export OPENMM_INCLUDE_PATH="${installDir}/include"
export OPENMM_LIB_PATH="${installDir}/lib"
printenv
if "${pythonBin}" setup.py build && "${pythonBin}" setup.py install "$@"
then
    # Print instructions to the user.
    echo
    echo "Installation is complete. You should now test your installation to make sure"
    echo "it is working correctly by typing the following command:"
    echo
    echo "python -m openmm.testInstallation"
else
    echo
    echo "INSTALLATION FAILED"
    echo
    echo "An error prevented the installation from completing. See above for details."
    exit 1
fi
# Manifests for automated packaging of source and binary distributions
A detailed explanation of packaging protocols can be found on the developer wiki:
https://github.com/pandegroup/openmm/wiki/Packaging-OpenMM-installers
## Contents
* `source/` - directories and files to be copied from the GitHub repo for a source distribution
* `binary/` - directories and files to be copied from install directory after build for a binary distribution
docs
examples
include
lib
licenses
openmm/cmake_modules
openmm/CMakeLists.txt
openmm/docs-source
install/docs
openmm/examples
openmm/libraries
openmm/olla
openmm/openmmapi
openmm/platforms
openmm/plugins
openmm/serialization
openmm/tests
openmm/wrappers
#!/bin/bash
# Build script for Linux distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
#
# For Docker build
#
# Fix hbb issues.
# If statements needed because multiple Python versions are built in same docker image.
if [ ! -e /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-redhat-linux ]; then
ln -s /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-CentOS-linux/ /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-redhat-linux
fi
if [ ! -e /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-redhat-linux ]; then
ln -s /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-CentOS-linux/ /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-redhat-linux
fi
# Clang paths
export CLANG_PREFIX="/opt/clang"
export PATH=$PATH:$CLANG_PREFIX/bin
# enable devtoolset-2
# will return an error return code because of python 3.x incompatible code, but this error is inconsequential
#source /opt/rh/devtoolset-2/enable || true
export PATH=/opt/rh/devtoolset-2/root/usr/bin${PATH:+:${PATH}}
export MANPATH=/opt/rh/devtoolset-2/root/usr/share/man:$MANPATH
export INFOPATH=/opt/rh/devtoolset-2/root/usr/share/info${INFOPATH:+:${INFOPATH}}
export PCP_DIR=/opt/rh/devtoolset-2/root
# Some perl Ext::MakeMaker versions install things under /usr/lib/perl5
# even though the system otherwise would go to /usr/lib64/perl5.
export PERL5LIB=/opt/rh/devtoolset-2/root//usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi:/opt/rh/devtoolset-2/root/usr/lib/perl5:/opt/rh/devtoolset-2/root//usr/lib/perl5/vendor_perl/5.8.8${PERL5LIB:+:${PERL5LIB}}
# bz847911 workaround:
# we need to evaluate rpm's installed run-time % { _libdir }, not rpmbuild time
# or else /etc/ld.so.conf.d files?
rpmlibdir=`rpm --eval "%{_libdir}"`
# bz1017604: On 64-bit hosts, we should include also the 32-bit library path.
if [ "$rpmlibdir" != "${rpmlibdir/lib64/}" ]; then
rpmlibdir32=":/opt/rh/devtoolset-2/root${rpmlibdir/lib64/lib}"
fi
export LD_LIBRARY_PATH=/opt/rh/devtoolset-2/root$rpmlibdir$rpmlibdir32${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
# duplicate python site.py logic for sitepackages
# Use version_info: sys.version[:3] truncates "3.10.x" to "3.1".
pythonvers=`python -c 'import sys; print("%d.%d" % sys.version_info[:2])'`
export PYTHONPATH=/opt/rh/devtoolset-2/root/usr/lib64/python$pythonvers/site-packages:/opt/rh/devtoolset-2/root/usr/lib/python$pythonvers/site-packages${PYTHONPATH:+:${PYTHONPATH}}
# CFLAGS
export MINIMAL_CFLAGS="-g -O3"
export CFLAGS="$MINIMAL_CFLAGS"
export CXXFLAGS="$MINIMAL_CFLAGS"
export LDFLAGS="$LDPATHFLAGS"
# Set relative workspace path.
export WORKSPACE=`pwd`
# Add conda binaries to path.
PATH=$WORKSPACE/miniconda/bin:$PATH
INSTALL=`pwd`/install
if [ -e $INSTALL ]; then
rm -rf $INSTALL
fi
CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=$INSTALL"
# Don't build tests
CMAKE_FLAGS+=" -DBUILD_TESTING=OFF"
# Use clang 3.8.1 inside omnia-build-box docker image
CMAKE_FLAGS+=" -DCMAKE_C_COMPILER=$CLANG_PREFIX/bin/clang -DCMAKE_CXX_COMPILER=$CLANG_PREFIX/bin/clang++"
# Ensure we build a release
CMAKE_FLAGS+=" -DCMAKE_BUILD_TYPE=Release"
# Use NVIDIA CUDA 8.0
CMAKE_FLAGS+=" -DCUDA_CUDART_LIBRARY=/usr/local/cuda-8.0/lib64/libcudart.so"
CMAKE_FLAGS+=" -DCUDA_NVCC_EXECUTABLE=/usr/local/cuda-8.0/bin/nvcc"
CMAKE_FLAGS+=" -DCUDA_SDK_ROOT_DIR=/usr/local/cuda-8.0/"
CMAKE_FLAGS+=" -DCUDA_TOOLKIT_INCLUDE=/usr/local/cuda-8.0/include"
CMAKE_FLAGS+=" -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-8.0/"
# Use AMD APP SDK 3.0
CMAKE_FLAGS+=" -DOPENCL_INCLUDE_DIR=/opt/AMDAPPSDK-3.0/include/"
CMAKE_FLAGS+=" -DOPENCL_LIBRARY=/opt/AMDAPPSDK-3.0/lib/x86_64/libOpenCL.so"
# Generate API docs
CMAKE_FLAGS+=" -DOPENMM_GENERATE_API_DOCS=ON"
# Necessary to find GL headers
CMAKE_FLAGS+=" -DCMAKE_CXX_FLAGS_RELEASE=-I/usr/include/nvidia/"
# Build in subdirectory.
if [ -e build ]; then
rm -rf build
fi
mkdir build
cd build
cmake ../openmm $CMAKE_FLAGS
make -j4 all install
export CFLAGS="$MINIMAL_CFLAGS"
export CXXFLAGS="$MINIMAL_CFLAGS"
export LDFLAGS="$LDPATHFLAGS"
make -j4 PythonInstall C++ApiDocs PythonApiDocs sphinxpdf
# Install.
make install
cd ..
#!/bin/bash
# Packaging script for Linux distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
# CONFIGURE HERE
export PACKAGE_DIR="packaging" # directory to stuff packaged source distribution
export VERSION=$(sed -nr "s/OPENMM_VERSION:STRING=(.*)/\1/p" build/CMakeCache.txt)
export PACKAGE_SUBDIR="OpenMM-${VERSION}-Linux" # directory where distribution will be unpacked
export DISTRO_PREFIX="OpenMM-${VERSION}-Linux" # prefix for source distribution (e.g. ${DISTRIBUTION_NAME}.zip)
# Perform all work in a work directory.
cd work
# Clean up.
rm -rf $PACKAGE_DIR
# Make a directory to contain packaged source distribution
mkdir $PACKAGE_DIR
mkdir $PACKAGE_DIR/$PACKAGE_SUBDIR
# Copy every manifest entry from the install tree into the package directory.
for filename in $( cat openmm/devtools/packaging/manifests/binary/manifest.txt ); do
    CMD="cp -r install/$filename $PACKAGE_DIR/$PACKAGE_SUBDIR"
    echo $CMD
    # Run the command directly; backticks would try to execute cp's output.
    $CMD
done
# Add the install.sh script
CMD="cp -r openmm/devtools/packaging/install.sh $PACKAGE_DIR/$PACKAGE_SUBDIR"
echo $CMD
$CMD
# Make Python source distribution.
echo "Building Python source distribution..."
pushd .
cd build
make PythonSdist
cd python/dist
tar zxf OpenMM-${VERSION}.tar.gz
mv OpenMM-${VERSION} python
popd
cp -r build/python/dist/python $PACKAGE_DIR/$PACKAGE_SUBDIR
# Create archives.
cd $PACKAGE_DIR
mkdir compressed
tar zcf compressed/${DISTRO_PREFIX}.tgz $PACKAGE_SUBDIR
zip -r compressed/${DISTRO_PREFIX}.zip $PACKAGE_SUBDIR
cd ..
#!/bin/bash
# Prepare for build by ensuring necessary prerequisites are locally installed.
# Set relative workspace path.
export WORKSPACE=`pwd`
# Install miniconda
export VERSION="latest"
export PLATFORM="Linux"
export ARCH="x86_64"
export MINICONDA="Miniconda3-$VERSION-$PLATFORM-$ARCH.sh"
# The installer creates a directory, so test with -d; the original -f test
# never matched, forcing a fresh download on every run.
if [ -d miniconda ];
then
    echo "miniconda already exists"
else
    echo "Downloading miniconda..."
    # Installer files are named Miniconda3-*; remove those too.
    rm -rf Miniconda3-* Miniconda-* miniconda ~/.condarc
    wget --quiet https://repo.continuum.io/miniconda/${MINICONDA}
    bash ${MINICONDA} -b -p miniconda
    PIP_ARGS="-U"
fi
# Add to path.
export PATH=$WORKSPACE/miniconda/bin:$PATH
# Workaround for missing libgcrypt
yum install -y libgcrypt
# Ensure configuration is up to date.
conda config --add channels omnia
conda install --yes --quiet swig pip doxygen sphinx sphinxcontrib-bibtex sphinxcontrib-lunrsearch sphinxcontrib-autodoc_doxygen lxml cmake
#!/bin/bash
# Build script for Mac OS X distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
# Set relative workspace path.
export WORKSPACE=`pwd`
# Add conda binaries to path.
PATH=$WORKSPACE/miniconda/bin:$PATH
# Set install directory.
INSTALL=`pwd`/install
if [ -e $INSTALL ]; then
rm -rf $INSTALL
fi
CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=$INSTALL"
CMAKE_FLAGS+=" -DCMAKE_C_COMPILER=clang -DCMAKE_CXX_COMPILER=clang++"
CMAKE_FLAGS+=" -DCMAKE_OSX_DEPLOYMENT_TARGET=10.9"
CMAKE_FLAGS+=" -DCMAKE_OSX_SYSROOT=/Applications/Xcode.app/Contents/Developer/Platforms/MacOSX.platform/Developer/SDKs/MacOSX10.11.sdk"
CMAKE_FLAGS+=" -DOPENMM_GENERATE_API_DOCS=ON"
# Build in subdirectory.
if [ -e build ]; then
rm -rf build
fi
mkdir build
cd build
cmake ../openmm $CMAKE_FLAGS
make -j4 all install
make -j4 PythonInstall C++ApiDocs PythonApiDocs sphinxpdf
# Install.
make install
# Return to directory
cd $WORKSPACE
#!/bin/bash
# Packaging script for Mac OS X distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
# CONFIGURE HERE
export PACKAGE_DIR="packaging" # directory to stuff packaged source distribution
export VERSION=$(grep "OPENMM_VERSION:STRING" build/CMakeCache.txt | sed -E "s/OPENMM_VERSION:STRING=(.*)/\1/")
export PACKAGE_SUBDIR="OpenMM-${VERSION}-Mac" # directory where distribution will be unpacked
export DISTRO_PREFIX="OpenMM-${VERSION}-Mac" # prefix for source distribution (e.g. ${DISTRIBUTION_NAME}.zip)
# Clean up.
rm -rf $PACKAGE_DIR
# Make a directory to contain packaged source distribution
mkdir $PACKAGE_DIR
mkdir $PACKAGE_DIR/$PACKAGE_SUBDIR
# Copy every manifest entry from the install tree into the package directory.
for filename in $( cat openmm/devtools/packaging/manifests/binary/manifest.txt ); do
    CMD="cp -r install/$filename $PACKAGE_DIR/$PACKAGE_SUBDIR"
    echo $CMD
    # Run the command directly; backticks would try to execute cp's output.
    $CMD
done
# Add the install.sh script
CMD="cp -r openmm/devtools/packaging/install.sh $PACKAGE_DIR/$PACKAGE_SUBDIR"
echo $CMD
$CMD
# Make Python source distribution.
echo "Building Python source distribution..."
pushd .
cd build
make PythonSdist
cd python/dist
tar zxf OpenMM-${VERSION}.tar.gz
mv OpenMM-${VERSION} python
popd
cp -r build/python/dist/python $PACKAGE_DIR/$PACKAGE_SUBDIR
# Create archives.
cd $PACKAGE_DIR
mkdir compressed
tar zcf compressed/${DISTRO_PREFIX}.tgz $PACKAGE_SUBDIR
zip -r compressed/${DISTRO_PREFIX}.zip $PACKAGE_SUBDIR
cd ..
#!/bin/bash
# Prepare for build by ensuring necessary prerequisites are locally installed.
# NOTE: this script uses bash syntax (export, if/then/fi), so the interpreter
# must be bash -- the original tcsh shebang cannot run these commands.
# Set relative workspace path.
export WORKSPACE=`pwd`
# Install miniconda
export VERSION="latest"
export PLATFORM="MacOSX"
export ARCH="x86_64"
export MINICONDA="Miniconda3-$VERSION-$PLATFORM-$ARCH.sh"
# The installer creates a directory, so test with -d; -f never matched,
# forcing a fresh download on every run.
if [ -d "$WORKSPACE/miniconda" ];
then
    echo "miniconda already exists"
else
    echo "Downloading miniconda..."
    rm -rf "$WORKSPACE"/Miniconda3-*
    wget https://repo.continuum.io/miniconda/${MINICONDA}
    bash ${MINICONDA} -b -p "$WORKSPACE/miniconda"
    PIP_ARGS="-U"
fi
# Add to path.
export PATH=$WORKSPACE/miniconda/bin:$PATH
# Ensure configuration is up to date.
conda config --add channels http://conda.binstar.org/omnia
conda install --yes --quiet swig pip doxygen sphinx sphinxcontrib-bibtex sphinxcontrib-lunrsearch sphinxcontrib-autodoc_doxygen lxml cmake
pip install sphinxcontrib-bibtex sphinxcontrib-lunrsearch sphinxcontrib-autodoc_doxygen
#!/bin/bash
# Build script for Linux distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
#
# For Docker build
#
# Fix hbb issues.
# If statements needed because multiple Python versions are built in same docker image.
if [ ! -e /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-redhat-linux ]; then
ln -s /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-CentOS-linux/ /opt/rh/devtoolset-2/root/usr/lib/gcc/x86_64-redhat-linux
fi
if [ ! -e /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-redhat-linux ]; then
ln -s /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-CentOS-linux/ /opt/rh/devtoolset-2/root/usr/include/c++/4.8.2/x86_64-redhat-linux
fi
# Clang paths
export CLANG_PREFIX="/opt/clang"
export PATH=$PATH:$CLANG_PREFIX/bin
# enable devtoolset-2
# will return an error return code because of python 3.x incompatible code, but this error is inconsequential
#source /opt/rh/devtoolset-2/enable || true
export PATH=/opt/rh/devtoolset-2/root/usr/bin${PATH:+:${PATH}}
export MANPATH=/opt/rh/devtoolset-2/root/usr/share/man:$MANPATH
export INFOPATH=/opt/rh/devtoolset-2/root/usr/share/info${INFOPATH:+:${INFOPATH}}
export PCP_DIR=/opt/rh/devtoolset-2/root
# Some perl Ext::MakeMaker versions install things under /usr/lib/perl5
# even though the system otherwise would go to /usr/lib64/perl5.
export PERL5LIB=/opt/rh/devtoolset-2/root//usr/lib64/perl5/vendor_perl/5.8.8/x86_64-linux-thread-multi:/opt/rh/devtoolset-2/root/usr/lib/perl5:/opt/rh/devtoolset-2/root//usr/lib/perl5/vendor_perl/5.8.8${PERL5LIB:+:${PERL5LIB}}
# bz847911 workaround:
# we need to evaluate rpm's installed run-time % { _libdir }, not rpmbuild time
# or else /etc/ld.so.conf.d files?
rpmlibdir=`rpm --eval "%{_libdir}"`
# bz1017604: On 64-bit hosts, we should include also the 32-bit library path.
if [ "$rpmlibdir" != "${rpmlibdir/lib64/}" ]; then
rpmlibdir32=":/opt/rh/devtoolset-2/root${rpmlibdir/lib64/lib}"
fi
export LD_LIBRARY_PATH=/opt/rh/devtoolset-2/root$rpmlibdir$rpmlibdir32${LD_LIBRARY_PATH:+:${LD_LIBRARY_PATH}}
# duplicate python site.py logic for sitepackages
# Use version_info: sys.version[:3] truncates "3.10.x" to "3.1".
pythonvers=`python -c 'import sys; print("%d.%d" % sys.version_info[:2])'`
export PYTHONPATH=/opt/rh/devtoolset-2/root/usr/lib64/python$pythonvers/site-packages:/opt/rh/devtoolset-2/root/usr/lib/python$pythonvers/site-packages${PYTHONPATH:+:${PYTHONPATH}}
# CFLAGS
export MINIMAL_CFLAGS="-g -O3"
export CFLAGS="$MINIMAL_CFLAGS"
export CXXFLAGS="$MINIMAL_CFLAGS"
export LDFLAGS="$LDPATHFLAGS"
# Set relative workspace path.
export WORKSPACE=`pwd`
# Add conda binaries to path.
PATH=$WORKSPACE/miniconda/bin:$PATH
INSTALL=`pwd`/install
if [ -e $INSTALL ]; then
rm -rf $INSTALL
fi
CMAKE_FLAGS="-DCMAKE_INSTALL_PREFIX=$INSTALL"
# Don't build tests
CMAKE_FLAGS+=" -DBUILD_TESTING=OFF"
# Use clang 3.8.1 inside omnia-build-box docker image
CMAKE_FLAGS+=" -DCMAKE_C_COMPILER=$CLANG_PREFIX/bin/clang -DCMAKE_CXX_COMPILER=$CLANG_PREFIX/bin/clang++"
# Ensure we build a release
CMAKE_FLAGS+=" -DCMAKE_BUILD_TYPE=Release"
# Use NVIDIA CUDA 8.0
CMAKE_FLAGS+=" -DCUDA_CUDART_LIBRARY=/usr/local/cuda-8.0/lib64/libcudart.so"
CMAKE_FLAGS+=" -DCUDA_NVCC_EXECUTABLE=/usr/local/cuda-8.0/bin/nvcc"
CMAKE_FLAGS+=" -DCUDA_SDK_ROOT_DIR=/usr/local/cuda-8.0/"
CMAKE_FLAGS+=" -DCUDA_TOOLKIT_INCLUDE=/usr/local/cuda-8.0/include"
CMAKE_FLAGS+=" -DCUDA_TOOLKIT_ROOT_DIR=/usr/local/cuda-8.0/"
# Use AMD APP SDK 3.0
CMAKE_FLAGS+=" -DOPENCL_INCLUDE_DIR=/opt/AMDAPPSDK-3.0/include/"
CMAKE_FLAGS+=" -DOPENCL_LIBRARY=/opt/AMDAPPSDK-3.0/lib/x86_64/libOpenCL.so"
# Generate API docs
CMAKE_FLAGS+=" -DOPENMM_GENERATE_API_DOCS=ON"
# Necessary to find GL headers
CMAKE_FLAGS+=" -DCMAKE_CXX_FLAGS_RELEASE=-I/usr/include/nvidia/"
# Build in subdirectory.
if [ -e build ]; then
rm -rf build
fi
mkdir build
cd build
cmake ../openmm $CMAKE_FLAGS
make -j4 all install
export CFLAGS="$MINIMAL_CFLAGS"
export CXXFLAGS="$MINIMAL_CFLAGS"
export LDFLAGS="$LDPATHFLAGS"
make -j4 PythonInstall C++ApiDocs PythonApiDocs sphinxpdf
# Install.
make install
cd ..
#!/bin/bash
# Packaging script for Linux distribution, for use in automated packaging.
# Note that this must be run from outside the checked-out openmm/ directory.
# CONFIGURE HERE
export PACKAGE_DIR="packaging" # directory to stuff packaged source distribution
export VERSION=$(sed -nr "s/OPENMM_VERSION:STRING=(.*)/\1/p" build/CMakeCache.txt)
export PACKAGE_SUBDIR="OpenMM-${VERSION}-Source" # directory where distribution will be unpacked
export DISTRO_PREFIX="OpenMM-${VERSION}-Source" # prefix for source distribution (e.g. ${DISTRIBUTION_NAME}.zip)
# Perform all work in a work directory.
cd work
# Clean up.
rm -rf $PACKAGE_DIR
# Make a directory to contain packaged source distribution
mkdir $PACKAGE_DIR
mkdir $PACKAGE_DIR/$PACKAGE_SUBDIR
# Copy every manifest entry into the package directory.
for filename in $( cat openmm/devtools/packaging/manifests/source/manifest.txt ); do
    CMD="cp -r $filename $PACKAGE_DIR/$PACKAGE_SUBDIR"
    echo $CMD
    # Run the command directly; backticks would try to execute cp's output.
    $CMD
done
# Add the install.sh script
#CMD="cp -r openmm/devtools/packaging/install.sh $PACKAGE_DIR/$PACKAGE_SUBDIR"
#echo $CMD
#`$CMD`
# Make Python source distribution.
echo "Building Python source distribution..."
pushd .
cd build
make PythonSdist
cd python/dist
tar zxf OpenMM-${VERSION}.tar.gz
mv OpenMM-${VERSION} python
popd
cp -r build/python/dist/python $PACKAGE_DIR/$PACKAGE_SUBDIR
# Create archives.
cd $PACKAGE_DIR
mkdir compressed
tar zcf compressed/${DISTRO_PREFIX}.tgz $PACKAGE_SUBDIR
zip -r compressed/${DISTRO_PREFIX}.zip $PACKAGE_SUBDIR
cd ..
#!/bin/bash
# Prepare for build by ensuring necessary prerequisites are locally installed.
# Set relative workspace path.
export WORKSPACE=`pwd`
# Install miniconda
export VERSION="latest"
export PLATFORM="Linux"
export ARCH="x86_64"
export MINICONDA="Miniconda3-$VERSION-$PLATFORM-$ARCH.sh"
# The installer creates a directory, so test with -d; the original -f test
# never matched, forcing a fresh download on every run.
if [ -d miniconda ];
then
    echo "miniconda already exists"
else
    echo "Downloading miniconda..."
    # Installer files are named Miniconda3-*; remove those too.
    rm -rf Miniconda3-* Miniconda-* miniconda ~/.condarc
    wget --quiet https://repo.continuum.io/miniconda/${MINICONDA}
    bash ${MINICONDA} -b -p miniconda
    PIP_ARGS="-U"
fi
# Add to path.
export PATH=$WORKSPACE/miniconda/bin:$PATH
# Workaround for missing libgcrypt
yum install -y libgcrypt
# Ensure configuration is up to date.
conda config --add channels omnia
conda install --yes --quiet swig pip doxygen sphinx sphinxcontrib-bibtex sphinxcontrib-lunrsearch sphinxcontrib-autodoc_doxygen lxml cmake
# Windows 10 VM used for building the OpenMM Windows installers.
Vagrant.configure("2") do |config|
  config.vm.box = "gusztavvargadr/windows-10"
  # prepare.ps1 installs the build toolchain inside the guest.
  config.vm.provision :shell, path: "prepare.ps1"
  config.vm.provider :virtualbox do |vb|
    vb.customize ["modifyvm", :id, "--memory", "2048"]
    vb.customize ["modifyvm", :id, "--cpus", "2"]
    vb.customize ["modifyvm", :id, "--ioapic", "on"]
  end
end
REM Configure and build OpenMM with CMake + JOM, build the API docs, then
REM install and produce the Python binary distribution.
mkdir build
cd build
REM AMD APP SDK provides the OpenCL headers and import library.
set APPSDK=C:\Program Files (x86)\AMD APP SDK\2.9-1
"C:\Program Files\CMake\bin\cmake.exe" .. -G "NMake Makefiles JOM" -DCMAKE_BUILD_TYPE=Release -DOPENMM_GENERATE_API_DOCS=ON ^
-DOPENCL_INCLUDE_DIR="%APPSDK%\include" -DOPENCL_LIBRARY="%APPSDK%\lib\x86_64\OpenCL.lib"
jom
jom PythonInstall
jom C++ApiDocs
jom PythonApiDocs
REM jom sphinxpdf
jom install
jom PythonBdist
# Provisioning script for the Windows build VM: installs CUDA, the AMD APP
# SDK, Miniconda, and the remaining build tools (conda + chocolatey).
cd C:\Users\vagrant
# Install CUDA.
wget https://developer.nvidia.com/compute/cuda/10.1/Prod/network_installers/cuda_10.1.168_win10_network.exe -UseBasicParsing -OutFile cuda_10.1.168_win10_network.exe
# Silent install of only the compile-time components (no display driver).
.\cuda_10.1.168_win10_network.exe -s nvcc_10.1 cudart_10.1 cufft_10.1 cufft_dev_10.1 nvrtc_10.1 nvrtc_dev_10.1 | Out-Null
# Install AMD APP SDK.
wget https://s3.amazonaws.com/omnia-ci/AMD-APP-SDK-v2.9-1.599.381-GA-Full-windows-64.exe -UseBasicParsing -OutFile AMD-APP-SDK-v2.9-1.599.381-GA-Full-windows-64.exe
.\AMD-APP-SDK-v2.9-1.599.381-GA-Full-windows-64.exe /S /v/qn | Out-Null
# Install Miniconda.
wget https://repo.continuum.io/miniconda/Miniconda3-latest-Windows-x86_64.exe -UseBasicParsing -OutFile Miniconda3-latest-Windows-x86_64.exe
.\Miniconda3-latest-Windows-x86_64.exe /S /D=C:\Miniconda3 | Out-Null
# Persist conda/pip locations on the user-level PATH for later sessions.
[Environment]::SetEnvironmentVariable("Path", $env:Path + ";C:\Miniconda3;C:\Miniconda3\Scripts;C:\Miniconda3\Library\bin", [EnvironmentVariableTarget]::User)
# Install software with conda.
& "C:\Miniconda3\Scripts\conda.exe" config --add channels omnia --add channels conda-forge
& "C:\Miniconda3\Scripts\conda.exe" install -y jinja2 lxml sphinx sphinxcontrib-autodoc_doxygen sphinxcontrib-lunrsearch conda-build anaconda-client
& "C:\Miniconda3\Scripts\pip.exe" install sphinxcontrib.bibtex
# Install software with choco.
choco install -y doxygen.portable swig cmake doxygen.install vcbuildtools git jom patch
"""
Run test suite through CTest, with some options set for the CI environment.
- Runs with a per-test and overall timeout which can be governed by the
avialable time on the CI system.
- Reruns tests which fail (does not rerun tests which merely timeout).
"""
from __future__ import print_function
import sys
import os.path
import shutil
import time
from glob import glob
from subprocess import call
from argparse import ArgumentParser
from datetime import datetime, timedelta
from xml.etree import ElementTree
def main():
    """Parse CI options, run the test suite, and re-run failing tests.

    Returns:
        The exit status of the last ctest invocation (0 on success),
        suitable for passing to sys.exit().
    """
    parser = ArgumentParser()
    parser.add_argument(
        "--start-time",
        help="Time at which the overall CI job started (unix timestamp)",
        type=int,
        default=int(time.time()))
    parser.add_argument(
        "--job-duration",
        help="Overall time budget for the CI job (minutes). Default=30",
        default=30.,
        type=float)
    parser.add_argument(
        "--run-percent",
        help="Allocate this percent test execution time executing the main "
             "test suite. The remaining fraction will be used for re-running "
             "failing tests. Default=90",
        type=float,
        default=90.)
    parser.add_argument(
        '--timeout',
        help="Timeout for individual tests (seconds). Default=180",
        type=str,
        default='180')
    parser.add_argument(
        '--in-order',
        help='Run the tests in order',
        default=False,
        action='store_true')
    parser.add_argument(
        '--parallel',
        help='Number of processors to use',
        type=int,
        default=1)
    parser.add_argument(
        '--attempts',
        help='Number of times failed tests will be re-run',
        type=int,
        default=1
    )
    args, raw_args = parser.parse_known_args()
    status = execute_tests(args, raw_args)
    attempts = 0
    # Re-run failing tests up to --attempts times.  The original used 'if'
    # here, which capped retries at one regardless of the --attempts value.
    while status != 0 and attempts < args.attempts:
        status = execute_failed_tests(args, raw_args)
        attempts += 1
    return status
def execute_tests(options, raw_options):
    """Run the full test suite under ctest, bounded by the CI time budget.

    Args:
        options: parsed namespace from main()'s ArgumentParser.
        raw_options: unrecognized argv entries, forwarded verbatim to ctest.

    Returns:
        The ctest process exit status.
    """
    start_time = datetime.fromtimestamp(options.start_time)
    stop_time = start_time + timedelta(minutes=options.job_duration)
    # timedelta for the amount of time from now until the CI job runs out
    remaining = stop_time - datetime.now()
    # tell CTest only to use some fraction of the remaining time for this
    # invocation
    stop_time = start_time + timedelta(
        seconds=(options.run_percent / 100.0) * remaining.seconds)
    # Remove stale results so the Test.xml parsed later is from this run.
    if os.path.isdir('Testing'):
        shutil.rmtree('Testing')
    # Randomize scheduling unless --in-order was requested.  The original
    # condition was inverted: it passed --schedule-random exactly when the
    # user asked for in-order execution.
    return call(['ctest',
                 '--output-on-failure',
                 '--parallel', str(options.parallel),
                 '-T', 'Test',
                 '--timeout', options.timeout,
                 '--stop-time', stop_time.strftime('%H:%M:%S')] + raw_options +
                ([] if options.in_order else ['--schedule-random']))
def execute_failed_tests(options, raw_options):
    """Re-run tests that failed (excluding tests that merely timed out).

    Reads the single Test.xml produced by the previous ctest invocation,
    collects the names of failed-but-not-timed-out tests, and re-runs just
    those via ctest -R.

    Returns:
        The ctest process exit status of the re-run.
    """
    matches = glob('Testing/*/Test.xml')
    assert len(matches) == 1
    root = ElementTree.parse(matches[0])
    tests = root.findall('.//Testing/Test')

    def failed_without_timeout(test_node):
        # Returns None (falsy) for passing tests; timeouts are excluded
        # because re-running them would just burn the remaining budget.
        if test_node.get('Status') == 'failed':
            return test_node.find(
                'Results/NamedMeasurement[@name="Exit Code"]/Value').text != 'Timeout'

    failed_tests = [t.find('Name').text
                    for t in tests if failed_without_timeout(t)]
    print('*'*30)
    print('Rerunning failing tests...')
    print('*'*30)
    start_time = datetime.fromtimestamp(options.start_time)
    stop_time = start_time + timedelta(minutes=options.job_duration)
    # Same scheduling rule as execute_tests; the original condition was
    # inverted (--in-order enabled --schedule-random).
    return call(['ctest'] + raw_options + [
        '--output-on-failure',
        '--parallel', str(options.parallel),
        '-R', '|'.join(failed_tests),
        '--timeout', options.timeout,
        '--stop-time', stop_time.strftime('%H:%M:%S')] +
        ([] if options.in_order else ['--schedule-random']))
# Propagate the final ctest exit status to the CI system.
if __name__ == '__main__':
    sys.exit(main())
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<title>OpenMM API Documentation</title>
<meta http-equiv="REFRESH" content="0;url=api-c++/index.html">
</head>
<body>
</body>
</html>
\ No newline at end of file
#
# Build and install API documentation
#
# Doxygen is optional: the API-doc targets are only added when the
# executable is found AND the user opted in via OPENMM_GENERATE_API_DOCS.
find_package(Doxygen QUIET)
set(OPENMM_GENERATE_API_DOCS OFF CACHE BOOL "Whether to create API documentation using Doxygen")
IF(DOXYGEN_EXECUTABLE)
    # Generate C++ API documentation
    IF (OPENMM_GENERATE_API_DOCS)
        ADD_SUBDIRECTORY(api-c++)
    ENDIF (OPENMM_GENERATE_API_DOCS)
    # Generate Python API documentation (also requires the Python wrappers)
    IF (OPENMM_BUILD_PYTHON_WRAPPERS AND OPENMM_GENERATE_API_DOCS)
        ADD_SUBDIRECTORY(api-python)
    ENDIF (OPENMM_BUILD_PYTHON_WRAPPERS AND OPENMM_GENERATE_API_DOCS)
ENDIF(DOXYGEN_EXECUTABLE)
#
# Build and install the User Guide and Developer Guide
#
SET(SPHINX_BUILD_DIR "${CMAKE_BINARY_DIR}/sphinx-docs/")
FILE(MAKE_DIRECTORY "${SPHINX_BUILD_DIR}")
FILE(GLOB_RECURSE USER_GUIDE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/usersguide/*)
FILE(GLOB_RECURSE DEVELOPER_GUIDE_FILES ${CMAKE_CURRENT_SOURCE_DIR}/developerguide/*)
IF(WIN32)
SET(SPHINX_MAKE_USERGUIDE_COMMAND "${CMAKE_COMMAND}" -E env "OPENMM_VERSION=${OPENMM_MAJOR_VERSION}.${OPENMM_MINOR_VERSION}" "BUILDDIR=${SPHINX_BUILD_DIR}/userguide" make.bat)
SET(SPHINX_MAKE_DEVELOPERGUIDE_COMMAND "${CMAKE_COMMAND}" -E env "OPENMM_VERSION=${OPENMM_MAJOR_VERSION}.${OPENMM_MINOR_VERSION}" "BUILDDIR=${SPHINX_BUILD_DIR}/developerguide" make.bat)
ELSE(WIN32)
SET(SPHINX_MAKE_USERGUIDE_COMMAND "${CMAKE_MAKE_PROGRAM}" OPENMM_VERSION="${OPENMM_MAJOR_VERSION}.${OPENMM_MINOR_VERSION}" BUILDDIR="${SPHINX_BUILD_DIR}/userguide")
SET(SPHINX_MAKE_DEVELOPERGUIDE_COMMAND "${CMAKE_MAKE_PROGRAM}" OPENMM_VERSION="${OPENMM_MAJOR_VERSION}.${OPENMM_MINOR_VERSION}" BUILDDIR="${SPHINX_BUILD_DIR}/developerguide")
ENDIF(WIN32)
ADD_CUSTOM_COMMAND(
OUTPUT "${SPHINX_BUILD_DIR}/userguide/latex/OpenMMUsersGuide.pdf"
COMMAND ${SPHINX_MAKE_USERGUIDE_COMMAND} latexpdf
DEPENDS ${USER_GUIDE_FILES}
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/usersguide"
COMMENT "Generating PDF user guide"
)
ADD_CUSTOM_COMMAND(
OUTPUT "${SPHINX_BUILD_DIR}/developerguide/latex/OpenMMDeveloperGuide.pdf"
COMMAND ${SPHINX_MAKE_DEVELOPERGUIDE_COMMAND} latexpdf
DEPENDS ${DEVELOPER_GUIDE_FILES}
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/developerguide"
COMMENT "Generating PDF developer guide"
)
ADD_CUSTOM_TARGET(sphinxpdf
DEPENDS "${SPHINX_BUILD_DIR}/userguide/latex/OpenMMUsersGuide.pdf" "${SPHINX_BUILD_DIR}/developerguide/latex/OpenMMDeveloperGuide.pdf"
)
ADD_CUSTOM_COMMAND(
OUTPUT "${SPHINX_BUILD_DIR}/userguide/html/index.html"
COMMAND ${SPHINX_MAKE_USERGUIDE_COMMAND} html
DEPENDS ${USER_GUIDE_FILES}
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/usersguide"
COMMENT "Generating HTML user guide"
)
ADD_CUSTOM_COMMAND(
OUTPUT "${SPHINX_BUILD_DIR}/developerguide/html/index.html"
COMMAND ${SPHINX_MAKE_DEVELOPERGUIDE_COMMAND} html
DEPENDS ${DEVELOPER_GUIDE_FILES}
WORKING_DIRECTORY "${CMAKE_CURRENT_SOURCE_DIR}/developerguide"
COMMENT "Generating HTML developer guide"
)
ADD_CUSTOM_TARGET(sphinxhtml
DEPENDS "${SPHINX_BUILD_DIR}/userguide/html/index.html" "${SPHINX_BUILD_DIR}/developerguide/html/index.html"
)
# Install the PDF guides (OPTIONAL: skipped when the sphinxpdf target was
# never built) and the bundled license files.  A "/" was missing before
# "developerguide"; it only worked because SPHINX_BUILD_DIR happens to end
# with a slash.  Both paths now match the form used by the custom commands.
install(FILES "${SPHINX_BUILD_DIR}/userguide/latex/OpenMMUsersGuide.pdf" "${SPHINX_BUILD_DIR}/developerguide/latex/OpenMMDeveloperGuide.pdf"
        DESTINATION docs/ OPTIONAL)
FILE(GLOB LICENSE_FILES "licenses/*.txt")
install(FILES ${LICENSE_FILES}
        DESTINATION licenses/)
# Doxyfile 1.5.3
#---------------------------------------------------------------------------
# Project related configuration options
#---------------------------------------------------------------------------
DOXYFILE_ENCODING = UTF-8
PROJECT_NAME = @PROJECT_NAME@
PROJECT_NUMBER =
OUTPUT_DIRECTORY =
CREATE_SUBDIRS = NO
OUTPUT_LANGUAGE = English
BRIEF_MEMBER_DESC = YES
REPEAT_BRIEF = YES
ABBREVIATE_BRIEF =
ALWAYS_DETAILED_SEC = NO
INLINE_INHERITED_MEMB = NO
FULL_PATH_NAMES = NO
STRIP_FROM_PATH =
STRIP_FROM_INC_PATH =
SHORT_NAMES = NO
JAVADOC_AUTOBRIEF = YES
QT_AUTOBRIEF = NO
MULTILINE_CPP_IS_BRIEF = NO
DETAILS_AT_TOP = YES
INHERIT_DOCS = YES
SEPARATE_MEMBER_PAGES = NO
TAB_SIZE = 4
ALIASES =
OPTIMIZE_OUTPUT_FOR_C = NO
OPTIMIZE_OUTPUT_JAVA = NO
BUILTIN_STL_SUPPORT = NO
CPP_CLI_SUPPORT = NO
DISTRIBUTE_GROUP_DOC = YES
SUBGROUPING = YES
#---------------------------------------------------------------------------
# Build related configuration options
#---------------------------------------------------------------------------
EXTRACT_ALL = YES
EXTRACT_PRIVATE = NO
EXTRACT_STATIC = NO
EXTRACT_LOCAL_CLASSES = YES
EXTRACT_LOCAL_METHODS = NO
EXTRACT_ANON_NSPACES = NO
HIDE_UNDOC_MEMBERS = NO
HIDE_UNDOC_CLASSES = NO
HIDE_FRIEND_COMPOUNDS = NO
HIDE_IN_BODY_DOCS = YES
INTERNAL_DOCS = NO
CASE_SENSE_NAMES = YES
HIDE_SCOPE_NAMES = YES
SHOW_INCLUDE_FILES = YES
INLINE_INFO = YES
SORT_MEMBER_DOCS = YES
SORT_BRIEF_DOCS = NO
SORT_BY_SCOPE_NAME = NO
GENERATE_TODOLIST = YES
GENERATE_TESTLIST = YES
GENERATE_BUGLIST = YES
GENERATE_DEPRECATEDLIST= YES
ENABLED_SECTIONS =
MAX_INITIALIZER_LINES = 30
SHOW_USED_FILES = YES
# NOTE: SHOW_DIRECTORIES is configured once below (duplicate entry removed;
# Doxygen uses the last occurrence, so the effective value is NO).
FILE_VERSION_FILTER =
SHOW_NAMESPACES = NO
SHOW_FILES = NO
SHOW_DIRECTORIES = NO
#---------------------------------------------------------------------------
# configuration options related to warning and progress messages
#---------------------------------------------------------------------------
QUIET = NO
WARNINGS = YES
WARN_IF_UNDOCUMENTED = YES
WARN_IF_DOC_ERROR = YES
WARN_NO_PARAMDOC = NO
WARN_FORMAT = "$file:$line: $text "
WARN_LOGFILE =
#---------------------------------------------------------------------------
# configuration options related to the input files
#---------------------------------------------------------------------------
INPUT = "@CMAKE_SOURCE_DIR@/openmmapi" \
"@CMAKE_SOURCE_DIR@/olla" \
"@CMAKE_SOURCE_DIR@/serialization/include/openmm/serialization/XmlSerializer.h" \
"@CMAKE_SOURCE_DIR@/plugins/drude/openmmapi/include" \
"@CMAKE_SOURCE_DIR@/plugins/rpmd/openmmapi/include" \
"@CMAKE_SOURCE_DIR@/plugins/amoeba/openmmapi/include"
INPUT_ENCODING = UTF-8
FILE_PATTERNS =
RECURSIVE = YES
EXCLUDE =
EXCLUDE_SYMLINKS = NO
EXCLUDE_PATTERNS = */tests/* \
*/openmmapi/src/* \
*/.svn/* \
*/internal/* \
*/olla/include/openmm/kernels.h \
*/DrudeKernels.h \
*/RpmdKernels.h \
                         */amoebaKernels.h
EXCLUDE_SYMBOLS =
EXAMPLE_PATH =
EXAMPLE_PATTERNS =
EXAMPLE_RECURSIVE = NO
IMAGE_PATH =
INPUT_FILTER =
FILTER_PATTERNS =
FILTER_SOURCE_FILES = NO
#---------------------------------------------------------------------------
# configuration options related to source browsing
#---------------------------------------------------------------------------
SOURCE_BROWSER = NO
INLINE_SOURCES = NO
STRIP_CODE_COMMENTS = YES
REFERENCED_BY_RELATION = YES
REFERENCES_RELATION = YES
REFERENCES_LINK_SOURCE = YES
USE_HTAGS = NO
VERBATIM_HEADERS = YES
#---------------------------------------------------------------------------
# configuration options related to the alphabetical class index
#---------------------------------------------------------------------------
ALPHABETICAL_INDEX = NO
COLS_IN_ALPHA_INDEX = 5
IGNORE_PREFIX =
#---------------------------------------------------------------------------
# configuration options related to the HTML output
#---------------------------------------------------------------------------
GENERATE_HTML = YES
HTML_OUTPUT = api-c++
HTML_FILE_EXTENSION = .html
HTML_HEADER =
HTML_FOOTER =
HTML_STYLESHEET =
HTML_ALIGN_MEMBERS = YES
GENERATE_HTMLHELP = NO
HTML_DYNAMIC_SECTIONS = YES
CHM_FILE =
HHC_LOCATION =
GENERATE_CHI = NO
BINARY_TOC = NO
TOC_EXPAND = NO
DISABLE_INDEX = NO
ENUM_VALUES_PER_LINE = 4
GENERATE_TREEVIEW = YES
TREEVIEW_WIDTH = 250
#---------------------------------------------------------------------------
# configuration options related to the LaTeX output
#---------------------------------------------------------------------------
GENERATE_LATEX = NO
LATEX_OUTPUT = latex
LATEX_CMD_NAME = latex
MAKEINDEX_CMD_NAME = makeindex
COMPACT_LATEX = NO
PAPER_TYPE = a4wide
EXTRA_PACKAGES =
LATEX_HEADER =
PDF_HYPERLINKS = NO
USE_PDFLATEX = NO
LATEX_BATCHMODE = NO
LATEX_HIDE_INDICES = NO
#---------------------------------------------------------------------------
# configuration options related to the RTF output
#---------------------------------------------------------------------------
GENERATE_RTF = NO
RTF_OUTPUT = rtf
COMPACT_RTF = NO
RTF_HYPERLINKS = NO
RTF_STYLESHEET_FILE =
RTF_EXTENSIONS_FILE =
#---------------------------------------------------------------------------
# configuration options related to the man page output
#---------------------------------------------------------------------------
GENERATE_MAN = NO
MAN_OUTPUT = man
MAN_EXTENSION = .3
MAN_LINKS = NO
#---------------------------------------------------------------------------
# configuration options related to the XML output
#---------------------------------------------------------------------------
GENERATE_XML = NO
XML_OUTPUT = xml
XML_SCHEMA =
XML_DTD =
XML_PROGRAMLISTING = YES
#---------------------------------------------------------------------------
# configuration options for the AutoGen Definitions output
#---------------------------------------------------------------------------
GENERATE_AUTOGEN_DEF = NO
#---------------------------------------------------------------------------
# configuration options related to the Perl module output
#---------------------------------------------------------------------------
GENERATE_PERLMOD = NO
PERLMOD_LATEX = NO
PERLMOD_PRETTY = YES
PERLMOD_MAKEVAR_PREFIX =
#---------------------------------------------------------------------------
# Configuration options related to the preprocessor
#---------------------------------------------------------------------------
ENABLE_PREPROCESSING = YES
MACRO_EXPANSION = YES
EXPAND_ONLY_PREDEF = YES
SEARCH_INCLUDES = YES
INCLUDE_PATH =
INCLUDE_FILE_PATTERNS =
PREDEFINED =
EXPAND_AS_DEFINED =
SKIP_FUNCTION_MACROS = YES
#---------------------------------------------------------------------------
# Configuration::additions related to external references
#---------------------------------------------------------------------------
TAGFILES =
GENERATE_TAGFILE = "api-c++/@PROJECT_NAME@DoxygenTagfile"
ALLEXTERNALS = NO
EXTERNAL_GROUPS = YES
PERL_PATH = /usr/bin/perl
#---------------------------------------------------------------------------
# Configuration options related to the dot tool
#---------------------------------------------------------------------------
CLASS_DIAGRAMS = YES
MSCGEN_PATH = /Applications/Doxygen.app/Contents/Resources/
HIDE_UNDOC_RELATIONS = YES
HAVE_DOT = NO
CLASS_GRAPH = YES
COLLABORATION_GRAPH = YES
GROUP_GRAPHS = YES
UML_LOOK = NO
TEMPLATE_RELATIONS = YES
INCLUDE_GRAPH = YES
INCLUDED_BY_GRAPH = YES
CALL_GRAPH = NO
CALLER_GRAPH = NO
GRAPHICAL_HIERARCHY = YES
DIRECTORY_GRAPH = YES
DOT_IMAGE_FORMAT = png
DOT_PATH =
DOTFILE_DIRS =
DOT_GRAPH_MAX_NODES = 50
MAX_DOT_GRAPH_DEPTH = 0
DOT_TRANSPARENT = NO
DOT_MULTI_TARGETS = NO
GENERATE_LEGEND = YES
DOT_CLEANUP = YES
#---------------------------------------------------------------------------
# Configuration::additions related to the search engine
#---------------------------------------------------------------------------
SEARCHENGINE = YES
<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.0 Transitional//EN">
<html>
<head>
<!-- NOTE(review): this page redirects to api-python/ — confirm this file is
     the Python (not C++) API entry point before shipping. -->
<meta http-equiv="REFRESH" content="0;url=api-python/index.html"></head>
<body>
</body>
</html>
\ No newline at end of file
set(STAGING_OUTPUT_FILES "") # Will contain all required package files
file(GLOB STAGING_INPUT_FILES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"*.rst"
"*.py"
"_static/logo.png"
"_static/custom.css"
"_templates/navigation.html"
)
set(WRAPPER_DOXYGEN_DIR "${CMAKE_CURRENT_BINARY_DIR}/doxygen")
file(MAKE_DIRECTORY "${WRAPPER_DOXYGEN_DIR}")
configure_file(
${CMAKE_CURRENT_SOURCE_DIR}/Doxyfile.in
${WRAPPER_DOXYGEN_DIR}/Doxyfile
@ONLY
)
add_custom_command(
OUTPUT "${WRAPPER_DOXYGEN_DIR}/xml/index.xml"
COMMAND "${DOXYGEN_EXECUTABLE}"
DEPENDS "${WRAPPER_DOXYGEN_DIR}/Doxyfile"
WORKING_DIRECTORY "${WRAPPER_DOXYGEN_DIR}"
COMMENT "Parsing OpenMM header files with Doxygen..."
)
foreach(INIT_FILE ${STAGING_INPUT_FILES})
set(infile "${CMAKE_CURRENT_SOURCE_DIR}/${INIT_FILE}")
set(outfile "${CMAKE_CURRENT_BINARY_DIR}/${INIT_FILE}")
add_custom_command(
OUTPUT "${outfile}"
COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${infile}" "${outfile}"
DEPENDS "${infile}"
COMMENT "CMake-copying file ${infile} to ${outfile}")
set(STAGING_OUTPUT_FILES ${STAGING_OUTPUT_FILES} "${outfile}")
endforeach(INIT_FILE ${STAGING_INPUT_FILES})
# Run the bundled (modified) breathe-apidoc script over the Doxygen XML,
# producing one reST stub per public class under generated/.
add_custom_command(
    OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/generated/"
    COMMAND "${PYTHON_EXECUTABLE}" "${CMAKE_CURRENT_BINARY_DIR}/breathe-apidoc.py"
        "--generate=class"
        "--members"
        "--force"
        "--brief-titles"
        "--rename-output"
        # NOTE(review): without VERBATIM, whether the inner single quotes are
        # stripped depends on the generator's shell; if they survive, the
        # script receives the literal prefix 'OpenMM::' (with quotes) and it
        # never matches. Confirm the generated file names on all generators.
        "--remove-prefix='OpenMM::'"
        "--flat-output"
        "--public-only"
        "--quiet"
        "--output-dir=generated"
        "${WRAPPER_DOXYGEN_DIR}/xml/"
    DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/breathe-apidoc.py"
        "${WRAPPER_DOXYGEN_DIR}/xml/index.xml"
    )
add_custom_command(
OUTPUT "${CMAKE_BINARY_DIR}/api-c++/index.html"
COMMAND "${PYTHON_EXECUTABLE}" -m sphinx . "${CMAKE_BINARY_DIR}/api-c++" -W --keep-going # Promote warnings to errors to catch undocumented classes
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/conf.py"
"${CMAKE_CURRENT_BINARY_DIR}/generated/"
"${CMAKE_CURRENT_BINARY_DIR}/index.rst"
"${CMAKE_CURRENT_BINARY_DIR}/forces.rst"
"${CMAKE_CURRENT_BINARY_DIR}/integrators.rst"
"${CMAKE_CURRENT_BINARY_DIR}/extras.rst"
"${CMAKE_CURRENT_BINARY_DIR}/_static/logo.png"
"${CMAKE_CURRENT_BINARY_DIR}/_static/custom.css"
"${CMAKE_CURRENT_BINARY_DIR}/_templates/navigation.html"
"${WRAPPER_DOXYGEN_DIR}/xml/index.xml"
)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/api-c++/)
add_custom_target(C++ApiDocs
DEPENDS ${CMAKE_BINARY_DIR}/api-c++/index.html)
INSTALL(DIRECTORY "${CMAKE_BINARY_DIR}/api-c++/"
DESTINATION "docs/api-c++/")
INSTALL(FILES "${CMAKE_CURRENT_SOURCE_DIR}/../C++ API Reference.html"
DESTINATION "docs/")
../../api-python/_static/custom.css
\ No newline at end of file
../../api-python/_static/logo.png
\ No newline at end of file
../../api-python/_templates/navigation.html
\ No newline at end of file
# -*- coding: utf-8 -*-
"""
breathe.apidoc
~~~~~~~~~~~~~~
Parses doxygen XML tree looking for C/C++ modules and creates ReST files
appropriately to create code documentation with Sphinx. It also creates a
modules index (See TYPEDICT below.).
This is derived from the "sphinx-autopackage" script, which is:
Copyright 2008 Société des arts technologiques (SAT),
http://www.sat.qc.ca/
:copyright: Originally by Sphinx Team, C++ modifications by Tatsuyuki Ishi
:license: BSD, see LICENSE for details.
"""
from __future__ import print_function
import argparse
import errno
import os
import sys
import xml.etree.ElementTree
from breathe import __version__
# Account for FileNotFoundError in Python 2, where it does not exist.
# IOError is broader but suffices here: write_file only uses it to detect
# a missing previous output file.
try:
    FileNotFoundError
except NameError:
    FileNotFoundError = IOError
# Reference: Doxygen XSD schema file, CompoundKind only.
# Only the compound kinds that breathe supports are included.
# Maps the Doxygen kind identifier to an English display name.
TYPEDICT = {
    "class": "Class",
    "interface": "Interface",
    "struct": "Struct",
    "union": "Union",
    "file": "File",
    "namespace": "Namespace",
    "group": "Group",
}
# Types that accept the :members: option.
MEMBERS_TYPES = ["class", "group", "interface", "namespace", "struct"]
def print_info(msg, args):
    """Print an informational message unless --quiet was requested."""
    if args.quiet:
        return
    print(msg)
def write_file(name, text, args):
    """Write the output file for module/package <name>.

    Honors --dry-run (log only), --force (overwrite existing files) and
    --flat-output (strip any directory part from <name>). When the file
    already holds exactly <text> it is left untouched, which keeps mtimes
    stable for incremental documentation builds.
    """
    if args.outflat:
        name = os.path.basename(name)
    fname = os.path.join(args.destdir, "%s.%s" % (name, args.suffix))
    if args.dryrun:
        print_info("Would create file %s." % fname, args)
        return
    if not args.force and os.path.isfile(fname):
        print_info("File %s already exists, skipping." % fname, args)
    else:
        print_info("Creating file %s." % fname, args)
        if not os.path.exists(os.path.dirname(fname)):
            try:
                os.makedirs(os.path.dirname(fname))
            except OSError as exc:  # Guard against race condition
                if exc.errno != errno.EEXIST:
                    raise
        try:
            # Compare against existing content so unchanged files are not
            # rewritten (avoids needless downstream rebuilds).
            with open(fname, "r") as target:
                orig = target.read()
                if orig == text:
                    print_info("File %s up to date, skipping." % fname, args)
                    return
        except FileNotFoundError:
            # Don't mind if it isn't there
            pass
        with open(fname, "w") as target:
            target.write(text)
def format_heading(level, text):
    """Create a reST heading of <level> [1, 2 or 3 supported]."""
    underline_chars = ("=", "-", "~")
    bar = underline_chars[level - 1] * len(text)
    return "%s\n%s\n\n" % (text, bar)
def format_directive(package_type, package, args):
    """Build the breathe directive text for <package>, with options."""
    parts = [".. doxygen%s:: %s\n" % (package_type, package)]
    if args.project:
        parts.append(" :project: %s\n" % args.project)
    if args.members and package_type in MEMBERS_TYPES:
        parts.append(" :members:\n")
    return "".join(parts)
def create_package_file(package, package_type, package_id, args):
    """Build the reST text for one compound and write it out.

    The output file name is the package name (--rename-output) or the
    Doxygen refid, optionally shortened via --remove-prefix.
    """
    text = f".. _{package}:\n\n"
    if args.brieftitles:
        # Use only the last "::"-separated component for the title.
        _, _, brief = package.rpartition("::")
        text += format_heading(1, f"``{brief}``")
    else:
        text += format_heading(1, "%s %s" % (TYPEDICT[package_type], package))
    text += format_directive(package_type, package, args)
    outname = package if args.packagenames else package_id
    # --remove-prefix is optional; argparse leaves it as None when absent,
    # and str.startswith(None) raises TypeError. Treat None as "no prefix".
    prefix = args.removeprefix or ""
    if prefix and outname.startswith(prefix):
        outname = outname[len(prefix):]
    write_file(os.path.join(package_type, outname), text, args)
def create_modules_toc_file(key, value, args):
    """Create the module's index: a glob toctree over <key>/*."""
    if not os.path.isdir(os.path.join(args.destdir, key)):
        # Nothing of this kind was generated; skip the index file.
        return
    pieces = [
        format_heading(1, "%s list" % value),
        ".. toctree::\n",
        " :glob:\n\n",
        " %s/*\n" % key,
    ]
    write_file("%slist" % key, "".join(pieces), args)
def filter_package(refid, kind, args) -> bool:
    """Decide whether the compound <refid> of type <kind> should be emitted.

    Filters by the requested output types (--generate) and, with
    --public-only, by whether the compound's own XML file contains at
    least one compounddef with prot="public".
    """
    # Skip over types that weren't requested
    if kind not in args.outtypes:
        return False
    if args.publiconly:
        package = xml.etree.ElementTree.parse(
            os.path.join(args.rootpath, refid + ".xml")
        )
        # Plain string: the previous f-string had no placeholders.
        if not package.findall('.//compounddef[@prot="public"]'):
            return False
    return True
def recurse_tree(args):
    """Walk the Doxygen index and emit one ReST file per kept compound."""
    index_path = os.path.join(args.rootpath, "index.xml")
    index = xml.etree.ElementTree.parse(index_path)
    # Assuming this is a valid Doxygen index: each child is a <compound>.
    for compound in index.getroot():
        kind = compound.get("kind")
        refid = compound.get("refid")
        if not filter_package(refid, kind, args):
            continue
        create_package_file(compound.findtext("name"), kind, refid, args)
class TypeAction(argparse.Action):
    """argparse action that parses a comma-separated list of compound kinds.

    Defaults to every kind in TYPEDICT and rejects unknown values.
    """

    def __init__(self, option_strings, dest, **kwargs):
        super(TypeAction, self).__init__(option_strings, dest, **kwargs)
        self.default = TYPEDICT.keys()
        self.metavar = ",".join(TYPEDICT.keys())

    def __call__(self, parser, namespace, values, option_string=None):
        selected = values.split(",")
        for kind in selected:
            if kind not in TYPEDICT:
                raise ValueError("%s not a valid option" % kind)
        setattr(namespace, self.dest, selected)
def main():
    """Parse and check the command line arguments, then run generation."""
    parser = argparse.ArgumentParser(
        description="""\
Parse XML created by Doxygen in <rootpath> and create one reST file with
breathe generation directives per definition in the <DESTDIR>.
Note: By default this script will not overwrite already created files.""",
        formatter_class=argparse.RawDescriptionHelpFormatter,
    )
    parser.add_argument(
        "-o",
        "--output-dir",
        action="store",
        dest="destdir",
        help="Directory to place all output",
        required=True,
    )
    parser.add_argument(
        "-f",
        "--force",
        action="store_true",
        dest="force",
        help="Overwrite existing files",
    )
    parser.add_argument(
        "-m",
        "--members",
        action="store_true",
        dest="members",
        help="Include members for types: %s" % MEMBERS_TYPES,
    )
    parser.add_argument(
        "-n",
        "--dry-run",
        action="store_true",
        dest="dryrun",
        help="Run the script without creating files",
    )
    parser.add_argument(
        "-T",
        "--no-toc",
        action="store_true",
        dest="notoc",
        help="Don't create a table of contents file",
    )
    parser.add_argument(
        "-s",
        "--suffix",
        action="store",
        dest="suffix",
        help="file suffix (default: rst)",
        default="rst",
    )
    parser.add_argument(
        "-p",
        "--project",
        action="store",
        dest="project",
        help="project to add to generated directives",
    )
    parser.add_argument(
        "-g",
        "--generate",
        action=TypeAction,
        dest="outtypes",
        help="types of output to generate, comma-separated list",
    )
    parser.add_argument(
        "-b",
        "--brief-titles",
        action="store_true",
        dest="brieftitles",
        help="Use only the last part of the compoundname for the title",
    )
    parser.add_argument(
        "-r",
        "--rename-output",
        action="store_true",
        dest="packagenames",
        help="Rename output RST files to match input package names",
    )
    parser.add_argument(
        "--remove-prefix",
        action="store",
        dest="removeprefix",
        # Default to "" rather than None: create_package_file calls
        # outname.startswith(args.removeprefix), which raises TypeError
        # on None when this flag is omitted.
        default="",
        help="Remove a prefix from output file names",
    )
    parser.add_argument(
        "-F",
        "--flat-output",
        action="store_true",
        dest="outflat",
        help="Place all output directly in output directory, no subdirectories",
    )
    parser.add_argument(
        "-P",
        "--public-only",
        action="store_true",
        dest="publiconly",
        help="Only process objects that are marked as @public",
    )
    parser.add_argument(
        "-q",
        "--quiet",
        action="store_true",
        dest="quiet",
        help="suppress informational messages",
    )
    parser.add_argument(
        "--version",
        action="version",
        version="Breathe (breathe-apidoc) %s, modified for OpenMM" % __version__,
    )
    parser.add_argument("rootpath", type=str, help="The directory contains index.xml")
    args = parser.parse_args()
    # Normalize a user-supplied ".rst"-style suffix to "rst".
    if args.suffix.startswith("."):
        args.suffix = args.suffix[1:]
    if not os.path.isdir(args.rootpath):
        print("%s is not a directory." % args.rootpath, file=sys.stderr)
        sys.exit(1)
    if "index.xml" not in os.listdir(args.rootpath):
        print("%s does not contain a index.xml" % args.rootpath, file=sys.stderr)
        sys.exit(1)
    if not os.path.isdir(args.destdir):
        if not args.dryrun:
            os.makedirs(args.destdir)
    args.rootpath = os.path.abspath(args.rootpath)
    recurse_tree(args)
    if not args.notoc:
        # Emit one "<kind>list" index per generated type.
        for key in args.outtypes:
            create_modules_toc_file(key, TYPEDICT[key], args)


# So program can be started with "python -m breathe.apidoc ..."
if __name__ == "__main__":
    main()
# Sphinx configuration for the OpenMM C++ API reference.
# @...@ placeholders are substituted by CMake's configure_file().
import os
import sys
extensions = ["sphinx.ext.mathjax", "breathe"]
autosummary_generate = False
autodoc_member_order = "bysource"
# Breathe: where to find the Doxygen XML, keyed by project name.
breathe_projects = {
    "api-c++": "doxygen/xml",
}
breathe_default_project = "api-c++"
# Tell sphinx what the primary language being documented is.
primary_domain = "cpp"
# Tell sphinx what the pygments highlight language should be.
highlight_language = "cpp"
source_suffix = ".rst"
master_doc = "index"
project = u"OpenMM C++ API"
copyright = u"2015, Stanford University and the Authors"
version = "@OPENMM_MAJOR_VERSION@.@OPENMM_MINOR_VERSION@"
release = "@OPENMM_MAJOR_VERSION@.@OPENMM_MINOR_VERSION@"
exclude_patterns = ["_build", "_templates"]
html_static_path = ["_static"]
templates_path = ["_templates"]
pygments_style = "sphinx"
# Alabaster theme: logo, GitHub linkage, and the sidebar's external
# navigation links (rendered by _templates/navigation.html).
html_theme = "alabaster"
html_theme_options = {
    "github_button": False,
    "github_user": "openmm",
    "github_repo": "openmm",
    "logo_name": True,
    "logo": "logo.png",
    "extra_nav_links": [
        {
            "title": "OpenMM.org",
            "uri": "https://openmm.org",
            "relative": False,
        },
        {
            "title": "User's Manual",
            "uri": "../userguide/",
            "relative": True,
        },
        {
            "title": "Developer Guide",
            "uri": "../developerguide/",
            "relative": True,
        },
        {
            "title": "Python API reference",
            "uri": "../api-python/",
            "relative": True,
        },
        {
            "title": "Cookbook & Tutorials",
            "uri": "https://openmm.github.io/openmm-cookbook/",
            "relative": False,
        },
        {
            "title": "GitHub",
            "uri": "https://github.com/openmm",
            "relative": False,
        },
    ],
    "show_relbar_bottom": True,
}
html_sidebars = {
    "**": [
        "about.html",
        "searchbox.html",
        "navigation.html",
    ]
}
doxygen_xml = "doxygen/xml"
=============
Extra classes
=============
Tabulated functions
===================
These classes use table of values to define a mathematical function and can be
used by various :ref:`custom forces <custom-forces>`.
The :ref:`OpenMM::TabulatedFunction` class is an abstract class that the other classes
extend.
.. toctree::
:maxdepth: 2
generated/TabulatedFunction
generated/Continuous1DFunction
generated/Continuous2DFunction
generated/Continuous3DFunction
generated/Discrete1DFunction
generated/Discrete2DFunction
generated/Discrete3DFunction
Virtual Sites
=============
A virtual site is a particle whose position is computed directly from the
positions of other particles. The :ref:`OpenMM::VirtualSite` class is an abstract
class that the other classes extend.
.. toctree::
:maxdepth: 2
generated/VirtualSite
generated/LocalCoordinatesSite
generated/OutOfPlaneSite
generated/ThreeParticleAverageSite
generated/TwoParticleAverageSite
Serialization
=============
These classes are used to serialize other objects, allowing them to be stored on
disk.
.. toctree::
:maxdepth: 2
generated/SerializationNode
generated/SerializationProxy
generated/XmlSerializer
Other classes
=============
These classes don't fit neatly into the other categories, but that is not to say
that they aren't important!
.. toctree::
:maxdepth: 2
generated/LocalEnergyMinimizer
generated/MinimizationReporter
generated/NoseHooverChain
generated/OpenMMException
generated/Vec3
.. _forces:
======
Forces
======
The ``Force`` abstract class
============================
The ``Force`` objects added to a ``System`` define the behavior of the
particles. ``Force`` is an abstract class; subclasses implement specific behaviors. Classes that extend ``Force`` may implement actual physical forces, or any number of processes that either actually apply forces to particles or directly modify their positions or momenta.
.. toctree::
:maxdepth: 2
generated/Force
Common bonded and non-bonded forces
===================================
These classes implement forces that are widely used in biomolecular simulation.
.. toctree::
:maxdepth: 2
generated/CMAPTorsionForce
generated/DrudeForce
generated/GBSAOBCForce
generated/GayBerneForce
generated/HarmonicAngleForce
generated/HarmonicBondForce
generated/NonbondedForce
generated/PeriodicTorsionForce
generated/RBTorsionForce
AMOEBA forces
=============
These forces are used to implement the polarizable AMOEBA force fields.
.. toctree::
:maxdepth: 2
generated/AmoebaGeneralizedKirkwoodForce
generated/AmoebaMultipoleForce
generated/AmoebaTorsionTorsionForce
generated/AmoebaVdwForce
generated/AmoebaWcaDispersionForce
generated/HippoNonbondedForce
Pseudo-forces
=============
These inherit from ``Force``, but do not describe physical forces. They are used
to implement thermostats or barostats, or otherwise modify the simulation from
step to step. They are conceptually closer to modifications to the integrator,
but providing them as a ``Force`` simplifies implementation and allows them to
be combined in arbitrary ways.
.. toctree::
:maxdepth: 2
generated/AndersenThermostat
generated/ATMForce
generated/CMMotionRemover
generated/MonteCarloAnisotropicBarostat
generated/MonteCarloBarostat
generated/MonteCarloFlexibleBarostat
generated/MonteCarloMembraneBarostat
generated/RMSDForce
generated/RPMDMonteCarloBarostat
.. _custom-forces:
Customizing ``Force``
=====================
OpenMM provides a number of classes that make it easier to implement custom
forces for common scenarios. These classes implement constructors that take an
algebraic expression as a string. The class is instantiated (not extended) to
provide a ``Force`` object that efficiently implements the provided
expression.
.. toctree::
:maxdepth: 2
generated/CustomAngleForce
generated/CustomBondForce
generated/CustomCVForce
generated/CustomCentroidBondForce
generated/CustomCompoundBondForce
generated/CustomExternalForce
generated/CustomGBForce
generated/CustomHbondForce
generated/CustomManyParticleForce
generated/CustomNonbondedForce
generated/CustomTorsionForce
==============
OpenMM C++ API
==============
The C++ API provides information about the classes and methods available in OpenMM for C++ developers. OpenMM uses an object-oriented API that makes all its functionality available through a small number of classes.
Core classes
============
.. toctree::
:maxdepth: 1
:hidden:
generated/System
generated/Context
generated/State
generated/Platform
:cpp:class:`OpenMM::System`
---------------------------
A ``System`` specifies generic properties of the molecular system to be
simulated: the number of particles it contains, the mass of each one, the size
of the periodic box, and so on. The interactions between the particles are
specified through a set of :ref:`Force <forces>` objects that are added to the
``System``. Force field specific parameters, such as particle charges, are
stored in these ``Force`` objects, not as direct properties of the ``System``.
:cpp:class:`OpenMM::Context`
----------------------------
A ``Context`` stores all of the state information for a simulation: particle
positions and velocities, as well as arbitrary parameters defined by the
``Forces`` in the System. It is possible to create multiple ``Contexts`` for a
single ``System``, and thus have multiple simulations of that ``System`` in
progress at the same time. ``Context`` does not provide methods for accessing
state variables directly; they must be read via a ``State`` object.
:cpp:class:`OpenMM::State`
--------------------------
A ``State`` object must be constructed before data can be read from a
simulation. State variables are not accessible directly via a ``Context`` in
order to make explicit the precise time that a variable reflects. A ``State``
is created by calling a method on a ``Context`` and stores only the information
requested at invocation.
:cpp:class:`OpenMM::Platform`
-----------------------------
A ``Platform`` is a single implementation of OpenMM at a low level. This allows
the same high level API documented here to be used on all sorts of compute
hardware, from GPUs to supercomputers. A ``Platform`` implements some set of
kernels, which define which operations it supports. Writing a new ``Platform``
allows OpenMM to be ported to new hardware or to be implemented in a new way
without rewriting the entire application.
Forces
======
``Force`` objects define the behavior of the particles in a ``System``. The
``Force`` class is actually slightly more general than its name suggests. A
``Force`` can, indeed, apply forces to particles, but it can also directly
modify particle positions and velocities in arbitrary ways. Some thermostats
and barostats, for example, can be implemented as ``Force`` classes. Examples
of Force subclasses include :cpp:class:`HarmonicBondForce
<OpenMM::HarmonicBondForce>`, :cpp:class:`NonbondedForce
<OpenMM::NonbondedForce>`, and :cpp:class:`MonteCarloBarostat
<OpenMM::MonteCarloBarostat>`.
.. toctree::
:maxdepth: 2
forces
Integrators
===========
An ``Integrator`` implements an algorithm for advancing the simulation through
time. They provide a ``Context`` a means of stepping the simulation forward,
and must be coupled to a ``Context`` to function. Examples of Integrator
subclasses include :cpp:class:`LangevinIntegrator <OpenMM::LangevinIntegrator>`,
:cpp:class:`VerletIntegrator <OpenMM::VerletIntegrator>`, and :cpp:class:`BrownianIntegrator <OpenMM::BrownianIntegrator>`.
.. toctree::
:maxdepth: 2
integrators
Extras
======
OpenMM's public API includes a few more classes that support the above.
.. toctree::
:maxdepth: 2
extras
===========
Integrators
===========
The ``Integrator`` abstract class
=================================
An ``Integrator`` implements an algorithm for advancing the simulation through
time. ``Integrator`` is an abstract class; subclasses implement specific
algorithms.
.. toctree::
:maxdepth: 2
generated/Integrator
General purpose integrators
===========================
These are integrators appropriate for traditional MD and BD simulations.
.. toctree::
:maxdepth: 2
generated/BrownianIntegrator
generated/LangevinIntegrator
generated/LangevinMiddleIntegrator
generated/NoseHooverIntegrator
generated/VariableLangevinIntegrator
generated/VariableVerletIntegrator
generated/VerletIntegrator
Drude integrators
=================
These integrators permit modelling polarization with a Drude particle.
.. toctree::
:maxdepth: 2
generated/DrudeIntegrator
generated/DrudeLangevinIntegrator
generated/DrudeNoseHooverIntegrator
generated/DrudeSCFIntegrator
Ring Polymer Molecular Dynamics integrators
===========================================
The RPMD integrator implements Ring Polymer MD.
.. toctree::
:maxdepth: 2
generated/RPMDIntegrator
Customizing ``Integrator``
==========================
These classes facilitate customization of the integrator. ``CustomIntegrator``
allows a wide variety of integration algorithms to be implemented efficiently
without writing any low-level code. The integrator is built up as a series of
steps, each defined as an algebraic expression. ``CompoundIntegrator`` allows
different integrators to be combined by making it possible to switch the active
integrator in the middle of a simulation.
.. toctree::
:maxdepth: 2
generated/CustomIntegrator
generated/CompoundIntegrator
set(STAGING_OUTPUT_FILES "") # Will contain all required package files
file(GLOB STAGING_INPUT_FILES RELATIVE "${CMAKE_CURRENT_SOURCE_DIR}"
"app.rst.jinja2"
"library.rst.jinja2"
"index.rst"
"render.py"
"conf.py"
"process-docstring.py"
"_static/logo.png"
"_static/custom.css"
"_templates/class.rst"
"_templates/navigation.html"
)
foreach(INIT_FILE ${STAGING_INPUT_FILES})
set(infile "${CMAKE_CURRENT_SOURCE_DIR}/${INIT_FILE}")
set(outfile "${CMAKE_CURRENT_BINARY_DIR}/${INIT_FILE}")
add_custom_command(
OUTPUT "${outfile}"
COMMAND "${CMAKE_COMMAND}" -E copy_if_different "${infile}" "${outfile}"
DEPENDS "${infile}"
COMMENT "CMake-copying file ${infile} to ${outfile}")
set(STAGING_OUTPUT_FILES ${STAGING_OUTPUT_FILES} "${outfile}")
endforeach(INIT_FILE ${STAGING_INPUT_FILES})
add_custom_command(
OUTPUT "${CMAKE_CURRENT_BINARY_DIR}/app.rst"
"${CMAKE_CURRENT_BINARY_DIR}/library.rst"
COMMAND "${PYTHON_EXECUTABLE}" "${CMAKE_CURRENT_BINARY_DIR}/render.py"
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/app.rst.jinja2"
"${CMAKE_CURRENT_BINARY_DIR}/library.rst.jinja2"
"${CMAKE_CURRENT_BINARY_DIR}/render.py"
)
add_custom_command(
OUTPUT "${CMAKE_BINARY_DIR}/api-python/index.html"
COMMAND "${PYTHON_EXECUTABLE}" -m sphinx . "${CMAKE_BINARY_DIR}/api-python"
WORKING_DIRECTORY "${CMAKE_CURRENT_BINARY_DIR}"
DEPENDS "${CMAKE_CURRENT_BINARY_DIR}/conf.py"
"${CMAKE_CURRENT_BINARY_DIR}/process-docstring.py"
"${CMAKE_CURRENT_BINARY_DIR}/app.rst"
"${CMAKE_CURRENT_BINARY_DIR}/library.rst"
"${CMAKE_CURRENT_BINARY_DIR}/index.rst"
"${CMAKE_CURRENT_BINARY_DIR}/_static/logo.png"
"${CMAKE_CURRENT_BINARY_DIR}/_static/custom.css"
"${CMAKE_CURRENT_BINARY_DIR}/_templates/class.rst"
"${CMAKE_CURRENT_BINARY_DIR}/_templates/navigation.html"
PythonInstall
)
file(MAKE_DIRECTORY ${CMAKE_BINARY_DIR}/api-python/)
add_custom_target(PythonApiDocs DEPENDS ${CMAKE_BINARY_DIR}/api-python/index.html)
INSTALL(DIRECTORY "${CMAKE_BINARY_DIR}/api-python/"
DESTINATION "docs/api-python/")
INSTALL(FILES "${CMAKE_CURRENT_SOURCE_DIR}/../Python API Reference.html"
DESTINATION "docs/")
/* Reasonable defaults */
html {
overflow-x: hidden;
overflow-y: scroll;
text-rendering: optimizeLegibility;
text-size-adjust: 100%;
-moz-osx-font-smoothing: grayscale;
-webkit-font-smoothing: antialiased;
}
/* Fix responsiveness */
body {
overflow-x: hidden;
}
div.body {
min-width: unset;
}
@media screen and (max-width: 870px) {
div.sphinxsidebar p.logo {
display: unset;
}
}
@media screen and (max-width: 875px) {
ul {
margin-left: 30px;
}
div.sphinxsidebar {
width: 100vw;
padding: 0;
}
}
/* On mid-width screens let the document span the full viewport. */
@media screen and (min-width: 871px) and (max-width: 940px) {
  div.document {
    width: 100vw;
  }
}
/* When search bar is in nav footer, don't let it stretch too far */
.searchformwrapper {
max-width: 250px;
}
/* Fix next/prev links in footer */
/* Don't just float the whole thing right */
nav#rellinks {
float: unset;
}
nav#rellinks ul {
padding-left: 0;
display: flex;
justify-content: space-between;
flex-wrap: wrap;
}
nav#rellinks li {
line-height: 1.3;
padding: 5px 0px;
}
nav#rellinks li:first-child {
display: block;
text-indent: -17px;
padding-left: 17px;
}
nav#rellinks li + li {
margin-left: auto;
text-align: right;
display: flex;
}
nav#rellinks li + li a {
display: inline-block;
margin-right: 5px;
}
nav#rellinks li + li:before {
content: "";
}
/* Put the title and logo side by side*/
.sphinxsidebarwrapper {
display: flex;
flex-wrap: wrap;
align-items: flex-start;
flex-direction: row-reverse;
justify-content: space-between;
align-content: flex-start;
}
.sphinxsidebar .logo-name {
flex-basis: 140px;
font-size: 20px;
}
.sphinxsidebar p.logo {
flex-basis: 60px;
text-align: right;
display: block;
margin-top: 0 !important;
}
/* Get control over the image */
.sphinxsidebar p.logo a {
height: auto;
display: block;
}
/* Make sure the remaining items use the width of
 * the whole navbar and don't get squished together
 * by flex.
 */
.sphinxsidebar .logo-name ~ * {
  flex-basis: 100%;
}
/* Emulate a placeholder rather than a heading for search */
.sphinxsidebar #searchbox form.search input[type="text"] {
background-image: url("data:image/svg+xml;utf8,<svg xmlns='http://www.w3.org/2000/svg' version='1.1' height='50px' width='120px'><text x='10' y='17' fill='gray' font-size='15'>Search...</text></svg>");
background-repeat: no-repeat;
}
.sphinxsidebar #searchbox form.search input[type="text"]:focus {
background-image: none;
}
/* Hide unwanted elements*/
.sphinxsidebarwrapper #searchbox h3, /* Search heading */
.sphinxsidebarwrapper > h3, /* Navigation heading */
.sphinxsidebarwrapper p:empty, /* Empty elements taking up space */
.sphinxsidebar .logo-name + a[href], /* Inexplicable but ugly link */
.sphinxsidebarwrapper hr /* Horizontal rules */
{
display: none;
}
/* Hide logo on tiny screens */
@media screen and (max-width: 280px) {
div.sphinxsidebar p.logo {
display: none;
}
.sphinxsidebar .logo-name {
flex-basis: 100%;
}
}
/* Style TOC in sidebar more clearly */
.sphinxsidebarwrapper li.toctree-l1 {
padding: 0.15em 0;
line-height: 1.4;
}
.sphinxsidebarwrapper a.current,
.sphinxsidebarwrapper a.current:hover {
text-decoration: none;
border-bottom: none;
cursor: text;
}
/* Tweak spacing */
div.sphinxsidebarwrapper #searchbox {
margin-bottom: 0;
}
div.sphinxsidebarwrapper .nav-toctree > ul {
margin: 5px 0;
}
/* Enlarge space between toctrees and external links */
div.sphinxsidebarwrapper .nav-toctree {
margin-top: 15px;
margin-bottom: 15px;
}
div.sphinxsidebarwrapper .extra-nav-links {
margin-bottom: 0;
}
/* Custom body styling */
/* Center captions of figures, examples, etc. */
.body .caption {
text-align: center;
}
.body .toctree-l1 {
font-weight: bold;
}
.body .toctree-l2 {
font-weight: normal;
}
.body .toctree-l3 {
font-size: 0.8em;
}
/* Fix navbar to top */
@media screen and (min-width: 875px) {
.sphinxsidebar {
position: fixed;
height: 100vh;
overflow-y: hidden;
top: 0;
float: unset !important;
margin-left: 0 !important;
}
.sphinxsidebarwrapper {
height: calc(100% - 60px);
overflow-y: auto;
padding-top: 30px !important;
padding-bottom: 30px !important;
/* Hide scrollbar */
-ms-overflow-style: none; /* IE and Edge */
scrollbar-width: none; /* Firefox */
}
/* Hide scrollbar */
.sphinxsidebarwrapper::-webkit-scrollbar {
display: none;
}
}
/* Space out Breathe a bit more */
.body dl.cpp {
margin-bottom: 20px;
}
.cpp .sig-name {
font-size: 1em;
}
\ No newline at end of file
{{ objname }}
{{ underline }}
.. currentmodule:: {{ module }}
.. autoclass:: {{ objname }}
{% block methods %}
.. automethod:: __init__
{% if methods %}
.. rubric:: Methods
.. autosummary::
{% for item in methods %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
{% block attributes %}
{% if attributes %}
.. rubric:: Attributes
.. autosummary::
{% for item in attributes %}
~{{ name }}.{{ item }}
{%- endfor %}
{% endif %}
{% endblock %}
<div class="navigation-scrollbox">
<div class="nav-toctree">
{{ toctree(includehidden=theme_sidebar_includehidden, collapse=theme_sidebar_collapse) }}
</div>
{% if theme_extra_nav_links %}
<ul class="extra-nav-links">
{% for link in theme_extra_nav_links %}
<li class="toctree-l1">
<a href="{{ pathto(link.uri, 1) if link.relative else link.uri }}">
{{ link.title }}
</a>
</li>
{% endfor %}
</ul>
{% endif %}
</div>
.. _app :
Application Layer
=================
Loaders and Setup
~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
{% for fileclass in fileclasses %}
~{{ fileclass }}
{% endfor %}
Representation and Manipulation
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
~openmm.app.topology.Topology
~openmm.app.topology.Chain
~openmm.app.topology.Residue
~openmm.app.topology.Atom
~openmm.app.modeller.Modeller
Simulation
~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
~openmm.app.forcefield.ForceField
~openmm.app.simulation.Simulation
Reporting Output
~~~~~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
{% for reporter in reporters %}
~{{ reporter }}
{% endfor %}
Extras
~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
{% for extra in app_extras %}
~{{ extra }}
{% endfor %}
Units
~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
:nosignatures:
{% for unit in units %}
~{{ unit }}
{% endfor %}
# -*- coding: utf-8 -*-
# Sphinx configuration for building the OpenMM Python API reference.
import os
import sys
import openmm.version

# Extensions: autosummary/autodoc generate the per-class pages, napoleon
# parses NumPy-style docstrings, and "process-docstring" is the local
# extension in this directory that rewrites C++-flavoured markup left in
# the SWIG-generated docstrings.
extensions = [
    "sphinx.ext.mathjax",
    "sphinx.ext.ifconfig",
    "sphinx.ext.autosummary",
    "sphinx.ext.autodoc",
    "sphinx.ext.napoleon",
    "process-docstring",
]
autosummary_generate = True
autodoc_default_options = {
    "members": True,
    "inherited-members": True,
    "member-order": "bysource",
}

# General project information; the version strings are read from the
# installed openmm package so the docs always match the built library.
source_suffix = ".rst"
master_doc = "index"
project = u"OpenMM Python API"
copyright = u"2015, Stanford University and the Authors"
version = openmm.version.short_version
release = openmm.version.full_version
exclude_patterns = ["_build", "_templates"]
html_static_path = ["_static"]
templates_path = ["_templates"]
pygments_style = "sphinx"

# HTML output: alabaster theme, with sidebar links out to the other
# OpenMM manuals and related sites.
html_theme = "alabaster"
html_theme_options = {
    "github_button": False,
    "github_user": "openmm",
    "github_repo": "openmm",
    "logo_name": True,
    "logo": "logo.png",
    "extra_nav_links": [
        {
            "title": "OpenMM.org",
            "uri": "https://openmm.org",
            "relative": False,
        },
        {
            "title": "User's Manual",
            "uri": "../userguide/",
            "relative": True,
        },
        {
            "title": "Developer Guide",
            "uri": "../developerguide/",
            "relative": True,
        },
        {
            "title": "C++ API reference",
            "uri": "../api-c++/",
            "relative": True,
        },
        {
            "title": "Cookbook & Tutorials",
            "uri": "https://openmm.github.io/openmm-cookbook/",
            "relative": False,
        },
        {
            "title": "GitHub",
            "uri": "https://github.com/openmm",
            "relative": False,
        },
    ],
    "show_relbar_bottom": True,
}
html_sidebars = {
    "**": [
        "about.html",
        "searchbox.html",
        "navigation.html",
    ]
}

# Napoleon settings: NumPy-style docstrings only; parameters and return
# types are rendered as field lists rather than admonitions.
napoleon_google_docstring = False
napoleon_numpy_docstring = True
napoleon_include_private_with_doc = False
napoleon_include_special_with_doc = True
napoleon_use_admonition_for_examples = False
napoleon_use_admonition_for_notes = False
napoleon_use_admonition_for_references = False
napoleon_use_ivar = False
napoleon_use_param = True
napoleon_use_rtype = True
.. currentmodule:: openmm.openmm
OpenMM Python API
=================
The Python API provides information about the classes and methods available in OpenMM for Python developers.
OpenMM consists of two parts. First, there is a set of :ref:`libraries <library>` for performing many types of computations needed for molecular simulations: force evaluation, numerical integration, energy minimization, etc.
Second, there is an :ref:`application layer <app>`, a set of Python libraries providing a high level interface for running simulations. This layer is targeted at computational biologists or other people who want to run simulations, and who may or may not be programmers.
See the user guide for more details.
.. toctree::
:maxdepth: 2
app
library
.. _library :
Library Layer
=============
Core Objects
~~~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
:nosignatures:
~openmm.openmm.System
~openmm.openmm.Context
~openmm.openmm.Platform
~openmm.openmm.State
Forces
~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
:nosignatures:
{% for force in forces %}
~{{ force }}
{% endfor %}
Integrators
~~~~~~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
:nosignatures:
{% for integrator in integrators %}
~{{ integrator }}
{% endfor %}
Extras
~~~~~~
.. autosummary::
:toctree: generated/
:template: class.rst
:nosignatures:
{% for extra in library_extras %}
~{{ extra }}
{% endfor %}
import re
def count_leading_whitespace(s):
    """Return the number of whitespace characters at the start of ``s``.

    A string consisting entirely of whitespace yields its full length.
    """
    # str.lstrip() removes exactly the leading characters for which
    # str.isspace() is true, so the length difference is the count.
    return len(s) - len(s.lstrip())
def process_docstring(app, what, name, obj, options, lines):
    """This hook edits the docstrings to replace "<tt><pre>" html tags,
    Breathe-style verbatim embed blocks, and deprecated markers with
    sphinx directives.

    Connected to Sphinx's ``autodoc-process-docstring`` event, so the
    (app, what, name, obj, options, lines) signature is fixed by Sphinx;
    only ``lines`` is used here, and it is modified in place.
    """
    def repl(m):
        # Re-emit the tag contents as an indented ``.. code-block:: c++``.
        s = m.group(1)
        if not s.startswith(linesep):
            s = linesep + s
        newline = '.. code-block:: c++' + linesep
        return newline + ' ' + s.replace(linesep, linesep + ' ')
    def repl2(m):
        # Wrap the text following @deprecated in a "Deprecated" admonition.
        s = m.group(1)
        if not s.startswith(linesep):
            s = linesep + s
        newline = '|LINEBREAK|.. admonition::|LINEBREAK| Deprecated' + linesep
        return newline + ' ' + s.replace(linesep, linesep + ' ')
    def repl3(m):
        # <i>...</i> becomes rST emphasis.
        s = m.group(1)
        return '*' + s + '*'
    def repl4(m):
        s = m.group(1)
        if s.startswith("embed:rst"):
            # Embedded rST: drop the marker line, strip any comment prefix,
            # then dedent every line to match the first one.
            lines = s.split(linesep)[1:]
            # Match behaviour of Breathe
            if s.startswith("embed:rst:leading-asterisk"):
                lines = [l.replace("*", " ", 1) for l in lines]
            elif s.startswith("embed:rst:leading-slashes"):
                lines = [l.replace("///", " ", 1) for l in lines]
            # Strip leading whitespace to match first line
            to_strip = count_leading_whitespace(lines[0])
            lines = [l[to_strip:] for l in lines]
            return linesep.join(lines)
        else:
            s = m.group(1)
            if not s.startswith(linesep):
                s = linesep + s
            newline = '.. verbatim::' + linesep
            return newline + ' ' + s.replace(linesep, linesep + ' ')
    # Join the docstring into one string so the regexes can span lines; the
    # sentinel stands in for line breaks and is split back out at the end.
    linesep = '|LINEBREAK|'
    joined = linesep.join(lines)
    # Fix: the original pattern wrote (|LINEBREAK|) with unescaped pipes,
    # which the regex engine reads as an empty alternation group, not the
    # literal sentinel.  Escape the pipes (as the @deprecated pattern below
    # already does) so the optional leading line break is matched as intended.
    joined = re.sub(r'<tt><pre>((\|LINEBREAK\|)?.*?)</pre></tt>', repl, joined)
    joined = re.sub(r'<tt>(.*?)</tt>', repl, joined)
    joined = re.sub(r'@deprecated(.*?\|LINEBREAK\|)', repl2, joined, flags=re.IGNORECASE)
    joined = re.sub(r'<i>(.*?)</i>', repl3, joined)
    joined = re.sub(r'<verbatim>(.*?)</verbatim>', repl4, joined)
    # Whitespace-only lines become empty so rST does not see stray indents.
    lines[:] = [(l if not l.isspace() else '') for l in joined.split(linesep)]
def setup(app):
    """Sphinx extension entry point: register the docstring rewriter.

    Hooks ``process_docstring`` so it runs on every docstring collected by
    autodoc before it is parsed as reStructuredText.
    """
    event_name = 'autodoc-process-docstring'
    app.connect(event_name, process_docstring)
def test():
    """Smoke-test the <tt>/<pre> replacement logic of process_docstring."""
    docstring = ['Hello World', '<tt><pre>', 'contents', '</pre></tt>', '', '<tt>contents2</tt>']
    expected = ['Hello World', '.. code-block:: c++', '', ' contents', '', '', '.. code-block:: c++', '', ' contents2']
    # lines is edited in place, so compare the mutated list afterwards.
    process_docstring(None, None, None, None, None, docstring)
    assert docstring == expected
# Run the self-test when this extension module is executed directly.
if __name__ == '__main__':
    test()
"""
The function of this script is to render the Jinja2 templates in the current
directory into input files for sphinx. It introspects the OpenMM Python module
to find all of the classes and formats them for inclusion into the templates.
"""
from os.path import dirname, join, splitext, basename
from glob import glob
import inspect
import jinja2
import openmm
import openmm.app
def fullname(klass):
    """Return the fully qualified '<module>.<name>' identifier of a class."""
    return '{}.{}'.format(klass.__module__, klass.__name__)
def library_template_variables():
    """Create the data structure available to the Jinja2 renderer when
    filling in the templates.

    Extracts every class in ``openmm.openmm`` and returns a dict grouping
    their fully qualified names into four lists: 'integrators', 'forces',
    'library_extras' (everything else worth documenting), and 'units'.
    A few core classes are skipped because the library template lists
    them explicitly.
    """
    mm_klasses = inspect.getmembers(openmm, predicate=inspect.isclass)
    # Force and Integrator subclasses each get their own template section.
    forces = [fullname(k) for _, k in mm_klasses
              if issubclass(k, openmm.openmm.Force)]
    integrators = [fullname(k) for _, k in mm_klasses
                   if issubclass(k, openmm.openmm.Integrator)]
    # Skip everything gathered above, the core classes hard-coded in
    # library.rst.jinja2, and SWIG artifacts not worth documenting.
    skipped = set(forces)
    skipped.update(integrators)
    skipped.update(['openmm.openmm.Platform', 'openmm.openmm.Context',
                    'openmm.openmm.System', 'openmm.openmm.State',
                    'openmm.openmm.SwigPyIterator',
                    'openmm.openmm.OpenMMException'])
    # Lowercase-initial names are treated as non-public helpers.
    extras = [fullname(k) for _, k in mm_klasses
              if fullname(k) not in skipped and not k.__name__[0].islower()]
    # Units-related classes live in the openmm.unit submodule.
    unit_names = [fullname(k) for _, k in
                  inspect.getmembers(openmm.unit, predicate=inspect.isclass)]
    return {
        'integrators': integrators,
        'forces': forces,
        'library_extras': extras,
        'units': unit_names,
    }
def app_template_variables():
    """Create the data structure available to the Jinja2 renderer when
    filling in the templates.

    Extracts every class in ``openmm.app`` and returns a dict grouping
    their fully qualified names into three lists: 'reporters',
    'fileclasses' (loader/setup classes), and 'app_extras'.  The classes
    written out explicitly in the app template are skipped.
    """
    app_klasses = inspect.getmembers(openmm.app, predicate=inspect.isclass)
    # Reporter classes are identified purely by their name suffix.
    reporters = [fullname(k) for n, k in app_klasses if n.endswith('Reporter')]
    # Loader-style classes: anything with "File" in the name, plus
    # CharmmParameterSet which is grouped with them.
    fileclasses = [fullname(k) for n, k in app_klasses
                   if 'File' in n or 'CharmmParameterSet' in n]
    # Skip everything gathered above plus the classes hard-coded in the
    # app template.
    skipped = set(reporters)
    skipped.update(fileclasses)
    skipped.update(['openmm.app.topology.Topology',
                    'openmm.app.topology.Chain',
                    'openmm.app.topology.Residue',
                    'openmm.app.topology.Atom',
                    'openmm.app.modeller.Modeller',
                    'openmm.app.forcefield.ForceField',
                    'openmm.app.simulation.Simulation'])
    # Lowercase-initial names are treated as non-public helpers.
    extras = [fullname(k) for _, k in app_klasses
              if fullname(k) not in skipped and not k.__name__[0].islower()]
    return {
        'reporters': reporters,
        'fileclasses': fileclasses,
        'app_extras': extras,
    }
def main():
    """Render every ``*.jinja2`` template beside this script into the file
    obtained by stripping the ``.jinja2`` suffix, using one shared context
    built from both the library and app introspection."""
    here = dirname(__file__)
    env = jinja2.Environment(loader=jinja2.FileSystemLoader(here))
    context = library_template_variables()
    context.update(app_template_variables())
    for template_fn in map(basename, glob(join(here, '*.jinja2'))):
        output_fn = splitext(template_fn)[0]
        print('Rendering %s to %s...' % (template_fn, output_fn))
        rendered = env.get_template(template_fn).render(context)
        with open(output_fn, 'w') as out:
            out.write(rendered)
# Render the templates when this file is run as a script.
if __name__ == '__main__':
    main()
.. role:: code
.. raw:: html
<style> .code {font-family:monospace;} </style>
<style> .caption {text-align:center;} </style>
.. highlight:: c++
Introduction
############
This guide describes the internal architecture of the OpenMM library. It is
targeted at developers who want to add features to OpenMM, either by modifying
the core library directly or by writing plugins. If you just want to write
applications that use OpenMM, you do not need to read this guide; the User's
Manual tells you everything you need to know. This guide is intended for
people who want to contribute to OpenMM itself.
It is organized as follows:
* Chapter :numref:`the-core-library` describes the architecture of the core OpenMM library. It
discusses how the high level and low level APIs relate to each other, and the
flow of execution between them.
* Chapter :numref:`writing-plugins` describes in detail how to write a plugin. It focuses on the two
most common types of plugins: those which define new Forces, and those which
implement new Platforms.
* Chapter :numref:`the-reference-platform` discusses the architecture of the reference Platform, providing
information relevant to writing reference implementations of new features.
* Chapter :numref:`the-cpu-platform` discusses the architecture of the CPU Platform, providing
information relevant to writing CPU implementations of new features.
* Chapter :numref:`the-opencl-platform` discusses the architecture of the OpenCL Platform, providing
information relevant to writing OpenCL implementations of new features.
* Chapter :numref:`the-cuda-platform` discusses the architecture of the CUDA Platform, providing
information relevant to writing CUDA implementations of new features.
* Chapter :numref:`common-compute` describes the Common Compute framework, which lets you
write a single implementation of a feature that can be used for both OpenCL and CUDA.
This guide assumes you are already familiar with the public API and how to use
OpenMM in applications. If that is not the case, you should first read the
User's Manual and work through some of the example programs. Pay especially
close attention to the “Introduction to the OpenMM Library” chapter, since it
introduces concepts that are important in understanding this guide.
.. role:: code
.. raw:: html
<style> .code {font-family:monospace;} </style>
<style> .caption {text-align:center;} </style>
.. highlight:: c++
.. _the-core-library:
The Core Library
################
OpenMM is based on a layered architecture, as shown in the following diagram:
.. figure:: ../images/ArchitectureLayers.jpg
:align: center
:width: 100%
:autonumber:`Figure,Architecture Layers`\ : OpenMM architecture
The public API layer consists of the classes you access when using OpenMM in an
application: System; Force and its subclasses; Integrator and its subclasses;
and Context. These classes define a public interface but do no computation.
The next layer down consists of “implementation” classes that mirror the public
API classes: ContextImpl, ForceImpl, and a subclass of ForceImpl for each
subclass of Force (HarmonicBondForceImpl, NonbondedForceImpl, etc.). These
objects are created automatically when you create a Context. They store
information related to a particular simulation, and define methods for
performing calculations.
Note that, whereas a Force is logically “part of” a System, a ForceImpl is
logically “part of” a Context. (See :autonumref:`Figure,API Relationships`\ .) If you create many Contexts
for simulating the same System, there is still only one System and only one copy
of each Force in it. But there will be separate ForceImpls for each Context,
and those ForceImpls store information related to their particular Contexts.
.. figure:: ../images/SystemContextRelationships.jpg
:align: center
:autonumber:`Figure,API Relationships`\ : Relationships between public API and implementation layer objects
Also note that there is no “IntegratorImpl” class, because it is not needed.
Integrator is already specific to one Context. Many Contexts can all simulate
the same System, but each of them must have its own Integrator, so information
specific to one simulation can be stored directly in the Integrator.
The next layer down is the OpenMM Low Level API (OLLA). The important classes
in this layer are: Platform; Kernel; KernelImpl and its subclasses; and
KernelFactory. A Kernel is just a reference counted pointer to a KernelImpl;
the real work is done by KernelImpl objects (or more precisely, by instances of
its subclasses). A KernelFactory creates KernelImpl objects, and a Platform
ties together a set of KernelFactories, as well as defining information that
applies generally to performing computations with that Platform.
All of these classes (except Kernel) are abstract. A particular Platform
provides concrete subclasses of all of them. For example, the reference
platform defines a Platform subclass called ReferencePlatform, a KernelFactory
subclass called ReferenceKernelFactory, and a concrete subclass of each abstract
KernelImpl type: ReferenceCalcNonbondedForceKernel extends
CalcNonbondedForceKernel (which in turn extends KernelImpl),
ReferenceIntegrateVerletStepKernel extends IntegrateVerletStepKernel, and so on.
We can understand this better by walking through the entire sequence of events
that takes place when you create a Context. As an example, suppose you create a
System; add a NonbondedForce to it; create a VerletIntegrator; and then create a
Context for them using the reference Platform. Here is what happens.
#. The Context constructor creates a ContextImpl.
#. The ContextImpl calls :code:`createImpl()` on each Force in the System,
which creates an instance of the appropriate ForceImpl subclass.
#. The ContextImpl calls :code:`contextCreated()` on the Platform, which
in turn calls :code:`setPlatformData()` on the ContextImpl. This allows
Platform-specific information to be stored in a ContextImpl. Every Platform has
its own mechanism for storing particle masses, constraint definitions, particle
positions, and so on. ContextImpl therefore allows the Platform to create an
arbitrary block of data and store it where it can be accessed by that Platform’s
kernels.
#. The ContextImpl calls :code:`createKernel()` on the Platform several
times to get instances of various kernels that it needs:
CalcKineticEnergyKernel, ApplyConstraintsKernel, etc.
#. For each kernel, the Platform looks up which KernelFactory has been
registered for that particular kernel. In this case, it will be a
ReferenceKernelFactory.
#. It calls :code:`createKernelImpl()` on the KernelFactory, which
creates and returns an instance of an appropriate KernelImpl subclass:
ReferenceCalcKineticEnergyKernel, ReferenceApplyConstraintsKernel, etc.
#. The ContextImpl loops over all of its ForceImpls and calls
:code:`initialize()` on each one.
#. Each ForceImpl asks the Platform to create whatever kernels it needs. In
this example, NonbondedForceImpl will request a CalcNonbondedForceKernel, and
get back a ReferenceCalcNonbondedForceKernel.
#. The ContextImpl calls :code:`initialize()` on the Integrator which, like
the other objects, requests kernels from the Platform. In this example,
VerletIntegrator requests an IntegrateVerletStepKernel and gets back a
ReferenceIntegrateVerletStepKernel.
At this point, the Context is fully initialized and ready for doing computation.
Reference implementations of various KernelImpls have been created, but they are
always referenced through abstract superclasses. Similarly, data structures
specific to the reference Platform have been created and stored in the
ContextImpl, but the format and content of these structures is opaque to the
ContextImpl. Whenever it needs to access them (for example, to get or set
particle positions), it does so through a kernel (UpdateStateDataKernel in this
case).
Now suppose that you call :code:`step()` on the VerletIntegrator. Here is
what happens to execute each time step.
#. The VerletIntegrator calls :code:`updateContextState()` on the
ContextImpl. This gives each Force an opportunity to modify the state of the
Context at the start of each time step.
#. The ContextImpl loops over its ForceImpls and calls
:code:`updateContextState()` on each one. In this case, our only ForceImpl is
a NonbondedForceImpl, which returns without doing anything. On the other hand,
if we had an AndersenThermostat in our System, its ForceImpl would invoke a
kernel to modify particle velocities.
#. The VerletIntegrator calls :code:`calcForcesAndEnergy()` on the
ContextImpl to request that the forces be computed.
#. The ContextImpl calls :code:`beginComputation()` on its
CalcForcesAndEnergyKernel. This initializes all the forces to zero and does any
other initialization the Platform requires before forces can be computed. For
example, some Platforms construct their nonbonded neighbor lists at this point.
#. The ContextImpl loops over its ForceImpls and calls
:code:`calcForcesAndEnergy()` on each one. In this case, we have a
NonbondedForceImpl which invokes its CalcNonbondedForceKernel to compute forces.
#. Next, the ContextImpl calls :code:`finishComputation()` on its
CalcForcesAndEnergyKernel. This does any additional work needed to determine
the final forces, such as summing the values from intermediate buffers.
#. Finally, the VerletIntegrator invokes its IntegrateVerletStepKernel. This
takes the forces, positions, and velocities that are stored in a Platform-
specific format in the ContextImpl, uses them to compute new positions and
velocities, and stores them in the ContextImpl.
.. role:: code
.. raw:: html
<style> .code {font-family:monospace;} </style>
<style> .caption {text-align:center;} </style>
.. highlight:: c++
.. _writing-plugins:
Writing Plugins
###############
A plugin is a dynamic library that adds new features to OpenMM. It is typically
stored in the :code:`lib/plugins` directory inside your OpenMM installation,
and gets loaded along with all other plugins when the user calls
::
Platform::loadPluginsFromDirectory(Platform::getDefaultPluginsDirectory());
It is also possible to load plugins from a different directory, or to load them
individually by calling :code:`Platform::loadPluginLibrary()`\ .
Every plugin must implement two functions that are declared in the
PluginInitializer.h header file:
::
extern "C" void registerPlatforms();
extern "C" void registerKernelFactories();
When a plugin is loaded, these two functions are invoked to register any
Platforms and KernelFactories defined by the plugin. When many plugins are
loaded at once by calling :code:`Platform::loadPluginsFromDirectory()`\ ,
:code:`registerPlatforms()` is first called on all of them, then
:code:`registerKernelFactories()` is called on all of them. This allows one
plugin to define a Platform, and a different plugin to add KernelFactories to
it; the Platform is guaranteed to be registered by the first plugin before the
second plugin tries to add its KernelFactories, regardless of what order the
plugins happen to be loaded in.
Creating New Platforms
**********************
One common type of plugin defines a new Platform. There are four such plugins
that come with OpenMM: one for the Reference platform, one for the CPU Platform,
one for the CUDA Platform, and one for the OpenCL Platform.
To define a new Platform, you must create subclasses of the various abstract
classes in the OpenMM Low Level API: a subclass of Platform, one or more
subclasses of KernelFactory, and a subclass of each KernelImpl. That is easy to
say, but a huge amount of work to actually do. There are many different
algorithms involved in computing forces, enforcing constraints, performing
integration, and so on, all of which together make up a Platform. Of course,
there is no requirement that every Platform must implement every possible
feature. If you do not provide an implementation of a particular kernel, it
simply means your Platform cannot be used for any simulation that requires that
kernel; if a user tries to do so, an exception will be thrown.
Your plugin’s :code:`registerPlatforms()` function should create an instance
of your Platform subclass, then register it by calling
:code:`Platform::registerPlatform()`\ . You also must register the
KernelFactory for each kernel your Platform supports. This can be done in the
:code:`registerKernelFactories()` function, or more simply, directly in the
Platform’s constructor. You can use as many different KernelFactories as you
want for different kernels, but usually it is simplest to use a single
KernelFactory for all of them. The support for multiple KernelFactories exists
primarily to let plugins add new features to existing Platforms, as described in
the next section.
Creating New Forces
*******************
Another common type of plugin defines new Forces and provides implementations of
them for existing Platforms. (Defining new Integrators is not specifically
discussed here, but the process is very similar.) There are two such plugins
that come with OpenMM. They implement the AMOEBA force field and Drude
oscillators, respectively.
As an example, suppose you want to create a new Force subclass called
StringForce that uses the equations of String Theory to compute the interactions
between particles. You want to provide implementations of it for all four
standard platforms: Reference, CPU, CUDA, and OpenCL.
The first thing to realize is that this *cannot* be done with only a plugin
library. Plugins are loaded dynamically at runtime, and they relate to the low
level API; but you must also provide a public API. Users of your class need to
create StringForce objects and call methods on them. That means providing a
header file with the class declaration, and a (non-plugin) library with the
class definition to link their code against. The implementations for particular
Platforms can be in plugins, but the public API class itself cannot. Or to put
it differently, the full “plugin” (from the user’s perspective) consists of
three parts: the library OpenMM loads at runtime (which is what OpenMM considers
to be the “plugin”), a second library for users to link their code against, and
a header file for them to include in their source code.
To define the API, you will need to create the following classes:
#. StringForce. This is the public API for your force, and users will directly
link against the library containing it.
#. StringForceImpl. This is the ForceImpl subclass corresponding to
StringForce. It should be defined in the same library as StringForce, and
StringForce’s :code:`createImpl()` method should create an instance of it.
#. CalcStringForceKernel. This is an abstract class that extends KernelImpl,
and defines the API by which StringForceImpl invokes its kernel. You only need
to provide a header file for it, not an implementation; those will be provided
by Platforms.
Now suppose you are writing the OpenCL implementation of StringForce. Here are
the classes you need to write:
#. OpenCLCalcStringForceKernel. This extends CalcStringForceKernel and provides
implementations of its virtual methods. The code for this class will probably
be very complicated (and if it actually works, worth a Nobel Prize). It may
execute many different GPU kernels and create its own internal data structures.
But those details are entirely internal to your own code. As long as this class
implements the virtual methods of CalcStringForceKernel, you can do anything you
want inside it.
#. OpenCLStringForceKernelFactory. This is a KernelFactory subclass that knows
how to create instances of OpenCLCalcStringForceKernel.
Both of these classes should be packaged into a dynamic library (.so on Linux,
.dylib on Mac, .dll on Windows) that can be loaded as a plugin. This library
must also implement the two functions from PluginInitializer.h.
:code:`registerPlatforms()` will do nothing, since this plugin does not
implement any new Platforms. :code:`registerKernelFactories()` should call
\ :code:`Platform::getPlatformByName("OpenCL")` to get the OpenCL Platform,
then create a new OpenCLStringForceKernelFactory and call
:code:`registerKernelFactory()` on the Platform to register it. If the OpenCL
Platform is not available, you should catch the exception then return without
doing anything. Most likely this means there is no OpenCL runtime on the
computer your code is running on.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
This source diff could not be displayed because it is too large. You can view the blob instead.
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment