diff --git a/.appveyor.yml b/.appveyor.yml
deleted file mode 100644
index c3d9abd67b7..00000000000
--- a/.appveyor.yml
+++ /dev/null
@@ -1,211 +0,0 @@
-branches:
- only:
- - master
-
-environment:
-
- matrix:
-
- # For Python versions available on Appveyor, see
- # http://www.appveyor.com/docs/installed-software#python
- # The list here is complete at the time of writing.
-
- #- PYTHON_VERSION: 2.7
- # PYTHON: "C:\\Miniconda-x64"
- # CATEGORY: "nightly"
-
- #- PYTHON_VERSION: 3.4
- # PYTHON: "C:\\Miniconda34-x64"
- # CATEGORY: "nightly"
-
- #- PYTHON_VERSION: 3.5
- # PYTHON: "C:\\Miniconda35-x64"
- # CATEGORY: "nightly"
-
- #- PYTHON_VERSION: 3.6
- # PYTHON: "C:\\Miniconda36-x64"
- # CATEGORY: "nightly"
-
- - PYTHON_VERSION: 2.7
- PYTHON: "C:\\Miniconda"
- CATEGORY: "nightly"
- EXTRAS: YES
-
- #- PYTHON_VERSION: 3.4
- # PYTHON: "C:\\Miniconda34-x64"
- # CATEGORY: "nightly"
- # EXTRAS: YES
-
- - PYTHON_VERSION: 3.5
- PYTHON: "C:\\Miniconda35"
- CATEGORY: "nightly"
- EXTRAS: YES
-
- - PYTHON_VERSION: 3.6
- PYTHON: "C:\\Miniconda36"
- CATEGORY: "nightly"
- EXTRAS: YES
-
- - PYTHON_VERSION: 3.7
- PYTHON: "C:\\Miniconda37"
- CATEGORY: "nightly"
- # [191115]: disable extras because of installation dependency
- # issues with Miniconda 3.7 on appveyor
- #EXTRAS: YES
-
-
-install:
- - "SET PATH=%PYTHON%;%PYTHON%\\Scripts;%PYTHON%\\Library\\bin;%PATH%"
- - python --version
- #
- # Set standardized ways to invoke conda for the various channels. We
- # are seeing strange issues where conda-forge and cachemeorg are
- # fighting with anaconda over the version of core packages (notably,
- # conda). The following prevents conda-forge and cacheme.org from
- # overriding anaconda.
- #
- - SET CONDA_INSTALL=conda install -q -y
- - "SET ANACONDA=%CONDA_INSTALL% -c anaconda"
- - "SET CONDAFORGE=%CONDA_INSTALL% -c conda-forge --no-update-deps"
- #
- # Determine if we will use Appveyor's Miniconda or install Anaconda
- # (intermittently one or the other suffers from NumPy failing to load the
- # MKL DLL; See #542, #577
- #
- - SET USING_MINICONDA=1
- #
- # Update conda, then force it to NOT update itself again
- #
- # Somehow, the update from anaconda stalls for Python 3.4. So we're not specifying the channel here.
- #
- - conda config --set always_yes yes
- #- conda update -q -y conda
- - conda config --set auto_update_conda false
- #
- # If we are using full Anaconda instead of Appveyor's MiniConda,
- # install it
- #
- - IF NOT DEFINED USING_MINICONDA (conda install anaconda)
- #
- # Create a virtual environment for this build
- #
- #- conda create -n pyomo_test_env python=%PYTHON_VERSION%
- #- activate pyomo_test_env
- #- "SET CONDAENV=%PYTHON%\\envs\\pyomo_test_env"
- - "echo %PATH%"
- #
- - "SET ADDITIONAL_CF_PKGS=setuptools pip coverage sphinx_rtd_theme"
- #
- # Install extra packages (formerly pyomo.extras)
- #
- # If we are using Miniconda, we need to install additional packages
- # that usually come with the full Anaconda distribution
- #
- - SET MINICONDA_EXTRAS=""
- - IF DEFINED USING_MINICONDA (SET MINICONDA_EXTRAS=numpy scipy ipython openpyxl sympy pyodbc pyyaml networkx xlrd pandas matplotlib dill seaborn)
- #
- - "IF DEFINED EXTRAS (SET ADDITIONAL_CF_PKGS=%ADDITIONAL_CF_PKGS% pymysql pyro4 pint pathos %MINICONDA_EXTRAS%)"
- #- "IF DEFINED EXTRAS (%CONDAFORGE% mkl)"
- #
- # Finally, add any solvers we want to the list
- #
- - "SET ADDITIONAL_CF_PKGS=%ADDITIONAL_CF_PKGS% glpk ipopt"
- #
- # ...and install everything from conda-force in one go
- #
- - "%CONDAFORGE% %ADDITIONAL_CF_PKGS%"
- #
- # While we would like to install codecov using conda (for
- # consistency), there are cases (most recently, in Python 3.5) where
- # the installation is not reliable and codecov is not available after
- # being installed.
- #
- - python -m pip install codecov
- #
- # Install GAMS
- #
- - ps: Start-FileDownload 'https://d37drm4t2jghv5.cloudfront.net/distributions/24.8.5/windows/windows_x64_64.exe'
- - windows_x64_64.exe /SP- /VERYSILENT /NORESTART /DIR=.\gams /NOICONS
- - "SET PATH=%cd%\\gams;%PATH%"
- #
- # Clone but don't install pyomo-model-libraries
- #
- - "git clone https://github.com/Pyomo/pyomo-model-libraries.git"
- - "python -m pip install git+https://github.com/PyUtilib/pyutilib"
- - "python setup.py develop"
-
- # Set up python's coverage for covering subprocesses (important to do
- # here because we want coverage of the download scripts below)
- #
- - "SET BUILD_DIR=%cd%"
- - "SET COVERAGE_PROCESS_START=%BUILD_DIR%\\coveragerc"
- - "copy %BUILD_DIR%\\.coveragerc %COVERAGE_PROCESS_START%"
- - "echo data_file=%BUILD_DIR%\\.coverage >> %COVERAGE_PROCESS_START%"
- - python -c "from distutils.sysconfig import get_python_lib; import os; FILE=open(os.path.join(get_python_lib(),'run_coverage_at_startup.pth'), 'w'); FILE.write('import coverage; coverage.process_startup()'); FILE.close()"
-
- # Configure Pyomo to put the configuration directory here (so that it
- # is both writable, and will be cleared between test runs
- - "SET PYOMO_CONFIG_DIR=%BUILD_DIR%\\config"
-
- # Fetch additional solvers
- #
- - "pyomo download-extensions"
-
- # Report relevant package versions
- #
- - "glpsol -v"
- - "ipopt -v"
- - python --version
-
-build: off
-
-
-test_script:
- # Put your test command here.
- # If you don't need to build C extensions on 64-bit Python 3.3 or 3.4,
- # you can remove "build.cmd" from the front of the command, as it's
- # only needed to support those cases.
- # Note that you must use the environment variable %PYTHON% to refer to
- # the interpreter you're using - Appveyor does not do anything special
- # to put the Python evrsion you want to use on PATH.
- #
- # This block of commands enable tracking of coverage for any
- # subprocesses launched by tests
- - "SET BUILD_DIR=%cd%"
- - "SET COVERAGE_PROCESS_START=%BUILD_DIR%\\coveragerc"
- # Configure Pyomo to put the configuration directory here (so that it
- # is both writable, and will be cleared between test runs
- - "SET PYOMO_CONFIG_DIR=%BUILD_DIR%\\config"
-
- # Run Pyomo tests
- - "test.pyomo -v --cat=%CATEGORY% pyomo %BUILD_DIR%\\pyomo-model-libraries"
-
- # Run documentation tests
- #- "nosetests -v --with-doctest --doctest-extension=.rst doc\\OnlineDocs"
-
-
-#after_test:
- # This step builds your wheels.
- # Again, you only need build.cmd if you're building C extensions for
- # 64-bit Python 3.3/3.4. And you need to use %PYTHON% to get the correct
- # interpreter
- #- "build.cmd %PYTHON%\\python.exe setup.py bdist_wheel"
-
-
-#artifacts:
- # bdist_wheel puts your built wheel in the dist directory
- #- path: dist\*
-
-
-on_success:
- # You can use this step to upload your artifacts to a public website.
- # See Appveyor's documentation for more details. Or you can simply
- # access your wheels from the Appveyor "artifacts" tab for your build.
- #
- # Combine coverage reports over all subprocesses
- - "cd %BUILD_DIR%"
- - dir .cov*
- - "coverage combine %BUILD_DIR%"
- # On some appveyor platforms, the codecov script does not appear to be
- # in the PATH. We will directly import the module (installed above)
- - python -m codecov -X gcov
diff --git a/.codecov.yml b/.codecov.yml
index a02f011d94c..39efc7e8fd5 100644
--- a/.codecov.yml
+++ b/.codecov.yml
@@ -1,4 +1,19 @@
coverage:
range: "50...100"
+ status:
+ project:
+ default:
+ # Allow overall coverage to drop to avoid failures due to code
+ # cleanup or CI unavailability/lag
+ threshold: 5%
+ patch:
+ default:
+ # Force patches to be covered at the level of the codebase
+ threshold: 0%
# ci:
# - !ci.appveyor.com
+codecov:
+ notify:
+ # GHA: 18, Travis: 13, Jenkins: 6
+ after_n_builds: 33
+    wait_for_ci: true
diff --git a/.coin-or/projDesc.xml b/.coin-or/projDesc.xml
index f632bceeca8..5ef39e793b3 100644
--- a/.coin-or/projDesc.xml
+++ b/.coin-or/projDesc.xml
@@ -227,8 +227,8 @@ Carl D. Laird, Chair, Pyomo Management Committee, cdlaird at sandia dot gov
Use explicit overrides to disable use of automated
version reporting.
-->
- 5.6.8
- 5.6.8
+ 5.6.9
+ 5.6.9
diff --git a/.coveragerc b/.coveragerc
index e7d46592c37..34b0503f183 100644
--- a/.coveragerc
+++ b/.coveragerc
@@ -1,9 +1,6 @@
[report]
omit =
- */python?.?/*
- */site-packages/nose/*
- *__init__*
- */setup.py
+ setup.py
*/tests/*
*/tmp/*
@@ -11,3 +8,9 @@ omit =
# "data_file" directive to the end of this file.
[run]
parallel = True
+source =
+ pyomo
+ examples
+omit =
+ # github actions creates a cache directory we don't want measured
+ cache/*
diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 23de77b98af..f6da4169dc5 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -9,7 +9,7 @@
### Legal Acknowledgement
-By contributing to this software project, I agree to the following terms and conditions for my contribution:
+By contributing to this software project, I have read the [contribution guide](https://pyomo.readthedocs.io/en/stable/contribution_guide.html) and agree to the following terms and conditions for my contribution:
1. I agree my contributions are submitted under the BSD license.
2. I represent I am authorized to make the contributions and grant the license. If my employer has rights to intellectual property that includes these contributions, I represent that I have received permission to make contributions and grant the required license on behalf of that employer.
diff --git a/.github/workflows/mac_python_matrix_test.yml b/.github/workflows/mac_python_matrix_test.yml
deleted file mode 100644
index 742e08a5ab4..00000000000
--- a/.github/workflows/mac_python_matrix_test.yml
+++ /dev/null
@@ -1,50 +0,0 @@
-name: continuous-integration/github/pr/osx
-
-on:
- pull_request:
- branches:
- - master
- # Can add additional branches if desired
-
-jobs:
- pyomo-mac-tests:
- name: py${{ matrix.python-version }}
- runs-on: macos-latest
- strategy:
- fail-fast: false
- max-parallel: 5
- matrix:
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8] # All available Python versions
-
- steps:
- - uses: actions/checkout@v1 # Checkout branch(es)
- - name: Set up Python ${{ matrix.python-version }} # Initialize Python version
- uses: actions/setup-python@v1
- with:
- python-version: ${{ matrix.python-version }}
- - name: Install Pyomo dependencies
- run: |
- python -m pip install --upgrade pip
- git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git
- pip install --quiet git+https://github.com/PyUtilib/pyutilib
- python setup.py develop # Install Pyomo
-
- - name: Install Python modules and Pyomo extensions
- run: |
-
- brew update # Install pre-dependencies for pyodbc
- brew install bash gcc
- brew link --overwrite gcc
- brew install pkg-config
- brew install unixodbc
- brew install freetds # Now install Python modules
-
- pip install cython numpy scipy ipython openpyxl sympy pyyaml pyodbc networkx xlrd pandas matplotlib dill seaborn pymysql pyro4 pint pathos
-
- pyomo download-extensions # Get Pyomo extensions
- pyomo build-extensions
- - name: Run nightly, not fragile tests with test.pyomo
- run: |
- pip install nose
- KEY_JOB=1
- test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries # Run nightly, stable tests
diff --git a/.github/workflows/pr_master_test.yml b/.github/workflows/pr_master_test.yml
new file mode 100644
index 00000000000..9adcbe52563
--- /dev/null
+++ b/.github/workflows/pr_master_test.yml
@@ -0,0 +1,411 @@
+name: GitHub CI
+
+on:
+ push:
+ branches:
+ - master
+ pull_request:
+ branches:
+ - master
+
+defaults:
+ run:
+ shell: bash -l {0}
+
+env:
+ PYTHONWARNINGS: ignore::UserWarning
+ PYTHON_BASE_PKGS: >
+ coverage cython dill ipython networkx nose openpyxl pathos
+ pint pymysql pyro4 pyyaml sphinx_rtd_theme sympy xlrd wheel
+ PYTHON_NUMPY_PKGS: >
+ numpy scipy pyodbc pandas matplotlib seaborn
+
+jobs:
+ pyomo-tests:
+ name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.NAME }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest, macos-latest, windows-latest]
+ python: [2.7, 3.5, 3.6, 3.7, 3.8, pypy2, pypy3]
+ mpi: [0]
+ include:
+ - os: ubuntu-latest
+ TARGET: linux
+ PYENV: pip
+
+ - os: macos-latest
+ TARGET: osx
+ PYENV: pip
+
+ - os: windows-latest
+ TARGET: win
+ PYENV: conda
+ PACKAGES: glpk
+
+ - os: ubuntu-latest
+ python: 3.7
+ mpi: 3
+ TARGET: linux
+ PYENV: conda
+ PACKAGES: mpi4py
+ NAME: /mpi
+
+ exclude:
+ - {os: macos-latest, python: pypy2}
+ - {os: macos-latest, python: pypy3}
+ - {os: windows-latest, python: pypy2}
+ - {os: windows-latest, python: pypy3}
+
+
+ steps:
+ - uses: actions/checkout@v2
+
+ # Ideally we would cache the conda downloads; however, each cache is
+ # over 850MB, and with 5 python versions, that would consume 4.2 of
+ # the 5 GB GitHub allows.
+ #
+ #- name: Conda package cache
+ # uses: actions/cache@v1
+ # if: matrix.PYENV == 'conda'
+ # id: conda-cache
+ # with:
+ # path: cache/conda
+ # key: conda-v2-${{runner.os}}-${{matrix.python}}
+
+ - name: Pip package cache
+ uses: actions/cache@v1
+ if: matrix.PYENV == 'pip'
+ id: pip-cache
+ with:
+ path: cache/pip
+ key: pip-v2-${{runner.os}}-${{matrix.python}}
+
+ - name: OS package cache
+ uses: actions/cache@v1
+ id: os-cache
+ with:
+ path: cache/os
+ key: pkg-v2-${{runner.os}}
+
+ - name: TPL package download cache
+ uses: actions/cache@v1
+ id: download-cache
+ with:
+ path: cache/download
+ key: download-v3-${{runner.os}}
+
+ - name: Update OSX
+ if: matrix.TARGET == 'osx'
+ run: |
+ mkdir -p ${GITHUB_WORKSPACE}/cache/os
+ export HOMEBREW_CACHE=${GITHUB_WORKSPACE}/cache/os
+ brew update
+ # Notes:
+ # - install glpk
+ # - pyodbc needs: gcc pkg-config unixodbc freetds
+ for pkg in bash pkg-config unixodbc freetds glpk; do
+ brew list $pkg || brew install $pkg
+ done
+ #brew link --overwrite gcc
+
+ - name: Update Linux
+ if: matrix.TARGET == 'linux'
+ run: |
+ mkdir -p ${GITHUB_WORKSPACE}/cache/os
+ # Notes:
+ # - install glpk
+ # - ipopt needs: libopenblas-dev gfortran liblapack-dev
+ sudo apt-get -o Dir::Cache=${GITHUB_WORKSPACE}/cache/os \
+ install libopenblas-dev gfortran liblapack-dev glpk-utils
+ sudo chmod -R 777 ${GITHUB_WORKSPACE}/cache/os
+
+ - name: Set up Python ${{ matrix.python }}
+ if: matrix.PYENV == 'pip'
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Set up Miniconda Python ${{ matrix.python }}
+ if: matrix.PYENV == 'conda'
+ uses: goanpeca/setup-miniconda@v1
+ with:
+ auto-update-conda: true
+ python-version: ${{ matrix.python }}
+
+ # GitHub actions is very fragile when it comes to setting up various
+    # Python interpreters, especially the setup-miniconda interface.
+ # Per the setup-miniconda documentation, it is important to always
+ # invoke bash as a login shell ('shell: bash -l {0}') so that the
+ # conda environment is properly activated. However, running within
+ # a login shell appears to foul up the link to python from
+ # setup-python. Further, we have anecdotal evidence that
+ # subprocesses invoked through $(python -c ...) and `python -c ...`
+ # will not pick up the python activated by setup-python on OSX.
+ #
+ # Our solution is to define a PYTHON_EXE environment variable that
+ # can be explicitly called within subprocess calls to reach the
+ # correct interpreter. Note that we must explicitly run in a *non*
+ # login shell to set up the environment variable for the
+ # setup-python environments.
+
+ - name: Install Python Packages (pip)
+ if: matrix.PYENV == 'pip'
+ shell: bash
+ run: |
+ python -m pip install --cache-dir cache/pip --upgrade pip
+ # Note: pandas 1.0.3 causes gams 29.1.0 import to fail in python 3.8
+ pip install --cache-dir cache/pip ${PYTHON_BASE_PKGS} \
+ ${{matrix.PACKAGES}}
+ if [[ ${{matrix.python}} != pypy* ]]; then
+ # NumPy and derivatives either don't build under pypy, or if
+ # they do, the builds take forever.
+ pip install --cache-dir cache/pip ${PYTHON_NUMPY_PKGS}
+ fi
+ pip install --cache-dir cache/pip cplex \
+ || echo "WARNING: CPLEX Community Edition is not available"
+ pip install --cache-dir cache/pip xpress \
+ || echo "WARNING: Xpress Community Edition is not available"
+ python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+ % (sys.executable,))'
+
+ - name: Install Python packages (conda)
+ if: matrix.PYENV == 'conda'
+ run: |
+ mkdir -p $GITHUB_WORKSPACE/cache/conda
+ conda config --set always_yes yes
+ conda config --set auto_update_conda false
+ conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda
+ conda info
+ conda config --show-sources
+ conda list --show-channel-urls
+ conda install -q -y -c conda-forge ${PYTHON_BASE_PKGS} \
+ ${PYTHON_NUMPY_PKGS} ${{matrix.PACKAGES}}
+ # Note: CPLEX 12.9 (the last version in conda that supports
+ # Python 2.7) causes a seg fault in the tests.
+ conda install -q -y -c ibmdecisionoptimization cplex=12.10 \
+ || echo "WARNING: CPLEX Community Edition is not available"
+ conda install -q -y -c fico-xpress xpress \
+ || echo "WARNING: Xpress Community Edition is not available"
+ python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+ % (sys.executable,))'
+
+ - name: Setup TPL package directories
+ run: |
+ TPL_DIR="${GITHUB_WORKSPACE}/cache/tpl"
+ mkdir -p "$TPL_DIR"
+ DOWNLOAD_DIR="${GITHUB_WORKSPACE}/cache/download"
+ mkdir -p "$DOWNLOAD_DIR"
+ echo "::set-env name=TPL_DIR::$TPL_DIR"
+ echo "::set-env name=DOWNLOAD_DIR::$DOWNLOAD_DIR"
+
+ - name: Install Ipopt
+ run: |
+ IPOPT_DIR=$TPL_DIR/ipopt
+ echo "::add-path::$IPOPT_DIR"
+ mkdir -p $IPOPT_DIR
+ IPOPT_TAR=${DOWNLOAD_DIR}/ipopt.tar.gz
+ if test ! -e $IPOPT_TAR; then
+ echo "...downloading Ipopt"
+ URL=https://github.com/IDAES/idaes-ext/releases/download/2.0.0
+ if test "${{matrix.TARGET}}" == osx; then
+ echo "IDAES Ipopt not available on OSX"
+ exit 0
+ elif test "${{matrix.TARGET}}" == linux; then
+ curl --retry 8 -L $URL/idaes-solvers-ubuntu1804-64.tar.gz \
+ > $IPOPT_TAR
+ else
+ curl --retry 8 -L $URL/idaes-solvers-windows-64.tar.gz \
+ $URL/idaes-lib-windows-64.tar.gz > $IPOPT_TAR
+ fi
+ fi
+ cd $IPOPT_DIR
+ tar -xzi < $IPOPT_TAR
+
+ - name: Install GAMS
+ # We install using Powershell because the GAMS installer hangs
+ # when launched from bash on Windows
+ shell: pwsh
+ run: |
+ $GAMS_DIR="${env:TPL_DIR}/gams"
+ echo "::add-path::$GAMS_DIR"
+ echo "::set-env name=LD_LIBRARY_PATH::${env:LD_LIBRARY_PATH}:$GAMS_DIR"
+ echo "::set-env name=DYLD_LIBRARY_PATH::${env:DYLD_LIBRARY_PATH}:$GAMS_DIR"
+ $INSTALLER="${env:DOWNLOAD_DIR}/gams_install.exe"
+ $URL="https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ $URL = "$URL/windows/windows_x64_64.exe"
+ } elseif ( "${{matrix.TARGET}}" -eq "osx" ) {
+ $URL = "$URL/macosx/osx_x64_64_sfx.exe"
+ } else {
+ $URL = "$URL/linux/linux_x64_64_sfx.exe"
+ }
+ if (-not (Test-Path "$INSTALLER" -PathType Leaf)) {
+ echo "...downloading GAMS"
+ Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER"
+ }
+ echo "...installing GAMS"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "/SP- /NORESTART /VERYSILENT /DIR=$GAMS_DIR /NOICONS" `
+ -Wait
+ } else {
+ chmod 777 $INSTALLER
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "-q -d $GAMS_DIR" -Wait
+ mv $GAMS_DIR/*/* $GAMS_DIR/.
+ }
+
+ - name: Install GAMS Python bindings
+ run: |
+ GAMS_DIR="$TPL_DIR/gams"
+ py_ver=$($PYTHON_EXE -c 'import sys;v="_%s%s" % sys.version_info[:2] \
+ ;print(v if v != "_27" else "")')
+ if test -e $GAMS_DIR/apifiles/Python/api$py_ver; then
+ echo "Installing GAMS Python bindings"
+ pushd $GAMS_DIR/apifiles/Python/api$py_ver
+ $PYTHON_EXE setup.py install
+ popd
+ fi
+
+ - name: Install BARON
+ shell: pwsh
+ run: |
+ $BARON_DIR="${env:TPL_DIR}/baron"
+ echo "::add-path::$BARON_DIR"
+ $URL="https://www.minlp.com/downloads/xecs/baron/current/"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe"
+ $URL += "baron-win64.exe"
+ } elseif ( "${{matrix.TARGET}}" -eq "osx" ) {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip"
+ $URL += "baron-osx64.zip"
+ } else {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip"
+ $URL += "baron-lin64.zip"
+ }
+ if (-not (Test-Path "$INSTALLER" -PathType Leaf)) {
+ echo "...downloading BARON ($URL)"
+ Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER"
+ }
+ echo "...installing BARON"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "/SP- /NORESTART /VERYSILENT /DIR=$BARON_DIR /NOICONS" `
+ -Wait
+ } else {
+ unzip -q $INSTALLER
+ mv baron-* $BARON_DIR
+ }
+
+ - name: Install GJH_ASL_JSON
+ if: matrix.TARGET != 'win'
+ run: |
+ GJH_DIR="$TPL_DIR/gjh"
+ echo "::add-path::${GJH_DIR}"
+ INSTALL_DIR="${DOWNLOAD_DIR}/gjh"
+ if test ! -e "$INSTALL_DIR/bin"; then
+ mkdir -p "$INSTALL_DIR"
+ INSTALLER="$INSTALL_DIR/gjh_asl_json.zip"
+ URL="https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master"
+ curl --retry 8 -L $URL > $INSTALLER
+ cd $INSTALL_DIR
+ unzip -q $INSTALLER
+ cd gjh_asl_json-master/Thirdparty
+ ./get.ASL
+ cd ..
+ make
+ mv bin "$INSTALL_DIR/bin"
+ fi
+ cp -rp "$INSTALL_DIR/bin" "$GJH_DIR"
+
+ - name: Install Pyomo and PyUtilib
+ run: |
+ echo ""
+ echo "Clone Pyomo-model-libraries..."
+ git clone https://github.com/Pyomo/pyomo-model-libraries.git
+ echo ""
+ echo "Install PyUtilib..."
+ echo ""
+ $PYTHON_EXE -m pip install git+https://github.com/PyUtilib/pyutilib
+ echo ""
+ echo "Install Pyomo..."
+ echo ""
+ $PYTHON_EXE setup.py develop
+ echo ""
+ echo "Set custom PYOMO_CONFIG_DIR"
+ echo ""
+ echo "::set-env name=PYOMO_CONFIG_DIR::${GITHUB_WORKSPACE}/config"
+
+ - name: Set up coverage tracking
+ run: |
+ if test "${{matrix.TARGET}}" == win; then
+ COVERAGE_BASE=${GITHUB_WORKSPACE}\\.cover
+ else
+ COVERAGE_BASE=${GITHUB_WORKSPACE}/.cover
+ fi
+ COVERAGE_RC=${COVERAGE_BASE}_rc
+ echo "::set-env name=COVERAGE_RCFILE::$COVERAGE_RC"
+ echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_RC"
+ cp ${GITHUB_WORKSPACE}/.coveragerc ${COVERAGE_RC}
+ echo "data_file=${COVERAGE_BASE}age" >> ${COVERAGE_RC}
+ SITE_PACKAGES=$($PYTHON_EXE -c "from distutils.sysconfig import \
+ get_python_lib; print(get_python_lib())")
+ echo "Python site-packages: $SITE_PACKAGES"
+ echo 'import coverage; coverage.process_startup()' \
+ > ${SITE_PACKAGES}/run_coverage_at_startup.pth
+
+ - name: Download and install extensions
+ run: |
+ echo ""
+ echo "Pyomo download-extensions"
+ echo ""
+ pyomo download-extensions
+ echo ""
+ echo "Pyomo build-extensions"
+ echo ""
+ pyomo build-extensions --parallel 2
+
+ - name: Report pyomo plugin information
+ run: |
+ pyomo help --solvers || exit 1
+ pyomo help --transformations || exit 1
+ pyomo help --writers || exit 1
+
+ - name: Run Pyomo tests
+ if: matrix.mpi == 0
+ run: |
+ test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries
+
+ - name: Run Pyomo MPI tests
+ if: matrix.mpi != 0
+ run: |
+ # Manually invoke the DAT parser so that parse_table_datacmds.py
+ # is fully generated by a single process before invoking MPI
+ python -c "from pyomo.dataportal.parse_datacmds import \
+ parse_data_commands; parse_data_commands(data='')"
+ mpirun -np ${{matrix.mpi}} --oversubscribe nosetests -v \
+ --eval-attr="mpi and (not fragile)" \
+ pyomo `pwd`/pyomo-model-libraries
+
+ - name: Process code coverage report
+ env:
+ CODECOV_NAME: ${{matrix.TARGET}}/${{matrix.python}}${{matrix.NAME}}
+ run: |
+ coverage combine
+ coverage report -i
+ coverage xml -i
+ i=0
+ while : ; do
+ curl --retry 8 -L https://codecov.io/bash -o codecov.sh
+ bash codecov.sh -Z -X gcov -f coverage.xml
+ if test $? == 0; then
+ break
+ elif test $i -ge 4; then
+ exit 1
+ fi
+ DELAY=$(( RANDOM % 30 + 30))
+ echo "Pausing $DELAY seconds before re-attempting upload"
+ sleep $DELAY
+ done
diff --git a/.github/workflows/push_branch_test.yml b/.github/workflows/push_branch_test.yml
new file mode 100644
index 00000000000..3e40bf528b4
--- /dev/null
+++ b/.github/workflows/push_branch_test.yml
@@ -0,0 +1,410 @@
+name: GitHub Branch CI
+
+on:
+ push:
+ branches-ignore:
+ - master
+
+defaults:
+ run:
+ shell: bash -l {0}
+
+env:
+ PYTHONWARNINGS: ignore::UserWarning
+ PYTHON_BASE_PKGS: >
+ coverage cython dill ipython networkx nose openpyxl pathos
+ pint pymysql pyro4 pyyaml sphinx_rtd_theme sympy xlrd wheel
+ PYTHON_NUMPY_PKGS: >
+ numpy scipy pyodbc pandas matplotlib seaborn
+
+jobs:
+ pyomo-tests:
+ name: ${{ matrix.TARGET }}/${{ matrix.python }}${{ matrix.NAME }}
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+ python: [3.8]
+ mpi: [0]
+ include:
+ - os: ubuntu-latest
+ TARGET: linux
+ PYENV: pip
+
+ - os: macos-latest
+ python: 2.7
+ TARGET: osx
+ PYENV: pip
+
+ - os: windows-latest
+ python: 3.5
+ TARGET: win
+ PYENV: conda
+ PACKAGES: glpk
+
+ - os: ubuntu-latest
+ python: 3.7
+ mpi: 3
+ TARGET: linux
+ PYENV: conda
+ PACKAGES: mpi4py
+ NAME: /mpi
+
+ exclude:
+ - {os: macos-latest, python: pypy2}
+ - {os: macos-latest, python: pypy3}
+ - {os: windows-latest, python: pypy2}
+ - {os: windows-latest, python: pypy3}
+
+
+ steps:
+ - uses: actions/checkout@v2
+
+ # Ideally we would cache the conda downloads; however, each cache is
+ # over 850MB, and with 5 python versions, that would consume 4.2 of
+ # the 5 GB GitHub allows.
+ #
+ #- name: Conda package cache
+ # uses: actions/cache@v1
+ # if: matrix.PYENV == 'conda'
+ # id: conda-cache
+ # with:
+ # path: cache/conda
+ # key: conda-v2-${{runner.os}}-${{matrix.python}}
+
+ - name: Pip package cache
+ uses: actions/cache@v1
+ if: matrix.PYENV == 'pip'
+ id: pip-cache
+ with:
+ path: cache/pip
+ key: pip-v2-${{runner.os}}-${{matrix.python}}
+
+ - name: OS package cache
+ uses: actions/cache@v1
+ id: os-cache
+ with:
+ path: cache/os
+ key: pkg-v2-${{runner.os}}
+
+ - name: TPL package download cache
+ uses: actions/cache@v1
+ id: download-cache
+ with:
+ path: cache/download
+ key: download-v3-${{runner.os}}
+
+ - name: Update OSX
+ if: matrix.TARGET == 'osx'
+ run: |
+ mkdir -p ${GITHUB_WORKSPACE}/cache/os
+ export HOMEBREW_CACHE=${GITHUB_WORKSPACE}/cache/os
+ brew update
+ # Notes:
+ # - install glpk
+ # - pyodbc needs: gcc pkg-config unixodbc freetds
+ for pkg in bash pkg-config unixodbc freetds glpk; do
+ brew list $pkg || brew install $pkg
+ done
+ #brew link --overwrite gcc
+
+ - name: Update Linux
+ if: matrix.TARGET == 'linux'
+ run: |
+ mkdir -p ${GITHUB_WORKSPACE}/cache/os
+ # Notes:
+ # - install glpk
+ # - ipopt needs: libopenblas-dev gfortran liblapack-dev
+ sudo apt-get -o Dir::Cache=${GITHUB_WORKSPACE}/cache/os \
+ install libopenblas-dev gfortran liblapack-dev glpk-utils
+ sudo chmod -R 777 ${GITHUB_WORKSPACE}/cache/os
+
+ - name: Set up Python ${{ matrix.python }}
+ if: matrix.PYENV == 'pip'
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python }}
+
+ - name: Set up Miniconda Python ${{ matrix.python }}
+ if: matrix.PYENV == 'conda'
+ uses: goanpeca/setup-miniconda@v1
+ with:
+ auto-update-conda: true
+ python-version: ${{ matrix.python }}
+
+ # GitHub actions is very fragile when it comes to setting up various
+    # Python interpreters, especially the setup-miniconda interface.
+ # Per the setup-miniconda documentation, it is important to always
+ # invoke bash as a login shell ('shell: bash -l {0}') so that the
+ # conda environment is properly activated. However, running within
+ # a login shell appears to foul up the link to python from
+ # setup-python. Further, we have anecdotal evidence that
+ # subprocesses invoked through $(python -c ...) and `python -c ...`
+ # will not pick up the python activated by setup-python on OSX.
+ #
+ # Our solution is to define a PYTHON_EXE environment variable that
+ # can be explicitly called within subprocess calls to reach the
+ # correct interpreter. Note that we must explicitly run in a *non*
+ # login shell to set up the environment variable for the
+ # setup-python environments.
+
+ - name: Install Python Packages (pip)
+ if: matrix.PYENV == 'pip'
+ shell: bash
+ run: |
+ python -m pip install --cache-dir cache/pip --upgrade pip
+ # Note: pandas 1.0.3 causes gams 29.1.0 import to fail in python 3.8
+ pip install --cache-dir cache/pip ${PYTHON_BASE_PKGS} \
+ ${{matrix.PACKAGES}}
+ if [[ ${{matrix.python}} != pypy* ]]; then
+ # NumPy and derivatives either don't build under pypy, or if
+ # they do, the builds take forever.
+ pip install --cache-dir cache/pip ${PYTHON_NUMPY_PKGS}
+ fi
+ pip install --cache-dir cache/pip cplex \
+ || echo "WARNING: CPLEX Community Edition is not available"
+ pip install --cache-dir cache/pip xpress \
+ || echo "WARNING: Xpress Community Edition is not available"
+ python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+ % (sys.executable,))'
+
+ - name: Install Python packages (conda)
+ if: matrix.PYENV == 'conda'
+ run: |
+ mkdir -p $GITHUB_WORKSPACE/cache/conda
+ conda config --set always_yes yes
+ conda config --set auto_update_conda false
+ conda config --prepend pkgs_dirs $GITHUB_WORKSPACE/cache/conda
+ conda info
+ conda config --show-sources
+ conda list --show-channel-urls
+ conda install -q -y -c conda-forge ${PYTHON_BASE_PKGS} \
+ ${PYTHON_NUMPY_PKGS} ${{matrix.PACKAGES}}
+ # Note: CPLEX 12.9 (the last version in conda that supports
+ # Python 2.7) causes a seg fault in the tests.
+ conda install -q -y -c ibmdecisionoptimization cplex=12.10 \
+ || echo "WARNING: CPLEX Community Edition is not available"
+ conda install -q -y -c fico-xpress xpress \
+ || echo "WARNING: Xpress Community Edition is not available"
+ python -c 'import sys; print("::set-env name=PYTHON_EXE::%s" \
+ % (sys.executable,))'
+
+ - name: Setup TPL package directories
+ run: |
+ TPL_DIR="${GITHUB_WORKSPACE}/cache/tpl"
+ mkdir -p "$TPL_DIR"
+ DOWNLOAD_DIR="${GITHUB_WORKSPACE}/cache/download"
+ mkdir -p "$DOWNLOAD_DIR"
+ echo "::set-env name=TPL_DIR::$TPL_DIR"
+ echo "::set-env name=DOWNLOAD_DIR::$DOWNLOAD_DIR"
+
+ - name: Install Ipopt
+ run: |
+ IPOPT_DIR=$TPL_DIR/ipopt
+ echo "::add-path::$IPOPT_DIR"
+ mkdir -p $IPOPT_DIR
+ IPOPT_TAR=${DOWNLOAD_DIR}/ipopt.tar.gz
+ if test ! -e $IPOPT_TAR; then
+ echo "...downloading Ipopt"
+ URL=https://github.com/IDAES/idaes-ext/releases/download/2.0.0
+ if test "${{matrix.TARGET}}" == osx; then
+ echo "IDAES Ipopt not available on OSX"
+ exit 0
+ elif test "${{matrix.TARGET}}" == linux; then
+ curl --retry 8 -L $URL/idaes-solvers-ubuntu1804-64.tar.gz \
+ > $IPOPT_TAR
+ else
+ curl --retry 8 -L $URL/idaes-solvers-windows-64.tar.gz \
+ $URL/idaes-lib-windows-64.tar.gz > $IPOPT_TAR
+ fi
+ fi
+ cd $IPOPT_DIR
+ tar -xzi < $IPOPT_TAR
+
+ - name: Install GAMS
+ # We install using Powershell because the GAMS installer hangs
+ # when launched from bash on Windows
+ shell: pwsh
+ run: |
+ $GAMS_DIR="${env:TPL_DIR}/gams"
+ echo "::add-path::$GAMS_DIR"
+ echo "::set-env name=LD_LIBRARY_PATH::${env:LD_LIBRARY_PATH}:$GAMS_DIR"
+ echo "::set-env name=DYLD_LIBRARY_PATH::${env:DYLD_LIBRARY_PATH}:$GAMS_DIR"
+ $INSTALLER="${env:DOWNLOAD_DIR}/gams_install.exe"
+ $URL="https://d37drm4t2jghv5.cloudfront.net/distributions/29.1.0"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ $URL = "$URL/windows/windows_x64_64.exe"
+ } elseif ( "${{matrix.TARGET}}" -eq "osx" ) {
+ $URL = "$URL/macosx/osx_x64_64_sfx.exe"
+ } else {
+ $URL = "$URL/linux/linux_x64_64_sfx.exe"
+ }
+ if (-not (Test-Path "$INSTALLER" -PathType Leaf)) {
+ echo "...downloading GAMS"
+ Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER"
+ }
+ echo "...installing GAMS"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "/SP- /NORESTART /VERYSILENT /DIR=$GAMS_DIR /NOICONS" `
+ -Wait
+ } else {
+ chmod 777 $INSTALLER
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "-q -d $GAMS_DIR" -Wait
+ mv $GAMS_DIR/*/* $GAMS_DIR/.
+ }
+
+ - name: Install GAMS Python bindings
+ run: |
+ GAMS_DIR="$TPL_DIR/gams"
+ py_ver=$($PYTHON_EXE -c 'import sys;v="_%s%s" % sys.version_info[:2] \
+ ;print(v if v != "_27" else "")')
+ if test -e $GAMS_DIR/apifiles/Python/api$py_ver; then
+ echo "Installing GAMS Python bindings"
+ pushd $GAMS_DIR/apifiles/Python/api$py_ver
+ $PYTHON_EXE setup.py install
+ popd
+ fi
+
+ - name: Install BARON
+ shell: pwsh
+ run: |
+ $BARON_DIR="${env:TPL_DIR}/baron"
+ echo "::add-path::$BARON_DIR"
+ $URL="https://www.minlp.com/downloads/xecs/baron/current/"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.exe"
+ $URL += "baron-win64.exe"
+ } elseif ( "${{matrix.TARGET}}" -eq "osx" ) {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip"
+ $URL += "baron-osx64.zip"
+ } else {
+ $INSTALLER = "${env:DOWNLOAD_DIR}/baron_install.zip"
+ $URL += "baron-lin64.zip"
+ }
+ if (-not (Test-Path "$INSTALLER" -PathType Leaf)) {
+ echo "...downloading BARON ($URL)"
+ Invoke-WebRequest -Uri "$URL" -OutFile "$INSTALLER"
+ }
+ echo "...installing BARON"
+ if ( "${{matrix.TARGET}}" -eq "win" ) {
+ Start-Process -FilePath "$INSTALLER" -ArgumentList `
+ "/SP- /NORESTART /VERYSILENT /DIR=$BARON_DIR /NOICONS" `
+ -Wait
+ } else {
+ unzip -q $INSTALLER
+ mv baron-* $BARON_DIR
+ }
+
+ - name: Install GJH_ASL_JSON
+ if: matrix.TARGET != 'win'
+ run: |
+ GJH_DIR="$TPL_DIR/gjh"
+ echo "::add-path::${GJH_DIR}"
+ INSTALL_DIR="${DOWNLOAD_DIR}/gjh"
+ if test ! -e "$INSTALL_DIR/bin"; then
+ mkdir -p "$INSTALL_DIR"
+ INSTALLER="$INSTALL_DIR/gjh_asl_json.zip"
+ URL="https://codeload.github.com/ghackebeil/gjh_asl_json/zip/master"
+ curl --retry 8 -L $URL > $INSTALLER
+ cd $INSTALL_DIR
+ unzip -q $INSTALLER
+ cd gjh_asl_json-master/Thirdparty
+ ./get.ASL
+ cd ..
+ make
+ mv bin "$INSTALL_DIR/bin"
+ fi
+ cp -rp "$INSTALL_DIR/bin" "$GJH_DIR"
+
+ - name: Install Pyomo and PyUtilib
+ run: |
+ echo ""
+ echo "Clone Pyomo-model-libraries..."
+ git clone https://github.com/Pyomo/pyomo-model-libraries.git
+ echo ""
+ echo "Install PyUtilib..."
+ echo ""
+ $PYTHON_EXE -m pip install git+https://github.com/PyUtilib/pyutilib
+ echo ""
+ echo "Install Pyomo..."
+ echo ""
+ $PYTHON_EXE setup.py develop
+ echo ""
+ echo "Set custom PYOMO_CONFIG_DIR"
+ echo ""
+ echo "::set-env name=PYOMO_CONFIG_DIR::${GITHUB_WORKSPACE}/config"
+
+ - name: Set up coverage tracking
+ run: |
+ if test "${{matrix.TARGET}}" == win; then
+ COVERAGE_BASE=${GITHUB_WORKSPACE}\\.cover
+ else
+ COVERAGE_BASE=${GITHUB_WORKSPACE}/.cover
+ fi
+ COVERAGE_RC=${COVERAGE_BASE}_rc
+ echo "::set-env name=COVERAGE_RCFILE::$COVERAGE_RC"
+ echo "::set-env name=COVERAGE_PROCESS_START::$COVERAGE_RC"
+ cp ${GITHUB_WORKSPACE}/.coveragerc ${COVERAGE_RC}
+ echo "data_file=${COVERAGE_BASE}age" >> ${COVERAGE_RC}
+ SITE_PACKAGES=$($PYTHON_EXE -c "from distutils.sysconfig import \
+ get_python_lib; print(get_python_lib())")
+ echo "Python site-packages: $SITE_PACKAGES"
+ echo 'import coverage; coverage.process_startup()' \
+ > ${SITE_PACKAGES}/run_coverage_at_startup.pth
+
+ - name: Download and install extensions
+ run: |
+ echo ""
+ echo "Pyomo download-extensions"
+ echo ""
+ pyomo download-extensions
+ echo ""
+ echo "Pyomo build-extensions"
+ echo ""
+ pyomo build-extensions --parallel 2
+
+ - name: Report pyomo plugin information
+ run: |
+ pyomo help --solvers || exit 1
+ pyomo help --transformations || exit 1
+ pyomo help --writers || exit 1
+
+ - name: Run Pyomo tests
+ if: matrix.mpi == 0
+ run: |
+ test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries
+
+ - name: Run Pyomo MPI tests
+ if: matrix.mpi != 0
+ run: |
+ # Manually invoke the DAT parser so that parse_table_datacmds.py
+ # is fully generated by a single process before invoking MPI
+ python -c "from pyomo.dataportal.parse_datacmds import \
+ parse_data_commands; parse_data_commands(data='')"
+ mpirun -np ${{matrix.mpi}} --oversubscribe nosetests -v \
+ --eval-attr="mpi and (not fragile)" \
+ pyomo `pwd`/pyomo-model-libraries
+
+ - name: Process code coverage report
+ env:
+ CODECOV_NAME: ${{matrix.TARGET}}/${{matrix.python}}${{matrix.NAME}}
+ run: |
+ coverage combine
+ coverage report -i
+ coverage xml -i
+ i=0
+ while : ; do
+ curl --retry 8 -L https://codecov.io/bash -o codecov.sh
+ bash codecov.sh -Z -X gcov -f coverage.xml
+ if test $? == 0; then
+ break
+ elif test $i -ge 4; then
+ exit 1
+ fi
+ i=$(( i + 1 )); DELAY=$(( RANDOM % 30 + 30))
+ echo "Pausing $DELAY seconds before re-attempting upload"
+ sleep $DELAY
+ done
diff --git a/.github/workflows/release_wheel_creation.yml b/.github/workflows/release_wheel_creation.yml
new file mode 100644
index 00000000000..f20d306dcc4
--- /dev/null
+++ b/.github/workflows/release_wheel_creation.yml
@@ -0,0 +1,110 @@
+name: Pyomo Release Distribution Creation
+
+on:
+ push:
+ tags:
+ - '*'
+
+jobs:
+ manylinux:
+ name: ${{ matrix.TARGET }}/wheel_creation
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [ubuntu-latest]
+ include:
+ - os: ubuntu-latest
+ TARGET: manylinux
+ python-version: [3.7]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install twine wheel setuptools
+ - name: Build manylinux Python wheels
+ uses: RalfG/python-wheels-manylinux-build@v0.2.2-manylinux2010_x86_64
+ with:
+ python-versions: 'cp27-cp27mu cp35-cp35m cp36-cp36m cp37-cp37m cp38-cp38'
+ build-requirements: 'cython'
+ package-path: ''
+ pip-wheel-args: ''
+ # When locally testing, --no-deps flag is necessary (PyUtilib dependency will trigger an error otherwise)
+ - name: Delete linux wheels
+ run: |
+ sudo rm -rf wheelhouse/*-linux_x86_64.whl
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: manylinux-wheels
+ path: wheelhouse
+ osx:
+ name: ${{ matrix.TARGET }}py${{ matrix.python-version }}/wheel_creation
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [macos-latest]
+ include:
+ - os: macos-latest
+ TARGET: osx
+ python-version: [ 2.7, 3.5, 3.6, 3.7, 3.8 ]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ run: |
+ python -m pip install --upgrade pip
+ pip install twine wheel setuptools cython
+ - name: Build OSX Python wheels
+ run: |
+ python setup.py --with-cython sdist --format=gztar bdist_wheel
+
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: osx-wheels
+ path: dist
+
+
+ windows:
+ name: ${{ matrix.TARGET }}py${{ matrix.python-version }}/wheel_creation
+ runs-on: ${{ matrix.os }}
+ strategy:
+ fail-fast: false
+ matrix:
+ os: [windows-latest]
+ include:
+ - os: windows-latest
+ TARGET: win
+ python-version: [ 3.6, 3.7, 3.8 ]
+ steps:
+ - uses: actions/checkout@v2
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v1
+ with:
+ python-version: ${{ matrix.python-version }}
+ - name: Install dependencies
+ shell: pwsh
+ run: |
+ $env:PYTHONWARNINGS="ignore::UserWarning"
+ Invoke-Expression "python -m pip install --upgrade pip"
+ Invoke-Expression "pip install setuptools twine wheel cython"
+ - name: Build Windows Python wheels
+ shell: pwsh
+ run: |
+ $env:PYTHONWARNINGS="ignore::UserWarning"
+ Invoke-Expression "python setup.py --with-cython sdist --format=gztar bdist_wheel"
+ - name: Upload artifact
+ uses: actions/upload-artifact@v1
+ with:
+ name: win-wheels
+ path: dist
diff --git a/.github/workflows/ubuntu_python_matrix_test.yml b/.github/workflows/ubuntu_python_matrix_test.yml
deleted file mode 100644
index cc6dec8a49b..00000000000
--- a/.github/workflows/ubuntu_python_matrix_test.yml
+++ /dev/null
@@ -1,55 +0,0 @@
-name: continuous-integration/github/pr/linux
-
-on:
- pull_request:
- branches:
- - master
-
-jobs:
- pyomo-linux-tests:
- name: py${{ matrix.python-version }}
- runs-on: ubuntu-18.04
- strategy:
- fail-fast: false
- max-parallel: 5
- matrix:
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8]
- steps:
- - uses: actions/checkout@v1
- - name: Set up Python ${{ matrix.python-version }}
- uses: actions/setup-python@v1
- with:
- python-version: ${{ matrix.python-version }}
- - name: Install dependencies
- run: |
- export PYTHONWARNINGS="ignore::UserWarning"
- echo "Upgrade pip..."
- python -m pip install --upgrade pip
- echo "Install extras..."
- pip install numpy scipy ipython openpyxl sympy pyodbc pyyaml networkx xlrd matplotlib dill
- pip install pandas # Pandas needs to be installed after its dependencies to work correctly for Python 2.7
- pip install seaborn pymysql pyro4 pint pathos
- echo "Installing GAMS..."
- wget -q https://d37drm4t2jghv5.cloudfront.net/distributions/24.8.5/linux/linux_x64_64_sfx.exe
- chmod +x linux_x64_64_sfx.exe
- ./linux_x64_64_sfx.exe -q -d gams
- PATH=$PATH:/gams/gams24.3_linux_x64_64_sfx
- echo "Cloning Pyomo-model-libraries..."
- git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git
- echo "Installing PyUtilib..."
- pip install --quiet git+https://github.com/PyUtilib/pyutilib
- echo "Installing Pyomo..."
- python setup.py develop
- - name: Install extensions
- run: |
- export PYTHONWARNINGS="ignore::UserWarning"
- echo "Download and install extensions..."
- pyomo download-extensions
- pyomo build-extensions
- - name: Run nightly tests with test.pyomo
- run: |
- export PYTHONWARNINGS="ignore::UserWarning"
- echo "Run test.pyomo..."
- pip install nose
- KEY_JOB=1
- test.pyomo -v --cat="nightly" pyomo `pwd`/pyomo-model-libraries
\ No newline at end of file
diff --git a/.github/workflows/win_python_matrix_test.yml b/.github/workflows/win_python_matrix_test.yml
deleted file mode 100644
index 1ef54d13314..00000000000
--- a/.github/workflows/win_python_matrix_test.yml
+++ /dev/null
@@ -1,85 +0,0 @@
-name: continuous-integration/github/pr/win
-
-on:
- pull_request:
- branches:
- - master
-
-jobs:
- pyomo-tests:
- name: py${{ matrix.python-version }}
- runs-on: ${{ matrix.os }}
- strategy:
- fail-fast: false # This flag causes all of the matrix to continue to run, even if one matrix option fails
- max-parallel: 5
- matrix:
- os: ['windows-latest']
- python-version: [2.7, 3.5, 3.6, 3.7, 3.8]
- steps:
- - uses: actions/checkout@v1
- - name: Set up Python ${{ matrix.python-version }} with Miniconda
- uses: goanpeca/setup-miniconda@v1 # Using an action created by user goanpeca to set up different Python Miniconda environments
- with:
- auto-update-conda: true
- python-version: ${{ matrix.python-version }}
- - name: Install dependencies
- shell: pwsh
- run: |
- $env:PYTHONWARNINGS="ignore::UserWarning"
- Write-Host ("Current Enviroment variables: ")
- gci env: | Sort Name
- Write-Host ("")
- Write-Host ("Setting Conda Env Vars... ")
- Write-Host ("")
- $env:CONDA_INSTALL = "conda install -q -y "
- $env:ANACONDA = $env:CONDA_INSTALL + " -c anaconda "
- $env:CONDAFORGE = $env:CONDA_INSTALL + " -c conda-forge --no-update-deps "
- $env:USING_MINICONDA = 1
- $env:ADDITIONAL_CF_PKGS="setuptools pip coverage sphinx_rtd_theme "
- $env:MINICONDA_EXTRAS=""
- $env:MINICONDA_EXTRAS="numpy scipy ipython openpyxl sympy pyodbc pyyaml networkx xlrd pandas matplotlib dill seaborn "
- $env:ADDITIONAL_CF_PKGS=$env:ADDITIONAL_CF_PKGS + "pymysql pyro4 pint pathos " + $env:MINICONDA_EXTRAS
- $env:ADDITIONAL_CF_PKGS=$env:ADDITIONAL_CF_PKGS + " glpk ipopt"
- $env:EXP = $env:CONDAFORGE + $env:ADDITIONAL_CF_PKGS
- Invoke-Expression $env:EXP
- Write-Host ("Installing GAMS")
- Invoke-WebRequest -Uri 'https://d37drm4t2jghv5.cloudfront.net/distributions/24.8.5/windows/windows_x64_64.exe' -OutFile 'windows_x64_64.exe'
- Start-Process -FilePath 'windows_x64_64.exe' -ArgumentList '/SP- /VERYSILENT /NORESTART /DIR=.\gams /NOICONS'
- $env:PATH += $(Get-Location).Path + "\gams"
- Write-Host ("New Shell Environment: ")
- gci env: | Sort Name
- Write-Host ("")
- Write-Host ("Update conda, then force it to NOT update itself again...")
- Write-Host ("")
- Invoke-Expression "conda config --set always_yes yes"
- Invoke-Expression "conda config --set auto_update_conda false"
- Write-Host ("")
- Write-Host ("Clone model library and install PyUtilib...")
- Write-Host ("")
- git clone --quiet https://github.com/Pyomo/pyomo-model-libraries.git
- git clone --quiet https://github.com/PyUtilib/pyutilib.git
- cd pyutilib
- python setup.py develop
- cd ..
- Write-Host ("")
- Write-Host ("Install Pyomo...")
- Write-Host ("")
- python setup.py develop
- - name: Install extensions
- shell: pwsh
- run: |
- $env:PYTHONWARNINGS="ignore::UserWarning"
- Write-Host "Pyomo download-extensions"
- Invoke-Expression "pyomo download-extensions"
- Invoke-Expression "pyomo build-extensions"
- Write-Host "Calling solvers"
- Invoke-Expression "glpsol -v"
- Invoke-Expression "ipopt -v"
- - name: Run nightly tests with test.pyomo
- shell: pwsh
- run: |
- $env:PYTHONWARNINGS="ignore::UserWarning"
- Write-Host "Setup and run nosetests"
- $env:BUILD_DIR = $(Get-Location).Path
- $env:EXP = "test.pyomo -v --cat='nightly' pyomo " + $env:BUILD_DIR + "\pyomo-model-libraries"
- Invoke-Expression $env:EXP
diff --git a/.jenkins.sh b/.jenkins.sh
index 589a672872c..7f716779c6b 100644
--- a/.jenkins.sh
+++ b/.jenkins.sh
@@ -102,8 +102,8 @@ if test -z "$MODE" -o "$MODE" == setup; then
# Set up coverage for this build
export COVERAGE_PROCESS_START=${WORKSPACE}/coveragerc
cp ${WORKSPACE}/pyomo/.coveragerc ${COVERAGE_PROCESS_START}
- echo "source=${WORKSPACE}/pyomo" >> ${COVERAGE_PROCESS_START}
- echo "data_file=${WORKSPACE}/pyomo/.coverage" >> ${COVERAGE_PROCESS_START}
+ echo "data_file=${WORKSPACE}/pyomo/.coverage" \
+ >> ${COVERAGE_PROCESS_START}
echo 'import coverage; coverage.process_startup()' \
> "${LOCAL_SITE_PACKAGES}/run_coverage_at_startup.pth"
fi
@@ -177,6 +177,7 @@ if test -z "$MODE" -o "$MODE" == test; then
# Note, that the PWD should still be $WORKSPACE/pyomo
#
coverage combine || exit 1
+ coverage report -i
export OS=`uname`
if test -z "$CODECOV_TOKEN"; then
coverage xml
@@ -192,7 +193,7 @@ if test -z "$MODE" -o "$MODE" == test; then
| tee .cover.upload
if test $? == 0 -a `grep -i error .cover.upload | wc -l` -eq 0; then
break
- elif test $i -ge 3; then
+ elif test $i -ge 4; then
exit 1
fi
DELAY=$(( RANDOM % 30 + 15))
diff --git a/.travis.yml b/.travis.yml
index 928e75da88c..0e61f8f3419 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -104,9 +104,25 @@ after_success:
# Combine coverage reports over all subprocesses and upload
- ${DOC} find . -maxdepth 10 -name ".cov*"
- ${DOC} coverage combine
- - ${DOC} codecov --env TAG -X gcov
- # Trigger PyomoGallery build, but only when building the master branch
- # Note: this is disabled unless a token is injected through an
- # environment variable
+ - ${DOC} coverage report -i
+ - ${DOC} coverage xml -i
+ - |
+ i=0
+ while : ; do
+ i=$[$i+1]
+ echo "Uploading coverage to codecov (attempt $i)"
+ ${DOC} codecov --env TAG -X gcov -X s3
+ if test $? == 0; then
+ break
+ elif test $i -ge 4; then
+ exit 1
+ fi
+ DELAY=$(( RANDOM % 30 + 30))
+ echo "Pausing $DELAY seconds before re-attempting upload"
+ sleep $DELAY
+ done
+ # Trigger PyomoGallery build, but only when building the master branch
+ # Note: this is disabled unless a token is injected through an
+ # environment variable
- "if [ -n \"${SECRET_TRAVIS_TOKEN}\" -a -n \"${KEY_JOB}\" -a \"${TRAVIS_PULL_REQUEST}\" == false ]; then curl -s -X POST -H 'Content-Type: application/json' -H 'Accept: application/json' -H 'Travis-API-Version: 3' -H 'Authorization: token ${SECRET_TRAVIS_TOKEN}' -d '{\"request\": {\"branch\": \"master\"}}' https://api.travis-ci.org/repo/Pyomo%2FPyomoGallery/requests; fi"
diff --git a/CHANGELOG.txt b/CHANGELOG.txt
index 9fb99a62717..8a10326a073 100644
--- a/CHANGELOG.txt
+++ b/CHANGELOG.txt
@@ -7,6 +7,76 @@ Current Development
-------------------------------------------------------------------------------
+-------------------------------------------------------------------------------
+Pyomo 5.6.9 18 Mar 2020
+-------------------------------------------------------------------------------
+
+- General
+ - Fix bug and improve output formatting in pyomo.util.infeasible (#1226, #1234)
+ - Add 'version' and 'remove_in' arguments to deprecation_warning (#1231)
+ - Change NoArgumentGiven to a class and standardize usage (#1236)
+ - Update GSL URL to track change in AMPL SSL certificate (#1245)
+ - Clean up setup.py (#1227)
+ - Remove legacy build/test/distribution scripts (#1263)
+ - Use dict comprehension for constructing dictionaries (#1241)
+ - Fix report_timing for constructing objects without index_set (#1298)
+ - Add missing import for ftoa (#1320)
+ - Add attempt_import and standardize yaml imports (#1328)
+ - Add get_text_file method to the FileDownloader (#1330)
+ - Add helper function to retrieve solver status (#1335)
+ - Speed up import of pyomo.environ (#1344)
+- Core
+ - Update Units test to handle Pint 0.10 (#1246)
+ - Move blockutil.py from pyomo/core to pyomo/util (#1238)
+ - Deprecate pyomo.connectors (#1237)
+ - Add initial implementation for a MatrixConstraint (#1242)
+ - Fix _BlockData set_value() (#1249)
+ - Raise error on failed Param validation (#1272)
+ - Fix return value for component decorator (#1296)
+ - Change mult. order in taylor_series_expansion for numpy compatibility (#1329)
+ - Deprecate 'Any' being the default Param domain (#1266)
+- Solver Interfaces
+ - Update CPLEX direct interface to support CPLEX 12.10 (#1276)
+ - Shorten GAMS ShortNameLabeler symbols (#1338)
+ - Add branching priorities to CPLEXSHELL (#1300)
+- PySP updates
+ - Added a csvwriter test to the rapper tests (#1318)
+ - Fix csvwriter when NetworkX used to specify the scenario tree (#1321)
+- GDP updates
+ - Update BigM estimation for nonlinear expressions (#1222)
+ - Refactor GDP examples for easier testing (#1289)
+ - Rewrite of BigM transformation (#1129)
+- DAE updates
+ - Add a flatten_dae_variables utility (#1315, #1334)
+- Network updates
+ - Allow disabling split_frac and correct bounds on duplicated variables (#1186)
+- Testing
+ - Remove 'nightly' tests from the 'expensive' suite (#1247)
+ - Set up GitHub actions for Linux, OSX, and Windows testing (#1233, #1232,
+ #1230, #1262, #1277, #1317, #1281, #1323, #1331, #1342)
+ - Clean up Travis driver (#1264)
+ - Update Appveyor driver (#1293, #1343)
+ - Add GitHub Actions workflow for testing forks/branches (#1294)
+ - Update tests to use sys.executable to launch python subprocesses (#1322)
+ - Improve testing and coverage reporting for MPI tests (#1325)
+ - Update codecov config to reduce failing coverage checks on PRs (#1345)
+- Documentation
+ - Remove CBC from installation documentation (#1303)
+ - Add GitHub Actions documentation to the contribution guide (#1316)
+ - Documentation for using indexed components in persistent solver interfaces
+ (#1324)
+ - Documentation for developers on using forks (#1326)
+- Contributed Packages
+ - Deprecate pyomo.contrib.simplemodel (#1250)
+ - Updates to GDPopt, Merge GDPbb into GDPopt (#1255, #1268)
+ - PyNumero updates, redesign of NLP interfaces API, support for Windows,
+ updates to PyNumero.sparse, add MUMPS interface (#1253, #1271, #1273, #1285,
+ #1314)
+ - FBBT fixes and tests (#1291)
+ - Updates to Parmest, support for leave-N-out sampling and data reconciliation,
+ graphics and documentation improvements (#1337)
+ - Fix Benders MPI logic bug and expand parallel test coverage (#1278)
+
-------------------------------------------------------------------------------
Pyomo 5.6.8 13 Dec 2019
-------------------------------------------------------------------------------
diff --git a/README.md b/README.md
index f4de0b879b3..dea07ed9db3 100644
--- a/README.md
+++ b/README.md
@@ -1,6 +1,5 @@
-
-[![Travis Status](https://img.shields.io/travis/Pyomo/pyomo.svg?logo=travis)](https://travis-ci.org/Pyomo/pyomo)
-[![Appveyor Status](https://ci.appveyor.com/api/projects/status/km08tbkv05ik14n9/branch/master?svg=true)](https://ci.appveyor.com/project/WilliamHart/pyomo/branch/master)
+[![Github Actions Status](https://github.com/Pyomo/pyomo/workflows/GitHub%20CI/badge.svg?event=push)](https://github.com/Pyomo/pyomo/actions?query=event%3Apush+workflow%3A%22GitHub+CI%22)
+[![Travis Status](https://img.shields.io/travis/com/Pyomo/pyomo/master?logo=travis)](https://travis-ci.com/Pyomo/pyomo)
[![Jenkins Status](https://img.shields.io/jenkins/s/https/software.sandia.gov/downloads/pub/pyomo/jenkins/Pyomo_trunk.svg?logo=jenkins&logoColor=white)](https://jenkins-srn.sandia.gov/job/Pyomo_trunk)
[![codecov](https://codecov.io/gh/Pyomo/pyomo/branch/master/graph/badge.svg)](https://codecov.io/gh/Pyomo/pyomo)
[![Documentation Status](https://readthedocs.org/projects/pyomo/badge/?version=latest)](http://pyomo.readthedocs.org/en/latest/)
diff --git a/RELEASE.txt b/RELEASE.txt
index 9ec15e06193..4140da5258b 100644
--- a/RELEASE.txt
+++ b/RELEASE.txt
@@ -1,4 +1,4 @@
-We are pleased to announce the release of Pyomo 5.6.8. Pyomo is a collection
+We are pleased to announce the release of Pyomo 5.6.9. Pyomo is a collection
of Python software packages that supports a diverse set of optimization
capabilities for formulating and analyzing optimization models.
@@ -6,6 +6,7 @@ capabilities for formulating and analyzing optimization models.
The following are highlights of the 5.6 release series:
- New expression system enables robust support for pypy
+ - Significant reduction in the time to import pyomo.environ
- Dropped support for Python 2.6
- New contributed packages: preprocessing, parmest, pynumero,
sensitivity_toolbox, petsc interface, mindtpy, fbbt, gdpbb,
diff --git a/doc/OnlineDocs/README.txt b/doc/OnlineDocs/README.txt
index 84ef11ea75c..237dc8d3fcf 100644
--- a/doc/OnlineDocs/README.txt
+++ b/doc/OnlineDocs/README.txt
@@ -3,7 +3,7 @@ GETTING STARTED
0. Install Sphinx
- pip install sphinx
+ pip install sphinx sphinx_rtd_theme
1. Edit documentation
diff --git a/doc/OnlineDocs/_static/theme_overrides.css b/doc/OnlineDocs/_static/theme_overrides.css
index 485dd3c3bd7..8b38496e25e 100644
--- a/doc/OnlineDocs/_static/theme_overrides.css
+++ b/doc/OnlineDocs/_static/theme_overrides.css
@@ -6,3 +6,12 @@ code.docutils.literal{
font-size: 100%;
}
+.wy-table-responsive table td, .wy-table-responsive table th {
+ white-space: normal;
+}
+
+.wy-table-responsive {
+ margin-bottom: 24px;
+ max-width: 100%;
+ overflow: visible;
+}
diff --git a/doc/OnlineDocs/advanced_topics/persistent_solvers.rst b/doc/OnlineDocs/advanced_topics/persistent_solvers.rst
index 729b9d8fb01..aebb0545dd0 100644
--- a/doc/OnlineDocs/advanced_topics/persistent_solvers.rst
+++ b/doc/OnlineDocs/advanced_topics/persistent_solvers.rst
@@ -125,6 +125,34 @@ be modified and then updated with with solver:
>>> m.x.setlb(1.0) # doctest: +SKIP
>>> opt.update_var(m.x) # doctest: +SKIP
+Working with Indexed Variables and Constraints
+----------------------------------------------
+
+The examples above all used simple variables and constraints; in order to use
+indexed variables and/or constraints, the code must be slightly adapted:
+
+>>> for v in indexed_var.values(): # doctest: +SKIP
+... opt.add_var(v)
+>>> for v in indexed_con.values(): # doctest: +SKIP
+... opt.add_constraint(v)
+
+This must be done when removing variables/constraints, too. Not doing this would
+result in AttributeError exceptions, for example:
+
+>>> opt.add_var(indexed_var) # doctest: +SKIP
+>>> # ERROR: AttributeError: 'IndexedVar' object has no attribute 'is_binary'
+>>> opt.add_constraint(indexed_con) # doctest: +SKIP
+>>> # ERROR: AttributeError: 'IndexedConstraint' object has no attribute 'body'
+
+The method "is_indexed" can be used to automate the process, for example:
+
+>>> def add_variable(opt, variable): # doctest: +SKIP
+... if variable.is_indexed():
+... for v in variable.values():
+... opt.add_var(v)
+... else:
+... opt.add_var(variable)
+
Persistent Solver Performance
-----------------------------
In order to get the best performance out of the persistent solvers, use the
diff --git a/doc/OnlineDocs/contributed_packages/mindtpy.rst b/doc/OnlineDocs/contributed_packages/mindtpy.rst
index f6fc8ca665d..c7a2773fec1 100644
--- a/doc/OnlineDocs/contributed_packages/mindtpy.rst
+++ b/doc/OnlineDocs/contributed_packages/mindtpy.rst
@@ -7,12 +7,12 @@ These decomposition algorithms usually rely on the solution of Mixed-Intger Line
(MILP) and Nonlinear Programs (NLP).
MindtPy currently implements the Outer Approximation (OA) algorithm originally described in
-`Duran & Grossmann`_. Usage and implementation
+`Duran & Grossmann, 1986`_. Usage and implementation
details for MindtPy can be found in the PSE 2018 paper Bernal et al.,
(`ref `_,
`preprint `_).
-.. _Duran & Grossmann: https://dx.doi.org/10.1007/BF02592064
+.. _Duran & Grossmann, 1986: https://dx.doi.org/10.1007/BF02592064
Usage of MindtPy to solve a Pyomo concrete model involves:
@@ -33,7 +33,7 @@ An example which includes the modeling approach may be found below.
>>> model.x = Var(bounds=(1.0,10.0),initialize=5.0)
>>> model.y = Var(within=Binary)
- >>> model.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y))
+ >>> model.c1 = Constraint(expr=(model.x-4.0)**2 - model.x <= 50.0*(1-model.y))
>>> model.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y))
>>> model.objective = Objective(expr=model.x, sense=minimize)
@@ -58,6 +58,49 @@ The solution may then be displayed by using the commands
>>> SolverFactory('mindtpy').solve(model, mip_solver='glpk', nlp_solver='ipopt', tee=True)
+Single tree implementation
+---------------------------------------------
+
+MindtPy also supports single tree implementation of Outer Approximation (OA) algorithm, which is known as LP/NLP algorithm originally described in `Quesada & Grossmann`_.
+The LP/NLP algorithm in MindtPy is implemented based on the LazyCallback function in commercial solvers.
+
+.. _Quesada & Grossmann: https://www.sciencedirect.com/science/article/abs/pii/0098135492800288
+
+
+.. note::
+
+   The single tree implementation currently only works with CPLEX.
+   To use the LazyCallback function of CPLEX from Pyomo, the `CPLEX Python API`_ is required.
+   This means both IBM ILOG CPLEX Optimization Studio and the CPLEX-Python modules should be installed on your computer.
+
+
+.. _CPLEX Python API: https://www.ibm.com/support/knowledgecenter/SSSA5P_12.7.1/ilog.odms.cplex.help/CPLEX/GettingStarted/topics/set_up/Python_setup.html
+
+
+A usage example for single tree is as follows:
+
+.. code::
+
+ >>> import pyomo.environ as pyo
+ >>> model = pyo.ConcreteModel()
+
+ >>> model.x = pyo.Var(bounds=(1.0, 10.0), initialize=5.0)
+ >>> model.y = pyo.Var(within=pyo.Binary)
+
+ >>> model.c1 = pyo.Constraint(expr=(model.x-4.0)**2 - model.x <= 50.0*(1-model.y))
+ >>> model.c2 = pyo.Constraint(expr=model.x*pyo.log(model.x)+5.0 <= 50.0*(model.y))
+
+ >>> model.objective = pyo.Objective(expr=model.x, sense=pyo.minimize)
+
+ Solve the model using single tree implementation in MindtPy
+ >>> pyo.SolverFactory('mindtpy').solve(
+ ... model, strategy='OA',
+ ... mip_solver='cplex_persistent', nlp_solver='ipopt', single_tree=True)
+ >>> model.objective.display()
+
+
+
+
MindtPy implementation and optional arguments
---------------------------------------------
diff --git a/doc/OnlineDocs/contributed_packages/parmest/boxplot.png b/doc/OnlineDocs/contributed_packages/parmest/boxplot.png
new file mode 100644
index 00000000000..25bb4da764a
Binary files /dev/null and b/doc/OnlineDocs/contributed_packages/parmest/boxplot.png differ
diff --git a/doc/OnlineDocs/contributed_packages/parmest/datarec.rst b/doc/OnlineDocs/contributed_packages/parmest/datarec.rst
new file mode 100644
index 00000000000..cc7e0bb93d1
--- /dev/null
+++ b/doc/OnlineDocs/contributed_packages/parmest/datarec.rst
@@ -0,0 +1,26 @@
+.. _datarecsection:
+
+Data Reconciliation using parmest
+=================================
+
+The method :class:`~pyomo.contrib.parmest.parmest.Estimator.theta_est`
+can optionally return model values. This feature can be used to return
+reconciled data using a user specified objective. In this case, the list
+of variable names the user wants to estimate (theta_names) is set to an
+empty list and the objective function is defined to minimize
+measurement to model error. Note that the model used for data
+reconciliation may differ from the model used for parameter estimation.
+
+The following example illustrates the use of parmest for data
+reconciliation. The functions
+:class:`~pyomo.contrib.parmest.graphics.grouped_boxplot` or
+:class:`~pyomo.contrib.parmest.graphics.grouped_violinplot` can be used
+to visually compare the original and reconciled data.
+
+.. doctest::
+ :skipif: True
+
+ >>> import pyomo.contrib.parmest.parmest as parmest
+ >>> pest = parmest.Estimator(model_function, data, [], objective_function)
+ >>> obj, theta, data_rec = pest.theta_est(return_values=['A', 'B'])
+ >>> parmest.grouped_boxplot(data, data_rec)
diff --git a/doc/OnlineDocs/contributed_packages/parmest/driver.rst b/doc/OnlineDocs/contributed_packages/parmest/driver.rst
index 697fa2e24b8..840fa1b61bd 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/driver.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/driver.rst
@@ -1,36 +1,58 @@
.. _driversection:
Parameter Estimation using parmest
-=======================================
-
-Parameter Estimation using parmest requires a Pyomo model, experimental data which defines
-multiple scenarios, and a list of thetas to estimate.
-parmest uses PySP [PyomoBookII]_ to solve a two-stage stochastic programming
-problem, where the experimental data is used to create a scenario tree.
-The objective function needs to be written in PySP form with the
-Pyomo Expression for first stage cost (named "FirstStateCost") set to zero and the
-Pyomo Expression for second stage cost (named "SecondStageCost") defined as the
-deviation between model and the observations (typically defined as
-the sum of squared deviation between
-model values and observed values).
-
-If the Pyomo model is not formatted as a two-stage stochastic programming
-problem in this format, the user can supply a custom function to use as the second stage cost
-and the Pyomo model will be modified within parmest to match the specifications required by PySP.
-The PySP callback function is also defined within parmest.
-The callback function returns a populated
-and initialized model for each scenario.
+==================================
+
+Parameter Estimation using parmest requires a Pyomo model, experimental
+data which defines multiple scenarios, and a list of parameter names
+(thetas) to estimate. parmest uses PySP [PyomoBookII]_ to solve a
+two-stage stochastic programming problem, where the experimental data is
+used to create a scenario tree. The objective function needs to be
+written in PySP form with the Pyomo Expression for first stage cost
+(named "FirstStageCost") set to zero and the Pyomo Expression for second
+stage cost (named "SecondStageCost") defined as the deviation between
+the model and the observations (typically defined as the sum of squared
+deviation between model values and observed values).
+
+If the Pyomo model is not formatted as a two-stage stochastic
+programming problem in this format, the user can supply a custom
+function to use as the second stage cost and the Pyomo model will be
+modified within parmest to match the specifications required by PySP.
+The PySP callback function is also defined within parmest. The callback
+function returns a populated and initialized model for each scenario.
To use parmest, the user creates a :class:`~pyomo.contrib.parmest.parmest.Estimator` object
-and uses its methods for:
-
-* Parameter estimation, :class:`~pyomo.contrib.parmest.parmest.Estimator.theta_est`
-* Bootstrap resampling for parameter estimation, :class:`~pyomo.contrib.parmest.parmest.Estimator.theta_est_bootstrap`
-* Compute the objective at theta values, :class:`~pyomo.contrib.parmest.parmest.Estimator.objective_at_theta`
-* Compute likelihood ratio, :class:`~pyomo.contrib.parmest.parmest.Estimator.likelihood_ratio_test`
-
-A :class:`~pyomo.contrib.parmest.parmest.Estimator` object can be created using
-the following code. A description of each argument is listed below. Examples are provided in the :ref:`examplesection` Section.
+which includes the following methods:
+
+.. autosummary::
+ :nosignatures:
+
+ ~pyomo.contrib.parmest.parmest.Estimator.theta_est
+ ~pyomo.contrib.parmest.parmest.Estimator.theta_est_bootstrap
+ ~pyomo.contrib.parmest.parmest.Estimator.theta_est_leaveNout
+ ~pyomo.contrib.parmest.parmest.Estimator.objective_at_theta
+ ~pyomo.contrib.parmest.parmest.Estimator.confidence_region_test
+ ~pyomo.contrib.parmest.parmest.Estimator.likelihood_ratio_test
+ ~pyomo.contrib.parmest.parmest.Estimator.leaveNout_bootstrap_test
+
+Additional functions are available in parmest to group data, plot
+results, and fit distributions to theta values.
+
+.. autosummary::
+ :nosignatures:
+
+ ~pyomo.contrib.parmest.parmest.group_data
+ ~pyomo.contrib.parmest.graphics.pairwise_plot
+ ~pyomo.contrib.parmest.graphics.grouped_boxplot
+ ~pyomo.contrib.parmest.graphics.grouped_violinplot
+ ~pyomo.contrib.parmest.graphics.fit_rect_dist
+ ~pyomo.contrib.parmest.graphics.fit_mvn_dist
+ ~pyomo.contrib.parmest.graphics.fit_kde_dist
+
+A :class:`~pyomo.contrib.parmest.parmest.Estimator` object can be
+created using the following code. A description of each argument is
+listed below. Examples are provided in the :ref:`examplesection`
+Section.
.. testsetup:: *
:skipif: not __import__('pyomo.contrib.parmest.parmest').contrib.parmest.parmest.parmest_available
@@ -50,56 +72,75 @@ the following code. A description of each argument is listed below. Examples ar
>>> import pyomo.contrib.parmest.parmest as parmest
>>> pest = parmest.Estimator(model_function, data, theta_names, objective_function)
-
-Model function
-----------------
-The first argument is a function which uses data for a single scenario to return a
-populated and initialized Pyomo model for that scenario.
-Parameters that the user would like to estimate must be defined as variables (Pyomo `Var`).
-The variables can be fixed (parmest unfixes variables that will be estimated).
-The model does not have to be specifically written for parmest. That is, parmest can modify the objective for pySP, see :ref:`ObjFunction` below.
+Optionally, solver options can be supplied, e.g.,
-Data
------------------------
+.. doctest::
+ :skipif: not __import__('pyomo.contrib.parmest.parmest').contrib.parmest.parmest.parmest_available
+
+ >>> solver_options = {"max_iter": 6000}
+ >>> pest = parmest.Estimator(model_function, data, theta_names, objective_function, solver_options)
+
-The second argument is the data which will be used to populate the Pyomo model.
-Supported data formats include:
-* **Pandas Dataframe** where each row is a separate scenario and column names refer to observed quantities.
- Pandas DataFrames are easily stored and read in from csv, excel, or databases, or created directly in Python.
-* **List of dictionaries** where each entry in the list is a separate scenario and the keys (or nested keys)
- refer to observed quantities.
- Dictionaries are often preferred over DataFrames when using static and time series data.
- Dictionaries are easily stored and read in from json or yaml files, or created directly in Python.
-* **List of json file names** where each entry in the list contains a json file name for a separate scenario.
- This format is recommended when using large datasets in parallel computing.
+Model function
+--------------
-The data must be compatible with the model function that returns a populated and initialized Pyomo model for a
-single scenario.
-Data can include multiple entries per variable (time series and/or duplicate sensors).
-This information can be included in custom objective functions, see :ref:`ObjFunction` below.
+The first argument is a function which uses data for a single scenario
+to return a populated and initialized Pyomo model for that scenario.
+Parameters that the user would like to estimate must be defined as
+variables (Pyomo `Var`). The variables can be fixed (parmest unfixes
+variables that will be estimated). The model does not have to be
+specifically written for parmest. That is, parmest can modify the
+objective for PySP, see :ref:`ObjFunction` below.
+
+Data
+----
+
+The second argument is the data which will be used to populate the Pyomo
+model. Supported data formats include:
+
+* **Pandas Dataframe** where each row is a separate scenario and column
+ names refer to observed quantities. Pandas DataFrames are easily
+ stored and read in from csv, excel, or databases, or created directly
+ in Python.
+* **List of dictionaries** where each entry in the list is a separate
+ scenario and the keys (or nested keys) refer to observed quantities.
+ Dictionaries are often preferred over DataFrames when using static and
+ time series data. Dictionaries are easily stored and read in from
+ json or yaml files, or created directly in Python.
+* **List of json file names** where each entry in the list contains a
+ json file name for a separate scenario. This format is recommended
+ when using large datasets in parallel computing.
+
+The data must be compatible with the model function that returns a
+populated and initialized Pyomo model for a single scenario. Data can
+include multiple entries per variable (time series and/or duplicate
+sensors). This information can be included in custom objective
+functions, see :ref:`ObjFunction` below.
Theta names
------------------------
+-----------
-The third argument is a list of variable names that the user wants to estimate.
-The list contains strings with `Var` names from the Pyomo model.
+The third argument is a list of variable names that the user wants to
+estimate. The list contains strings with `Var` names from the Pyomo
+model.
.. _ObjFunction:
-Objective function
------------------------------
-
-The forth argument is an optional argument which defines the optimization objective function to use in
-parameter estimation.
-If no objective function is specified, the Pyomo model is used
-"as is" and should be defined with a "FirstStateCost" and
-"SecondStageCost" expression that are used to build an objective
-for PySP.
-If the Pyomo model is not written as a two stage stochastic programming problem in this format,
-and/or if the user wants to use an objective that is different than the original model,
-a custom objective function can be defined for parameter estimation.
-The objective function arguments include `model` and `data` and the objective function returns
-a Pyomo expression which are used to define "SecondStageCost".
-The objective function can be used to customize data points and weights that are used in parameter estimation.
+Objective function
+------------------
+
+The fourth argument is an optional argument which defines the
+optimization objective function to use in parameter estimation. If no
+objective function is specified, the Pyomo model is used "as is" and
+should be defined with "FirstStageCost" and "SecondStageCost"
+expressions that are used to build an objective for PySP. If the Pyomo
+model is not written as a two stage stochastic programming problem in
+this format, and/or if the user wants to use an objective that is
+different than the original model, a custom objective function can be
+defined for parameter estimation. The objective function arguments
+include `model` and `data` and the objective function returns a Pyomo
+expression which is used to define "SecondStageCost". The objective
+function can be used to customize data points and weights that are used
+in parameter estimation.
diff --git a/doc/OnlineDocs/contributed_packages/parmest/examples.rst b/doc/OnlineDocs/contributed_packages/parmest/examples.rst
index 38d30f6001e..f4c0ad62f67 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/examples.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/examples.rst
@@ -9,27 +9,36 @@ Examples can be found in `pyomo/contrib/parmest/examples` and include:
* Semibatch example [SemiBatch]_
* Rooney Biegler example [RooneyBiegler]_
-Each example contains a Python file that contains the Pyomo model and a Python file to run parameter estimation.
+Each example includes a Python file that contains the Pyomo model and a
+Python file to run parameter estimation.
-The description below uses the reactor design example.
-The file **reactor_design.py** includes a function which returns an populated instance of the Pyomo model.
-Note that the model is defined to maximize `cb` and that `k1`, `k2`, and `k3` are fixed.
-The _main_ program is included for easy testing of the model declaration.
+Additional use cases include:
+
+* Data reconciliation (reactor design example)
+* Parameter estimation using data with duplicate sensors and time-series
+ data (reactor design example)
+* Parameter estimation using mpi4py; the example saves results to a file
+ for later analysis/graphics (semibatch example)
+
+The description below uses the reactor design example. The file
+**reactor_design.py** includes a function which returns a populated
+instance of the Pyomo model. Note that the model is defined to maximize
+`cb` and that `k1`, `k2`, and `k3` are fixed. The _main_ program is
+included for easy testing of the model declaration.
.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/reactor_design/reactor_design.py
:language: python
-The file **reactor_design_parmest.py** uses parmest to estimate values of `k1`, `k2`, and `k3` by minimizing the sum of
-squared error between model and observed values of `ca`, `cb`, `cc`, and `cd`. The file also uses parmest to
-run parameter estimation with bootstrap resampling and perform a likelihood ratio test over a range of
-theta values.
+The file **parmest_example.py** uses parmest to estimate values of `k1`,
+`k2`, and `k3` by minimizing the sum of squared error between model and
+observed values of `ca`, `cb`, `cc`, and `cd`. The file also uses
+parmest to run parameter estimation with bootstrap resampling and
+perform a likelihood ratio test over a range of theta values.
-.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest.py
+.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/reactor_design/parmest_example.py
:language: python
-The semibatch and Rooney Biegler examples are defined in a similar manner.
+The semibatch and Rooney Biegler examples are defined in a similar
+manner.
-Additional use cases include:
-* Parameter estimation using data with duplicate sensors and time-series data (reactor design example)
-* Parameter estimation using mpi4py, the example saves results to a file for later analysis/graphics (semibatch example)
diff --git a/doc/OnlineDocs/contributed_packages/parmest/graphics.rst b/doc/OnlineDocs/contributed_packages/parmest/graphics.rst
index 8828e22cca0..a0837472bbb 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/graphics.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/graphics.rst
@@ -1,44 +1,55 @@
.. _graphicssection:
Graphics
-========================
-
-parmest includes a function, :class:`~pyomo.contrib.parmest.parmest.pairwise_plot`,
-to visualize results from bootstrap and likelihood ratio analysis.
-Confidence intervals using rectangular, multivariate normal, and kernel density
-estimate distributions can be included in the plot and used for scenario creation.
-Examples are provided in the :ref:`examplesection` Section.
-
-The pairwise plot includes a histogram of each parameter along the diagonal and
-a scatter plot for each pair of parameters in the upper and lower sections.
-The pairwise plot can also include the following optional information:
-
-* A single value for each theta (generally theta* from parameter estimation).
-* Confidence intervals for rectangular, multivariate normal, and/or kernel density
- estimate distributions at a specified level (i.e. 0.8).
- For plots with more than 2 parameters, theta* is used to extract a slice of the confidence
- region for each pairwise plot.
-* Filled contour lines for objective values at a specified level (i.e. 0.8).
- For plots with more than 2 parameters, theta* is used to extract a slice of the contour lines for each pairwise plot.
-* In addition to creating a figure, the user can optionally return the confidence region distributions
- which can be used to generate scenarios.
+========
+
+parmest includes the following functions to help visualize results:
+
+* :class:`~pyomo.contrib.parmest.graphics.grouped_boxplot`
+* :class:`~pyomo.contrib.parmest.graphics.grouped_violinplot`
+* :class:`~pyomo.contrib.parmest.graphics.pairwise_plot`
+
+Grouped boxplots and violinplots are used to compare datasets, generally
+before and after data reconciliation. Pairwise plots are used to
+visualize results from parameter estimation and include a histogram of
+each parameter along the diagonal and a scatter plot for each pair of
+parameters in the upper and lower sections. The pairwise plot can also
+include the following optional information:
+
+* A single value for each theta (generally theta* from parameter
+ estimation).
+* Confidence intervals for rectangular, multivariate normal, and/or
+ Gaussian kernel density estimate distributions at a specified level
+ (i.e. 0.8). For plots with more than 2 parameters, theta* is used to
+ extract a slice of the confidence region for each pairwise plot.
+* Filled contour lines for objective values at a specified level
+ (i.e. 0.8). For plots with more than 2 parameters, theta* is used to
+ extract a slice of the contour lines for each pairwise plot.
The following examples were generated using the reactor design example.
-:ref:fig-pairwise1 uses output from the bootstrap analysis, and
-:ref:fig-pairwise2 uses output from the likelihood ratio test.
+:numref:`fig-boxplot` uses output from data reconciliation,
+:numref:`fig-pairwise1` uses output from the bootstrap analysis, and
+:numref:`fig-pairwise2` uses output from the likelihood ratio test.
+.. _fig-boxplot:
+.. figure:: boxplot.png
+ :scale: 90 %
+ :alt: boxplot
+
+ Grouped boxplot showing data before and after data reconciliation.
+
.. _fig-pairwise1:
.. figure:: pairwise_plot_CI.png
:scale: 90 %
:alt: CI
- Pairwise bootstrap plot with rectangular, multivariate normal
- and kernel density estimation confidence region.
+ Pairwise bootstrap plot with rectangular, multivariate normal and
+ kernel density estimation confidence region.
.. _fig-pairwise2:
.. figure:: pairwise_plot_LR.png
:scale: 90 %
:alt: LR
- Pairwise likelihood ratio plot with contours of the objective and points that lie within an alpha confidence region.
-
\ No newline at end of file
+ Pairwise likelihood ratio plot with contours of the objective and
+ points that lie within an alpha confidence region.
diff --git a/doc/OnlineDocs/contributed_packages/parmest/index.rst b/doc/OnlineDocs/contributed_packages/parmest/index.rst
index 1a9596cd43b..3052b4e5dbf 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/index.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/index.rst
@@ -11,10 +11,12 @@ confidence regions and subsequent creation of scenarios for PySP.
overview.rst
installation.rst
driver.rst
+ datarec.rst
graphics.rst
examples.rst
parallel.rst
api.rst
+ scencreate.rst
Indices and Tables
------------------
diff --git a/doc/OnlineDocs/contributed_packages/parmest/installation.rst b/doc/OnlineDocs/contributed_packages/parmest/installation.rst
index b9cd170daca..5ca2932b89c 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/installation.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/installation.rst
@@ -7,19 +7,20 @@ various Python package dependencies and the IPOPT software
library for non-linear optimization.
Python package dependencies
--------------------------------
+---------------------------
#. numpy
#. pandas
#. pyomo
#. pyutilib
-#. matplotlib (optional, used for graphics)
-#. scipy.stats (optional, used for graphics)
-#. seaborn (optional, used for graphics)
-#. mpi4py.MPI (optional, used for parallel computing)
+#. matplotlib (optional)
+#. scipy.stats (optional)
+#. seaborn (optional)
+#. mpi4py.MPI (optional)
IPOPT
--------
+-----
+
IPOPT can be downloaded from https://projects.coin-or.org/Ipopt.
Testing
diff --git a/doc/OnlineDocs/contributed_packages/parmest/overview.rst b/doc/OnlineDocs/contributed_packages/parmest/overview.rst
index af95fd5903d..1b5c71b849e 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/overview.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/overview.rst
@@ -1,12 +1,12 @@
Overview
-================
+========
-The Python package called parmest facilitates model-based
-parameter estimation along with characterization of
-uncertainty associated with the estimates. For example, parmest
-can provide confidence regions around the parameter estimates.
-Additionally, parameter vectors, each with an attached probability estimate,
-can be used to build scenarios for design optimization.
+The Python package called parmest facilitates model-based parameter
+estimation along with characterization of uncertainty associated with
+the estimates. For example, parmest can provide confidence regions
+around the parameter estimates. Additionally, parameter vectors, each
+with an attached probability estimate, can be used to build scenarios
+for design optimization.
Functionality in parmest includes:
@@ -14,6 +14,7 @@ Functionality in parmest includes:
* Bootstrap resampling for parameter estimation
* Confidence regions based on single or multi-variate distributions
* Likelihood ratio
+* Leave-N-out cross validation
* Parallel processing
Background
@@ -26,18 +27,21 @@ a vector, :math:`{\theta}`, to use in the functional form
y = g(x; \theta)
-where :math:`x` is a vector containing measured data, typically in high dimension, :math:`{\theta}` is
-a vector of values to estimate, in much lower dimension, and the response vectors are
-given as :math:`y_{i}, i=1,\ldots,m` with :math:`m` also much
-smaller than the dimension of :math:`x`. This is done by collecting :math:`S` data points, which
-are :math:`{\tilde{x}},{\tilde{y}}` pairs and then finding :math:`{\theta}` values that
-minimize some function of the deviation between the values of :math:`{\tilde{y}}` that are measured
-and the values of :math:`g({\tilde{x}};{\theta})` for each corresponding :math:`{\tilde{x}}`,
-which is a subvector of the vector :math:`x`. Note
-that for most experiments, only small parts of :math:`x` will change from
-one experiment to the next.
+where :math:`x` is a vector containing measured data, typically in high
+dimension, :math:`{\theta}` is a vector of values to estimate, in much
+lower dimension, and the response vectors are given as :math:`y_{i},
+i=1,\ldots,m` with :math:`m` also much smaller than the dimension of
+:math:`x`. This is done by collecting :math:`S` data points, which are
+:math:`{\tilde{x}},{\tilde{y}}` pairs and then finding :math:`{\theta}`
+values that minimize some function of the deviation between the values
+of :math:`{\tilde{y}}` that are measured and the values of
+:math:`g({\tilde{x}};{\theta})` for each corresponding
+:math:`{\tilde{x}}`, which is a subvector of the vector :math:`x`. Note
+that for most experiments, only small parts of :math:`x` will change
+from one experiment to the next.
-The following least squares objective can be used to estimate parameter values, where data points are indexed by :math:`s=1,\ldots,S`
+The following least squares objective can be used to estimate parameter
+values, where data points are indexed by :math:`s=1,\ldots,S`
.. math::
@@ -49,19 +53,20 @@ where
q_{s}({\theta};{\tilde{x}}_{s}, {\tilde{y}}_{s}) = \sum_{i=1}^{m}w_{i}\left[{\tilde{y}}_{si} - g_{i}({\tilde{x}}_{s};{\theta})\right]^{2},
-i.e., the contribution of sample :math:`s` to :math:`Q`, where :math:`w \in \Re^{m}` is a vector
-of weights for the responses. For multi-dimensional :math:`y`, this
-is the squared weighted :math:`L_{2}` norm and for univariate :math:`y` the weighted squared deviation.
+i.e., the contribution of sample :math:`s` to :math:`Q`, where :math:`w
+\in \Re^{m}` is a vector of weights for the responses. For
+multi-dimensional :math:`y`, this is the squared weighted :math:`L_{2}`
+norm and for univariate :math:`y` the weighted squared deviation.
Custom objectives can also be defined for parameter estimation.
In the applications of interest to us, the function :math:`g(\cdot)` is
usually defined as an optimization problem with a large number of
(perhaps constrained) optimization variables, a subset of which are
-fixed at values :math:`{\tilde{x}}` when the optimization is performed.
-In other applications, the values of
-:math:`{\theta}` are fixed parameter values, but for the problem formulation above,
-the values of :math:`{\theta}` are the primary optimization variables. Note
-that in general, the function :math:`g(\cdot)` will have a large set of
-parameters that are not included in :math:`{\theta}`. Often, the :math:`y_{is}` will
-be vectors themselves, perhaps indexed by time with index sets
-that vary with :math:`s`.
+fixed at values :math:`{\tilde{x}}` when the optimization is performed.
+In other applications, the values of :math:`{\theta}` are fixed
+parameter values, but for the problem formulation above, the values of
+:math:`{\theta}` are the primary optimization variables. Note that in
+general, the function :math:`g(\cdot)` will have a large set of
+parameters that are not included in :math:`{\theta}`. Often, the
+:math:`y_{is}` will be vectors themselves, perhaps indexed by time with
+index sets that vary with :math:`s`.
diff --git a/doc/OnlineDocs/contributed_packages/parmest/parallel.rst b/doc/OnlineDocs/contributed_packages/parmest/parallel.rst
index 89958368a0c..8564724819e 100644
--- a/doc/OnlineDocs/contributed_packages/parmest/parallel.rst
+++ b/doc/OnlineDocs/contributed_packages/parmest/parallel.rst
@@ -1,29 +1,33 @@
.. _parallelsection:
Parallel Implementation
-===================================
+=======================
-Parallel implementation in parmest is **preliminary**.
-To run parmest in parallel, you need the mpi4py Python package and a *compatible* MPI installation.
-If you do NOT have mpi4py or a MPI installation, parmest still works (you should not get MPI import errors).
+Parallel implementation in parmest is **preliminary**. To run parmest
+in parallel, you need the mpi4py Python package and a *compatible* MPI
+installation. If you do NOT have mpi4py or a MPI installation, parmest
+still works (you should not get MPI import errors).
-For example, the following command can be used to run the semibatch model in parallel::
+For example, the following command can be used to run the semibatch
+model in parallel::
- mpiexec -n 4 python semibatch_parmest_parallel.py
+ mpiexec -n 4 python parmest_parallel_example.py
-The file **semibatch_parmest_parallel.py** is shown below.
+The file **parmest_parallel_example.py** is shown below.
Results are saved to file for later analysis.
-.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/semibatch/semibatch_parmest_parallel.py
+.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/semibatch/parmest_parallel_example.py
:language: python
Installation
--------------
+------------
-The mpi4py Python package should be installed using conda.
-The following installation instructions were tested on a Mac with Python 3.5.
+The mpi4py Python package should be installed using conda. The
+following installation instructions were tested on a Mac with Python
+3.5.
-Create a conda environment and install mpi4py using the following commands::
+Create a conda environment and install mpi4py using the following
+commands::
conda create -n parmest-parallel python=3.5
source activate parmest-parallel
diff --git a/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst b/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst
new file mode 100644
index 00000000000..e9ce28c89eb
--- /dev/null
+++ b/doc/OnlineDocs/contributed_packages/parmest/scencreate.rst
@@ -0,0 +1,34 @@
+Scenario Creation
+=================
+
+In addition to model-based parameter estimation, parmest can create
+scenarios for use in optimization under uncertainty. To do this, one
+first creates an ``Estimator`` object, then a ``ScenarioCreator``
+object, which has methods to add ``ParmestScen`` scenario objects to a
+``ScenarioSet`` object, which can write them to a csv file or output them
+via an iterator method.
+
+Example
+-------
+
+This example is in the semibatch subdirectory of the examples directory in
+the file ``scencreate.py``. It creates a csv file with scenarios that
+correspond one-to-one with the experiments used as input data. It also
+creates a few scenarios using the bootstrap methods and prints the
+scenarios to the screen, accessing them via the ``ScensIterator`` and ``print``.
+
+.. literalinclude:: ../../../../pyomo/contrib/parmest/examples/semibatch/scencreate.py
+ :language: python
+
+.. note::
+ This example may produce an error message if your version of Ipopt is not
+ based on a good linear solver.
+
+
+API
+---
+
+.. automodule:: pyomo.contrib.parmest.scenariocreator
+ :members:
+ :undoc-members:
+ :show-inheritance:
diff --git a/doc/OnlineDocs/contribution_guide.rst b/doc/OnlineDocs/contribution_guide.rst
index 8e196f4e126..3439a96ff88 100644
--- a/doc/OnlineDocs/contribution_guide.rst
+++ b/doc/OnlineDocs/contribution_guide.rst
@@ -44,6 +44,13 @@ at least 70% coverage of the lines modified in the PR and prefer
coverage closer to 90%. We also require that all tests pass before a PR
will be merged.
+The Pyomo master branch (as of `this commit`) provides a GitHub Actions
+workflow that will test any changes pushed to a branch using Ubuntu with
+Python 3.7. For existing forks, fetch and merge your fork (and branches) with
+Pyomo's master. For new forks, you will need to enable GitHub Actions
+in the 'Actions' tab on your fork. Then the test will begin to run
+automatically with each push to your fork.
+
At any point in the development cycle, a "work in progress" pull request
may be opened by including '[WIP]' at the beginning of the PR
title. This allows your code changes to be tested by Pyomo's automatic
@@ -52,6 +59,224 @@ reviewed or merged by the core development team. In addition, any
'[WIP]' pull request left open for an extended period of time without
active development may be marked 'stale' and closed.
+Working on Forks and Branches
+-----------------------------
+
+All Pyomo development should be done on forks of the Pyomo
+repository. In order to fork the Pyomo repository, visit
+https://github.com/Pyomo/pyomo, click the "Fork" button in the
+upper right corner, and follow the instructions.
+
+This section discusses two recommended workflows for contributing
+pull-requests to Pyomo. The first workflow, labeled
+:ref:`Working with my fork and the GitHub Online UI <forksgithubui>`,
+does not require the use of 'remotes', and
+suggests updating your fork using the GitHub online UI. The second
+workflow, labeled
+:ref:`Working with remotes and the git command-line <forksremotes>`, outlines
+a process that defines separate remotes for your fork and the main
+Pyomo repository.
+
+More information on git can be found at
+https://git-scm.com/book/en/v2. Section 2.5 has information on working
+with remotes.
+
+
+.. _forksgithubui:
+
+Working with my fork and the GitHub Online UI
++++++++++++++++++++++++++++++++++++++++++++++
+
+After creating your fork (per the instructions above), you can
+then clone your fork of the repository with
+
+::
+
+ git clone https://github.com/<username>/pyomo.git
+
+For new development, we strongly recommend working on feature
+branches. When you have a new feature to implement, create
+the branch with the following.
+
+::
+
+ cd pyomo/ # to make sure you are in the folder managed by git
+ git branch <branch_name>
+ git checkout <branch_name>
+
+Development can now be performed. When you are ready, commit
+any changes you make to your local repository. This can be
+done multiple times with informative commit messages for
+different tasks in the feature development.
+
+::
+
+ git add <filename>
+ git status # to check that you have added the correct files
+ git commit -m 'informative commit message to describe changes'
+
+In order to push the changes in your local branch to a branch on your fork, use
+
+::
+
+ git push origin <branch_name>
+
+
+When you have completed all the changes and are ready for a pull request, make
+sure all the changes have been pushed to the branch on your fork.
+
+ * Visit https://github.com/<username>/pyomo.
+ * Just above the list of files and directories in the repository,
+ you should see a button that says "Branch: master". Click on
+ this button, and choose the correct branch.
+ * Click the "New pull request" button just to the right of the
+ "Branch: " button.
+ * Fill out the pull request template and click the green "Create
+ pull request" button.
+
+At times during your development, you may want to merge changes from
+the Pyomo master development branch into the feature branch on your
+fork and in your local clone of the repository.
+
+Using GitHub UI to merge Pyomo master into a branch on your fork
+****************************************************************
+
+To update your fork, you will actually be merging a pull-request from
+the main Pyomo repository into your fork.
+
+ * Visit https://github.com/Pyomo/pyomo.
+ * Click on the "New pull request" button just above the list of
+ files and directories.
+ * You will see the title "Compare changes" with some small text
+ below it which says "Compare changes across branches, commits,
+ tags, and more below. If you need to, you can also compare
+ across forks." Click the last part of this: "compare across
+ forks".
+ * You should now see four buttons just below this: "base
+ repository: Pyomo/pyomo", "base: master", "head repository:
+ Pyomo/pyomo", and "compare: master". Click the leftmost button
+ and choose "<username>/Pyomo".
+ * Then click the button which is second to the left, and choose
+ the branch which you want to merge Pyomo master into. The four
+ buttons should now read: "base repository: <username>/pyomo",
+ "base: <branch_name>", "head repository: Pyomo/pyomo", and
+ "compare: master". This is setting you up to merge a pull-request
+ from Pyomo's master branch into your fork's branch.
+ * You should also now see a pull request template. If you fill out
+ the pull request template and click "Create pull request", this
+ will create a pull request which will update your fork and
+ branch with any changes that have been made to the master branch
+ of Pyomo.
+ * You can then merge the pull request by clicking the green "Merge
+ pull request" button from your fork on GitHub.
+
+.. _forksremotes:
+
+Working with remotes and the git command-line
++++++++++++++++++++++++++++++++++++++++++++++
+
+After you have created your fork, you can clone the fork and setup
+git 'remotes' that allow you to merge changes from (and to) different
+remote repositories. Below, we have included a set of recommendations,
+but, of course, there are other valid GitHub workflows that you can
+adopt.
+
+The following commands show how to clone your fork and setup
+two remotes, one for your fork, and one for the main Pyomo repository.
+
+::
+
+ git clone https://github.com/<username>/pyomo.git
+ git remote rename origin my-fork
+ git remote add main-pyomo https://github.com/pyomo/pyomo.git
+
+Note, you can see a list of your remotes with
+
+::
+
+ git remote -v
+
+The commands for creating a local branch and performing local commits
+are the same as those listed in the previous section above. Below are
+some common tasks based on this multi-remote setup.
+
+If you have changes that have been committed to a local feature branch
+(<branch_name>), you can push these changes to the branch on your fork
+with,
+
+::
+
+ git push my-fork <branch_name>
+
+In order to update a local branch with changes from a branch of the
+Pyomo repository,
+
+::
+
+ git checkout <branch_name>
+ git fetch main-pyomo
+ git merge main-pyomo/<branch_name> --ff-only
+
+The "--ff-only" only allows a merge if the merge can be done by a
+fast-forward. If you do not require a fast-forward, you can drop this
+option. The most common concrete example of this would be
+
+::
+
+ git checkout master
+ git fetch main-pyomo
+ git merge main-pyomo/master --ff-only
+
+The above commands pull changes from the master branch of the main
+Pyomo repository into the master branch of your local clone. To push
+these changes to the master branch on your fork,
+
+::
+
+ git push my-fork master
+
+
+Setting up your development environment
++++++++++++++++++++++++++++++++++++++++
+
+After cloning your fork, you will want to install Pyomo from source.
+
+Step 1 (recommended): Create a new conda environment.
+
+::
+
+ conda create --name pyomodev
+
+You may change the environment name from ``pyomodev`` as you see fit. Then activate the environment:
+
+::
+
+ conda activate pyomodev
+
+Step 2: Install PyUtilib
+
+You will likely need the master branch of PyUtilib to contribute to Pyomo. Clone a copy of the repository in a new directory:
+
+::
+
+ git clone https://github.com/PyUtilib/pyutilib
+
+Then in the directory containing the clone of PyUtilib run:
+
+::
+
+ python setup.py develop
+
+Step 3: Install Pyomo
+
+Finally, move to the directory containing the clone of your Pyomo fork and run:
+
+::
+
+ python setup.py develop
+
+These commands register the cloned code with the active python environment (``pyomodev``). This way, your changes to the source code for ``pyomo`` and ``pyutilib`` are automatically used by the active environment. You can create another conda environment to switch to alternate versions of pyomo (e.g., stable).
+
Review Process
--------------
diff --git a/doc/OnlineDocs/installation.rst b/doc/OnlineDocs/installation.rst
index ad9f5ccec09..e4e82b31585 100644
--- a/doc/OnlineDocs/installation.rst
+++ b/doc/OnlineDocs/installation.rst
@@ -23,7 +23,7 @@ optimization solvers can be installed with conda as well:
::
- conda install -c conda-forge ipopt coincbc glpk
+ conda install -c conda-forge ipopt glpk
Using PIP
diff --git a/doc/OnlineDocs/modeling_extensions/gdp.rst b/doc/OnlineDocs/modeling_extensions/gdp.rst
index a3e066d2cb1..9fb6feeb03a 100644
--- a/doc/OnlineDocs/modeling_extensions/gdp.rst
+++ b/doc/OnlineDocs/modeling_extensions/gdp.rst
@@ -62,15 +62,15 @@ Transformation
To use standard commercial solvers, you must convert the disjunctive model to a standard MIP/MINLP model.
The two classical strategies for doing so are the (included) Big-M and Hull reformulations.
-From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm`` or ``--transform pyomo.gdp.chull``.
+From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm`` or ``--transform pyomo.gdp.hull``.
If you are using a Python script, ``TransformationFactory`` accomplishes the same functionality:
- ``TransformationFactory('gdp.bigm').apply_to(model)``
-- ``TransformationFactory('gdp.chull').apply_to(model)``
+- ``TransformationFactory('gdp.hull').apply_to(model)``
.. note::
- - all variables that appear in disjuncts need upper and lower bounds for chull
+ - all variables that appear in disjuncts need upper and lower bounds for hull
- for linear models, the BigM transform can estimate reasonably tight M
values for you if variables are bounded.
diff --git a/doc/OnlineDocs/pyomo_modeling_components/Sets.rst b/doc/OnlineDocs/pyomo_modeling_components/Sets.rst
index 8ec12fafea1..c7b331e408c 100644
--- a/doc/OnlineDocs/pyomo_modeling_components/Sets.rst
+++ b/doc/OnlineDocs/pyomo_modeling_components/Sets.rst
@@ -106,14 +106,17 @@ Note that the element number starts with 1 and not 0:
.. doctest::
>>> model.X.pprint()
- X : Dim=0, Dimen=1, Size=10, Domain=None, Ordered=False, Bounds=(1, 19)
- [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ X : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 10 : {1, 3, 5, 7, 9, 11, 13, 15, 17, 19}
>>> model.Y.pprint()
- Y : Dim=0, Dimen=1, Size=10, Domain=None, Ordered=False, Bounds=(1, 19)
- [1, 3, 5, 7, 9, 11, 13, 15, 17, 19]
+ Y : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 10 : {1, 3, 5, 7, 9, 11, 13, 15, 17, 19}
>>> model.Z.pprint()
- Z : Dim=0, Dimen=1, Size=10, Domain=None, Ordered=False, Bounds=(3, 21)
- [3, 5, 7, 9, 11, 13, 15, 17, 19, 21]
+ Z : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 10 : {3, 5, 7, 9, 11, 13, 15, 17, 19, 21}
Additional information about iterators for set initialization is in the
[PyomoBookII]_ book.
diff --git a/doc/OnlineDocs/working_abstractmodels/data/raw_dicts.rst b/doc/OnlineDocs/working_abstractmodels/data/raw_dicts.rst
index 7ce97864250..e10042b3ceb 100644
--- a/doc/OnlineDocs/working_abstractmodels/data/raw_dicts.rst
+++ b/doc/OnlineDocs/working_abstractmodels/data/raw_dicts.rst
@@ -29,10 +29,12 @@ components, the required data dictionary maps the implicit index
>>> i = m.create_instance(data)
>>> i.pprint()
2 Set Declarations
- I : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- r_index : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ r_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I*I : 9 : {(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)}
3 Param Declarations
p : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
diff --git a/doc/OnlineDocs/working_abstractmodels/instantiating_models.rst b/doc/OnlineDocs/working_abstractmodels/instantiating_models.rst
index 1cf68cbd65c..962e14558eb 100644
--- a/doc/OnlineDocs/working_abstractmodels/instantiating_models.rst
+++ b/doc/OnlineDocs/working_abstractmodels/instantiating_models.rst
@@ -32,7 +32,7 @@ is "empty":
>>> model.pprint()
1 Set Declarations
- I : Dim=0, Dimen=1, Size=0, Domain=None, Ordered=False, Bounds=None
+ I : Size=0, Index=None, Ordered=Insertion
Not constructed
1 Param Declarations
@@ -66,8 +66,9 @@ abstract ``model`` is left untouched.
True
>>> instance.pprint()
1 Set Declarations
- I : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
1 Param Declarations
p : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
@@ -102,8 +103,9 @@ several sources, including using a :ref:`dict `,
>>> instance2 = model.create_instance({None: {'I': {None: [4,5]}}})
>>> instance2.pprint()
1 Set Declarations
- I : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=(4, 5)
- [4, 5]
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {4, 5}
1 Param Declarations
p : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
diff --git a/doc/attic/GettingStarted/current/pyomo.txt b/doc/attic/GettingStarted/current/pyomo.txt
index acd07b9bed8..027dcd58afc 100644
--- a/doc/attic/GettingStarted/current/pyomo.txt
+++ b/doc/attic/GettingStarted/current/pyomo.txt
@@ -1042,7 +1042,7 @@ In order to use the solvers currently avaialbe, one must convert the
disjunctive model to a standard MIP/MINLP model. The easiest way to
do that is using the (included) BigM or Convex Hull transformations.
From the Pyomo command line, include the option +--transform pyomo.gdp.bigm+
-or +--transform pyomo.gdp.chull+
+or +--transform pyomo.gdp.hull+
=== Notes ===
diff --git a/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst b/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst
index d7992b93d5e..49649012825 100644
--- a/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst
+++ b/doc/attic/old_sphinx_files/getting_started/Disjunctions.rst
@@ -47,7 +47,7 @@ In order to use the solvers currently available, one must convert the
disjunctive model to a standard MIP/MINLP model. The easiest way to
do that is using the (included) BigM or Convex Hull transformations.
From the Pyomo command line, include the option ``--transform pyomo.gdp.bigm``
-or ``--transform pyomo.gdp.chull``
+or ``--transform pyomo.gdp.hull``
Notes
-----
diff --git a/examples/doc/pyomobook/pyomo-components-ch/var_declaration.txt b/examples/doc/pyomobook/pyomo-components-ch/var_declaration.txt
index 08f77116ff2..bcc7d2bea33 100644
--- a/examples/doc/pyomobook/pyomo-components-ch/var_declaration.txt
+++ b/examples/doc/pyomobook/pyomo-components-ch/var_declaration.txt
@@ -14,9 +14,9 @@ A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
[1, 2, 3]
s : Size=3, Index=A
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 1 : 1 : None : 2 : False : True : IntegerInterval(1, 2)
- 2 : 2 : None : 3 : False : True : IntegerInterval(2, 3)
- 3 : 3 : None : 4 : False : True : IntegerInterval(3, 4)
+ 1 : 1 : None : 2 : False : True : [1:2]
+ 2 : 2 : None : 3 : False : True : [2:3]
+ 3 : 3 : None : 4 : False : True : [3:4]
A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
[1, 2, 3]
0.0
diff --git a/examples/gdp/batchProcessing.py b/examples/gdp/batchProcessing.py
index 160cf17e722..7ac536b2eb9 100644
--- a/examples/gdp/batchProcessing.py
+++ b/examples/gdp/batchProcessing.py
@@ -10,221 +10,222 @@
because the _opt file is different (It has hard-coded bigM parameters so that each constraint
has the "optimal" bigM).'''
-model = AbstractModel()
-
-# TODO: it looks like they set a bigM for each j. Which I need to look up how to do...
-model.BigM = Suffix(direction=Suffix.LOCAL)
-model.BigM[None] = 1000
-
-
-## Constants from GAMS
-StorageTankSizeFactor = 2*5 # btw, I know 2*5 is 10... I don't know why it's written this way in GAMS?
-StorageTankSizeFactorByProd = 3
-MinFlow = -log(10000)
-VolumeLB = log(300)
-VolumeUB = log(3500)
-StorageTankSizeLB = log(100)
-StorageTankSizeUB = log(15000)
-UnitsInPhaseUB = log(6)
-UnitsOutOfPhaseUB = log(6)
-# TODO: YOU ARE HERE. YOU HAVEN'T ACTUALLY MADE THESE THE BOUNDS YET, NOR HAVE YOU FIGURED OUT WHOSE
-# BOUNDS THEY ARE. AND THERE ARE MORE IN GAMS.
-
-
-##########
-# Sets
-##########
-
-model.PRODUCTS = Set()
-model.STAGES = Set(ordered=True)
-model.PARALLELUNITS = Set(ordered=True)
-
-# TODO: this seems like an over-complicated way to accomplish this task...
-def filter_out_last(model, j):
- return j != model.STAGES.last()
-model.STAGESExceptLast = Set(initialize=model.STAGES, filter=filter_out_last)
-
-
-# TODO: these aren't in the formulation??
-#model.STORAGE_TANKS = Set()
-
-
-###############
-# Parameters
-###############
-
-model.HorizonTime = Param()
-model.Alpha1 = Param()
-model.Alpha2 = Param()
-model.Beta1 = Param()
-model.Beta2 = Param()
-
-model.ProductionAmount = Param(model.PRODUCTS)
-model.ProductSizeFactor = Param(model.PRODUCTS, model.STAGES)
-model.ProcessingTime = Param(model.PRODUCTS, model.STAGES)
-
-# These are hard-coded in the GAMS file, hence the defaults
-model.StorageTankSizeFactor = Param(model.STAGES, default=StorageTankSizeFactor)
-model.StorageTankSizeFactorByProd = Param(model.PRODUCTS, model.STAGES,
- default=StorageTankSizeFactorByProd)
-
-# TODO: bonmin wasn't happy and I think it might have something to do with this?
-# or maybe issues with convexity or a lack thereof... I don't know yet.
-# I made PRODUCTS ordered so I could do this... Is that bad? And it does index
-# from 1, right?
-def get_log_coeffs(model, k):
- return log(model.PARALLELUNITS.ord(k))
-
-model.LogCoeffs = Param(model.PARALLELUNITS, initialize=get_log_coeffs)
-
-# bounds
-model.volumeLB = Param(model.STAGES, default=VolumeLB)
-model.volumeUB = Param(model.STAGES, default=VolumeUB)
-model.storageTankSizeLB = Param(model.STAGES, default=StorageTankSizeLB)
-model.storageTankSizeUB = Param(model.STAGES, default=StorageTankSizeUB)
-model.unitsInPhaseUB = Param(model.STAGES, default=UnitsInPhaseUB)
-model.unitsOutOfPhaseUB = Param(model.STAGES, default=UnitsOutOfPhaseUB)
-
-
-################
-# Variables
-################
-
-# TODO: right now these match the formulation. There are more in GAMS...
-
-# unit size of stage j
-# model.volume = Var(model.STAGES)
-# # TODO: GAMS has a batch size indexed just by products that isn't in the formulation... I'm going
-# # to try to avoid it for the moment...
-# # batch size of product i at stage j
-# model.batchSize = Var(model.PRODUCTS, model.STAGES)
-# # TODO: this is different in GAMS... They index by stages too?
-# # cycle time of product i divided by batch size of product i
-# model.cycleTime = Var(model.PRODUCTS)
-# # number of units in parallel out-of-phase (or in phase) at stage j
-# model.unitsOutOfPhase = Var(model.STAGES)
-# model.unitsInPhase = Var(model.STAGES)
-# # TODO: what are we going to do as a boundary condition here? For that last stage?
-# # size of intermediate storage tank between stage j and j+1
-# model.storageTankSize = Var(model.STAGES)
-
-# variables for convexified problem
-# TODO: I am beginning to think these are my only variables actually.
-# GAMS never un-logs them, I don't think. And I think the GAMs ones
-# must be the log ones.
-def get_volume_bounds(model, j):
- return (model.volumeLB[j], model.volumeUB[j])
-model.volume_log = Var(model.STAGES, bounds=get_volume_bounds)
-model.batchSize_log = Var(model.PRODUCTS, model.STAGES)
-model.cycleTime_log = Var(model.PRODUCTS)
-def get_unitsOutOfPhase_bounds(model, j):
- return (0, model.unitsOutOfPhaseUB[j])
-model.unitsOutOfPhase_log = Var(model.STAGES, bounds=get_unitsOutOfPhase_bounds)
-def get_unitsInPhase_bounds(model, j):
- return (0, model.unitsInPhaseUB[j])
-model.unitsInPhase_log = Var(model.STAGES, bounds=get_unitsInPhase_bounds)
-def get_storageTankSize_bounds(model, j):
- return (model.storageTankSizeLB[j], model.storageTankSizeUB[j])
-# TODO: these bounds make it infeasible...
-model.storageTankSize_log = Var(model.STAGES, bounds=get_storageTankSize_bounds)
-
-# binary variables for deciding number of parallel units in and out of phase
-model.outOfPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)
-model.inPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)
-
-###############
-# Objective
-###############
-
-def get_cost_rule(model):
- return model.Alpha1 * sum(exp(model.unitsInPhase_log[j] + model.unitsOutOfPhase_log[j] + \
- model.Beta1 * model.volume_log[j]) for j in model.STAGES) +\
- model.Alpha2 * sum(exp(model.Beta2 * model.storageTankSize_log[j]) for j in model.STAGESExceptLast)
-model.min_cost = Objective(rule=get_cost_rule)
-
-
-##############
-# Constraints
-##############
-
-def processing_capacity_rule(model, j, i):
- return model.volume_log[j] >= log(model.ProductSizeFactor[i, j]) + model.batchSize_log[i, j] - \
- model.unitsInPhase_log[j]
-model.processing_capacity = Constraint(model.STAGES, model.PRODUCTS, rule=processing_capacity_rule)
-
-def processing_time_rule(model, j, i):
- return model.cycleTime_log[i] >= log(model.ProcessingTime[i, j]) - model.batchSize_log[i, j] - \
- model.unitsOutOfPhase_log[j]
-model.processing_time = Constraint(model.STAGES, model.PRODUCTS, rule=processing_time_rule)
-
-def finish_in_time_rule(model):
- return model.HorizonTime >= sum(model.ProductionAmount[i]*exp(model.cycleTime_log[i]) \
- for i in model.PRODUCTS)
-model.finish_in_time = Constraint(rule=finish_in_time_rule)
-
-
-###############
-# Disjunctions
-###############
-
-def storage_tank_selection_disjunct_rule(disjunct, selectStorageTank, j):
- model = disjunct.model()
- def volume_stage_j_rule(disjunct, i):
- return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \
- model.batchSize_log[i, j]
- def volume_stage_jPlus1_rule(disjunct, i):
- return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \
- model.batchSize_log[i, j+1]
- def batch_size_rule(disjunct, i):
- return -log(model.StorageTankSizeFactorByProd[i,j]) <= model.batchSize_log[i,j] - \
- model.batchSize_log[i, j+1] <= log(model.StorageTankSizeFactorByProd[i,j])
- def no_batch_rule(disjunct, i):
- return model.batchSize_log[i,j] - model.batchSize_log[i,j+1] == 0
-
- if selectStorageTank:
- disjunct.volume_stage_j = Constraint(model.PRODUCTS, rule=volume_stage_j_rule)
- disjunct.volume_stage_jPlus1 = Constraint(model.PRODUCTS,
- rule=volume_stage_jPlus1_rule)
- disjunct.batch_size = Constraint(model.PRODUCTS, rule=batch_size_rule)
- else:
- # The formulation says 0, but GAMS has this constant.
- # 04/04: Francisco says volume should be free:
- # disjunct.no_volume = Constraint(expr=model.storageTankSize_log[j] == MinFlow)
- disjunct.no_batch = Constraint(model.PRODUCTS, rule=no_batch_rule)
-model.storage_tank_selection_disjunct = Disjunct([0,1], model.STAGESExceptLast,
- rule=storage_tank_selection_disjunct_rule)
-
-def select_storage_tanks_rule(model, j):
- return [model.storage_tank_selection_disjunct[selectTank, j] for selectTank in [0,1]]
-model.select_storage_tanks = Disjunction(model.STAGESExceptLast, rule=select_storage_tanks_rule)
-
-# though this is a disjunction in the GAMs model, it is more efficiently formulated this way:
-# TODO: what on earth is k?
-def units_out_of_phase_rule(model, j):
- return model.unitsOutOfPhase_log[j] == sum(model.LogCoeffs[k] * model.outOfPhase[j,k] \
- for k in model.PARALLELUNITS)
-model.units_out_of_phase = Constraint(model.STAGES, rule=units_out_of_phase_rule)
-
-def units_in_phase_rule(model, j):
- return model.unitsInPhase_log[j] == sum(model.LogCoeffs[k] * model.inPhase[j,k] \
- for k in model.PARALLELUNITS)
-model.units_in_phase = Constraint(model.STAGES, rule=units_in_phase_rule)
-
-# and since I didn't do the disjunction as a disjunction, we need the XORs:
-def units_out_of_phase_xor_rule(model, j):
- return sum(model.outOfPhase[j,k] for k in model.PARALLELUNITS) == 1
-model.units_out_of_phase_xor = Constraint(model.STAGES, rule=units_out_of_phase_xor_rule)
-
-def units_in_phase_xor_rule(model, j):
- return sum(model.inPhase[j,k] for k in model.PARALLELUNITS) == 1
-model.units_in_phase_xor = Constraint(model.STAGES, rule=units_in_phase_xor_rule)
-
-
-# instance = model.create_instance('batchProcessing1.dat')
-# solver = SolverFactory('baron')
-# TransformationFactory('gdp.bigm').apply_to(instance)
-# TransformationFactory('core.add_slack_variables').apply_to(instance)
-# results = solver.solve(instance)
-# instance.display()
-# instance.solutions.store_to(results)
-# print results
+def build_model():
+ model = AbstractModel()
+
+ # TODO: it looks like they set a bigM for each j. Which I need to look up how to do...
+ model.BigM = Suffix(direction=Suffix.LOCAL)
+ model.BigM[None] = 1000
+
+
+ ## Constants from GAMS
+ StorageTankSizeFactor = 2*5 # btw, I know 2*5 is 10... I don't know why it's written this way in GAMS?
+ StorageTankSizeFactorByProd = 3
+ MinFlow = -log(10000)
+ VolumeLB = log(300)
+ VolumeUB = log(3500)
+ StorageTankSizeLB = log(100)
+ StorageTankSizeUB = log(15000)
+ UnitsInPhaseUB = log(6)
+ UnitsOutOfPhaseUB = log(6)
+ # TODO: YOU ARE HERE. YOU HAVEN'T ACTUALLY MADE THESE THE BOUNDS YET, NOR HAVE YOU FIGURED OUT WHOSE
+ # BOUNDS THEY ARE. AND THERE ARE MORE IN GAMS.
+
+
+ ##########
+ # Sets
+ ##########
+
+ model.PRODUCTS = Set()
+ model.STAGES = Set(ordered=True)
+ model.PARALLELUNITS = Set(ordered=True)
+
+ # TODO: this seems like an over-complicated way to accomplish this task...
+ def filter_out_last(model, j):
+ return j != model.STAGES.last()
+ model.STAGESExceptLast = Set(initialize=model.STAGES, filter=filter_out_last)
+
+
+ # TODO: these aren't in the formulation??
+ #model.STORAGE_TANKS = Set()
+
+
+ ###############
+ # Parameters
+ ###############
+
+ model.HorizonTime = Param()
+ model.Alpha1 = Param()
+ model.Alpha2 = Param()
+ model.Beta1 = Param()
+ model.Beta2 = Param()
+
+ model.ProductionAmount = Param(model.PRODUCTS)
+ model.ProductSizeFactor = Param(model.PRODUCTS, model.STAGES)
+ model.ProcessingTime = Param(model.PRODUCTS, model.STAGES)
+
+ # These are hard-coded in the GAMS file, hence the defaults
+ model.StorageTankSizeFactor = Param(model.STAGES, default=StorageTankSizeFactor)
+ model.StorageTankSizeFactorByProd = Param(model.PRODUCTS, model.STAGES,
+ default=StorageTankSizeFactorByProd)
+
+ # TODO: bonmin wasn't happy and I think it might have something to do with this?
+ # or maybe issues with convexity or a lack thereof... I don't know yet.
+ # I made PRODUCTS ordered so I could do this... Is that bad? And it does index
+ # from 1, right?
+ def get_log_coeffs(model, k):
+ return log(model.PARALLELUNITS.ord(k))
+
+ model.LogCoeffs = Param(model.PARALLELUNITS, initialize=get_log_coeffs)
+
+ # bounds
+ model.volumeLB = Param(model.STAGES, default=VolumeLB)
+ model.volumeUB = Param(model.STAGES, default=VolumeUB)
+ model.storageTankSizeLB = Param(model.STAGES, default=StorageTankSizeLB)
+ model.storageTankSizeUB = Param(model.STAGES, default=StorageTankSizeUB)
+ model.unitsInPhaseUB = Param(model.STAGES, default=UnitsInPhaseUB)
+ model.unitsOutOfPhaseUB = Param(model.STAGES, default=UnitsOutOfPhaseUB)
+
+
+ ################
+ # Variables
+ ################
+
+ # TODO: right now these match the formulation. There are more in GAMS...
+
+ # unit size of stage j
+ # model.volume = Var(model.STAGES)
+ # # TODO: GAMS has a batch size indexed just by products that isn't in the formulation... I'm going
+ # # to try to avoid it for the moment...
+ # # batch size of product i at stage j
+ # model.batchSize = Var(model.PRODUCTS, model.STAGES)
+ # # TODO: this is different in GAMS... They index by stages too?
+ # # cycle time of product i divided by batch size of product i
+ # model.cycleTime = Var(model.PRODUCTS)
+ # # number of units in parallel out-of-phase (or in phase) at stage j
+ # model.unitsOutOfPhase = Var(model.STAGES)
+ # model.unitsInPhase = Var(model.STAGES)
+ # # TODO: what are we going to do as a boundary condition here? For that last stage?
+ # # size of intermediate storage tank between stage j and j+1
+ # model.storageTankSize = Var(model.STAGES)
+
+ # variables for convexified problem
+ # TODO: I am beginning to think these are my only variables actually.
+ # GAMS never un-logs them, I don't think. And I think the GAMs ones
+ # must be the log ones.
+ def get_volume_bounds(model, j):
+ return (model.volumeLB[j], model.volumeUB[j])
+ model.volume_log = Var(model.STAGES, bounds=get_volume_bounds)
+ model.batchSize_log = Var(model.PRODUCTS, model.STAGES)
+ model.cycleTime_log = Var(model.PRODUCTS)
+ def get_unitsOutOfPhase_bounds(model, j):
+ return (0, model.unitsOutOfPhaseUB[j])
+ model.unitsOutOfPhase_log = Var(model.STAGES, bounds=get_unitsOutOfPhase_bounds)
+ def get_unitsInPhase_bounds(model, j):
+ return (0, model.unitsInPhaseUB[j])
+ model.unitsInPhase_log = Var(model.STAGES, bounds=get_unitsInPhase_bounds)
+ def get_storageTankSize_bounds(model, j):
+ return (model.storageTankSizeLB[j], model.storageTankSizeUB[j])
+ # TODO: these bounds make it infeasible...
+ model.storageTankSize_log = Var(model.STAGES, bounds=get_storageTankSize_bounds)
+
+ # binary variables for deciding number of parallel units in and out of phase
+ model.outOfPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)
+ model.inPhase = Var(model.STAGES, model.PARALLELUNITS, within=Binary)
+
+ ###############
+ # Objective
+ ###############
+
+ def get_cost_rule(model):
+ return model.Alpha1 * sum(exp(model.unitsInPhase_log[j] + model.unitsOutOfPhase_log[j] + \
+ model.Beta1 * model.volume_log[j]) for j in model.STAGES) +\
+ model.Alpha2 * sum(exp(model.Beta2 * model.storageTankSize_log[j]) for j in model.STAGESExceptLast)
+ model.min_cost = Objective(rule=get_cost_rule)
+
+
+ ##############
+ # Constraints
+ ##############
+
+ def processing_capacity_rule(model, j, i):
+ return model.volume_log[j] >= log(model.ProductSizeFactor[i, j]) + model.batchSize_log[i, j] - \
+ model.unitsInPhase_log[j]
+ model.processing_capacity = Constraint(model.STAGES, model.PRODUCTS, rule=processing_capacity_rule)
+
+ def processing_time_rule(model, j, i):
+ return model.cycleTime_log[i] >= log(model.ProcessingTime[i, j]) - model.batchSize_log[i, j] - \
+ model.unitsOutOfPhase_log[j]
+ model.processing_time = Constraint(model.STAGES, model.PRODUCTS, rule=processing_time_rule)
+
+ def finish_in_time_rule(model):
+ return model.HorizonTime >= sum(model.ProductionAmount[i]*exp(model.cycleTime_log[i]) \
+ for i in model.PRODUCTS)
+ model.finish_in_time = Constraint(rule=finish_in_time_rule)
+
+
+ ###############
+ # Disjunctions
+ ###############
+
+ def storage_tank_selection_disjunct_rule(disjunct, selectStorageTank, j):
+ model = disjunct.model()
+ def volume_stage_j_rule(disjunct, i):
+ return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \
+ model.batchSize_log[i, j]
+ def volume_stage_jPlus1_rule(disjunct, i):
+ return model.storageTankSize_log[j] >= log(model.StorageTankSizeFactor[j]) + \
+ model.batchSize_log[i, j+1]
+ def batch_size_rule(disjunct, i):
+ return inequality(-log(model.StorageTankSizeFactorByProd[i,j]),
+ model.batchSize_log[i,j] - model.batchSize_log[i, j+1],
+ log(model.StorageTankSizeFactorByProd[i,j]))
+ def no_batch_rule(disjunct, i):
+ return model.batchSize_log[i,j] - model.batchSize_log[i,j+1] == 0
+
+ if selectStorageTank:
+ disjunct.volume_stage_j = Constraint(model.PRODUCTS, rule=volume_stage_j_rule)
+ disjunct.volume_stage_jPlus1 = Constraint(model.PRODUCTS,
+ rule=volume_stage_jPlus1_rule)
+ disjunct.batch_size = Constraint(model.PRODUCTS, rule=batch_size_rule)
+ else:
+ # The formulation says 0, but GAMS has this constant.
+ # 04/04: Francisco says volume should be free:
+ # disjunct.no_volume = Constraint(expr=model.storageTankSize_log[j] == MinFlow)
+ disjunct.no_batch = Constraint(model.PRODUCTS, rule=no_batch_rule)
+ model.storage_tank_selection_disjunct = Disjunct([0,1], model.STAGESExceptLast,
+ rule=storage_tank_selection_disjunct_rule)
+
+ def select_storage_tanks_rule(model, j):
+ return [model.storage_tank_selection_disjunct[selectTank, j] for selectTank in [0,1]]
+ model.select_storage_tanks = Disjunction(model.STAGESExceptLast, rule=select_storage_tanks_rule)
+
+ # though this is a disjunction in the GAMs model, it is more efficiently formulated this way:
+ # TODO: what on earth is k?
+ def units_out_of_phase_rule(model, j):
+ return model.unitsOutOfPhase_log[j] == sum(model.LogCoeffs[k] * model.outOfPhase[j,k] \
+ for k in model.PARALLELUNITS)
+ model.units_out_of_phase = Constraint(model.STAGES, rule=units_out_of_phase_rule)
+
+ def units_in_phase_rule(model, j):
+ return model.unitsInPhase_log[j] == sum(model.LogCoeffs[k] * model.inPhase[j,k] \
+ for k in model.PARALLELUNITS)
+ model.units_in_phase = Constraint(model.STAGES, rule=units_in_phase_rule)
+
+ # and since I didn't do the disjunction as a disjunction, we need the XORs:
+ def units_out_of_phase_xor_rule(model, j):
+ return sum(model.outOfPhase[j,k] for k in model.PARALLELUNITS) == 1
+ model.units_out_of_phase_xor = Constraint(model.STAGES, rule=units_out_of_phase_xor_rule)
+
+ def units_in_phase_xor_rule(model, j):
+ return sum(model.inPhase[j,k] for k in model.PARALLELUNITS) == 1
+ model.units_in_phase_xor = Constraint(model.STAGES, rule=units_in_phase_xor_rule)
+
+ return model
+
+
+if __name__ == "__main__":
+ m = build_model().create_instance('batchProcessing1.dat')
+ TransformationFactory('gdp.bigm').apply_to(m)
+ SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;'])
+ m.min_cost.display()
diff --git a/examples/gdp/data_set.py b/examples/gdp/data_set.py
deleted file mode 100644
index c338f915023..00000000000
--- a/examples/gdp/data_set.py
+++ /dev/null
@@ -1,21 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-
-#def data():
-
-pop = [ 15.881351, 15.881339, 15.881320, 15.881294, 15.881261, 15.881223, 15.881180, 15.881132, 15.881079, 15.881022, 15.880961, 15.880898, 15.880832, 15.880764, 15.880695, 15.880624, 15.880553, 15.880480, 15.880409, 15.880340, 15.880270, 15.880203, 15.880138, 15.880076, 15.880016, 15.879960, 15.879907, 15.879852, 15.879799, 15.879746, 15.879693, 15.879638, 15.879585, 15.879531, 15.879477, 15.879423, 15.879370, 15.879315, 15.879262, 15.879209, 15.879155, 15.879101, 15.879048, 15.878994, 15.878940, 15.878886, 15.878833, 15.878778, 15.878725, 15.878672, 15.878618, 15.878564, 15.878510, 15.878457, 15.878402, 15.878349, 15.878295, 15.878242, 15.878187, 15.878134, 15.878081, 15.878026, 15.877973, 15.877919, 15.877864, 15.877811, 15.877758, 15.877704, 15.877650, 15.877596, 15.877543, 15.877488, 15.877435, 15.877381, 15.877326, 15.877273, 15.877220, 15.877166, 15.877111, 15.877058, 15.877005, 15.876950, 15.876896, 15.876843, 15.876789, 15.876735, 15.876681, 15.876628, 15.876573, 15.876520, 15.876466, 15.876411, 15.876358, 15.876304, 15.876251, 15.876196, 15.876143, 15.876089, 15.876034, 15.875981, 15.875927, 15.875872, 15.875819, 15.875765, 15.875712, 15.875657, 15.875604, 15.875550, 15.875495, 15.875442, 15.875388, 15.875335, 15.875280, 15.875226, 15.875173, 15.875118, 15.875064, 15.875011, 15.874956, 15.874902, 15.874849, 15.874795, 15.874740, 15.874687, 15.874633, 15.874578, 15.874525, 15.874471, 15.874416, 15.874363, 15.874309, 15.874256, 15.874201, 15.874147, 15.874094, 15.874039, 15.873985, 15.873931, 15.873878, 15.873823, 15.873769, 15.873716, 15.873661, 15.873607, 15.873554, 15.873499, 15.873445, 15.873391, 15.873338, 15.873283, 15.873229, 15.873175, 15.873121, 15.873067, 15.873013, 15.872960, 15.872905, 15.872851, 15.872797, 15.872742, 15.872689, 15.872635, 15.872580, 15.872526, 15.872473, 15.872419, 15.872364, 15.872310, 15.872256, 15.872202, 15.872148, 15.872094, 15.872039, 15.871985, 15.871932, 15.871878, 15.871823, 15.871769, 15.871715, 15.871660, 15.871607, 
15.871553, 15.871499, 15.871444, 15.871390, 15.871337, 15.871282, 15.871228, 15.871174, 15.871119, 15.871065, 15.871012, 15.870958, 15.870903, 15.870849, 15.870795, 15.870740, 15.870686, 15.870633, 15.870577, 15.870524, 15.870470, 15.870416, 15.870361, 15.870307, 15.870253, 15.870198, 15.870144, 15.870091, 15.870037, 15.869982, 15.869928, 15.869874, 15.869819, 15.869765, 15.869711, 15.869656, 15.869602, 15.869548, 15.869495, 15.869439, 15.869386, 15.869332, 15.869277, 15.869223, 15.869169, 15.869114, 15.869060, 15.869006, 15.868952, 15.868897, 15.868843, 15.868789, 15.868734, 15.868679, 15.868618, 15.868556, 15.868489, 15.868421, 15.868351, 15.868280, 15.868208, 15.868134, 15.868063, 15.867991, 15.867921, 15.867852, 15.867785, 15.867721, 15.867659, 15.867601, 15.867549, 15.867499, 15.867455, 15.867416, 15.867383, 15.867357, 15.867338, 15.867327, 15.867321, 15.867327, 15.867338, 15.867359, 15.867386, 15.867419, 15.867459, 15.867505, 15.867555, 15.867610, 15.867671, 15.867734, 15.867801, 15.867869, 15.867941, 15.868012, 15.868087, 15.868161, 15.868236, 15.868310, 15.868384, 15.868457, 15.868527, 15.868595, 15.868661, 15.868722, 15.868780, 15.868837, 15.868892, 15.868948, 15.869005, 15.869061, 15.869116, 15.869173, 15.869229, 15.869284, 15.869341, 15.869397, 15.869452, 15.869509, 15.869565, 15.869620, 15.869677, 15.869733, 15.869788, 15.869845, 15.869901, 15.869956, 15.870012, 15.870069, 15.870124, 15.870180, 15.870237, 15.870292, 15.870348, 15.870405, 15.870461, 15.870516, 15.870572, 15.870629, 15.870684, 15.870740, 15.870796, 15.870851, 15.870908, 15.870964, 15.871019, 15.871076, 15.871132, 15.871187, 15.871243, 15.871300, 15.871355, 15.871411, 15.871467, 15.871522, 15.871579, 15.871635, 15.871691, 15.871746, 15.871802, 15.871859, 15.871914, 15.871970, 15.872026, 15.872081, 15.872138, 15.872194, 15.872249, 15.872305, 15.872361, 15.872416, 15.872473, 15.872529, 15.872584, 15.872640, 15.872696, 15.872751, 15.872807, 15.872864, 15.872919, 15.872975, 15.873031, 
15.873087, 15.873142, 15.873198, 15.873255, 15.873310, 15.873366, 15.873422, 15.873477, 15.873533, 15.873589, 15.873644, 15.873700, 15.873757, 15.873811, 15.873868, 15.873924, 15.873979, 15.874035, 15.874091, 15.874146, 15.874202, 15.874258, 15.874313, 15.874369, 15.874425, 15.874481, 15.874536, 15.874592]
-
-logIstar = [7.943245, 8.269994, 8.517212, 8.814208, 9.151740, 9.478472, 9.559847, 9.664087, 9.735378, 9.852583, 9.692265, 9.498807, 9.097634, 8.388878, 7.870516, 7.012956, 6.484941, 5.825368, 5.346815, 5.548361, 5.706732, 5.712617, 5.709714, 5.696888, 5.530087, 5.826563, 6.643563, 7.004292, 7.044663, 7.190259, 7.335926, 7.516861, 7.831779, 8.188895, 8.450204, 8.801436, 8.818379, 8.787658, 8.601685, 8.258338, 7.943364, 7.425585, 7.062834, 6.658307, 6.339600, 6.526984, 6.679178, 6.988758, 7.367331, 7.746694, 8.260558, 8.676522, 9.235582, 9.607778, 9.841917, 10.081571, 10.216090, 10.350366, 10.289668, 10.248842, 10.039504, 9.846343, 9.510392, 9.190923, 8.662465, 7.743221, 7.128458, 5.967898, 5.373883, 5.097497, 4.836570, 5.203345, 5.544798, 5.443047, 5.181152, 5.508669, 6.144130, 6.413744, 6.610423, 6.748885, 6.729511, 6.789841, 6.941034, 7.093516, 7.307039, 7.541077, 7.644803, 7.769145, 7.760187, 7.708017, 7.656795, 7.664983, 7.483828, 6.887324, 6.551093, 6.457449, 6.346064, 6.486300, 6.612378, 6.778753, 6.909477, 7.360570, 8.150303, 8.549044, 8.897572, 9.239323, 9.538751, 9.876531, 10.260911, 10.613536, 10.621510, 10.661115, 10.392899, 10.065536, 9.920090, 9.933097, 9.561691, 8.807713, 8.263463, 7.252184, 6.669083, 5.877763, 5.331878, 5.356563, 5.328469, 5.631146, 6.027497, 6.250717, 6.453919, 6.718444, 7.071636, 7.348905, 7.531528, 7.798226, 8.197941, 8.578809, 8.722964, 8.901152, 8.904370, 8.889865, 8.881902, 8.958903, 8.721281, 8.211509, 7.810624, 7.164607, 6.733688, 6.268503, 5.905983, 5.900432, 5.846547, 6.245427, 6.786271, 7.088480, 7.474295, 7.650063, 7.636703, 7.830990, 8.231516, 8.584816, 8.886908, 9.225216, 9.472778, 9.765505, 9.928623, 10.153033, 10.048574, 9.892620, 9.538818, 8.896100, 8.437584, 7.819738, 7.362598, 6.505880, 5.914972, 6.264584, 6.555019, 6.589319, 6.552029, 6.809771, 7.187616, 7.513918, 8.017712, 8.224957, 8.084474, 8.079148, 8.180991, 8.274269, 8.413748, 8.559599, 8.756090, 9.017927, 9.032720, 9.047983, 8.826873, 8.366489, 8.011876, 
7.500830, 7.140406, 6.812626, 6.538719, 6.552218, 6.540129, 6.659927, 6.728530, 7.179692, 7.989210, 8.399173, 8.781128, 9.122303, 9.396378, 9.698512, 9.990104, 10.276543, 10.357284, 10.465869, 10.253833, 10.018503, 9.738407, 9.484367, 9.087025, 8.526409, 8.041126, 7.147168, 6.626706, 6.209446, 5.867231, 5.697439, 5.536769, 5.421413, 5.238297, 5.470136, 5.863007, 6.183083, 6.603569, 6.906278, 7.092324, 7.326612, 7.576052, 7.823430, 7.922775, 8.041677, 8.063403, 8.073229, 8.099726, 8.168522, 8.099041, 8.011404, 7.753147, 6.945211, 6.524244, 6.557723, 6.497742, 6.256247, 5.988794, 6.268093, 6.583316, 7.106842, 8.053929, 8.508237, 8.938915, 9.311863, 9.619753, 9.931745, 10.182361, 10.420978, 10.390829, 10.389230, 10.079342, 9.741479, 9.444561, 9.237448, 8.777687, 7.976436, 7.451502, 6.742856, 6.271545, 5.782289, 5.403089, 5.341954, 5.243509, 5.522993, 5.897001, 6.047042, 6.100738, 6.361727, 6.849562, 7.112544, 7.185346, 7.309412, 7.423746, 7.532142, 7.510318, 7.480175, 7.726362, 8.061117, 8.127072, 8.206166, 8.029634, 7.592953, 7.304869, 7.005394, 6.750019, 6.461377, 6.226432, 6.287047, 6.306452, 6.783694, 7.450957, 7.861692, 8.441530, 8.739626, 8.921994, 9.168961, 9.428077, 9.711664, 10.032714, 10.349937, 10.483985, 10.647475, 10.574038, 10.522431, 10.192246, 9.756246, 9.342511, 8.872072, 8.414189, 7.606582, 7.084701, 6.149903, 5.517257, 5.839429, 6.098090, 6.268935, 6.475965, 6.560543, 6.598942, 6.693938, 6.802531, 6.934345, 7.078370, 7.267736, 7.569640, 7.872204, 8.083603, 8.331226, 8.527144, 8.773523, 8.836599, 8.894303, 8.808326, 8.641717, 8.397901, 7.849034, 7.482899, 7.050252, 6.714103, 6.900603, 7.050765, 7.322905, 7.637986, 8.024340, 8.614505, 8.933591, 9.244008, 9.427410, 9.401385, 9.457744, 9.585068, 9.699673, 9.785478, 9.884559, 9.769732, 9.655075, 9.423071, 9.210198, 8.786654, 8.061787, 7.560976, 6.855829, 6.390707, 5.904006, 5.526631, 5.712303, 5.867027, 5.768367, 5.523352, 5.909118, 6.745543, 6.859218 ]
-
-deltaS = [ 9916.490263 ,12014.263380 ,13019.275755 ,12296.373612 ,8870.995603 ,1797.354574 ,-6392.880771 ,-16150.825387 ,-27083.245106 ,-40130.421462 ,-50377.169958 ,-57787.717468 ,-60797.223427 ,-59274.041897 ,-55970.213230 ,-51154.650927 ,-45877.841034 ,-40278.553775 ,-34543.967175 ,-28849.633641 ,-23192.776605 ,-17531.130740 ,-11862.021829 ,-6182.456792 ,-450.481090 ,5201.184400 ,10450.773882 ,15373.018272 ,20255.699431 ,24964.431669 ,29470.745887 ,33678.079947 ,37209.808930 ,39664.432393 ,41046.735479 ,40462.982011 ,39765.070209 ,39270.815830 ,39888.077002 ,42087.276604 ,45332.012929 ,49719.128772 ,54622.190928 ,59919.718626 ,65436.341097 ,70842.911460 ,76143.747430 ,81162.358574 ,85688.102884 ,89488.917734 ,91740.108470 ,91998.787916 ,87875.986012 ,79123.877908 ,66435.611045 ,48639.250610 ,27380.282817 ,2166.538464 ,-21236.428084 ,-43490.803535 ,-60436.624080 ,-73378.401966 ,-80946.278268 ,-84831.969493 ,-84696.627286 ,-81085.365407 ,-76410.847049 ,-70874.415387 ,-65156.276464 ,-59379.086883 ,-53557.267619 ,-47784.164830 ,-42078.001172 ,-36340.061427 ,-30541.788202 ,-24805.281435 ,-19280.817165 ,-13893.690606 ,-8444.172221 ,-3098.160839 ,2270.908649 ,7594.679295 ,12780.079247 ,17801.722109 ,22543.091206 ,26897.369814 ,31051.285734 ,34933.809557 ,38842.402859 ,42875.230152 ,47024.395356 ,51161.516122 ,55657.298307 ,60958.155424 ,66545.635029 ,72202.930397 ,77934.761905 ,83588.207792 ,89160.874522 ,94606.115027 ,99935.754968 ,104701.404975 ,107581.670606 ,108768.440311 ,107905.700480 ,104062.148863 ,96620.281684 ,83588.443029 ,61415.088182 ,27124.031692 ,-7537.285321 ,-43900.451653 ,-70274.062783 ,-87573.481475 ,-101712.148408 ,-116135.719087 ,-124187.225446 ,-124725.278371 ,-122458.145590 ,-117719.918256 ,-112352.138605 ,-106546.806030 ,-100583.803012 ,-94618.253238 ,-88639.090897 ,-82725.009842 ,-76938.910669 ,-71248.957807 ,-65668.352795 ,-60272.761991 ,-55179.538428 ,-50456.021161 ,-46037.728058 ,-42183.912670 ,-39522.184006 ,-38541.255303 ,-38383.665728 
,-39423.998130 ,-40489.466130 ,-41450.406768 ,-42355.156592 ,-43837.562085 ,-43677.262972 ,-41067.896944 ,-37238.628465 ,-32230.392026 ,-26762.766062 ,-20975.163308 ,-15019.218554 ,-9053.105545 ,-3059.663132 ,2772.399618 ,8242.538397 ,13407.752291 ,18016.047539 ,22292.125752 ,26616.583347 ,30502.564253 ,33153.890890 ,34216.684448 ,33394.220786 ,29657.417791 ,23064.375405 ,12040.831532 ,-2084.921068 ,-21390.235970 ,-38176.615985 ,-51647.714482 ,-59242.564959 ,-60263.150854 ,-58599.245165 ,-54804.972560 ,-50092.112608 ,-44465.812552 ,-38533.096297 ,-32747.104307 ,-27130.082610 ,-21529.632955 ,-15894.611939 ,-10457.566933 ,-5429.042583 ,-903.757828 ,2481.947589 ,5173.789976 ,8358.768202 ,11565.584635 ,14431.147931 ,16951.619820 ,18888.807708 ,20120.884465 ,20222.141242 ,18423.168124 ,16498.668271 ,14442.624242 ,14070.038273 ,16211.370808 ,19639.815904 ,24280.360465 ,29475.380079 ,35030.793540 ,40812.325095 ,46593.082382 ,52390.906885 ,58109.310860 ,63780.896094 ,68984.456561 ,72559.442320 ,74645.487900 ,74695.219755 ,72098.143876 ,66609.929889 ,56864.971296 ,41589.295266 ,19057.032104 ,-5951.329863 ,-34608.796853 ,-56603.801584 ,-72678.838057 ,-83297.070856 ,-90127.593511 ,-92656.040614 ,-91394.995510 ,-88192.056842 ,-83148.833075 ,-77582.587173 ,-71750.440823 ,-65765.369857 ,-59716.101820 ,-53613.430067 ,-47473.832358 ,-41287.031890 ,-35139.919259 ,-29097.671507 ,-23178.836760 ,-17486.807388 ,-12046.775779 ,-6802.483422 ,-1867.556171 ,2644.380534 ,6615.829501 ,10332.557518 ,13706.737038 ,17017.991307 ,20303.136670 ,23507.386461 ,26482.194102 ,29698.585356 ,33196.305757 ,37385.914179 ,42872.996212 ,48725.617879 ,54564.488527 ,60453.841604 ,66495.146265 ,72668.620416 ,78723.644870 ,84593.136677 ,89974.936239 ,93439.798630 ,95101.207834 ,94028.126381 ,89507.925620 ,80989.846001 ,66944.274744 ,47016.422041 ,19932.783790 ,-6198.433172 ,-32320.379400 ,-49822.852084 ,-60517.553414 ,-66860.548269 ,-70849.714105 ,-71058.721556 ,-67691.947812 ,-63130.703822 ,-57687.607311 
,-51916.952488 ,-45932.054982 ,-39834.909941 ,-33714.535713 ,-27564.443333 ,-21465.186188 ,-15469.326408 ,-9522.358787 ,-3588.742161 ,2221.802073 ,7758.244339 ,13020.269708 ,18198.562827 ,23211.338588 ,28051.699645 ,32708.577247 ,37413.795242 ,42181.401920 ,46462.499633 ,49849.582315 ,53026.578940 ,55930.600705 ,59432.642178 ,64027.356857 ,69126.843653 ,74620.328837 ,80372.056070 ,86348.152766 ,92468.907239 ,98568.998246 ,104669.511588 ,110445.790143 ,115394.348973 ,119477.553152 ,121528.574511 ,121973.674087 ,121048.017786 ,118021.473181 ,112151.993711 ,102195.999157 ,85972.731130 ,61224.719621 ,31949.279603 ,-3726.022971 ,-36485.298619 ,-67336.469799 ,-87799.366129 ,-98865.713558 ,-104103.651120 ,-105068.402300 ,-103415.820781 ,-99261.356633 ,-94281.850081 ,-88568.701325 ,-82625.711921 ,-76766.776770 ,-70998.803524 ,-65303.404499 ,-59719.198305 ,-54182.230439 ,-48662.904657 ,-43206.731668 ,-37732.701095 ,-32375.478519 ,-27167.508567 ,-22197.211891 ,-17722.869502 ,-13925.135219 ,-10737.893027 ,-8455.327914 ,-7067.008358 ,-7086.991191 ,-7527.693561 ,-8378.025732 ,-8629.383998 ,-7854.586079 ,-5853.040657 ,-1973.225485 ,2699.850783 ,8006.098287 ,13651.734934 ,19139.318072 ,24476.645420 ,29463.480336 ,33899.078820 ,37364.528796 ,38380.214949 ,37326.585649 ,33428.470616 ,27441.000494 ,21761.126583 ,15368.408081 ,7224.234078 ,-2702.217396 ,-14109.682505 ,-27390.915614 ,-38569.562393 ,-47875.155339 ,-53969.121872 ,-57703.473001 ,-57993.198171 ,-54908.391840 ,-50568.410328 ,-45247.622563 ,-39563.224328 ,-33637.786521 ,-27585.345413 ,-21572.074797 ,-15597.363909 ,-9577.429076 ,-3475.770622 ,2520.378408 ,8046.881775 ,13482.345595 ]
-
-beta_set = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]
-
-# return beta_set, deltaS, logIstar, pop
diff --git a/examples/gdp/disease_model.py b/examples/gdp/disease_model.py
index 7d47f5a3182..8695c850023 100644
--- a/examples/gdp/disease_model.py
+++ b/examples/gdp/disease_model.py
@@ -2,8 +2,8 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
@@ -16,102 +16,112 @@
# ============================================
# import packages
-from pyomo.core import *
+from pyomo.environ import *
from pyomo.gdp import *
import math
-# import data
-from data_set import *
-#from new_data_set import *
-
-# declare model name
-model = AbstractModel()
-
-# declare constants
-bpy = 26 # biweeks per year
-years = 15 # years of data
-bigM = 50.0 # big M for disjunction constraints
-
-# declare sets
-model.S_meas = RangeSet(1,bpy*years)
-model.S_meas_small = RangeSet(1,bpy*years-1)
-model.S_beta = RangeSet(1,bpy)
-
-# define variable bounds
-def _gt_zero(m,i):
- return (0.0,1e7)
-def _beta_bounds(m):
- return (None,5.0)
-
-# define variables
-
-# log of estimated cases
-#model.logI = Var(model.S_meas, bounds=_gt_zero)
-model.logI = Var(model.S_meas, bounds=(0.001,1e7))
-# log of transmission parameter beta
-#model.logbeta = Var(model.S_beta, bounds=_gt_zero)
-model.logbeta = Var(model.S_beta, bounds=(0.0001,5))
-# binary variable y over all betas
-#model.y = Var(model.S_beta, within=Binary)
-# low value of beta
-#model.logbeta_low = Var(bounds=_beta_bounds)
-model.logbeta_low = Var(bounds=(0.0001,5))
-# high value of beta
-#model.logbeta_high = Var(bounds=_beta_bounds)
-model.logbeta_high = Var(bounds=(0.0001,5))
-# dummy variables
-model.p = Var(model.S_meas, bounds=_gt_zero)
-model.n = Var(model.S_meas, bounds=_gt_zero)
-
-# define indexed constants
-
-# log of measured cases after adjusting for underreporting
-logIstar = logIstar
-# changes in susceptible population profile from susceptible reconstruction
-deltaS = deltaS
-# mean susceptibles
-#meanS = 1.04e6
-meanS = 8.65e5
-# log of measured population
-logN = pop
-# define index for beta over all measurements
-beta_set = beta_set
-
-# define objective
-def _obj_rule(m):
- expr = sum(m.p[i] + m.n[i] for i in m.S_meas)
- return expr
-model.obj = Objective(rule=_obj_rule, sense=minimize)
-
-# define constraints
-def _logSIR(m,i):
- expr = m.logI[i+1] - ( m.logbeta[beta_set[i-1]] + m.logI[i] + math.log(deltaS[i-1] + meanS) - logN[i-1] )
- return (0.0, expr)
-model.logSIR = Constraint(model.S_meas_small, rule=_logSIR)
-
-# objective function constraint
-def _p_n_const(m,i):
- expr = logIstar[i-1] - m.logI[i] - m.p[i] + m.n[i]
- return (0.0, expr)
-model.p_n_const = Constraint(model.S_meas,rule=_p_n_const)
-
-# disjuncts
-
-model.BigM = Suffix()
-model.y = RangeSet(0,1)
-def _high_low(disjunct, i, y):
- model = disjunct.model()
- if y:
- disjunct.c = Constraint(expr=model.logbeta_high - model.logbeta[i]== 0.0)
- else:
- disjunct.c = Constraint(expr=model.logbeta[i] - model.logbeta_low == 0.0)
- model.BigM[disjunct.c] = bigM
-model.high_low = Disjunct(model.S_beta, model.y, rule=_high_low)
-
-# disjunctions
-def _disj(model, i):
- return [model.high_low[i,j] for j in model.y]
-model.disj = Disjunction(model.S_beta, rule=_disj)
+def build_model():
+ # import data
+ pop = [ 15.881351, 15.881339, 15.881320, 15.881294, 15.881261, 15.881223, 15.881180, 15.881132, 15.881079, 15.881022, 15.880961, 15.880898, 15.880832, 15.880764, 15.880695, 15.880624, 15.880553, 15.880480, 15.880409, 15.880340, 15.880270, 15.880203, 15.880138, 15.880076, 15.880016, 15.879960, 15.879907, 15.879852, 15.879799, 15.879746, 15.879693, 15.879638, 15.879585, 15.879531, 15.879477, 15.879423, 15.879370, 15.879315, 15.879262, 15.879209, 15.879155, 15.879101, 15.879048, 15.878994, 15.878940, 15.878886, 15.878833, 15.878778, 15.878725, 15.878672, 15.878618, 15.878564, 15.878510, 15.878457, 15.878402, 15.878349, 15.878295, 15.878242, 15.878187, 15.878134, 15.878081, 15.878026, 15.877973, 15.877919, 15.877864, 15.877811, 15.877758, 15.877704, 15.877650, 15.877596, 15.877543, 15.877488, 15.877435, 15.877381, 15.877326, 15.877273, 15.877220, 15.877166, 15.877111, 15.877058, 15.877005, 15.876950, 15.876896, 15.876843, 15.876789, 15.876735, 15.876681, 15.876628, 15.876573, 15.876520, 15.876466, 15.876411, 15.876358, 15.876304, 15.876251, 15.876196, 15.876143, 15.876089, 15.876034, 15.875981, 15.875927, 15.875872, 15.875819, 15.875765, 15.875712, 15.875657, 15.875604, 15.875550, 15.875495, 15.875442, 15.875388, 15.875335, 15.875280, 15.875226, 15.875173, 15.875118, 15.875064, 15.875011, 15.874956, 15.874902, 15.874849, 15.874795, 15.874740, 15.874687, 15.874633, 15.874578, 15.874525, 15.874471, 15.874416, 15.874363, 15.874309, 15.874256, 15.874201, 15.874147, 15.874094, 15.874039, 15.873985, 15.873931, 15.873878, 15.873823, 15.873769, 15.873716, 15.873661, 15.873607, 15.873554, 15.873499, 15.873445, 15.873391, 15.873338, 15.873283, 15.873229, 15.873175, 15.873121, 15.873067, 15.873013, 15.872960, 15.872905, 15.872851, 15.872797, 15.872742, 15.872689, 15.872635, 15.872580, 15.872526, 15.872473, 15.872419, 15.872364, 15.872310, 15.872256, 15.872202, 15.872148, 15.872094, 15.872039, 15.871985, 15.871932, 15.871878, 15.871823, 15.871769, 15.871715, 15.871660, 
15.871607, 15.871553, 15.871499, 15.871444, 15.871390, 15.871337, 15.871282, 15.871228, 15.871174, 15.871119, 15.871065, 15.871012, 15.870958, 15.870903, 15.870849, 15.870795, 15.870740, 15.870686, 15.870633, 15.870577, 15.870524, 15.870470, 15.870416, 15.870361, 15.870307, 15.870253, 15.870198, 15.870144, 15.870091, 15.870037, 15.869982, 15.869928, 15.869874, 15.869819, 15.869765, 15.869711, 15.869656, 15.869602, 15.869548, 15.869495, 15.869439, 15.869386, 15.869332, 15.869277, 15.869223, 15.869169, 15.869114, 15.869060, 15.869006, 15.868952, 15.868897, 15.868843, 15.868789, 15.868734, 15.868679, 15.868618, 15.868556, 15.868489, 15.868421, 15.868351, 15.868280, 15.868208, 15.868134, 15.868063, 15.867991, 15.867921, 15.867852, 15.867785, 15.867721, 15.867659, 15.867601, 15.867549, 15.867499, 15.867455, 15.867416, 15.867383, 15.867357, 15.867338, 15.867327, 15.867321, 15.867327, 15.867338, 15.867359, 15.867386, 15.867419, 15.867459, 15.867505, 15.867555, 15.867610, 15.867671, 15.867734, 15.867801, 15.867869, 15.867941, 15.868012, 15.868087, 15.868161, 15.868236, 15.868310, 15.868384, 15.868457, 15.868527, 15.868595, 15.868661, 15.868722, 15.868780, 15.868837, 15.868892, 15.868948, 15.869005, 15.869061, 15.869116, 15.869173, 15.869229, 15.869284, 15.869341, 15.869397, 15.869452, 15.869509, 15.869565, 15.869620, 15.869677, 15.869733, 15.869788, 15.869845, 15.869901, 15.869956, 15.870012, 15.870069, 15.870124, 15.870180, 15.870237, 15.870292, 15.870348, 15.870405, 15.870461, 15.870516, 15.870572, 15.870629, 15.870684, 15.870740, 15.870796, 15.870851, 15.870908, 15.870964, 15.871019, 15.871076, 15.871132, 15.871187, 15.871243, 15.871300, 15.871355, 15.871411, 15.871467, 15.871522, 15.871579, 15.871635, 15.871691, 15.871746, 15.871802, 15.871859, 15.871914, 15.871970, 15.872026, 15.872081, 15.872138, 15.872194, 15.872249, 15.872305, 15.872361, 15.872416, 15.872473, 15.872529, 15.872584, 15.872640, 15.872696, 15.872751, 15.872807, 15.872864, 15.872919, 15.872975, 
15.873031, 15.873087, 15.873142, 15.873198, 15.873255, 15.873310, 15.873366, 15.873422, 15.873477, 15.873533, 15.873589, 15.873644, 15.873700, 15.873757, 15.873811, 15.873868, 15.873924, 15.873979, 15.874035, 15.874091, 15.874146, 15.874202, 15.874258, 15.874313, 15.874369, 15.874425, 15.874481, 15.874536, 15.874592]
+
+ logIstar = [7.943245, 8.269994, 8.517212, 8.814208, 9.151740, 9.478472, 9.559847, 9.664087, 9.735378, 9.852583, 9.692265, 9.498807, 9.097634, 8.388878, 7.870516, 7.012956, 6.484941, 5.825368, 5.346815, 5.548361, 5.706732, 5.712617, 5.709714, 5.696888, 5.530087, 5.826563, 6.643563, 7.004292, 7.044663, 7.190259, 7.335926, 7.516861, 7.831779, 8.188895, 8.450204, 8.801436, 8.818379, 8.787658, 8.601685, 8.258338, 7.943364, 7.425585, 7.062834, 6.658307, 6.339600, 6.526984, 6.679178, 6.988758, 7.367331, 7.746694, 8.260558, 8.676522, 9.235582, 9.607778, 9.841917, 10.081571, 10.216090, 10.350366, 10.289668, 10.248842, 10.039504, 9.846343, 9.510392, 9.190923, 8.662465, 7.743221, 7.128458, 5.967898, 5.373883, 5.097497, 4.836570, 5.203345, 5.544798, 5.443047, 5.181152, 5.508669, 6.144130, 6.413744, 6.610423, 6.748885, 6.729511, 6.789841, 6.941034, 7.093516, 7.307039, 7.541077, 7.644803, 7.769145, 7.760187, 7.708017, 7.656795, 7.664983, 7.483828, 6.887324, 6.551093, 6.457449, 6.346064, 6.486300, 6.612378, 6.778753, 6.909477, 7.360570, 8.150303, 8.549044, 8.897572, 9.239323, 9.538751, 9.876531, 10.260911, 10.613536, 10.621510, 10.661115, 10.392899, 10.065536, 9.920090, 9.933097, 9.561691, 8.807713, 8.263463, 7.252184, 6.669083, 5.877763, 5.331878, 5.356563, 5.328469, 5.631146, 6.027497, 6.250717, 6.453919, 6.718444, 7.071636, 7.348905, 7.531528, 7.798226, 8.197941, 8.578809, 8.722964, 8.901152, 8.904370, 8.889865, 8.881902, 8.958903, 8.721281, 8.211509, 7.810624, 7.164607, 6.733688, 6.268503, 5.905983, 5.900432, 5.846547, 6.245427, 6.786271, 7.088480, 7.474295, 7.650063, 7.636703, 7.830990, 8.231516, 8.584816, 8.886908, 9.225216, 9.472778, 9.765505, 9.928623, 10.153033, 10.048574, 9.892620, 9.538818, 8.896100, 8.437584, 7.819738, 7.362598, 6.505880, 5.914972, 6.264584, 6.555019, 6.589319, 6.552029, 6.809771, 7.187616, 7.513918, 8.017712, 8.224957, 8.084474, 8.079148, 8.180991, 8.274269, 8.413748, 8.559599, 8.756090, 9.017927, 9.032720, 9.047983, 8.826873, 8.366489, 8.011876, 
7.500830, 7.140406, 6.812626, 6.538719, 6.552218, 6.540129, 6.659927, 6.728530, 7.179692, 7.989210, 8.399173, 8.781128, 9.122303, 9.396378, 9.698512, 9.990104, 10.276543, 10.357284, 10.465869, 10.253833, 10.018503, 9.738407, 9.484367, 9.087025, 8.526409, 8.041126, 7.147168, 6.626706, 6.209446, 5.867231, 5.697439, 5.536769, 5.421413, 5.238297, 5.470136, 5.863007, 6.183083, 6.603569, 6.906278, 7.092324, 7.326612, 7.576052, 7.823430, 7.922775, 8.041677, 8.063403, 8.073229, 8.099726, 8.168522, 8.099041, 8.011404, 7.753147, 6.945211, 6.524244, 6.557723, 6.497742, 6.256247, 5.988794, 6.268093, 6.583316, 7.106842, 8.053929, 8.508237, 8.938915, 9.311863, 9.619753, 9.931745, 10.182361, 10.420978, 10.390829, 10.389230, 10.079342, 9.741479, 9.444561, 9.237448, 8.777687, 7.976436, 7.451502, 6.742856, 6.271545, 5.782289, 5.403089, 5.341954, 5.243509, 5.522993, 5.897001, 6.047042, 6.100738, 6.361727, 6.849562, 7.112544, 7.185346, 7.309412, 7.423746, 7.532142, 7.510318, 7.480175, 7.726362, 8.061117, 8.127072, 8.206166, 8.029634, 7.592953, 7.304869, 7.005394, 6.750019, 6.461377, 6.226432, 6.287047, 6.306452, 6.783694, 7.450957, 7.861692, 8.441530, 8.739626, 8.921994, 9.168961, 9.428077, 9.711664, 10.032714, 10.349937, 10.483985, 10.647475, 10.574038, 10.522431, 10.192246, 9.756246, 9.342511, 8.872072, 8.414189, 7.606582, 7.084701, 6.149903, 5.517257, 5.839429, 6.098090, 6.268935, 6.475965, 6.560543, 6.598942, 6.693938, 6.802531, 6.934345, 7.078370, 7.267736, 7.569640, 7.872204, 8.083603, 8.331226, 8.527144, 8.773523, 8.836599, 8.894303, 8.808326, 8.641717, 8.397901, 7.849034, 7.482899, 7.050252, 6.714103, 6.900603, 7.050765, 7.322905, 7.637986, 8.024340, 8.614505, 8.933591, 9.244008, 9.427410, 9.401385, 9.457744, 9.585068, 9.699673, 9.785478, 9.884559, 9.769732, 9.655075, 9.423071, 9.210198, 8.786654, 8.061787, 7.560976, 6.855829, 6.390707, 5.904006, 5.526631, 5.712303, 5.867027, 5.768367, 5.523352, 5.909118, 6.745543, 6.859218 ]
+
+ deltaS = [ 9916.490263 ,12014.263380 ,13019.275755 ,12296.373612 ,8870.995603 ,1797.354574 ,-6392.880771 ,-16150.825387 ,-27083.245106 ,-40130.421462 ,-50377.169958 ,-57787.717468 ,-60797.223427 ,-59274.041897 ,-55970.213230 ,-51154.650927 ,-45877.841034 ,-40278.553775 ,-34543.967175 ,-28849.633641 ,-23192.776605 ,-17531.130740 ,-11862.021829 ,-6182.456792 ,-450.481090 ,5201.184400 ,10450.773882 ,15373.018272 ,20255.699431 ,24964.431669 ,29470.745887 ,33678.079947 ,37209.808930 ,39664.432393 ,41046.735479 ,40462.982011 ,39765.070209 ,39270.815830 ,39888.077002 ,42087.276604 ,45332.012929 ,49719.128772 ,54622.190928 ,59919.718626 ,65436.341097 ,70842.911460 ,76143.747430 ,81162.358574 ,85688.102884 ,89488.917734 ,91740.108470 ,91998.787916 ,87875.986012 ,79123.877908 ,66435.611045 ,48639.250610 ,27380.282817 ,2166.538464 ,-21236.428084 ,-43490.803535 ,-60436.624080 ,-73378.401966 ,-80946.278268 ,-84831.969493 ,-84696.627286 ,-81085.365407 ,-76410.847049 ,-70874.415387 ,-65156.276464 ,-59379.086883 ,-53557.267619 ,-47784.164830 ,-42078.001172 ,-36340.061427 ,-30541.788202 ,-24805.281435 ,-19280.817165 ,-13893.690606 ,-8444.172221 ,-3098.160839 ,2270.908649 ,7594.679295 ,12780.079247 ,17801.722109 ,22543.091206 ,26897.369814 ,31051.285734 ,34933.809557 ,38842.402859 ,42875.230152 ,47024.395356 ,51161.516122 ,55657.298307 ,60958.155424 ,66545.635029 ,72202.930397 ,77934.761905 ,83588.207792 ,89160.874522 ,94606.115027 ,99935.754968 ,104701.404975 ,107581.670606 ,108768.440311 ,107905.700480 ,104062.148863 ,96620.281684 ,83588.443029 ,61415.088182 ,27124.031692 ,-7537.285321 ,-43900.451653 ,-70274.062783 ,-87573.481475 ,-101712.148408 ,-116135.719087 ,-124187.225446 ,-124725.278371 ,-122458.145590 ,-117719.918256 ,-112352.138605 ,-106546.806030 ,-100583.803012 ,-94618.253238 ,-88639.090897 ,-82725.009842 ,-76938.910669 ,-71248.957807 ,-65668.352795 ,-60272.761991 ,-55179.538428 ,-50456.021161 ,-46037.728058 ,-42183.912670 ,-39522.184006 ,-38541.255303 ,-38383.665728 
,-39423.998130 ,-40489.466130 ,-41450.406768 ,-42355.156592 ,-43837.562085 ,-43677.262972 ,-41067.896944 ,-37238.628465 ,-32230.392026 ,-26762.766062 ,-20975.163308 ,-15019.218554 ,-9053.105545 ,-3059.663132 ,2772.399618 ,8242.538397 ,13407.752291 ,18016.047539 ,22292.125752 ,26616.583347 ,30502.564253 ,33153.890890 ,34216.684448 ,33394.220786 ,29657.417791 ,23064.375405 ,12040.831532 ,-2084.921068 ,-21390.235970 ,-38176.615985 ,-51647.714482 ,-59242.564959 ,-60263.150854 ,-58599.245165 ,-54804.972560 ,-50092.112608 ,-44465.812552 ,-38533.096297 ,-32747.104307 ,-27130.082610 ,-21529.632955 ,-15894.611939 ,-10457.566933 ,-5429.042583 ,-903.757828 ,2481.947589 ,5173.789976 ,8358.768202 ,11565.584635 ,14431.147931 ,16951.619820 ,18888.807708 ,20120.884465 ,20222.141242 ,18423.168124 ,16498.668271 ,14442.624242 ,14070.038273 ,16211.370808 ,19639.815904 ,24280.360465 ,29475.380079 ,35030.793540 ,40812.325095 ,46593.082382 ,52390.906885 ,58109.310860 ,63780.896094 ,68984.456561 ,72559.442320 ,74645.487900 ,74695.219755 ,72098.143876 ,66609.929889 ,56864.971296 ,41589.295266 ,19057.032104 ,-5951.329863 ,-34608.796853 ,-56603.801584 ,-72678.838057 ,-83297.070856 ,-90127.593511 ,-92656.040614 ,-91394.995510 ,-88192.056842 ,-83148.833075 ,-77582.587173 ,-71750.440823 ,-65765.369857 ,-59716.101820 ,-53613.430067 ,-47473.832358 ,-41287.031890 ,-35139.919259 ,-29097.671507 ,-23178.836760 ,-17486.807388 ,-12046.775779 ,-6802.483422 ,-1867.556171 ,2644.380534 ,6615.829501 ,10332.557518 ,13706.737038 ,17017.991307 ,20303.136670 ,23507.386461 ,26482.194102 ,29698.585356 ,33196.305757 ,37385.914179 ,42872.996212 ,48725.617879 ,54564.488527 ,60453.841604 ,66495.146265 ,72668.620416 ,78723.644870 ,84593.136677 ,89974.936239 ,93439.798630 ,95101.207834 ,94028.126381 ,89507.925620 ,80989.846001 ,66944.274744 ,47016.422041 ,19932.783790 ,-6198.433172 ,-32320.379400 ,-49822.852084 ,-60517.553414 ,-66860.548269 ,-70849.714105 ,-71058.721556 ,-67691.947812 ,-63130.703822 ,-57687.607311 
,-51916.952488 ,-45932.054982 ,-39834.909941 ,-33714.535713 ,-27564.443333 ,-21465.186188 ,-15469.326408 ,-9522.358787 ,-3588.742161 ,2221.802073 ,7758.244339 ,13020.269708 ,18198.562827 ,23211.338588 ,28051.699645 ,32708.577247 ,37413.795242 ,42181.401920 ,46462.499633 ,49849.582315 ,53026.578940 ,55930.600705 ,59432.642178 ,64027.356857 ,69126.843653 ,74620.328837 ,80372.056070 ,86348.152766 ,92468.907239 ,98568.998246 ,104669.511588 ,110445.790143 ,115394.348973 ,119477.553152 ,121528.574511 ,121973.674087 ,121048.017786 ,118021.473181 ,112151.993711 ,102195.999157 ,85972.731130 ,61224.719621 ,31949.279603 ,-3726.022971 ,-36485.298619 ,-67336.469799 ,-87799.366129 ,-98865.713558 ,-104103.651120 ,-105068.402300 ,-103415.820781 ,-99261.356633 ,-94281.850081 ,-88568.701325 ,-82625.711921 ,-76766.776770 ,-70998.803524 ,-65303.404499 ,-59719.198305 ,-54182.230439 ,-48662.904657 ,-43206.731668 ,-37732.701095 ,-32375.478519 ,-27167.508567 ,-22197.211891 ,-17722.869502 ,-13925.135219 ,-10737.893027 ,-8455.327914 ,-7067.008358 ,-7086.991191 ,-7527.693561 ,-8378.025732 ,-8629.383998 ,-7854.586079 ,-5853.040657 ,-1973.225485 ,2699.850783 ,8006.098287 ,13651.734934 ,19139.318072 ,24476.645420 ,29463.480336 ,33899.078820 ,37364.528796 ,38380.214949 ,37326.585649 ,33428.470616 ,27441.000494 ,21761.126583 ,15368.408081 ,7224.234078 ,-2702.217396 ,-14109.682505 ,-27390.915614 ,-38569.562393 ,-47875.155339 ,-53969.121872 ,-57703.473001 ,-57993.198171 ,-54908.391840 ,-50568.410328 ,-45247.622563 ,-39563.224328 ,-33637.786521 ,-27585.345413 ,-21572.074797 ,-15597.363909 ,-9577.429076 ,-3475.770622 ,2520.378408 ,8046.881775 ,13482.345595 ]
+
+ beta_set = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24, 25, 26]
+
+ #from new_data_set import *
+
+ # declare model name
+ model = ConcreteModel()
+
+ # declare constants
+ bpy = 26 # biweeks per year
+ years = 15 # years of data
+ bigM = 50.0 # big M for disjunction constraints
+
+ # declare sets
+ model.S_meas = RangeSet(1,bpy*years)
+ model.S_meas_small = RangeSet(1,bpy*years-1)
+ model.S_beta = RangeSet(1,bpy)
+
+ # define variable bounds
+ def _gt_zero(m,i):
+ return (0.0,1e7)
+ def _beta_bounds(m):
+ return (None,5.0)
+
+ # define variables
+
+ # log of estimated cases
+ #model.logI = Var(model.S_meas, bounds=_gt_zero)
+ model.logI = Var(model.S_meas, bounds=(0.001,1e7))
+ # log of transmission parameter beta
+ #model.logbeta = Var(model.S_beta, bounds=_gt_zero)
+ model.logbeta = Var(model.S_beta, bounds=(0.0001,5))
+ # binary variable y over all betas
+ #model.y = Var(model.S_beta, within=Binary)
+ # low value of beta
+ #model.logbeta_low = Var(bounds=_beta_bounds)
+ model.logbeta_low = Var(bounds=(0.0001,5))
+ # high value of beta
+ #model.logbeta_high = Var(bounds=_beta_bounds)
+ model.logbeta_high = Var(bounds=(0.0001,5))
+ # dummy variables
+ model.p = Var(model.S_meas, bounds=_gt_zero)
+ model.n = Var(model.S_meas, bounds=_gt_zero)
+
+ # define indexed constants
+
+ # log of measured cases after adjusting for underreporting
+ logIstar = logIstar
+ # changes in susceptible population profile from susceptible reconstruction
+ deltaS = deltaS
+ # mean susceptibles
+ #meanS = 1.04e6
+ meanS = 8.65e5
+ # log of measured population
+ logN = pop
+ # define index for beta over all measurements
+ beta_set = beta_set
+
+ # define objective
+ def _obj_rule(m):
+ expr = sum(m.p[i] + m.n[i] for i in m.S_meas)
+ return expr
+ model.obj = Objective(rule=_obj_rule, sense=minimize)
+
+ # define constraints
+ def _logSIR(m,i):
+ expr = m.logI[i+1] - ( m.logbeta[beta_set[i-1]] + m.logI[i] + math.log(deltaS[i-1] + meanS) - logN[i-1] )
+ return (0.0, expr)
+ model.logSIR = Constraint(model.S_meas_small, rule=_logSIR)
+
+ # objective function constraint
+ def _p_n_const(m,i):
+ expr = logIstar[i-1] - m.logI[i] - m.p[i] + m.n[i]
+ return (0.0, expr)
+ model.p_n_const = Constraint(model.S_meas,rule=_p_n_const)
+
+ # disjuncts
+
+ model.BigM = Suffix()
+ model.y = RangeSet(0,1)
+ def _high_low(disjunct, i, y):
+ model = disjunct.model()
+ if y:
+ disjunct.c = Constraint(expr=model.logbeta_high - model.logbeta[i]== 0.0)
+ else:
+ disjunct.c = Constraint(expr=model.logbeta[i] - model.logbeta_low == 0.0)
+ model.BigM[disjunct.c] = bigM
+ model.high_low = Disjunct(model.S_beta, model.y, rule=_high_low)
+
+ # disjunctions
+ def _disj(model, i):
+ return [model.high_low[i,j] for j in model.y]
+ model.disj = Disjunction(model.S_beta, rule=_disj)
+
+ return model
"""
@@ -137,3 +147,9 @@ def lowbeta_L(m,i):
return (0.0, expr, None)
model.lowbeta_L = Constraint(model.S_beta, rule=lowbeta_L)
"""
+
+if __name__ == "__main__":
+ m = build_model()
+ TransformationFactory('gdp.bigm').apply_to(m)
+ SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;'])
+ m.obj.display()
diff --git a/examples/gdp/jobshop-nodisjuncts.py b/examples/gdp/jobshop-nodisjuncts.py
index 354f6a39e29..ba7db448e2c 100644
--- a/examples/gdp/jobshop-nodisjuncts.py
+++ b/examples/gdp/jobshop-nodisjuncts.py
@@ -29,40 +29,42 @@
# Aldo Vecchietti, LogMIP User's Manual, http://www.logmip.ceride.gov.ar/, 2007
#
-model = AbstractModel()
+def build_model():
+ model = AbstractModel()
-model.JOBS = Set(ordered=True)
-model.STAGES = Set(ordered=True)
-model.I_BEFORE_K = RangeSet(0,1)
+ model.JOBS = Set(ordered=True)
+ model.STAGES = Set(ordered=True)
+ model.I_BEFORE_K = RangeSet(0,1)
-# Task durations
-model.tau = Param(model.JOBS, model.STAGES, default=0)
+ # Task durations
+ model.tau = Param(model.JOBS, model.STAGES, default=0)
-# Total Makespan (this will be the objective)
-model.ms = Var()
-# Start time of each job
-def t_bounds(model, I):
- return (0, sum(value(model.tau[idx]) for idx in model.tau))
-model.t = Var( model.JOBS, within=NonNegativeReals, bounds=t_bounds )
+ # Total Makespan (this will be the objective)
+ model.ms = Var()
+ # Start time of each job
+ def t_bounds(model, I):
+ return (0, sum(value(model.tau[idx]) for idx in model.tau))
+ model.t = Var( model.JOBS, within=NonNegativeReals, bounds=t_bounds )
-# Auto-generate the L set (potential collisions between 2 jobs at any stage.
-def _L_filter(model, I, K, J):
- return I < K and model.tau[I,J] and model.tau[K,J]
-model.L = Set( initialize=model.JOBS * model.JOBS * model.STAGES,
- dimen=3, filter=_L_filter)
+ # Auto-generate the L set (potential collisions between 2 jobs at any stage.
+ def _L_filter(model, I, K, J):
+ return I < K and model.tau[I,J] and model.tau[K,J]
+ model.L = Set( initialize=model.JOBS * model.JOBS * model.STAGES,
+ dimen=3, filter=_L_filter)
-# Makespan is greater than the start time of every job + that job's
-# total duration
-def _feas(model, I):
- return model.ms >= model.t[I] + sum(model.tau[I,M] for M in model.STAGES)
-model.Feas = Constraint(model.JOBS, rule=_feas)
+ # Makespan is greater than the start time of every job + that job's
+ # total duration
+ def _feas(model, I):
+ return model.ms >= model.t[I] + sum(model.tau[I,M] for M in model.STAGES)
+ model.Feas = Constraint(model.JOBS, rule=_feas)
-# Define the disjunctions: either job I occurs before K or K before I
-def _disj(model, I, K, J):
- lhs = model.t[I] + sum([M= model.t[I] + sum(model.tau[I,M] for M in model.STAGES)
-model.Feas = Constraint(model.JOBS, rule=_feas)
+ # Makespan is greater than the start time of every job + that job's
+ # total duration
+ def _feas(model, I):
+ return model.ms >= model.t[I] + sum(model.tau[I,M] for M in model.STAGES)
+ model.Feas = Constraint(model.JOBS, rule=_feas)
-# Disjunctions to prevent clashes at a stage: This creates a set of
-# disjunct pairs: one if job I occurs before job K and the other if job
-# K occurs before job I.
-def _NoClash(disjunct, I, K, J, IthenK):
- model = disjunct.model()
- lhs = model.t[I] + sum([M= model.DemandLB[j,t]
-model.demand_LB = Constraint(model.Products, model.TimePeriods, rule=demand_LB_rule)
-
-
-# FIXED PRICE CONTRACT
-
-# Disjunction for Fixed Price contract buying options
-def FP_contract_disjunct_rule(disjunct, j, t, buy):
- model = disjunct.model()
- if buy:
- disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] <= MAX_AMOUNT_FP)
- else:
- disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] == 0)
-model.FP_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
- model.BuyFPContract, rule=FP_contract_disjunct_rule)
-
-# Fixed price disjunction
-def FP_contract_rule(model, j, t):
- return [model.FP_contract_disjunct[j,t,buy] for buy in model.BuyFPContract]
-model.FP_disjunction = Disjunction(model.RawMaterials, model.TimePeriods,
- rule=FP_contract_rule)
-
-# cost constraint for fixed price contract (independent contraint)
-def FP_contract_cost_rule(model, j, t):
- return model.Cost_FP[j,t] == model.AmountPurchased_FP[j,t] * \
- model.Prices[j,t]
-model.FP_contract_cost = Constraint(model.RawMaterials, model.TimePeriods,
- rule=FP_contract_cost_rule)
-
-
-# DISCOUNT CONTRACT
-
-# Disjunction for Discount contract
-def discount_contract_disjunct_rule(disjunct, j, t, buy):
- model = disjunct.model()
- if buy == 'BelowMin':
- disjunct.belowMin = Constraint(
- expr=model.AmountPurchasedBelowMin_Discount[j,t] <= \
- model.MinAmount_Discount[j,t])
- disjunct.aboveMin = Constraint(
- expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0)
- elif buy == 'AboveMin':
- disjunct.belowMin = Constraint(
- expr=model.AmountPurchasedBelowMin_Discount[j,t] == \
- model.MinAmount_Discount[j,t])
- disjunct.aboveMin = Constraint(
- expr=model.AmountPurchasedAboveMin_Discount[j,t] >= 0)
- elif buy == 'NotSelected':
- disjunct.belowMin = Constraint(
- expr=model.AmountPurchasedBelowMin_Discount[j,t] == 0)
- disjunct.aboveMin = Constraint(
- expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0)
- else:
- raise RuntimeError("Unrecognized choice for discount contract: %s" % buy)
-model.discount_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
- model.BuyDiscountContract, rule=discount_contract_disjunct_rule)
-
-# Discount contract disjunction
-def discount_contract_rule(model, j, t):
- return [model.discount_contract_disjunct[j,t,buy] \
- for buy in model.BuyDiscountContract]
-model.discount_contract = Disjunction(model.RawMaterials, model.TimePeriods,
- rule=discount_contract_rule)
-
-# cost constraint for discount contract (independent constraint)
-def discount_cost_rule(model, j, t):
- return model.Cost_Discount[j,t] == model.RegPrice_Discount[j,t] * \
- model.AmountPurchasedBelowMin_Discount[j,t] + \
- model.DiscountPrice_Discount[j,t] * model.AmountPurchasedAboveMin_Discount[j,t]
-model.discount_cost = Constraint(model.RawMaterials, model.TimePeriods,
- rule=discount_cost_rule)
-
-
-# BULK CONTRACT
-
-# Bulk contract buying options disjunct
-def bulk_contract_disjunct_rule(disjunct, j, t, buy):
- model = disjunct.model()
- if buy == 'BelowMin':
- disjunct.amount = Constraint(
- expr=model.AmountPurchased_Bulk[j,t] <= model.MinAmount_Bulk[j,t])
- disjunct.price = Constraint(
- expr=model.Cost_Bulk[j,t] == model.RegPrice_Bulk[j,t] * \
- model.AmountPurchased_Bulk[j,t])
- elif buy == 'AboveMin':
- disjunct.amount = Constraint(
- expr=model.AmountPurchased_Bulk[j,t] >= model.MinAmount_Bulk[j,t])
- disjunct.price = Constraint(
- expr=model.Cost_Bulk[j,t] == model.DiscountPrice_Bulk[j,t] * \
- model.AmountPurchased_Bulk[j,t])
- elif buy == 'NotSelected':
- disjunct.amount = Constraint(expr=model.AmountPurchased_Bulk[j,t] == 0)
- disjunct.price = Constraint(expr=model.Cost_Bulk[j,t] == 0)
- else:
- raise RuntimeError("Unrecognized choice for bulk contract: %s" % buy)
-model.bulk_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
- model.BuyBulkContract, rule=bulk_contract_disjunct_rule)
-
-# Bulk contract disjunction
-def bulk_contract_rule(model, j, t):
- return [model.bulk_contract_disjunct[j,t,buy] for buy in model.BuyBulkContract]
-model.bulk_contract = Disjunction(model.RawMaterials, model.TimePeriods,
- rule=bulk_contract_rule)
-
-
-# FIXED DURATION CONTRACT
-
-def FD_1mo_contract(disjunct, j, t):
- model = disjunct.model()
- disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
- MIN_AMOUNT_FD_1MONTH)
- disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \
- model.Prices_Length[j,1,t] * model.AmountPurchased_FD[j,t])
-model.FD_1mo_contract = Disjunct(
- model.RawMaterials, model.TimePeriods, rule=FD_1mo_contract)
-
-def FD_2mo_contract(disjunct, j, t):
- model = disjunct.model()
- disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
- model.MinAmount_Length[j,2])
- disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \
- model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j,t])
- # only enforce these if we aren't in the last time period
- if t < model.TimePeriods[-1]:
- disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j, t+1] >= \
- model.MinAmount_Length[j,2])
- disjunct.price2 = Constraint(expr=model.Cost_FD[j,t+1] == \
- model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j, t+1])
-model.FD_2mo_contract = Disjunct(
- model.RawMaterials, model.TimePeriods, rule=FD_2mo_contract)
-
-def FD_3mo_contract(disjunct, j, t):
- model = disjunct.model()
- # NOTE: I think there is a mistake in the GAMS file in line 327.
- # they use the bulk minamount rather than the length one.
- #I am doing the same here for validation purposes.
- disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
- model.MinAmount_Bulk[j,3])
- disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == \
- model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t])
- # check we aren't in one of the last two time periods
- if t < model.TimePeriods[-1]:
- disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] >= \
- model.MinAmount_Length[j,3])
- disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == \
- model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+1])
- if t < model.TimePeriods[-2]:
- disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] >= \
- model.MinAmount_Length[j,3])
- disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == \
- model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+2])
-model.FD_3mo_contract = Disjunct(
- model.RawMaterials, model.TimePeriods, rule=FD_3mo_contract)
-
-def FD_no_contract(disjunct, j, t):
- model = disjunct.model()
- disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] == 0)
- disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == 0)
- if t < model.TimePeriods[-1]:
- disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] == 0)
- disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == 0)
- if t < model.TimePeriods[-2]:
- disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] == 0)
- disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == 0)
-model.FD_no_contract = Disjunct(
- model.RawMaterials, model.TimePeriods, rule=FD_no_contract)
-
-def FD_contract(model, j, t):
- return [ model.FD_1mo_contract[j,t], model.FD_2mo_contract[j,t],
- model.FD_3mo_contract[j,t], model.FD_no_contract[j,t], ]
-model.FD_contract = Disjunction(model.RawMaterials, model.TimePeriods,
- rule=FD_contract)
+from pyomo.environ import *
+from pyomo.gdp import *
+
+# Medium-term Purchasing Contracts problem from http://minlp.org/library/lib.php?lib=GDP
+# This model maximizes profit in a short-term horizon in which various contracts
+# are available for purchasing raw materials. The model decides inventory levels,
+# amounts to purchase, amount sold, and flows through the process nodes while
+# maximizing profit. The four different contracts available are:
+# FIXED PRICE CONTRACT: buy as much as you want at constant price
+# DISCOUNT CONTRACT: quantities below minimum amount cost RegPrice. Any additional quantity
+# above min amount costs DiscountPrice.
+# BULK CONTRACT: If more than min amount is purchased, whole purchase is at discount price.
+# FIXED DURATION CONTRACT: Depending on length of time contract is valid, there is a purchase
+# price during that time and min quantity that must be purchased
+
+
+# This version of the model is a literal transcription of what is in
+# ShortTermContractCH.gms from the website. Some data is hardcoded into this model,
+# most notably the process structure itself and the mass balance information.
+
+def build_model():
+ model = AbstractModel()
+
+ # Constants (data that was hard-coded in GAMS model)
+ AMOUNT_UB = 1000
+ COST_UB = 1e4
+ MAX_AMOUNT_FP = 1000
+ MIN_AMOUNT_FD_1MONTH = 0
+
+ RandomConst_Line264 = 0.17
+ RandomConst_Line265 = 0.83
+
+ ###################
+ # Sets
+ ###################
+
+ # T
+ # t in GAMS
+ model.TimePeriods = Set(ordered=True)
+
+ # Available length contracts
+ # p in GAMS
+ model.Contracts_Length = Set()
+
+ # JP
+ # final(j) in GAMS
+ # Finished products
+ model.Products = Set()
+
+ # JM
+ # rawmat(J) in GAMS
+ # Set of Raw Materials-- raw materials, intermediate products, and final products partition J
+ model.RawMaterials = Set()
+
+ # C
+ # c in GAMS
+ model.Contracts = Set()
+
+ # I
+ # i in GAMS
+ model.Processes = Set()
+
+ # J
+ # j in GAMS
+ model.Streams = Set()
+
+
+ ##################
+ # Parameters
+ ##################
+
+ # Q_it
+ # excap(i) in GAMS
+ model.Capacity = Param(model.Processes)
+
+ # u_ijt
+ # cov(i) in GAMS
+ model.ProcessConstants = Param(model.Processes)
+
+ # a_jt^U and d_jt^U
+ # spdm(j,t) in GAMS
+ model.SupplyAndDemandUBs = Param(model.Streams, model.TimePeriods, default=0)
+
+ # d_jt^L
+ # lbdm(j, t) in GAMS
+ model.DemandLB = Param(model.Streams, model.TimePeriods, default=0)
+
+ # delta_it
+ # delta(i, t) in GAMS
+ # operating cost of process i at time t
+ model.OperatingCosts = Param(model.Processes, model.TimePeriods)
+
+ # prices of raw materials under FP contract and selling prices of products
+ # pf(j, t) in GAMS
+ # omega_jt and pf_jt
+ model.Prices = Param(model.Streams, model.TimePeriods, default=0)
+
+ # Price for quantities less than min amount under discount contract
+ # pd1(j, t) in GAMS
+ model.RegPrice_Discount = Param(model.Streams, model.TimePeriods)
+ # Discounted price for the quantity purchased exceeding the min amount
+ # pd2(j,t0 in GAMS
+ model.DiscountPrice_Discount = Param(model.Streams, model.TimePeriods)
+
+ # Price for quantities below min amount
+ # pb1(j,t) in GAMS
+ model.RegPrice_Bulk = Param(model.Streams, model.TimePeriods)
+ # Price for quantities above min amount
+ # pb2(j, t) in GAMS
+ model.DiscountPrice_Bulk = Param(model.Streams, model.TimePeriods)
+
+ # prices with length contract
+ # pl(j, p, t) in GAMS
+ model.Prices_Length = Param(model.Streams, model.Contracts_Length, model.TimePeriods, default=0)
+
+ # sigmad_jt
+ # sigmad(j, t) in GAMS
+ # Minimum quantity of chemical j that must be bought before receiving a Discount under discount contract
+ model.MinAmount_Discount = Param(model.Streams, model.TimePeriods, default=0)
+
+ # min quantity to receive discount under bulk contract
+ # sigmab(j, t) in GAMS
+ model.MinAmount_Bulk = Param(model.Streams, model.TimePeriods, default=0)
+
+ # min quantity to receive discount under length contract
+ # sigmal(j, p) in GAMS
+ model.MinAmount_Length = Param(model.Streams, model.Contracts_Length, default=0)
+
+ # main products of process i
+ # These are 1 (true) if stream j is the main product of process i, false otherwise.
+ # jm(j, i) in GAMS
+ model.MainProducts = Param(model.Streams, model.Processes, default=0)
+
+ # theta_jt
+ # psf(j, t) in GAMS
+ # Shortfall penalty of product j at time t
+ model.ShortfallPenalty = Param(model.Products, model.TimePeriods)
+
+ # shortfall upper bound
+ # sfub(j, t) in GAMS
+ model.ShortfallUB = Param(model.Products, model.TimePeriods, default=0)
+
+ # epsilon_jt
+ # cinv(j, t) in GAMS
+ # inventory cost of material j at time t
+ model.InventoryCost = Param(model.Streams, model.TimePeriods)
+
+ # invub(j, t) in GAMS
+ # inventory upper bound
+ model.InventoryLevelUB = Param(model.Streams, model.TimePeriods, default=0)
+
+ ## UPPER BOUNDS HARDCODED INTO GAMS MODEL
+
+ # All of these upper bounds are hardcoded. So I am just leaving them that way.
+ # This means they all have to be the same as each other right now.
+ def getAmountUBs(model, j, t):
+ return AMOUNT_UB
+
+ def getCostUBs(model, j, t):
+ return COST_UB
+
+ model.AmountPurchasedUB_FP = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+ model.AmountPurchasedUB_Discount = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+ model.AmountPurchasedBelowMinUB_Discount = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+ model.AmountPurchasedAboveMinUB_Discount = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+ model.AmountPurchasedUB_FD = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+ model.AmountPurchasedUB_Bulk = Param(model.Streams, model.TimePeriods,
+ initialize=getAmountUBs)
+
+ model.CostUB_FP = Param(model.Streams, model.TimePeriods, initialize=getCostUBs)
+ model.CostUB_FD = Param(model.Streams, model.TimePeriods, initialize=getCostUBs)
+ model.CostUB_Discount = Param(model.Streams, model.TimePeriods, initialize=getCostUBs)
+ model.CostUB_Bulk = Param(model.Streams, model.TimePeriods, initialize=getCostUBs)
+
+
+ ####################
+ #VARIABLES
+ ####################
+
+ # prof in GAMS
+ # will be objective
+ model.Profit = Var()
+
+ # f(j, t) in GAMS
+ # mass flow rates in tons per time interval t
+ model.FlowRate = Var(model.Streams, model.TimePeriods, within=NonNegativeReals)
+
+ # V_jt
+ # inv(j, t) in GAMS
+ # inventory level of chemical j at time period t
+ def getInventoryBounds(model, i, j):
+ return (0, model.InventoryLevelUB[i,j])
+ model.InventoryLevel = Var(model.Streams, model.TimePeriods,
+ bounds=getInventoryBounds)
+
+ # SF_jt
+ # sf(j, t) in GAMS
+ # Shortfall of demand for chemical j at time period t
+ def getShortfallBounds(model, i, j):
+ return(0, model.ShortfallUB[i,j])
+ model.Shortfall = Var(model.Products, model.TimePeriods,
+ bounds=getShortfallBounds)
+
+
+ # amounts purchased under different contracts
+
+ # spf(j, t) in GAMS
+ # Amount of raw material j bought under fixed price contract at time period t
+ def get_FP_bounds(model, j, t):
+ return (0, model.AmountPurchasedUB_FP[j,t])
+ model.AmountPurchased_FP = Var(model.Streams, model.TimePeriods,
+ bounds=get_FP_bounds)
+
+ # spd(j, t) in GAMS
+ def get_Discount_Total_bounds(model, j, t):
+ return (0, model.AmountPurchasedUB_Discount[j,t])
+ model.AmountPurchasedTotal_Discount = Var(model.Streams, model.TimePeriods,
+ bounds=get_Discount_Total_bounds)
+
+ # Amount purchased below min amount for discount under discount contract
+ # spd1(j, t) in GAMS
+ def get_Discount_BelowMin_bounds(model, j, t):
+ return (0, model.AmountPurchasedBelowMinUB_Discount[j,t])
+ model.AmountPurchasedBelowMin_Discount = Var(model.Streams,
+ model.TimePeriods, bounds=get_Discount_BelowMin_bounds)
+
+ # spd2(j, t) in GAMS
+ # Amount purchased above min amount for discount under discount contract
+ def get_Discount_AboveMin_bounds(model, j, t):
+ return (0, model.AmountPurchasedBelowMinUB_Discount[j,t])
+ model.AmountPurchasedAboveMin_Discount = Var(model.Streams,
+ model.TimePeriods, bounds=get_Discount_AboveMin_bounds)
+
+ # Amount purchased under bulk contract
+ # spb(j, t) in GAMS
+ def get_bulk_bounds(model, j, t):
+ return (0, model.AmountPurchasedUB_Bulk[j,t])
+ model.AmountPurchased_Bulk = Var(model.Streams, model.TimePeriods,
+ bounds=get_bulk_bounds)
+
+ # spl(j, t) in GAMS
+ # Amount purchased under Fixed Duration contract
+ def get_FD_bounds(model, j, t):
+ return (0, model.AmountPurchasedUB_FD[j,t])
+ model.AmountPurchased_FD = Var(model.Streams, model.TimePeriods,
+ bounds=get_FD_bounds)
+
+
+ # costs
+
+ # costpl(j, t) in GAMS
+ # cost of variable length contract
+ def get_CostUBs_FD(model, j, t):
+ return (0, model.CostUB_FD[j,t])
+ model.Cost_FD = Var(model.Streams, model.TimePeriods, bounds=get_CostUBs_FD)
+
+ # costpf(j, t) in GAMS
+ # cost of fixed duration contract
+ def get_CostUBs_FP(model, j, t):
+ return (0, model.CostUB_FP[j,t])
+ model.Cost_FP = Var(model.Streams, model.TimePeriods, bounds=get_CostUBs_FP)
+
+ # costpd(j, t) in GAMS
+ # cost of discount contract
+ def get_CostUBs_Discount(model, j, t):
+ return (0, model.CostUB_Discount[j,t])
+ model.Cost_Discount = Var(model.Streams, model.TimePeriods,
+ bounds=get_CostUBs_Discount)
+
+ # costpb(j, t) in GAMS
+ # cost of bulk contract
+ def get_CostUBs_Bulk(model, j, t):
+ return (0, model.CostUB_Bulk[j,t])
+ model.Cost_Bulk = Var(model.Streams, model.TimePeriods, bounds=get_CostUBs_Bulk)
+
+
+ # binary variables
+
+ model.BuyFPContract = RangeSet(0,1)
+ model.BuyDiscountContract = Set(initialize=('BelowMin', 'AboveMin', 'NotSelected'))
+ model.BuyBulkContract = Set(initialize=('BelowMin', 'AboveMin', 'NotSelected'))
+ model.BuyFDContract = Set(initialize=('1Month', '2Month', '3Month', 'NotSelected'))
+
+
+ ################
+ # CONSTRAINTS
+ ################
+
+ # Objective: maximize profit
+ def profit_rule(model):
+ salesIncome = sum(model.Prices[j,t] * model.FlowRate[j,t]
+ for j in model.Products for t in model.TimePeriods)
+ purchaseCost = sum(model.Cost_FD[j,t]
+ for j in model.RawMaterials for t in model.TimePeriods) + \
+ sum(model.Cost_Discount[j,t]
+ for j in model.RawMaterials for t in model.TimePeriods) + \
+ sum(model.Cost_Bulk[j,t]
+ for j in model.RawMaterials for t in model.TimePeriods) + \
+ sum(model.Cost_FP[j,t]
+ for j in model.RawMaterials for t in model.TimePeriods)
+ productionCost = sum(model.OperatingCosts[i,t] * sum(model.FlowRate[j,t]
+ for j in model.Streams if model.MainProducts[j,i])
+ for i in model.Processes for t in model.TimePeriods)
+ shortfallCost = sum(model.Shortfall[j,t] * model.ShortfallPenalty[j, t]
+ for j in model.Products for t in model.TimePeriods)
+ inventoryCost = sum(model.InventoryCost[j,t] * model.InventoryLevel[j,t]
+ for j in model.Products for t in model.TimePeriods)
+ return salesIncome - purchaseCost - productionCost - inventoryCost - shortfallCost
+ model.profit = Objective(rule=profit_rule, sense=maximize)
+
+ # flow of raw materials is the total amount purchased (across all contracts)
+ def raw_material_flow_rule(model, j, t):
+ return model.FlowRate[j,t] == model.AmountPurchased_FD[j,t] + \
+ model.AmountPurchased_FP[j,t] + model.AmountPurchased_Bulk[j,t] + \
+ model.AmountPurchasedTotal_Discount[j,t]
+ model.raw_material_flow = Constraint(model.RawMaterials, model.TimePeriods,
+ rule=raw_material_flow_rule)
+
+ def discount_amount_total_rule(model, j, t):
+ return model.AmountPurchasedTotal_Discount[j,t] == \
+ model.AmountPurchasedBelowMin_Discount[j,t] + \
+ model.AmountPurchasedAboveMin_Discount[j,t]
+ model.discount_amount_total_rule = Constraint(model.RawMaterials, model.TimePeriods,
+ rule=discount_amount_total_rule)
+
+ # mass balance equations for each node
+ # these are specific to the process network in this example.
+ def mass_balance_rule1(model, t):
+ return model.FlowRate[1, t] == model.FlowRate[2, t] + model.FlowRate[3, t]
+ model.mass_balance1 = Constraint(model.TimePeriods, rule=mass_balance_rule1)
+
+ def mass_balance_rule2(model, t):
+ return model.FlowRate[5, t] == model.FlowRate[4, t] + model.FlowRate[8,t]
+ model.mass_balance2 = Constraint(model.TimePeriods, rule=mass_balance_rule2)
+
+ def mass_balance_rule3(model, t):
+ return model.FlowRate[6, t] == model.FlowRate[7, t]
+ model.mass_balance3 = Constraint(model.TimePeriods, rule=mass_balance_rule3)
+
+ def mass_balance_rule4(model, t):
+ return model.FlowRate[3, t] == 10*model.FlowRate[5, t]
+ model.mass_balance4 = Constraint(model.TimePeriods, rule=mass_balance_rule4)
+
+ # process input/output constraints
+ # these are also totally specific to the process network
+ def process_balance_rule1(model, t):
+ return model.FlowRate[9, t] == model.ProcessConstants[1] * model.FlowRate[2, t]
+ model.process_balance1 = Constraint(model.TimePeriods, rule=process_balance_rule1)
+
+ def process_balance_rule2(model, t):
+ return model.FlowRate[10, t] == model.ProcessConstants[2] * \
+ (model.FlowRate[5, t] + model.FlowRate[3, t])
+ model.process_balance2 = Constraint(model.TimePeriods, rule=process_balance_rule2)
+
+ def process_balance_rule3(model, t):
+ return model.FlowRate[8, t] == RandomConst_Line264 * \
+ model.ProcessConstants[3] * model.FlowRate[7, t]
+ model.process_balance3 = Constraint(model.TimePeriods, rule=process_balance_rule3)
+
+ def process_balance_rule4(model, t):
+ return model.FlowRate[11, t] == RandomConst_Line265 * \
+ model.ProcessConstants[3] * model.FlowRate[7, t]
+ model.process_balance4 = Constraint(model.TimePeriods, rule=process_balance_rule4)
+
+ # process capacity constraints
+ # these are hardcoded based on the three processes and the process flow structure
+ def process_capacity_rule1(model, t):
+ return model.FlowRate[9, t] <= model.Capacity[1]
+ model.process_capacity1 = Constraint(model.TimePeriods, rule=process_capacity_rule1)
+
+ def process_capacity_rule2(model, t):
+ return model.FlowRate[10, t] <= model.Capacity[2]
+ model.process_capacity2 = Constraint(model.TimePeriods, rule=process_capacity_rule2)
+
+ def process_capacity_rule3(model, t):
+ return model.FlowRate[11, t] + model.FlowRate[8, t] <= model.Capacity[3]
+ model.process_capacity3 = Constraint(model.TimePeriods, rule=process_capacity_rule3)
+
+ # Inventory balance of final products
+ # again, these are hardcoded.
+
+ def inventory_balance1(model, t):
+ prev = 0 if t == min(model.TimePeriods) else model.InventoryLevel[12, t-1]
+ return prev + model.FlowRate[9, t] == model.FlowRate[12, t] + model.InventoryLevel[12,t]
+ model.inventory_balance1 = Constraint(model.TimePeriods, rule=inventory_balance1)
+
+ def inventory_balance_rule2(model, t):
+ if t != 1:
+ return Constraint.Skip
+ return model.FlowRate[10, t] + model.FlowRate[11, t] == \
+ model.InventoryLevel[13,t] + model.FlowRate[13, t]
+ model.inventory_balance2 = Constraint(model.TimePeriods, rule=inventory_balance_rule2)
+
+ def inventory_balance_rule3(model, t):
+ if t <= 1:
+ return Constraint.Skip
+ return model.InventoryLevel[13, t-1] + model.FlowRate[10, t] + \
+ model.FlowRate[11,t] == model.InventoryLevel[13, t] + model.FlowRate[13, t]
+ model.inventory_balance3 = Constraint(model.TimePeriods, rule=inventory_balance_rule3)
+
+ # Max capacities of inventories
+ def inventory_capacity_rule(model, j, t):
+ return model.InventoryLevel[j,t] <= model.InventoryLevelUB[j,t]
+ model.inventory_capacity_rule = Constraint(model.Products, model.TimePeriods, rule=inventory_capacity_rule)
+
+ # Shortfall calculation
+ def shortfall_rule(model, j, t):
+ return model.Shortfall[j, t] == model.SupplyAndDemandUBs[j, t] - model.FlowRate[j,t]
+ model.shortfall = Constraint(model.Products, model.TimePeriods, rule=shortfall_rule)
+
+ # maximum shortfall allowed
+ def shortfall_max_rule(model, j, t):
+ return model.Shortfall[j, t] <= model.ShortfallUB[j, t]
+ model.shortfall_max = Constraint(model.Products, model.TimePeriods, rule=shortfall_max_rule)
+
+ # maximum capacities of suppliers
+ def supplier_capacity_rule(model, j, t):
+ return model.FlowRate[j, t] <= model.SupplyAndDemandUBs[j, t]
+ model.supplier_capacity = Constraint(model.RawMaterials, model.TimePeriods, rule=supplier_capacity_rule)
+
+ # demand upper bound
+ def demand_UB_rule(model, j, t):
+ return model.FlowRate[j, t] <= model.SupplyAndDemandUBs[j,t]
+ model.demand_UB = Constraint(model.Products, model.TimePeriods, rule=demand_UB_rule)
+ # demand lower bound
+ def demand_LB_rule(model, j, t):
+ return model.FlowRate[j, t] >= model.DemandLB[j,t]
+ model.demand_LB = Constraint(model.Products, model.TimePeriods, rule=demand_LB_rule)
+
+
+ # FIXED PRICE CONTRACT
+
+ # Disjunction for Fixed Price contract buying options
+ def FP_contract_disjunct_rule(disjunct, j, t, buy):
+ model = disjunct.model()
+ if buy:
+ disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] <= MAX_AMOUNT_FP)
+ else:
+ disjunct.c = Constraint(expr=model.AmountPurchased_FP[j,t] == 0)
+ model.FP_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
+ model.BuyFPContract, rule=FP_contract_disjunct_rule)
+
+ # Fixed price disjunction
+ def FP_contract_rule(model, j, t):
+ return [model.FP_contract_disjunct[j,t,buy] for buy in model.BuyFPContract]
+ model.FP_disjunction = Disjunction(model.RawMaterials, model.TimePeriods,
+ rule=FP_contract_rule)
+
+ # cost constraint for fixed price contract (independent constraint)
+ def FP_contract_cost_rule(model, j, t):
+ return model.Cost_FP[j,t] == model.AmountPurchased_FP[j,t] * \
+ model.Prices[j,t]
+ model.FP_contract_cost = Constraint(model.RawMaterials, model.TimePeriods,
+ rule=FP_contract_cost_rule)
+
+
+ # DISCOUNT CONTRACT
+
+ # Disjunction for Discount contract
+ def discount_contract_disjunct_rule(disjunct, j, t, buy):
+ model = disjunct.model()
+ if buy == 'BelowMin':
+ disjunct.belowMin = Constraint(
+ expr=model.AmountPurchasedBelowMin_Discount[j,t] <= \
+ model.MinAmount_Discount[j,t])
+ disjunct.aboveMin = Constraint(
+ expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0)
+ elif buy == 'AboveMin':
+ disjunct.belowMin = Constraint(
+ expr=model.AmountPurchasedBelowMin_Discount[j,t] == \
+ model.MinAmount_Discount[j,t])
+ disjunct.aboveMin = Constraint(
+ expr=model.AmountPurchasedAboveMin_Discount[j,t] >= 0)
+ elif buy == 'NotSelected':
+ disjunct.belowMin = Constraint(
+ expr=model.AmountPurchasedBelowMin_Discount[j,t] == 0)
+ disjunct.aboveMin = Constraint(
+ expr=model.AmountPurchasedAboveMin_Discount[j,t] == 0)
+ else:
+ raise RuntimeError("Unrecognized choice for discount contract: %s" % buy)
+ model.discount_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
+ model.BuyDiscountContract, rule=discount_contract_disjunct_rule)
+
+ # Discount contract disjunction
+ def discount_contract_rule(model, j, t):
+ return [model.discount_contract_disjunct[j,t,buy] \
+ for buy in model.BuyDiscountContract]
+ model.discount_contract = Disjunction(model.RawMaterials, model.TimePeriods,
+ rule=discount_contract_rule)
+
+ # cost constraint for discount contract (independent constraint)
+ def discount_cost_rule(model, j, t):
+ return model.Cost_Discount[j,t] == model.RegPrice_Discount[j,t] * \
+ model.AmountPurchasedBelowMin_Discount[j,t] + \
+ model.DiscountPrice_Discount[j,t] * model.AmountPurchasedAboveMin_Discount[j,t]
+ model.discount_cost = Constraint(model.RawMaterials, model.TimePeriods,
+ rule=discount_cost_rule)
+
+
+ # BULK CONTRACT
+
+ # Bulk contract buying options disjunct
+ def bulk_contract_disjunct_rule(disjunct, j, t, buy):
+ model = disjunct.model()
+ if buy == 'BelowMin':
+ disjunct.amount = Constraint(
+ expr=model.AmountPurchased_Bulk[j,t] <= model.MinAmount_Bulk[j,t])
+ disjunct.price = Constraint(
+ expr=model.Cost_Bulk[j,t] == model.RegPrice_Bulk[j,t] * \
+ model.AmountPurchased_Bulk[j,t])
+ elif buy == 'AboveMin':
+ disjunct.amount = Constraint(
+ expr=model.AmountPurchased_Bulk[j,t] >= model.MinAmount_Bulk[j,t])
+ disjunct.price = Constraint(
+ expr=model.Cost_Bulk[j,t] == model.DiscountPrice_Bulk[j,t] * \
+ model.AmountPurchased_Bulk[j,t])
+ elif buy == 'NotSelected':
+ disjunct.amount = Constraint(expr=model.AmountPurchased_Bulk[j,t] == 0)
+ disjunct.price = Constraint(expr=model.Cost_Bulk[j,t] == 0)
+ else:
+ raise RuntimeError("Unrecognized choice for bulk contract: %s" % buy)
+ model.bulk_contract_disjunct = Disjunct(model.RawMaterials, model.TimePeriods,
+ model.BuyBulkContract, rule=bulk_contract_disjunct_rule)
+
+ # Bulk contract disjunction
+ def bulk_contract_rule(model, j, t):
+ return [model.bulk_contract_disjunct[j,t,buy] for buy in model.BuyBulkContract]
+ model.bulk_contract = Disjunction(model.RawMaterials, model.TimePeriods,
+ rule=bulk_contract_rule)
+
+
+ # FIXED DURATION CONTRACT
+
+ def FD_1mo_contract(disjunct, j, t):
+ model = disjunct.model()
+ disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
+ MIN_AMOUNT_FD_1MONTH)
+ disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \
+ model.Prices_Length[j,1,t] * model.AmountPurchased_FD[j,t])
+ model.FD_1mo_contract = Disjunct(
+ model.RawMaterials, model.TimePeriods, rule=FD_1mo_contract)
+
+ def FD_2mo_contract(disjunct, j, t):
+ model = disjunct.model()
+ disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
+ model.MinAmount_Length[j,2])
+ disjunct.price1 = Constraint(expr=model.Cost_FD[j,t] == \
+ model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j,t])
+ # only enforce these if we aren't in the last time period
+ if t < model.TimePeriods[-1]:
+ disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j, t+1] >= \
+ model.MinAmount_Length[j,2])
+ disjunct.price2 = Constraint(expr=model.Cost_FD[j,t+1] == \
+ model.Prices_Length[j,2,t] * model.AmountPurchased_FD[j, t+1])
+ model.FD_2mo_contract = Disjunct(
+ model.RawMaterials, model.TimePeriods, rule=FD_2mo_contract)
+
+ def FD_3mo_contract(disjunct, j, t):
+ model = disjunct.model()
+ # NOTE: I think there is a mistake in the GAMS file in line 327.
+ # they use the bulk minamount rather than the length one.
+ #I am doing the same here for validation purposes.
+ disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] >= \
+ model.MinAmount_Bulk[j,3])
+ disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == \
+ model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t])
+ # check we aren't in one of the last two time periods
+ if t < model.TimePeriods[-1]:
+ disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] >= \
+ model.MinAmount_Length[j,3])
+ disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == \
+ model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+1])
+ if t < model.TimePeriods[-2]:
+ disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] >= \
+ model.MinAmount_Length[j,3])
+ disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == \
+ model.Prices_Length[j,3,t] * model.AmountPurchased_FD[j,t+2])
+ model.FD_3mo_contract = Disjunct(
+ model.RawMaterials, model.TimePeriods, rule=FD_3mo_contract)
+
+ def FD_no_contract(disjunct, j, t):
+ model = disjunct.model()
+ disjunct.amount1 = Constraint(expr=model.AmountPurchased_FD[j,t] == 0)
+ disjunct.cost1 = Constraint(expr=model.Cost_FD[j,t] == 0)
+ if t < model.TimePeriods[-1]:
+ disjunct.amount2 = Constraint(expr=model.AmountPurchased_FD[j,t+1] == 0)
+ disjunct.cost2 = Constraint(expr=model.Cost_FD[j,t+1] == 0)
+ if t < model.TimePeriods[-2]:
+ disjunct.amount3 = Constraint(expr=model.AmountPurchased_FD[j,t+2] == 0)
+ disjunct.cost3 = Constraint(expr=model.Cost_FD[j,t+2] == 0)
+ model.FD_no_contract = Disjunct(
+ model.RawMaterials, model.TimePeriods, rule=FD_no_contract)
+
+ def FD_contract(model, j, t):
+ return [ model.FD_1mo_contract[j,t], model.FD_2mo_contract[j,t],
+ model.FD_3mo_contract[j,t], model.FD_no_contract[j,t], ]
+ model.FD_contract = Disjunction(model.RawMaterials, model.TimePeriods,
+ rule=FD_contract)
+
+ return model
+
+
+if __name__ == "__main__":
+ m = build_model().create_instance('medTermPurchasing_Literal_Hull.dat')
+ TransformationFactory('gdp.bigm').apply_to(m)
+ SolverFactory('gams').solve(m, solver='baron', tee=True, add_options=['option optcr=1e-6;'])
+ m.profit.display()
diff --git a/examples/gdp/medTermPurchasing_Literal_Chull.dat b/examples/gdp/medTermPurchasing_Literal_Chull.dat
index 68605cbe4a8..712bdcb77ac 100755
--- a/examples/gdp/medTermPurchasing_Literal_Chull.dat
+++ b/examples/gdp/medTermPurchasing_Literal_Chull.dat
@@ -1,535 +1,531 @@
-set TimePeriods := 1 2 3 4 5 6 ;
-set Processes := 1 2 3 ;
-set Streams := 1 2 3 4 5 6 7 8 9 10 11 12 13 ;
-set Products := 12 13 ;
-set RawMaterials := 1 4 6 ;
-set Contracts_Length := 1 2 3 4 ;
-set Contracts := 1 2 3 4 ;
-
-param Capacity :=
- 1 27
- 2 30
- 3 25
-;
-
-param ProcessConstants :=
- 1 0.9
- 2 0.85
- 3 0.8
-;
-
-param SupplyAndDemandUBs :=
- 1 1 100
- 1 2 100
- 1 3 100
- 1 4 100
- 1 5 100
- 1 6 100
- 4 1 30
- 4 2 30
- 4 3 30
- 4 4 30
- 4 5 30
- 4 6 30
- 6 1 100
- 6 2 100
- 6 3 100
- 6 4 100
- 6 5 100
- 6 6 100
- 12 1 20
- 12 2 25
- 12 3 22
- 12 4 30
- 12 5 28
- 12 6 26
- 13 1 51
- 13 2 50
- 13 3 53
- 13 4 60
- 13 5 59
- 13 6 50
-;
-
-
-
-#
-# JDS: Note that you can specify data as a table (similar to GAMS...
-#
-#param DemandLB :=
-# 12 1 5
-# 12 2 5
-# 12 3 5
-# 12 4 5
-# 12 5 5
-# 12 6 5
-# 13 1 5
-# 13 2 5
-# 13 3 5
-# 13 4 5
-# 13 5 5
-# 13 6 5
-#;
-
-param DemandLB:
- 1 2 3 4 5 6 :=
- 12 5 5 5 5 5 5
- 13 5 5 5 5 5 5 ;
-
-# TODO you are apparently here!!
-param OperatingCosts :=
- 1 2 3 4 5 6 :=
-
-
-param OperatingCosts :=
- 1 1 0.6
- 1 2 0.7
- 1 3 0.6
- 1 4 0.6
- 1 5 0.7
- 1 6 0.7
- 2 1 0.5
- 2 2 0.5
- 2 3 0.5
- 2 4 0.4
- 2 5 0.4
- 2 6 0.5
- 3 1 0.6
- 3 2 0.6
- 3 3 0.5
- 3 4 0.6
- 3 5 0.6
- 3 6 0.5
-;
-
-param Prices :=
- 1 1 2.2
- 1 2 2.4
- 1 3 2.4
- 1 4 2.3
- 1 5 2.2
- 1 6 2.2
- 4 1 1.9
- 4 2 2.4
- 4 3 2.4
- 4 4 2.2
- 4 5 2.1
- 4 6 2.1
- 6 1 5.2
- 6 2 5.7
- 6 3 5.5
- 6 4 5.4
- 6 5 5.7
- 6 6 5.7
- 12 1 22.1
- 12 2 23.9
- 12 3 24.4
- 12 4 22.7
- 12 5 27.9
- 12 6 23.6
- 13 1 20.5
- 13 2 21.5
- 13 3 24.5
- 13 4 21.2
- 13 5 22.8
- 13 6 24.9
-;
-
-param RegPrice_Discount :=
- 1 1 2.25
- 1 2 2.25
- 1 3 2.25
- 1 4 2.25
- 1 5 2.25
- 1 6 2.25
- 4 1 2.35
- 4 2 2.35
- 4 3 2.35
- 4 4 2.35
- 4 5 2.35
- 4 6 2.35
- 6 1 5.5
- 6 2 5.5
- 6 3 5.5
- 6 4 5.5
- 6 5 5.5
- 6 6 5.5
-;
-
-param DiscountPrice_Discount :=
- 1 1 2.15
- 1 2 2.15
- 1 3 2.15
- 1 4 2.15
- 1 5 2.15
- 1 6 2.15
- 4 1 2.1
- 4 2 2.1
- 4 3 2.1
- 4 4 2.1
- 4 5 2.1
- 4 6 2.1
- 6 1 5.3
- 6 2 5.3
- 6 3 5.3
- 6 4 5.3
- 6 5 5.3
- 6 6 5.3
-;
-
-param RegPrice_Bulk :=
- 1 1 2.3
- 1 2 2.3
- 1 3 2.3
- 1 4 2.3
- 1 5 2.3
- 1 6 2.3
- 4 1 2.35
- 4 2 2.35
- 4 3 2.35
- 4 4 2.35
- 4 5 2.35
- 4 6 2.35
- 6 1 5.5
- 6 2 5.5
- 6 3 5.5
- 6 4 5.5
- 6 5 5.5
- 6 6 5.5
-;
-
-param DiscountPrice_Bulk :=
- 1 1 2.1
- 1 2 2.1
- 1 3 2.1
- 1 4 2.1
- 1 5 2.1
- 1 6 2.1
- 4 1 2.0
- 4 2 2.0
- 4 3 2.0
- 4 4 2.0
- 4 5 2.0
- 4 6 2.0
- 6 1 5.25
- 6 2 5.25
- 6 3 5.25
- 6 4 5.25
- 6 5 5.25
- 6 6 5.25
-;
-
-param Prices_Length :=
- 1 1 1 2.25
- 1 1 2 2.25
- 1 1 3 2.25
- 1 1 4 2.25
- 1 1 5 2.25
- 1 1 6 2.25
- 4 1 1 2.35
- 4 1 2 2.35
- 4 1 3 2.35
- 4 1 4 2.35
- 4 1 5 2.35
- 4 1 6 2.35
- 6 1 1 5.5
- 6 1 2 5.5
- 6 1 3 5.5
- 6 1 4 5.5
- 6 1 5 5.5
- 6 1 6 5.5
- 1 2 1 2.2
- 1 2 2 2.2
- 1 2 3 2.2
- 1 2 4 2.2
- 1 2 5 2.2
- 1 2 6 2.2
- 4 2 1 2.25
- 4 2 2 2.25
- 4 2 3 2.25
- 4 2 4 2.25
- 4 2 5 2.25
- 4 2 6 2.25
- 6 2 1 5.4
- 6 2 2 5.4
- 6 2 3 5.4
- 6 2 4 5.4
- 6 2 5 5.4
- 6 2 6 5.4
- 1 3 1 2.15
- 1 3 2 2.15
- 1 3 3 2.15
- 1 3 4 2.15
- 1 3 5 2.15
- 1 3 6 2.15
- 4 3 1 2.15
- 4 3 2 2.15
- 4 3 3 2.15
- 4 3 4 2.15
- 4 3 5 2.15
- 4 3 6 2.15
- 6 3 1 5.3
- 6 3 2 5.3
- 6 3 3 5.3
- 6 3 4 5.3
- 6 3 5 5.3
- 6 3 6 5.3
-;
-
-param MinAmount_Discount :=
- 1 1 63
- 1 2 63
- 1 3 63
- 1 4 63
- 1 5 63
- 1 6 63
- 4 1 4
- 4 2 4
- 4 3 4
- 4 4 4
- 4 5 4
- 4 6 4
- 6 1 22
- 6 2 22
- 6 3 22
- 6 4 22
- 6 5 22
- 6 6 22
-;
-
-param MinAmount_Bulk :=
- 1 1 64
- 1 2 64
- 1 3 64
- 1 4 64
- 1 5 64
- 1 6 64
- 4 1 5
- 4 2 5
- 4 3 5
- 4 4 5
- 4 5 5
- 4 6 5
- 6 1 24
- 6 2 24
- 6 3 24
- 6 4 24
- 6 5 24
- 6 6 24
-;
-
-param MinAmount_Length :=
- 1 1 0
- 1 2 62
- 1 3 66
- 4 1 0
- 4 2 3
- 4 3 4
- 6 1 0
- 6 2 11
- 6 3 24
-;
-
-param MainProducts :=
- 9 1 1
- 9 2 0
- 9 3 0
- 10 1 0
- 10 2 1
- 10 3 0
- 11 1 0
- 11 2 0
- 11 3 1
-;
-
-param ShortfallPenalty :=
- 12 1 25
- 12 2 25
- 12 3 25
- 12 4 25
- 12 5 25
- 12 6 25
- 13 1 25
- 13 2 25
- 13 3 25
- 13 4 25
- 13 5 25
- 13 6 25
-;
-
-param ShortfallUB :=
- 12 1 10
- 12 2 10
- 12 3 10
- 12 4 10
- 12 5 10
- 12 6 10
- 13 1 10
- 13 2 10
- 13 3 10
- 13 4 10
- 13 5 10
- 13 6 10
-;
-
-param InventoryCost :=
- 1 1 0
- 1 2 0
- 1 3 0
- 1 4 0
- 1 5 0
- 1 6 0
- 2 1 0
- 2 2 0
- 2 3 0
- 2 4 0
- 2 5 0
- 2 6 0
- 3 1 0
- 3 2 0
- 3 3 0
- 3 4 0
- 3 5 0
- 3 6 0
- 4 1 0
- 4 2 0
- 4 3 0
- 4 4 0
- 4 5 0
- 4 6 0
- 5 1 0
- 5 2 0
- 5 3 0
- 5 4 0
- 5 5 0
- 5 6 0
- 6 1 0
- 6 2 0
- 6 3 0
- 6 4 0
- 6 5 0
- 6 6 0
- 7 1 0
- 7 2 0
- 7 3 0
- 7 4 0
- 7 5 0
- 7 6 0
- 8 1 0
- 8 2 0
- 8 3 0
- 8 4 0
- 8 5 0
- 8 6 0
- 9 1 0
- 9 2 0
- 9 3 0
- 9 4 0
- 9 5 0
- 9 6 0
- 10 1 0
- 10 2 0
- 10 3 0
- 10 4 0
- 10 5 0
- 10 6 0
- 11 1 0
- 11 2 0
- 11 3 0
- 11 4 0
- 11 5 0
- 11 6 0
- 12 1 1
- 12 2 1
- 12 3 1
- 12 4 1
- 12 5 1
- 12 6 1
- 13 1 1
- 13 2 1
- 13 3 1
- 13 4 1
- 13 5 1
- 13 6 1
-;
-
-param InventoryLevelUB :=
- 1 1 0
- 1 2 0
- 1 3 0
- 1 4 0
- 1 5 0
- 1 6 0
- 2 1 0
- 2 2 0
- 2 3 0
- 2 4 0
- 2 5 0
- 2 6 0
- 3 1 0
- 3 2 0
- 3 3 0
- 3 4 0
- 3 5 0
- 3 6 0
- 4 1 0
- 4 2 0
- 4 3 0
- 4 4 0
- 4 5 0
- 4 6 0
- 5 1 0
- 5 2 0
- 5 3 0
- 5 4 0
- 5 5 0
- 5 6 0
- 6 1 0
- 6 2 0
- 6 3 0
- 6 4 0
- 6 5 0
- 6 6 0
- 7 1 0
- 7 2 0
- 7 3 0
- 7 4 0
- 7 5 0
- 7 6 0
- 8 1 0
- 8 2 0
- 8 3 0
- 8 4 0
- 8 5 0
- 8 6 0
- 9 1 0
- 9 2 0
- 9 3 0
- 9 4 0
- 9 5 0
- 9 6 0
- 10 1 0
- 10 2 0
- 10 3 0
- 10 4 0
- 10 5 0
- 10 6 0
- 11 1 0
- 11 2 0
- 11 3 0
- 11 4 0
- 11 5 0
- 11 6 0
- 12 1 30
- 12 2 30
- 12 3 30
- 12 4 30
- 12 5 30
- 12 6 30
- 13 1 30
- 13 2 30
- 13 3 30
- 13 4 30
- 13 5 30
- 13 6 30
-;
+set TimePeriods := 1 2 3 4 5 6 ;
+set Processes := 1 2 3 ;
+set Streams := 1 2 3 4 5 6 7 8 9 10 11 12 13 ;
+set Products := 12 13 ;
+set RawMaterials := 1 4 6 ;
+set Contracts_Length := 1 2 3 4 ;
+set Contracts := 1 2 3 4 ;
+
+param Capacity :=
+ 1 27
+ 2 30
+ 3 25
+;
+
+param ProcessConstants :=
+ 1 0.9
+ 2 0.85
+ 3 0.8
+;
+
+param SupplyAndDemandUBs :=
+ 1 1 100
+ 1 2 100
+ 1 3 100
+ 1 4 100
+ 1 5 100
+ 1 6 100
+ 4 1 30
+ 4 2 30
+ 4 3 30
+ 4 4 30
+ 4 5 30
+ 4 6 30
+ 6 1 100
+ 6 2 100
+ 6 3 100
+ 6 4 100
+ 6 5 100
+ 6 6 100
+ 12 1 20
+ 12 2 25
+ 12 3 22
+ 12 4 30
+ 12 5 28
+ 12 6 26
+ 13 1 51
+ 13 2 50
+ 13 3 53
+ 13 4 60
+ 13 5 59
+ 13 6 50
+;
+
+
+
+#
+# JDS: Note that you can specify data as a table (similar to GAMS...
+#
+#param DemandLB :=
+# 12 1 5
+# 12 2 5
+# 12 3 5
+# 12 4 5
+# 12 5 5
+# 12 6 5
+# 13 1 5
+# 13 2 5
+# 13 3 5
+# 13 4 5
+# 13 5 5
+# 13 6 5
+#;
+
+param DemandLB:
+ 1 2 3 4 5 6 :=
+ 12 5 5 5 5 5 5
+ 13 5 5 5 5 5 5 ;
+
+
+param OperatingCosts :=
+ 1 1 0.6
+ 1 2 0.7
+ 1 3 0.6
+ 1 4 0.6
+ 1 5 0.7
+ 1 6 0.7
+ 2 1 0.5
+ 2 2 0.5
+ 2 3 0.5
+ 2 4 0.4
+ 2 5 0.4
+ 2 6 0.5
+ 3 1 0.6
+ 3 2 0.6
+ 3 3 0.5
+ 3 4 0.6
+ 3 5 0.6
+ 3 6 0.5
+;
+
+param Prices :=
+ 1 1 2.2
+ 1 2 2.4
+ 1 3 2.4
+ 1 4 2.3
+ 1 5 2.2
+ 1 6 2.2
+ 4 1 1.9
+ 4 2 2.4
+ 4 3 2.4
+ 4 4 2.2
+ 4 5 2.1
+ 4 6 2.1
+ 6 1 5.2
+ 6 2 5.7
+ 6 3 5.5
+ 6 4 5.4
+ 6 5 5.7
+ 6 6 5.7
+ 12 1 22.1
+ 12 2 23.9
+ 12 3 24.4
+ 12 4 22.7
+ 12 5 27.9
+ 12 6 23.6
+ 13 1 20.5
+ 13 2 21.5
+ 13 3 24.5
+ 13 4 21.2
+ 13 5 22.8
+ 13 6 24.9
+;
+
+param RegPrice_Discount :=
+ 1 1 2.25
+ 1 2 2.25
+ 1 3 2.25
+ 1 4 2.25
+ 1 5 2.25
+ 1 6 2.25
+ 4 1 2.35
+ 4 2 2.35
+ 4 3 2.35
+ 4 4 2.35
+ 4 5 2.35
+ 4 6 2.35
+ 6 1 5.5
+ 6 2 5.5
+ 6 3 5.5
+ 6 4 5.5
+ 6 5 5.5
+ 6 6 5.5
+;
+
+param DiscountPrice_Discount :=
+ 1 1 2.15
+ 1 2 2.15
+ 1 3 2.15
+ 1 4 2.15
+ 1 5 2.15
+ 1 6 2.15
+ 4 1 2.1
+ 4 2 2.1
+ 4 3 2.1
+ 4 4 2.1
+ 4 5 2.1
+ 4 6 2.1
+ 6 1 5.3
+ 6 2 5.3
+ 6 3 5.3
+ 6 4 5.3
+ 6 5 5.3
+ 6 6 5.3
+;
+
+param RegPrice_Bulk :=
+ 1 1 2.3
+ 1 2 2.3
+ 1 3 2.3
+ 1 4 2.3
+ 1 5 2.3
+ 1 6 2.3
+ 4 1 2.35
+ 4 2 2.35
+ 4 3 2.35
+ 4 4 2.35
+ 4 5 2.35
+ 4 6 2.35
+ 6 1 5.5
+ 6 2 5.5
+ 6 3 5.5
+ 6 4 5.5
+ 6 5 5.5
+ 6 6 5.5
+;
+
+param DiscountPrice_Bulk :=
+ 1 1 2.1
+ 1 2 2.1
+ 1 3 2.1
+ 1 4 2.1
+ 1 5 2.1
+ 1 6 2.1
+ 4 1 2.0
+ 4 2 2.0
+ 4 3 2.0
+ 4 4 2.0
+ 4 5 2.0
+ 4 6 2.0
+ 6 1 5.25
+ 6 2 5.25
+ 6 3 5.25
+ 6 4 5.25
+ 6 5 5.25
+ 6 6 5.25
+;
+
+param Prices_Length :=
+ 1 1 1 2.25
+ 1 1 2 2.25
+ 1 1 3 2.25
+ 1 1 4 2.25
+ 1 1 5 2.25
+ 1 1 6 2.25
+ 4 1 1 2.35
+ 4 1 2 2.35
+ 4 1 3 2.35
+ 4 1 4 2.35
+ 4 1 5 2.35
+ 4 1 6 2.35
+ 6 1 1 5.5
+ 6 1 2 5.5
+ 6 1 3 5.5
+ 6 1 4 5.5
+ 6 1 5 5.5
+ 6 1 6 5.5
+ 1 2 1 2.2
+ 1 2 2 2.2
+ 1 2 3 2.2
+ 1 2 4 2.2
+ 1 2 5 2.2
+ 1 2 6 2.2
+ 4 2 1 2.25
+ 4 2 2 2.25
+ 4 2 3 2.25
+ 4 2 4 2.25
+ 4 2 5 2.25
+ 4 2 6 2.25
+ 6 2 1 5.4
+ 6 2 2 5.4
+ 6 2 3 5.4
+ 6 2 4 5.4
+ 6 2 5 5.4
+ 6 2 6 5.4
+ 1 3 1 2.15
+ 1 3 2 2.15
+ 1 3 3 2.15
+ 1 3 4 2.15
+ 1 3 5 2.15
+ 1 3 6 2.15
+ 4 3 1 2.15
+ 4 3 2 2.15
+ 4 3 3 2.15
+ 4 3 4 2.15
+ 4 3 5 2.15
+ 4 3 6 2.15
+ 6 3 1 5.3
+ 6 3 2 5.3
+ 6 3 3 5.3
+ 6 3 4 5.3
+ 6 3 5 5.3
+ 6 3 6 5.3
+;
+
+param MinAmount_Discount :=
+ 1 1 63
+ 1 2 63
+ 1 3 63
+ 1 4 63
+ 1 5 63
+ 1 6 63
+ 4 1 4
+ 4 2 4
+ 4 3 4
+ 4 4 4
+ 4 5 4
+ 4 6 4
+ 6 1 22
+ 6 2 22
+ 6 3 22
+ 6 4 22
+ 6 5 22
+ 6 6 22
+;
+
+param MinAmount_Bulk :=
+ 1 1 64
+ 1 2 64
+ 1 3 64
+ 1 4 64
+ 1 5 64
+ 1 6 64
+ 4 1 5
+ 4 2 5
+ 4 3 5
+ 4 4 5
+ 4 5 5
+ 4 6 5
+ 6 1 24
+ 6 2 24
+ 6 3 24
+ 6 4 24
+ 6 5 24
+ 6 6 24
+;
+
+param MinAmount_Length :=
+ 1 1 0
+ 1 2 62
+ 1 3 66
+ 4 1 0
+ 4 2 3
+ 4 3 4
+ 6 1 0
+ 6 2 11
+ 6 3 24
+;
+
+param MainProducts :=
+ 9 1 1
+ 9 2 0
+ 9 3 0
+ 10 1 0
+ 10 2 1
+ 10 3 0
+ 11 1 0
+ 11 2 0
+ 11 3 1
+;
+
+param ShortfallPenalty :=
+ 12 1 25
+ 12 2 25
+ 12 3 25
+ 12 4 25
+ 12 5 25
+ 12 6 25
+ 13 1 25
+ 13 2 25
+ 13 3 25
+ 13 4 25
+ 13 5 25
+ 13 6 25
+;
+
+param ShortfallUB :=
+ 12 1 10
+ 12 2 10
+ 12 3 10
+ 12 4 10
+ 12 5 10
+ 12 6 10
+ 13 1 10
+ 13 2 10
+ 13 3 10
+ 13 4 10
+ 13 5 10
+ 13 6 10
+;
+
+param InventoryCost :=
+ 1 1 0
+ 1 2 0
+ 1 3 0
+ 1 4 0
+ 1 5 0
+ 1 6 0
+ 2 1 0
+ 2 2 0
+ 2 3 0
+ 2 4 0
+ 2 5 0
+ 2 6 0
+ 3 1 0
+ 3 2 0
+ 3 3 0
+ 3 4 0
+ 3 5 0
+ 3 6 0
+ 4 1 0
+ 4 2 0
+ 4 3 0
+ 4 4 0
+ 4 5 0
+ 4 6 0
+ 5 1 0
+ 5 2 0
+ 5 3 0
+ 5 4 0
+ 5 5 0
+ 5 6 0
+ 6 1 0
+ 6 2 0
+ 6 3 0
+ 6 4 0
+ 6 5 0
+ 6 6 0
+ 7 1 0
+ 7 2 0
+ 7 3 0
+ 7 4 0
+ 7 5 0
+ 7 6 0
+ 8 1 0
+ 8 2 0
+ 8 3 0
+ 8 4 0
+ 8 5 0
+ 8 6 0
+ 9 1 0
+ 9 2 0
+ 9 3 0
+ 9 4 0
+ 9 5 0
+ 9 6 0
+ 10 1 0
+ 10 2 0
+ 10 3 0
+ 10 4 0
+ 10 5 0
+ 10 6 0
+ 11 1 0
+ 11 2 0
+ 11 3 0
+ 11 4 0
+ 11 5 0
+ 11 6 0
+ 12 1 1
+ 12 2 1
+ 12 3 1
+ 12 4 1
+ 12 5 1
+ 12 6 1
+ 13 1 1
+ 13 2 1
+ 13 3 1
+ 13 4 1
+ 13 5 1
+ 13 6 1
+;
+
+param InventoryLevelUB :=
+ 1 1 0
+ 1 2 0
+ 1 3 0
+ 1 4 0
+ 1 5 0
+ 1 6 0
+ 2 1 0
+ 2 2 0
+ 2 3 0
+ 2 4 0
+ 2 5 0
+ 2 6 0
+ 3 1 0
+ 3 2 0
+ 3 3 0
+ 3 4 0
+ 3 5 0
+ 3 6 0
+ 4 1 0
+ 4 2 0
+ 4 3 0
+ 4 4 0
+ 4 5 0
+ 4 6 0
+ 5 1 0
+ 5 2 0
+ 5 3 0
+ 5 4 0
+ 5 5 0
+ 5 6 0
+ 6 1 0
+ 6 2 0
+ 6 3 0
+ 6 4 0
+ 6 5 0
+ 6 6 0
+ 7 1 0
+ 7 2 0
+ 7 3 0
+ 7 4 0
+ 7 5 0
+ 7 6 0
+ 8 1 0
+ 8 2 0
+ 8 3 0
+ 8 4 0
+ 8 5 0
+ 8 6 0
+ 9 1 0
+ 9 2 0
+ 9 3 0
+ 9 4 0
+ 9 5 0
+ 9 6 0
+ 10 1 0
+ 10 2 0
+ 10 3 0
+ 10 4 0
+ 10 5 0
+ 10 6 0
+ 11 1 0
+ 11 2 0
+ 11 3 0
+ 11 4 0
+ 11 5 0
+ 11 6 0
+ 12 1 30
+ 12 2 30
+ 12 3 30
+ 12 4 30
+ 12 5 30
+ 12 6 30
+ 13 1 30
+ 13 2 30
+ 13 3 30
+ 13 4 30
+ 13 5 30
+ 13 6 30
+;
diff --git a/examples/gdp/simple1.py b/examples/gdp/simple1.py
index 0536a86212c..2073a5bc4f3 100644
--- a/examples/gdp/simple1.py
+++ b/examples/gdp/simple1.py
@@ -7,28 +7,31 @@
from pyomo.core import *
from pyomo.gdp import *
-model = ConcreteModel()
-
-# x >= 0 _|_ y>=0
-model.x = Var(bounds=(0,None))
-model.y = Var(bounds=(0,None))
-
-# Two conditions
-def _d(disjunct, flag):
- model = disjunct.model()
- if flag:
- # x == 0
- disjunct.c = Constraint(expr=model.x == 0)
- else:
- # y == 0
- disjunct.c = Constraint(expr=model.y == 0)
-model.d = Disjunct([0,1], rule=_d)
-
-# Define the disjunction
-def _c(model):
- return [model.d[0], model.d[1]]
-model.c = Disjunction(rule=_c)
-
-model.C = Constraint(expr=model.x+model.y <= 1)
-
-model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
+def build_model():
+
+ model = ConcreteModel()
+
+ # x >= 0 _|_ y>=0
+ model.x = Var(bounds=(0,None))
+ model.y = Var(bounds=(0,None))
+
+ # Two conditions
+ def _d(disjunct, flag):
+ model = disjunct.model()
+ if flag:
+ # x == 0
+ disjunct.c = Constraint(expr=model.x == 0)
+ else:
+ # y == 0
+ disjunct.c = Constraint(expr=model.y == 0)
+ model.d = Disjunct([0,1], rule=_d)
+
+ # Define the disjunction
+ def _c(model):
+ return [model.d[0], model.d[1]]
+ model.c = Disjunction(rule=_c)
+
+ model.C = Constraint(expr=model.x+model.y <= 1)
+
+ model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
+ return model
diff --git a/examples/gdp/simple2.py b/examples/gdp/simple2.py
index 7bbcfd96b22..fbb3ffa190c 100644
--- a/examples/gdp/simple2.py
+++ b/examples/gdp/simple2.py
@@ -6,28 +6,30 @@
from pyomo.core import *
from pyomo.gdp import *
-model = ConcreteModel()
+def build_model():
+ model = ConcreteModel()
-# x >= 0 _|_ y>=0
-model.x = Var(bounds=(0,100))
-model.y = Var(bounds=(0,100))
+ # x >= 0 _|_ y>=0
+ model.x = Var(bounds=(0,100))
+ model.y = Var(bounds=(0,100))
-# Two conditions
-def _d(disjunct, flag):
- model = disjunct.model()
- if flag:
- # x == 0
- disjunct.c = Constraint(expr=model.x == 0)
- else:
- # y == 0
- disjunct.c = Constraint(expr=model.y == 0)
-model.d = Disjunct([0,1], rule=_d)
+ # Two conditions
+ def _d(disjunct, flag):
+ model = disjunct.model()
+ if flag:
+ # x == 0
+ disjunct.c = Constraint(expr=model.x == 0)
+ else:
+ # y == 0
+ disjunct.c = Constraint(expr=model.y == 0)
+ model.d = Disjunct([0,1], rule=_d)
-# Define the disjunction
-def _c(model):
- return [model.d[0], model.d[1]]
-model.c = Disjunction(rule=_c)
+ # Define the disjunction
+ def _c(model):
+ return [model.d[0], model.d[1]]
+ model.c = Disjunction(rule=_c)
-model.C = Constraint(expr=model.x+model.y <= 1)
+ model.C = Constraint(expr=model.x+model.y <= 1)
-model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
+ model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
+ return model
\ No newline at end of file
diff --git a/examples/gdp/simple3.py b/examples/gdp/simple3.py
index 4c90d646e71..73dc27be6a2 100644
--- a/examples/gdp/simple3.py
+++ b/examples/gdp/simple3.py
@@ -6,31 +6,33 @@
from pyomo.core import *
from pyomo.gdp import *
-model = ConcreteModel()
-
-# x >= 0 _|_ y>=0
-model.x = Var(bounds=(0,None))
-model.y = Var(bounds=(0,None))
-
-# Two conditions
-def _d(disjunct, flag):
- model = disjunct.model()
- if flag:
- # x == 0
- disjunct.c = Constraint(expr=model.x == 0)
- else:
- # y == 0
- disjunct.c = Constraint(expr=model.y == 0)
- disjunct.BigM = Suffix()
- disjunct.BigM[disjunct.c] = 1
-model.d = Disjunct([0,1], rule=_d)
-
-# Define the disjunction
-def _c(model):
- return [model.d[0], model.d[1]]
-model.c = Disjunction(rule=_c)
-
-model.C = Constraint(expr=model.x+model.y <= 1)
-
-model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
-
+def build_model():
+ model = ConcreteModel()
+
+ # x >= 0 _|_ y>=0
+ model.x = Var(bounds=(0,None))
+ model.y = Var(bounds=(0,None))
+
+ # Two conditions
+ def _d(disjunct, flag):
+ model = disjunct.model()
+ if flag:
+ # x == 0
+ disjunct.c = Constraint(expr=model.x == 0)
+ else:
+ # y == 0
+ disjunct.c = Constraint(expr=model.y == 0)
+ disjunct.BigM = Suffix()
+ disjunct.BigM[disjunct.c] = 1
+ model.d = Disjunct([0,1], rule=_d)
+
+ # Define the disjunction
+ def _c(model):
+ return [model.d[0], model.d[1]]
+ model.c = Disjunction(rule=_c)
+
+ model.C = Constraint(expr=model.x+model.y <= 1)
+
+ model.o = Objective(expr=2*model.x+3*model.y, sense=maximize)
+
+ return model
diff --git a/examples/gdp/small_lit/basic_step.py b/examples/gdp/small_lit/basic_step.py
index fd62921e06b..89cf0ffc0b0 100644
--- a/examples/gdp/small_lit/basic_step.py
+++ b/examples/gdp/small_lit/basic_step.py
@@ -39,14 +39,14 @@ def disjunctions(model,i):
def solve_base_model():
m_base = build_gdp_model()
- m_chull = TransformationFactory('gdp.chull').create_using(m_base)
+ m_hull = TransformationFactory('gdp.hull').create_using(m_base)
#m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100)
solver = SolverFactory('gams')
- solver.solve(m_chull, solver='baron')
- #m_chull.pprint()
- m_chull.objective.display()
- m_chull.x1.display()
- m_chull.x2.display()
+ solver.solve(m_hull, solver='baron')
+ #m_hull.pprint()
+ m_hull.objective.display()
+ m_hull.x1.display()
+ m_hull.x2.display()
def solve_basic_step_model():
@@ -57,7 +57,7 @@ def solve_basic_step_model():
#with open('pprint.log','w') as outputfile:
# m_base.disjunctions.pprint(outputfile)
- #m_bs_chull = TransformationFactory('gdp.chull').create_using(m_base)
+ #m_bs_hull = TransformationFactory('gdp.hull').create_using(m_base)
m_bigm = TransformationFactory('gdp.bigm').create_using(m_base, bigM=100)
m_bigm.pprint()
diff --git a/examples/gdp/small_lit/nonconvex_HEN.py b/examples/gdp/small_lit/nonconvex_HEN.py
index 1dd276d4dc7..1c3cb9f4e84 100644
--- a/examples/gdp/small_lit/nonconvex_HEN.py
+++ b/examples/gdp/small_lit/nonconvex_HEN.py
@@ -76,7 +76,7 @@ def exchanger_disjunction(m, disjctn):
# Decide whether to reformulate as MINLP and what method to use
reformulation = True
- reformulation_method = 'chull'
+ reformulation_method = 'hull'
model = build_gdp_model()
model.pprint()
@@ -84,8 +84,8 @@ def exchanger_disjunction(m, disjctn):
if reformulation:
if reformulation_method == 'bigm':
TransformationFactory('gdp.bigm').apply_to(model,bigM=600*(50**0.6)+2*46500)
- elif reformulation_method == 'chull':
- TransformationFactory('gdp.chull').apply_to(model)
+ elif reformulation_method == 'hull':
+ TransformationFactory('gdp.hull').apply_to(model)
res = SolverFactory('gams').solve(model, tee=True, solver='baron', add_options=['option optcr = 0;'], keepfiles=True)
else:
# Note: MC++ needs to be properly installed to use strategy GLOA
diff --git a/examples/gdp/strip_packing/strip_packing_8rect.py b/examples/gdp/strip_packing/strip_packing_8rect.py
index 7b7c0344459..9fb96500f03 100644
--- a/examples/gdp/strip_packing/strip_packing_8rect.py
+++ b/examples/gdp/strip_packing/strip_packing_8rect.py
@@ -88,6 +88,6 @@ def no_overlap(m, i, j):
if __name__ == "__main__":
model = build_rect_strip_packing_model()
- TransformationFactory('gdp.chull').apply_to(model)
+ TransformationFactory('gdp.hull').apply_to(model)
opt = SolverFactory('gurobi')
results = opt.solve(model, tee=True)
diff --git a/examples/pyomo/benders/subproblem.py b/examples/pyomo/benders/subproblem.py
index c1128f02921..11d5c9d5c88 100644
--- a/examples/pyomo/benders/subproblem.py
+++ b/examples/pyomo/benders/subproblem.py
@@ -29,15 +29,15 @@
# derived set containing all valid week indices and subsets of interest.
def weeks_rule(model):
- return set(sequence(model.T()))
+ return list(sequence(model.T()))
model.WEEKS = Set(initialize=weeks_rule, within=PositiveIntegers)
def two_plus_weeks_rule(model):
- return set(sequence(2, model.T()))
+ return list(sequence(2, model.T()))
model.TWOPLUSWEEKS = Set(initialize=two_plus_weeks_rule, within=PositiveIntegers)
def three_plus_weeks_rule(model):
- return set(sequence(3, model.T()))
+ return list(sequence(3, model.T()))
model.THREEPLUSWEEKS = Set(initialize=three_plus_weeks_rule, within=PositiveIntegers)
# tons per hour produced
diff --git a/examples/pyomo/tutorials/data.out b/examples/pyomo/tutorials/data.out
index abd03d4a1c6..d1353f87858 100644
--- a/examples/pyomo/tutorials/data.out
+++ b/examples/pyomo/tutorials/data.out
@@ -1,47 +1,65 @@
20 Set Declarations
- A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['A1', 'A2', 'A3']
- B : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- C : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- D : Dim=0, Dimen=2, Size=3, Domain=D_domain, Ordered=False, Bounds=None
- [('A1', 1), ('A2', 2), ('A3', 3)]
- D_domain : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- E : Dim=0, Dimen=3, Size=6, Domain=E_domain, Ordered=False, Bounds=None
- [('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A3')]
- E_domain : Dim=0, Dimen=3, Size=27, Domain=None, Ordered=False, Bounds=None
- Virtual
- E_domain_index_0 : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- F : Dim=1, Dimen=1, Size=9, Domain=None, ArraySize=3, Ordered=False, Bounds=None
- Key : Members
- A1 : [1, 3, 5]
- A2 : [2, 4, 6]
- A3 : [3, 5, 7]
- G : Dim=2, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- G_index : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- H : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['H1', 'H2', 'H3']
- I : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=None
- ['I1', 'I2', 'I3', 'I4']
- J : Dim=0, Dimen=2, Size=3, Domain=None, Ordered=False, Bounds=None
- [('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')]
- K : Dim=0, Dimen=2, Size=3, Domain=None, Ordered=False, Bounds=None
- [('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')]
- T_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- U_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- x : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['A1', 'A2', 'A3']
- y : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['A1', 'A2', 'A3']
- z : Dim=0, Dimen=2, Size=3, Domain=None, Ordered=False, Bounds=None
- [('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')]
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'A1', 'A2', 'A3'}
+ B : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ C : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ D : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : D_domain : 3 : {('A1', 1), ('A2', 2), ('A3', 3)}
+ D_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ E : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain : 6 : {('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A3')}
+ E_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain_index_0*A : 27 : {('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A1', 1, 'A3'), ('A1', 2, 'A1'), ('A1', 2, 'A2'), ('A1', 2, 'A3'), ('A1', 3, 'A1'), ('A1', 3, 'A2'), ('A1', 3, 'A3'), ('A2', 1, 'A1'), ('A2', 1, 'A2'), ('A2', 1, 'A3'), ('A2', 2, 'A1'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A2', 3, 'A1'), ('A2', 3, 'A2'), ('A2', 3, 'A3'), ('A3', 1, 'A1'), ('A3', 1, 'A2'), ('A3', 1, 'A3'), ('A3', 2, 'A1'), ('A3', 2, 'A2'), ('A3', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A2'), ('A3', 3, 'A3')}
+ E_domain_index_0 : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ F : Size=3, Index=A, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ A1 : 1 : Any : 3 : {1, 3, 5}
+ A2 : 1 : Any : 3 : {2, 4, 6}
+ A3 : 1 : Any : 3 : {3, 5, 7}
+ G : Size=0, Index=G_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ G_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ H : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'H1', 'H2', 'H3'}
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 4 : {'I1', 'I2', 'I3', 'I4'}
+ J : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 3 : {('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')}
+ K : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 3 : {('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')}
+ T_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*I : 12 : {('A1', 'I1'), ('A1', 'I2'), ('A1', 'I3'), ('A1', 'I4'), ('A2', 'I1'), ('A2', 'I2'), ('A2', 'I3'), ('A2', 'I4'), ('A3', 'I1'), ('A3', 'I2'), ('A3', 'I3'), ('A3', 'I4')}
+ U_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I*A : 12 : {('I1', 'A1'), ('I1', 'A2'), ('I1', 'A3'), ('I2', 'A1'), ('I2', 'A2'), ('I2', 'A3'), ('I3', 'A1'), ('I3', 'A2'), ('I3', 'A3'), ('I4', 'A1'), ('I4', 'A2'), ('I4', 'A3')}
+ x : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'A1', 'A2', 'A3'}
+ y : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'A1', 'A2', 'A3'}
+ z : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 3 : {('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')}
18 Param Declarations
M : Size=3, Index=K, Domain=Reals, Default=None, Mutable=False
diff --git a/examples/pyomo/tutorials/data.py b/examples/pyomo/tutorials/data.py
index 0196298a8cc..291dfa95f9f 100644
--- a/examples/pyomo/tutorials/data.py
+++ b/examples/pyomo/tutorials/data.py
@@ -2,132 +2,132 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-
-#
-# Imports
-#
-from pyomo.environ import *
-
-##
-## Using a Model
-##
-#
-# Pyomo makes a fundamental distinction between an abstract model and a
-# problem instance. The Pyomo AbstractModel() class is used to manage the
-# declaration of model components (e.g. sets and variables), and to
-# generate a problem instance.
-#
-model = AbstractModel()
-
-##
-## Declaring Sets
-##
-#
-# An unordered set of arbitrary objects
-#
-model.A = Set()
-#
-# An unordered set of numeric values
-#
-model.B = Set()
-#
-# A simple cross-product
-#
-model.C = model.A * model.B
-#
-# A simple cross-product loaded with a tabular data format
-#
-model.D = Set(within=model.A * model.B)
-#
-# A multiple cross-product
-#
-model.E = Set(within=model.A * model.B * model.A)
-
-#
-# An indexed set
-#
-model.F = Set(model.A)
-#
-# An indexed set
-#
-model.G = Set(model.A,model.B)
-#
-# A simple set
-#
-model.H = Set()
-#
-# A simple set
-#
-model.I = Set()
-#
-# A two-dimensional set
-#
-model.J = Set(dimen=2)
-#
-# A two-dimensional set
-#
-model.K = Set(dimen=2)
-
-##
-## Declaring Params
-##
-#
-#
-# A simple parameter
-#
-model.Z = Param()
-model.ZZ = Param()
-#
-# A single-dimension parameter
-#
-model.Y = Param(model.A)
-#
-# An example of initializing two single-dimension parameters together
-#
-model.X = Param(model.A)
-model.W = Param(model.A)
-#
-# Initializing a parameter with two indices
-#
-model.U = Param(model.I,model.A)
-model.T = Param(model.A,model.I)
-#
-# Initializing a parameter with missing data
-#
-model.S = Param(model.A)
-#
-# An example of initializing two single-dimension parameters together with
-# an index set
-#
-model.R = Param(model.H, within=Reals)
-model.Q = Param(model.H, within=Reals)
-#
-# An example of initializing parameters with a two-dimensional index set
-#
-model.P = Param(model.J, within=Reals)
-model.PP = Param(model.J, within=Reals)
-model.O = Param(model.J, within=Reals)
-
-model.z = Set(dimen=2)
-model.y = Set()
-model.x = Set()
-
-model.M = Param(model.K, within=Reals)
-model.N = Param(model.y, within=Reals)
-
-model.MM = Param(model.z)
-model.MMM = Param(model.z)
-model.NNN = Param(model.x)
-
-##
-## Process an input file and confirm that we get appropriate
-## set instances.
-##
-instance = model.create_instance("data.dat")
-instance.pprint()
-
+
+#
+# Imports
+#
+from pyomo.environ import *
+
+##
+## Using a Model
+##
+#
+# Pyomo makes a fundamental distinction between an abstract model and a
+# problem instance. The Pyomo AbstractModel() class is used to manage the
+# declaration of model components (e.g. sets and variables), and to
+# generate a problem instance.
+#
+model = AbstractModel()
+
+##
+## Declaring Sets
+##
+#
+# An unordered set of arbitrary objects
+#
+model.A = Set()
+#
+# An unordered set of numeric values
+#
+model.B = Set()
+#
+# A simple cross-product
+#
+model.C = model.A * model.B
+#
+# A simple cross-product loaded with a tabular data format
+#
+model.D = Set(within=model.A * model.B)
+#
+# A multiple cross-product
+#
+model.E = Set(within=model.A * model.B * model.A)
+
+#
+# An indexed set
+#
+model.F = Set(model.A)
+#
+# An indexed set
+#
+model.G = Set(model.A,model.B)
+#
+# A simple set
+#
+model.H = Set()
+#
+# A simple set
+#
+model.I = Set()
+#
+# A two-dimensional set
+#
+model.J = Set(dimen=2)
+#
+# A two-dimensional set
+#
+model.K = Set(dimen=2)
+
+##
+## Declaring Params
+##
+#
+#
+# A simple parameter
+#
+model.Z = Param()
+model.ZZ = Param()
+#
+# A single-dimension parameter
+#
+model.Y = Param(model.A)
+#
+# An example of initializing two single-dimension parameters together
+#
+model.X = Param(model.A)
+model.W = Param(model.A)
+#
+# Initializing a parameter with two indices
+#
+model.U = Param(model.I,model.A)
+model.T = Param(model.A,model.I)
+#
+# Initializing a parameter with missing data
+#
+model.S = Param(model.A)
+#
+# An example of initializing two single-dimension parameters together with
+# an index set
+#
+model.R = Param(model.H, within=Reals)
+model.Q = Param(model.H, within=Reals)
+#
+# An example of initializing parameters with a two-dimensional index set
+#
+model.P = Param(model.J, within=Reals)
+model.PP = Param(model.J, within=Reals)
+model.O = Param(model.J, within=Reals)
+
+model.z = Set(dimen=2)
+model.y = Set()
+model.x = Set()
+
+model.M = Param(model.K, within=Reals)
+model.N = Param(model.y, within=Reals)
+
+model.MM = Param(model.z)
+model.MMM = Param(model.z)
+model.NNN = Param(model.x)
+
+##
+## Process an input file and confirm that we get appropriate
+## set instances.
+##
+instance = model.create_instance("data.dat")
+instance.pprint()
+
diff --git a/examples/pyomo/tutorials/excel.out b/examples/pyomo/tutorials/excel.out
index 25dd28ddd69..5064d4fa511 100644
--- a/examples/pyomo/tutorials/excel.out
+++ b/examples/pyomo/tutorials/excel.out
@@ -1,36 +1,50 @@
16 Set Declarations
- A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['A1', 'A2', 'A3']
- B : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1.0, 3.0)
- [1.0, 2.0, 3.0]
- C : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- D : Dim=0, Dimen=2, Size=3, Domain=D_domain, Ordered=False, Bounds=None
- [('A1', 1.0), ('A2', 2.0), ('A3', 3.0)]
- D_domain : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- E : Dim=0, Dimen=3, Size=6, Domain=E_domain, Ordered=False, Bounds=None
- [('A1', 1.0, 'A1'), ('A1', 1.0, 'A2'), ('A2', 2.0, 'A2'), ('A2', 2.0, 'A3'), ('A3', 3.0, 'A1'), ('A3', 3.0, 'A3')]
- E_domain : Dim=0, Dimen=3, Size=27, Domain=None, Ordered=False, Bounds=None
- Virtual
- E_domain_index_0 : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- F : Dim=1, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- G : Dim=2, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- G_index : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- H : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['H1', 'H2', 'H3']
- I : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=None
- ['I1', 'I2', 'I3', 'I4']
- J : Dim=0, Dimen=2, Size=3, Domain=None, Ordered=False, Bounds=None
- [('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')]
- T_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- U_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'A1', 'A2', 'A3'}
+ B : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1.0, 2.0, 3.0}
+ C : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1.0), ('A1', 2.0), ('A1', 3.0), ('A2', 1.0), ('A2', 2.0), ('A2', 3.0), ('A3', 1.0), ('A3', 2.0), ('A3', 3.0)}
+ D : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : D_domain : 3 : {('A1', 1.0), ('A2', 2.0), ('A3', 3.0)}
+ D_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1.0), ('A1', 2.0), ('A1', 3.0), ('A2', 1.0), ('A2', 2.0), ('A2', 3.0), ('A3', 1.0), ('A3', 2.0), ('A3', 3.0)}
+ E : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain : 6 : {('A1', 1.0, 'A1'), ('A1', 1.0, 'A2'), ('A2', 2.0, 'A2'), ('A2', 2.0, 'A3'), ('A3', 3.0, 'A1'), ('A3', 3.0, 'A3')}
+ E_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain_index_0*A : 27 : {('A1', 1.0, 'A1'), ('A1', 1.0, 'A2'), ('A1', 1.0, 'A3'), ('A1', 2.0, 'A1'), ('A1', 2.0, 'A2'), ('A1', 2.0, 'A3'), ('A1', 3.0, 'A1'), ('A1', 3.0, 'A2'), ('A1', 3.0, 'A3'), ('A2', 1.0, 'A1'), ('A2', 1.0, 'A2'), ('A2', 1.0, 'A3'), ('A2', 2.0, 'A1'), ('A2', 2.0, 'A2'), ('A2', 2.0, 'A3'), ('A2', 3.0, 'A1'), ('A2', 3.0, 'A2'), ('A2', 3.0, 'A3'), ('A3', 1.0, 'A1'), ('A3', 1.0, 'A2'), ('A3', 1.0, 'A3'), ('A3', 2.0, 'A1'), ('A3', 2.0, 'A2'), ('A3', 2.0, 'A3'), ('A3', 3.0, 'A1'), ('A3', 3.0, 'A2'), ('A3', 3.0, 'A3')}
+ E_domain_index_0 : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1.0), ('A1', 2.0), ('A1', 3.0), ('A2', 1.0), ('A2', 2.0), ('A2', 3.0), ('A3', 1.0), ('A3', 2.0), ('A3', 3.0)}
+ F : Size=0, Index=A, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ G : Size=0, Index=G_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ G_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1.0), ('A1', 2.0), ('A1', 3.0), ('A2', 1.0), ('A2', 2.0), ('A2', 3.0), ('A3', 1.0), ('A3', 2.0), ('A3', 3.0)}
+ H : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'H1', 'H2', 'H3'}
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 4 : {'I1', 'I2', 'I3', 'I4'}
+ J : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 3 : {('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')}
+ T_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*I : 12 : {('A1', 'I1'), ('A1', 'I2'), ('A1', 'I3'), ('A1', 'I4'), ('A2', 'I1'), ('A2', 'I2'), ('A2', 'I3'), ('A2', 'I4'), ('A3', 'I1'), ('A3', 'I2'), ('A3', 'I3'), ('A3', 'I4')}
+ U_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I*A : 12 : {('I1', 'A1'), ('I1', 'A2'), ('I1', 'A3'), ('I2', 'A1'), ('I2', 'A2'), ('I2', 'A3'), ('I3', 'A1'), ('I3', 'A2'), ('I3', 'A3'), ('I4', 'A1'), ('I4', 'A2'), ('I4', 'A3')}
12 Param Declarations
O : Size=3, Index=J, Domain=Reals, Default=None, Mutable=False
diff --git a/examples/pyomo/tutorials/param.out b/examples/pyomo/tutorials/param.out
index 9b47661d979..57e6a752ea5 100644
--- a/examples/pyomo/tutorials/param.out
+++ b/examples/pyomo/tutorials/param.out
@@ -1,14 +1,19 @@
5 Set Declarations
- A : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=(2, 8)
- [2, 4, 6, 8]
- B : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- R_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- W_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- X_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 4 : {2, 4, 6, 8}
+ B : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ R_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(2, 1), (2, 2), (2, 3), (4, 1), (4, 2), (4, 3), (6, 1), (6, 2), (6, 3), (8, 1), (8, 2), (8, 3)}
+ W_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(2, 1), (2, 2), (2, 3), (4, 1), (4, 2), (4, 3), (6, 1), (6, 2), (6, 3), (8, 1), (8, 2), (8, 3)}
+ X_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(2, 1), (2, 2), (2, 3), (4, 1), (4, 2), (4, 3), (6, 1), (6, 2), (6, 3), (8, 1), (8, 2), (8, 3)}
9 Param Declarations
R : Size=12, Index=R_index, Domain=Any, Default=99.0, Mutable=False
diff --git a/examples/pyomo/tutorials/param.py b/examples/pyomo/tutorials/param.py
index 4d1b1192152..e45cdb5834a 100644
--- a/examples/pyomo/tutorials/param.py
+++ b/examples/pyomo/tutorials/param.py
@@ -2,141 +2,141 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-
-#
-# Imports
-#
-from pyomo.environ import *
-
-##
-## Setting up a Model
-##
-#
-# Create the model
-#
-model = AbstractModel()
-#
-# Create sets used to define parameters
-#
-model.A = Set()
-model.B = Set()
-
-##
-## Declaring Params
-##
-#
-#
-# A simple parameter
-#
-model.Z = Param()
-#
-# A single-dimension parameter
-#
-model.Y = Param(model.A)
-#
-# Initializing a parameter with two indices
-#
-model.X = Param(model.A,model.B)
-
-##
-## Parameter Data
-##
-#
-# A parameter can be constructed with the _initialize_ option, which is a
-# function that accepts the parameter indices and model and returns the value
-# of that parameter element:
-#
-def W_init(model, i, j):
- #
- # Create the value of model.W[i,j]
- #
- return i*j
-model.W = Param(model.A, model.B, initialize=W_init)
-#
-# Note that the parameter model.W is not created when this object is
-# constructed. Instead, W_init() is called during the construction of a
-# problem instance.
-#
-# The _initialize_ option can also be used to specify the values in
-# a parameter. These default values may be overriden by later construction
-# steps, or by data in an input file:
-#
-V_init={}
-V_init[1]=1
-V_init[2]=2
-V_init[3]=9
-model.V = Param(model.B, initialize=V_init)
-#
-# Note that parameter V is initialized with a dictionary, which maps
-# tuples from parameter indices to parameter values. Simple, unindexed
-# parameters can be initialized with a scalar value.
-#
-model.U = Param(initialize=9.9)
-#
-# Validation of parameter data is supported in two different ways. First,
-# the domain of feasible parameter values can be specified with the _within_
-# option:
-#
-model.T = Param(within=model.B)
-#
-# Note that the default domain for parameters is Reals, the set of floating
-# point values.
-#
-# Validation of parameter data can also be performed with the _validate_
-# option, which is a function that returns True if a parameter value is valid:
-#
-def S_validate(model, value):
- return value in model.A
-model.S = Param(validate=S_validate)
-
-##
-## Default Values
-##
-#
-# Pyomo assumes that parameter values are specified in a sparse manner. For
-# example, the instance Param(model.A,model.B) declares a parameter indexed
-# over sets A and B. However, not all of these values are necessarily
-# declared in a model. The default value for all parameters not declared
-# is zero. This default can be overriden with the _default_ option.
-#
-# The following example illustrates how a parameter can be declared where
-# every parameter value is nonzero, but the parameter is stored with a sparse
-# representation.
-#
-R_init={}
-R_init[2,1]=1
-R_init[2,2]=1
-R_init[2,3]=1
-model.R = Param(model.A, model.B, default=99.0, initialize=R_init)
-#
-# Note that the parameter default value can also be specified in an input
-# file. See data.dat for an example.
-#
-# Note that the explicit specification of a zero default changes Pyomo
-# behavior. For example, consider:
-#
-# model.a = Param(model.A, default=0.0)
-# model.b = Param(model.A)
-#
-# When model.a[x] is accessed and the index has not been explicitly initialized,
-# the value zero is returned. This is true whether or not the parameter has
-# been initialized with data. Thus, the specification of a default value
-# makes the parameter seem to be densely initialized.
-#
-# However, when model.b[x] is accessed and the
-# index has not been initialized, an error occurs (and a Python exception is
-# thrown). Since the user did not explicitly declare a default, Pyomo
-# treats the reference to model.b[x] as an error.
-#
-
-##
-## Process an input file and confirm that we get appropriate
-## parameter instances.
-##
-instance = model.create_instance("param.dat")
-instance.pprint()
+
+#
+# Imports
+#
+from pyomo.environ import *
+
+##
+## Setting up a Model
+##
+#
+# Create the model
+#
+model = AbstractModel()
+#
+# Create sets used to define parameters
+#
+model.A = Set()
+model.B = Set()
+
+##
+## Declaring Params
+##
+#
+#
+# A simple parameter
+#
+model.Z = Param()
+#
+# A single-dimension parameter
+#
+model.Y = Param(model.A)
+#
+# Initializing a parameter with two indices
+#
+model.X = Param(model.A,model.B)
+
+##
+## Parameter Data
+##
+#
+# A parameter can be constructed with the _initialize_ option, which is a
+# function that accepts the parameter indices and model and returns the value
+# of that parameter element:
+#
+def W_init(model, i, j):
+ #
+ # Create the value of model.W[i,j]
+ #
+ return i*j
+model.W = Param(model.A, model.B, initialize=W_init)
+#
+# Note that the parameter model.W is not created when this object is
+# constructed. Instead, W_init() is called during the construction of a
+# problem instance.
+#
+# The _initialize_ option can also be used to specify the values in
+# a parameter.  These default values may be overridden by later construction
+# steps, or by data in an input file:
+#
+V_init={}
+V_init[1]=1
+V_init[2]=2
+V_init[3]=9
+model.V = Param(model.B, initialize=V_init)
+#
+# Note that parameter V is initialized with a dictionary, which maps
+# tuples from parameter indices to parameter values. Simple, unindexed
+# parameters can be initialized with a scalar value.
+#
+model.U = Param(initialize=9.9)
+#
+# Validation of parameter data is supported in two different ways. First,
+# the domain of feasible parameter values can be specified with the _within_
+# option:
+#
+model.T = Param(within=model.B)
+#
+# Note that the default domain for parameters is Reals, the set of floating
+# point values.
+#
+# Validation of parameter data can also be performed with the _validate_
+# option, which is a function that returns True if a parameter value is valid:
+#
+def S_validate(model, value):
+ return value in model.A
+model.S = Param(validate=S_validate)
+
+##
+## Default Values
+##
+#
+# Pyomo assumes that parameter values are specified in a sparse manner. For
+# example, the instance Param(model.A,model.B) declares a parameter indexed
+# over sets A and B. However, not all of these values are necessarily
+# declared in a model. The default value for all parameters not declared
+# is zero.  This default can be overridden with the _default_ option.
+#
+# The following example illustrates how a parameter can be declared where
+# every parameter value is nonzero, but the parameter is stored with a sparse
+# representation.
+#
+R_init={}
+R_init[2,1]=1
+R_init[2,2]=1
+R_init[2,3]=1
+model.R = Param(model.A, model.B, default=99.0, initialize=R_init)
+#
+# Note that the parameter default value can also be specified in an input
+# file. See data.dat for an example.
+#
+# Note that the explicit specification of a zero default changes Pyomo
+# behavior. For example, consider:
+#
+# model.a = Param(model.A, default=0.0)
+# model.b = Param(model.A)
+#
+# When model.a[x] is accessed and the index has not been explicitly initialized,
+# the value zero is returned. This is true whether or not the parameter has
+# been initialized with data. Thus, the specification of a default value
+# makes the parameter seem to be densely initialized.
+#
+# However, when model.b[x] is accessed and the
+# index has not been initialized, an error occurs (and a Python exception is
+# thrown). Since the user did not explicitly declare a default, Pyomo
+# treats the reference to model.b[x] as an error.
+#
+
+##
+## Process an input file and confirm that we get appropriate
+## parameter instances.
+##
+instance = model.create_instance("param.dat")
+instance.pprint()
diff --git a/examples/pyomo/tutorials/set.out b/examples/pyomo/tutorials/set.out
index 549517e9116..b01b666c012 100644
--- a/examples/pyomo/tutorials/set.out
+++ b/examples/pyomo/tutorials/set.out
@@ -1,88 +1,113 @@
-27 Set Declarations
- A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- B : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=(2, 5)
- [2, 3, 4, 5]
- C : Dim=2, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- C_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- D : Dim=0, Dimen=1, Size=5, Domain=None, Ordered=False, Bounds=None
- Virtual
- E : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=None
- Virtual
- F : Dim=0, Dimen=1, Size=1, Domain=None, Ordered=False, Bounds=None
- Virtual
- G : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- Virtual
- H : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- Hsub : Dim=0, Dimen=2, Size=3, Domain=Hsub_domain, Ordered=False, Bounds=None
- [(1, 2), (1, 3), (3, 3)]
- Hsub_domain : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- I : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- J : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- [1, 4, 9]
- K : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 9)
- [1, 4, 9]
- K_2 : Dim=0, Dimen=2, Size=2, Domain=None, Ordered=False, Bounds=None
- [(1, 4), (9, 16)]
- L : Dim=0, Dimen=1, Size=2, Domain=A, Ordered=False, Bounds=(1, 3)
- [1, 3]
- M : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 3]
- N : Dim=0, Dimen=2, Size=0, Domain=N_domain, Ordered=False, Bounds=None
- []
- N_domain : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- O : Dim=0, Dimen=1, Size=0, Domain=None, Ordered=False, Bounds=None
- []
- P : Dim=2, Dimen=1, Size=196, Domain=None, ArraySize=16, Ordered=False, Bounds=None
- Key : Members
- (2, 2) : [0, 1, 2, 3]
- (2, 3) : [0, 1, 2, 3, 4, 5]
- (2, 4) : [0, 1, 2, 3, 4, 5, 6, 7]
- (2, 5) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
- (3, 2) : [0, 1, 2, 3, 4, 5]
- (3, 3) : [0, 1, 2, 3, 4, 5, 6, 7, 8]
- (3, 4) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
- (3, 5) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
- (4, 2) : [0, 1, 2, 3, 4, 5, 6, 7]
- (4, 3) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11]
- (4, 4) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]
- (4, 5) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
- (5, 2) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
- (5, 3) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14]
- (5, 4) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19]
- (5, 5) : [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24]
- P_index : Dim=0, Dimen=2, Size=16, Domain=None, Ordered=False, Bounds=None
- Virtual
- R : Dim=1, Dimen=1, Size=9, Domain=None, ArraySize=3, Ordered=False, Bounds=None
- Key : Members
- 2 : [1, 3, 5]
- 3 : [2, 4, 6]
- 4 : [3, 5, 7]
- S : Dim=1, Dimen=1, Size=4, Domain=A, ArraySize=2, Ordered=False, Bounds=None
- Key : Members
- 2 : [1, 3]
- 5 : [2, 3]
- T : Dim=1, Dimen=1, Size=4, Domain=None, ArraySize=2, Ordered=False, Bounds=None
- Key : Members
- 2 : [1, 3]
- 5 : [2, 3]
- U : Dim=0, Dimen=1, Size=5, Domain=None, Ordered=Insertion, Bounds=(1, 120)
- [1, 2, 6, 24, 120]
- V : Dim=1, Dimen=1, Size=20, Domain=None, ArraySize=4, Ordered=Insertion, Bounds=None
- Key : Members
- 1 : [1, 2, 3, 4, 5]
- 2 : [1, 3, 5, 7, 9]
- 3 : [1, 4, 7, 10, 13]
- 4 : [1, 5, 9, 13, 17]
+28 Set Declarations
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ B : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 4 : {2, 3, 4, 5}
+ C : Size=0, Index=C_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ C_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ D : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 1 : A | B : 5 : {1, 2, 3, 4, 5}
+ E : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 1 : B & A : 2 : {2, 3}
+ F : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 1 : A - B : 1 : {1,}
+ G : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 1 : A ^ B : 3 : {1, 4, 5}
+ H : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ Hsub : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Hsub_domain : 3 : {(1, 2), (1, 3), (3, 3)}
+ Hsub_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I_domain : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ I_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ J : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 4, 9}
+ K : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 4, 9}
+ K_2 : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 2 : {(1, 4), (9, 16)}
+ L : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : A : 2 : {1, 3}
+ M : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {1, 3}
+ N : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : N_domain : 0 : {}
+ N_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 12 : {(1, 2), (1, 3), (1, 4), (1, 5), (2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5)}
+ O : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : -- : Any : 0 : {}
+ P : Size=16, Index=P_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ (2, 2) : 1 : Any : 4 : {0, 1, 2, 3}
+ (2, 3) : 1 : Any : 6 : {0, 1, 2, 3, 4, 5}
+ (2, 4) : 1 : Any : 8 : {0, 1, 2, 3, 4, 5, 6, 7}
+ (2, 5) : 1 : Any : 10 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ (3, 2) : 1 : Any : 6 : {0, 1, 2, 3, 4, 5}
+ (3, 3) : 1 : Any : 9 : {0, 1, 2, 3, 4, 5, 6, 7, 8}
+ (3, 4) : 1 : Any : 12 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+ (3, 5) : 1 : Any : 15 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
+ (4, 2) : 1 : Any : 8 : {0, 1, 2, 3, 4, 5, 6, 7}
+ (4, 3) : 1 : Any : 12 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}
+ (4, 4) : 1 : Any : 16 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15}
+ (4, 5) : 1 : Any : 20 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
+ (5, 2) : 1 : Any : 10 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9}
+ (5, 3) : 1 : Any : 15 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14}
+ (5, 4) : 1 : Any : 20 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19}
+ (5, 5) : 1 : Any : 25 : {0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 19, 20, 21, 22, 23, 24}
+ P_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : B*B : 16 : {(2, 2), (2, 3), (2, 4), (2, 5), (3, 2), (3, 3), (3, 4), (3, 5), (4, 2), (4, 3), (4, 4), (4, 5), (5, 2), (5, 3), (5, 4), (5, 5)}
+ R : Size=3, Index=B, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ 2 : 1 : Any : 3 : {1, 3, 5}
+ 3 : 1 : Any : 3 : {2, 4, 6}
+ 4 : 1 : Any : 3 : {3, 5, 7}
+ S : Size=2, Index=B, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ 2 : 1 : A : 2 : {1, 3}
+ 5 : 1 : A : 2 : {2, 3}
+ T : Size=2, Index=B, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ 2 : 1 : Any : 2 : {1, 3}
+ 5 : 1 : Any : 2 : {2, 3}
+ U : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 5 : {1, 2, 6, 24, 120}
+ V : Size=4, Index=V_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ 1 : 1 : Any : 5 : {1, 2, 3, 4, 5}
+ 2 : 1 : Any : 5 : {1, 3, 5, 7, 9}
+ 3 : 1 : Any : 5 : {1, 4, 7, 10, 13}
+ 4 : 1 : Any : 5 : {1, 5, 9, 13, 17}
1 RangeSet Declarations
- V_index : Dim=0, Dimen=1, Size=4, Domain=Integers, Ordered=True, Bounds=(1, 4)
- Virtual
+ V_index : Dimen=1, Size=4, Bounds=(1, 4)
+ Key : Finite : Members
+ None : True : [1:4]
-28 Declarations: A B C_index C D E F G H Hsub_domain Hsub I J K K_2 L M N_domain N O P_index P R S T U V_index V
+29 Declarations: A B C_index C D E F G H Hsub_domain Hsub I_domain I J K K_2 L M N_domain N O P_index P R S T U V_index V
diff --git a/examples/pyomo/tutorials/set.py b/examples/pyomo/tutorials/set.py
index 511e8938e7f..ef7182df5b8 100644
--- a/examples/pyomo/tutorials/set.py
+++ b/examples/pyomo/tutorials/set.py
@@ -2,207 +2,206 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-
-#
-# Imports
-#
-from pyomo.environ import *
-
-##
-## Creating a model
-##
-model = AbstractModel()
-
-##
-## Declaring Sets
-##
-#
-# An unordered set of arbitrary objects can be defined by creating a Set()
-# object:
-#
-model.A = Set()
-#
-# An index set of sets can also be specified by providing sets as options
-# to the Set() object:
-#
-model.B = Set()
-model.C = Set(model.A,model.B)
-#
-# Set declarations can also use standard set operations to declare
-# a set in a constructive fashion:
-#
-model.D = model.A | model.B
-model.E = model.B & model.A
-model.F = model.A - model.B
-model.G = model.A ^ model.B
-#
-# Also, set cross-products can be specified as A*B
-#
-model.H = model.A * model.B
-#
-# Note that this is different from the following, which specifies that Hsub
-# is a subset of this cross-product.
-#
-model.Hsub = Set(within=model.A * model.B)
-
-##
-## Data for Simple Sets
-##
-#
-# A set can be constructed with the _initialize_ option, which is a function
-# that accepts the set indices and model and returns the value of that set
-# element:
-#
-def I_init(model):
- ans=[]
- for a in model.A:
- for b in model.B:
- ans.append( (a,b) )
- return ans
-model.I = model.A*model.B
-model.I.initialize = I_init
-#
-# Note that the set model.I is not created when this set object is
-# constructed. Instead, I_init() is called during the construction of a
-# problem instance.
-#
-# A set can also be explicitly constructed by add set elements:
-#
-model.J = Set()
-model.J.add(1,4,9)
-#
-# The _initialize_ option can also be used to specify the values in
-# a set. These default values may be overriden by later construction
-# steps, or by data in an input file:
-#
-model.K = Set(initialize=[1,4,9])
-model.K_2 = Set(initialize=[(1,4),(9,16)],dimen=2)
-#
-# Validation of set data is supported in two different ways. First, a
-# superset can be specified with the _within_ option:
-#
-model.L = Set(within=model.A)
-#
-# Validation of set data can also be performed with the _validate_ option,
-# which is a function that returns True if a data belongs in this set:
-#
-def M_validate(model, value):
- return value in model.A
-model.M = Set(validate=M_validate)
-#
-# Although the _within_ option is convenient, it can force the creation of
-# a temporary set. For example, consider the declaration
-#
-model.N = Set(within=model.A*model.B)
-#
-# In this example, the cross-product of sets A and B is needed to validate
-# the members of set C. Pyomo creates this set implicitly and uses
-# it for validation. By contrast, a simple validation function could be used
-# in this example, though with a less intuitive syntax:
-#
-def O_validate(model, value):
- return value[0] in model.A and value[1] in model.B
-model.O = Set(validate=O_validate)
-
-##
-## Data for Set Arrays
-##
-#
-# A set array can be constructed with the _initialize_ option, which is a
-# function that accepts the set indices and model and returns the set for that
-# array index:
-#
-def P_init(model, i, j):
- return range(0,i*j)
-model.P = Set(model.B,model.B)
-model.P.initialize = P_init
-#
-# A set array CANNOT be explicitly constructed by adding set elements
-# to individual arrays. For example, the following is invalid:
-#
-# model.Q = Set(model.B)
-# model.Q[2].add(4)
-# model.Q[4].add(16)
-#
-# The reason is that the line
-#
-# model.Q = Set(model.B)
-#
-# declares set Q with an abstract index set B. However, B is not initialized
-# until the 'model.create_instance()' call is executed at the end of this file. We
-# could, however, execute
-#
-# model.Q[2].add(4)
-# model.Q[4].add(16)
-#
-# after the execution of 'model.create_instance()'.
-#
-# The _initialize_ option can also be used to specify the values in
-# a set array. These default values are defined in a dictionary, which
-# specifies how each array element is initialized:
-#
-R_init={}
-R_init[2] = [1,3,5]
-R_init[3] = [2,4,6]
-R_init[4] = [3,5,7]
-model.R = Set(model.B,initialize=R_init)
-#
-# Validation of a set array is supported with the _within_ option. The
-# elements of all sets in the array must be in this set:
-#
-model.S = Set(model.B, within=model.A)
-#
-# Validation of set arrays can also be performed with the _validate_ option.
-# This is applied to all sets in the array:
-#
-def T_validate(model, value):
- return value in model.A
-model.T = Set(model.B, validate=M_validate)
-
-##
-## Set options
-##
-#
-# By default, sets are unordered. That is, the internal representation
-# may place the set elements in any order. In some cases, we need to know
-# the order in which set elements are declared. In such cases, we can declare
-# a set to be ordered with an additional constructor option.
-#
-# An ordered set can take a initialization function with an additional option
-# that specifies the index into the ordered set. In this case, the function is
-# called repeatedly to construct each element in the set:
-#
-def U_init(model, z):
- if z==6:
- return Set.End
- if z==1:
- return 1
- else:
- return model.U[z-1]*z
-model.U = Set(ordered=True, initialize=U_init)
-#
-# This example can be generalized to array sets. Note that in this case
-# we can use ordered sets to to index the array, thereby guaranteeing that
-# data has been filled. The following example illustrates the use of the
-# RangeSet(a,b) object, which generates an ordered set from 'a' to 'b'
-# (inclusive).
-#
-def V_init(model, z, i):
- if z==6:
- return Set.End
- if i==1:
- return z
- return model.V[i-1][z]+z-1
-model.V = Set(RangeSet(1,4), initialize=V_init, ordered=True)
-
-##
-## Process an input file and confirm that we get appropriate
-## set instances.
-##
-instance = model.create_instance("set.dat")
-instance.pprint()
+
+#
+# Imports
+#
+from pyomo.environ import *
+
+##
+## Creating a model
+##
+model = AbstractModel()
+
+##
+## Declaring Sets
+##
+#
+# An unordered set of arbitrary objects can be defined by creating a Set()
+# object:
+#
+model.A = Set()
+#
+# An index set of sets can also be specified by providing sets as options
+# to the Set() object:
+#
+model.B = Set()
+model.C = Set(model.A,model.B)
+#
+# Set declarations can also use standard set operations to declare
+# a set in a constructive fashion:
+#
+model.D = model.A | model.B
+model.E = model.B & model.A
+model.F = model.A - model.B
+model.G = model.A ^ model.B
+#
+# Also, set cross-products can be specified as A*B
+#
+model.H = model.A * model.B
+#
+# Note that this is different from the following, which specifies that Hsub
+# is a subset of this cross-product.
+#
+model.Hsub = Set(within=model.A * model.B)
+
+##
+## Data for Simple Sets
+##
+#
+# A set can be constructed with the _initialize_ option, which is a function
+# that accepts the set indices and model and returns the value of that set
+# element:
+#
+def I_init(model):
+ ans=[]
+ for a in model.A:
+ for b in model.B:
+ ans.append( (a,b) )
+ return ans
+model.I = Set(within=model.A*model.B, initialize=I_init)
+#
+# Note that the set model.I is not created when this set object is
+# constructed. Instead, I_init() is called during the construction of a
+# problem instance.
+#
+# A set can also be explicitly constructed by adding set elements:
+#
+model.J = Set()
+model.J.construct()
+model.J.add(1,4,9)
+#
+# The _initialize_ option can also be used to specify the values in
+# a set. These default values may be overridden by later construction
+# steps, or by data in an input file:
+#
+model.K = Set(initialize=[1,4,9])
+model.K_2 = Set(initialize=[(1,4),(9,16)],dimen=2)
+#
+# Validation of set data is supported in two different ways. First, a
+# superset can be specified with the _within_ option:
+#
+model.L = Set(within=model.A)
+#
+# Validation of set data can also be performed with the _validate_ option,
+# which is a function that returns True if a value belongs in this set:
+#
+def M_validate(model, value):
+ return value in model.A
+model.M = Set(validate=M_validate)
+#
+# Although the _within_ option is convenient, it can force the creation of
+# a temporary set. For example, consider the declaration
+#
+model.N = Set(within=model.A*model.B)
+#
+# In this example, the cross-product of sets A and B is needed to validate
+# the members of set N. Pyomo creates this set implicitly and uses
+# it for validation. By contrast, a simple validation function could be used
+# in this example, though with a less intuitive syntax:
+#
+def O_validate(model, value):
+ return value[0] in model.A and value[1] in model.B
+model.O = Set(validate=O_validate)
+
+##
+## Data for Set Arrays
+##
+#
+# A set array can be constructed with the _initialize_ option, which is a
+# function that accepts the set indices and model and returns the set for that
+# array index:
+#
+def P_init(model, i, j):
+ return range(0,i*j)
+model.P = Set(model.B,model.B,initialize=P_init)
+#
+# A set array CANNOT be explicitly constructed by adding set elements
+# to individual arrays. For example, the following is invalid:
+#
+# model.Q = Set(model.B)
+# model.Q[2].add(4)
+# model.Q[4].add(16)
+#
+# The reason is that the line
+#
+# model.Q = Set(model.B)
+#
+# declares set Q with an abstract index set B. However, B is not initialized
+# until the 'model.create_instance()' call is executed at the end of this file. We
+# could, however, execute
+#
+# model.Q[2].add(4)
+# model.Q[4].add(16)
+#
+# after the execution of 'model.create_instance()'.
+#
+# The _initialize_ option can also be used to specify the values in
+# a set array. These default values are defined in a dictionary, which
+# specifies how each array element is initialized:
+#
+R_init={}
+R_init[2] = [1,3,5]
+R_init[3] = [2,4,6]
+R_init[4] = [3,5,7]
+model.R = Set(model.B,initialize=R_init)
+#
+# Validation of a set array is supported with the _within_ option. The
+# elements of all sets in the array must be in this set:
+#
+model.S = Set(model.B, within=model.A)
+#
+# Validation of set arrays can also be performed with the _validate_ option.
+# This is applied to all sets in the array:
+#
+def T_validate(model, value):
+ return value in model.A
+model.T = Set(model.B, validate=M_validate)
+
+##
+## Set options
+##
+#
+# By default, sets are unordered. That is, the internal representation
+# may place the set elements in any order. In some cases, we need to know
+# the order in which set elements are declared. In such cases, we can declare
+# a set to be ordered with an additional constructor option.
+#
+# An ordered set can take an initialization function with an additional option
+# that specifies the index into the ordered set. In this case, the function is
+# called repeatedly to construct each element in the set:
+#
+def U_init(model, z):
+ if z==6:
+ return Set.End
+ if z==1:
+ return 1
+ else:
+ return model.U[z-1]*z
+model.U = Set(ordered=True, initialize=U_init)
+#
+# This example can be generalized to array sets. Note that in this case
+# we can use ordered sets to index the array, thereby guaranteeing that
+# data has been filled. The following example illustrates the use of the
+# RangeSet(a,b) object, which generates an ordered set from 'a' to 'b'
+# (inclusive).
+#
+def V_init(model, z, i):
+ if z==6:
+ return Set.End
+ if i==1:
+ return z
+ return model.V[i-1][z]+z-1
+model.V = Set(RangeSet(1,4), initialize=V_init, ordered=True)
+
+##
+## Process an input file and confirm that we get appropriate
+## set instances.
+##
+instance = model.create_instance("set.dat")
+instance.pprint()
diff --git a/examples/pyomo/tutorials/table.out b/examples/pyomo/tutorials/table.out
index c4feecd934d..1eba28afd19 100644
--- a/examples/pyomo/tutorials/table.out
+++ b/examples/pyomo/tutorials/table.out
@@ -1,36 +1,50 @@
16 Set Declarations
- A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['A1', 'A2', 'A3']
- B : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- C : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- D : Dim=0, Dimen=2, Size=3, Domain=D_domain, Ordered=False, Bounds=None
- [('A1', 1), ('A2', 2), ('A3', 3)]
- D_domain : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- E : Dim=0, Dimen=3, Size=6, Domain=E_domain, Ordered=False, Bounds=None
- [('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A3')]
- E_domain : Dim=0, Dimen=3, Size=27, Domain=None, Ordered=False, Bounds=None
- Virtual
- E_domain_index_0 : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- F : Dim=1, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- G : Dim=2, Dimen=1, Size=0, Domain=None, ArraySize=0, Ordered=False, Bounds=None
- Key : Members
- G_index : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
- H : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=None
- ['H1', 'H2', 'H3']
- I : Dim=0, Dimen=1, Size=4, Domain=None, Ordered=False, Bounds=None
- ['I1', 'I2', 'I3', 'I4']
- J : Dim=0, Dimen=2, Size=3, Domain=None, Ordered=False, Bounds=None
- [('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')]
- T_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
- U_index : Dim=0, Dimen=2, Size=12, Domain=None, Ordered=False, Bounds=None
- Virtual
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'A1', 'A2', 'A3'}
+ B : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ C : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ D : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : D_domain : 3 : {('A1', 1), ('A2', 2), ('A3', 3)}
+ D_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ E : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain : 6 : {('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A3')}
+ E_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 3 : E_domain_index_0*A : 27 : {('A1', 1, 'A1'), ('A1', 1, 'A2'), ('A1', 1, 'A3'), ('A1', 2, 'A1'), ('A1', 2, 'A2'), ('A1', 2, 'A3'), ('A1', 3, 'A1'), ('A1', 3, 'A2'), ('A1', 3, 'A3'), ('A2', 1, 'A1'), ('A2', 1, 'A2'), ('A2', 1, 'A3'), ('A2', 2, 'A1'), ('A2', 2, 'A2'), ('A2', 2, 'A3'), ('A2', 3, 'A1'), ('A2', 3, 'A2'), ('A2', 3, 'A3'), ('A3', 1, 'A1'), ('A3', 1, 'A2'), ('A3', 1, 'A3'), ('A3', 2, 'A1'), ('A3', 2, 'A2'), ('A3', 2, 'A3'), ('A3', 3, 'A1'), ('A3', 3, 'A2'), ('A3', 3, 'A3')}
+ E_domain_index_0 : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ F : Size=0, Index=A, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ G : Size=0, Index=G_index, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ G_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*B : 9 : {('A1', 1), ('A1', 2), ('A1', 3), ('A2', 1), ('A2', 2), ('A2', 3), ('A3', 1), ('A3', 2), ('A3', 3)}
+ H : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {'H1', 'H2', 'H3'}
+ I : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 4 : {'I1', 'I2', 'I3', 'I4'}
+ J : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : Any : 3 : {('A1', 'B1'), ('A2', 'B2'), ('A3', 'B3')}
+ T_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : A*I : 12 : {('A1', 'I1'), ('A1', 'I2'), ('A1', 'I3'), ('A1', 'I4'), ('A2', 'I1'), ('A2', 'I2'), ('A2', 'I3'), ('A2', 'I4'), ('A3', 'I1'), ('A3', 'I2'), ('A3', 'I3'), ('A3', 'I4')}
+ U_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I*A : 12 : {('I1', 'A1'), ('I1', 'A2'), ('I1', 'A3'), ('I2', 'A1'), ('I2', 'A2'), ('I2', 'A3'), ('I3', 'A1'), ('I3', 'A2'), ('I3', 'A3'), ('I4', 'A1'), ('I4', 'A2'), ('I4', 'A3')}
12 Param Declarations
O : Size=3, Index=J, Domain=Reals, Default=None, Mutable=False
diff --git a/examples/pysp/scripting/apps/compile_scenario_tree.py b/examples/pysp/scripting/apps/compile_scenario_tree.py
index 47d19c5306d..b3d0f9e289e 100644
--- a/examples/pysp/scripting/apps/compile_scenario_tree.py
+++ b/examples/pysp/scripting/apps/compile_scenario_tree.py
@@ -102,7 +102,6 @@ def _pickle_compiled_scenario(worker,
param._validate = None
for set_ in block.component_objects(Set):
set_.initialize = None
- set_.filter = None
for ba in block.component_objects(BuildAction):
ba._rule = None
diff --git a/examples/pysp/sizes/models/ReferenceModel.py b/examples/pysp/sizes/models/ReferenceModel.py
index 9abb37b6c66..df70a71290a 100644
--- a/examples/pysp/sizes/models/ReferenceModel.py
+++ b/examples/pysp/sizes/models/ReferenceModel.py
@@ -15,76 +15,76 @@
#Journal of Heuristics, 1996, Vol 2, Pages 111-128.
-from pyomo.core import *
+import pyomo.environ as pyo
#
# Model
#
-model = AbstractModel()
+model = pyo.AbstractModel()
#
# Parameters
#
# the number of product sizes.
-model.NumSizes = Param(within=NonNegativeIntegers)
+model.NumSizes = pyo.Param(within=pyo.NonNegativeIntegers)
# the set of sizes, labeled 1 through NumSizes.
def product_sizes_rule(model):
- return set(range(1, model.NumSizes()+1))
-model.ProductSizes = Set(initialize=product_sizes_rule)
+ return list(range(1, model.NumSizes()+1))
+model.ProductSizes = pyo.Set(initialize=product_sizes_rule)
# the deterministic demands for product at each size.
-model.DemandsFirstStage = Param(model.ProductSizes, within=NonNegativeIntegers)
-model.DemandsSecondStage = Param(model.ProductSizes, within=NonNegativeIntegers)
+model.DemandsFirstStage = pyo.Param(model.ProductSizes, within=pyo.NonNegativeIntegers)
+model.DemandsSecondStage = pyo.Param(model.ProductSizes, within=pyo.NonNegativeIntegers)
# the unit production cost at each size.
-model.UnitProductionCosts = Param(model.ProductSizes, within=NonNegativeReals)
+model.UnitProductionCosts = pyo.Param(model.ProductSizes, within=pyo.NonNegativeReals)
# the setup cost for producing any units of size i.
-model.SetupCosts = Param(model.ProductSizes, within=NonNegativeReals)
+model.SetupCosts = pyo.Param(model.ProductSizes, within=pyo.NonNegativeReals)
# the cost to reduce a unit i to a lower unit j.
-model.UnitReductionCost = Param(within=NonNegativeReals)
+model.UnitReductionCost = pyo.Param(within=pyo.NonNegativeReals)
# a cap on the overall production within any time stage.
-model.Capacity = Param(within=PositiveReals)
+model.Capacity = pyo.Param(within=pyo.PositiveReals)
# a derived set to constrain the NumUnitsCut variable domain.
# TBD: the (i,j) with i >= j set should be a generic utility.
def num_units_cut_domain_rule(model):
- ans = set()
- for i in range(1,model.NumSizes()+1):
- for j in range(1, i+1):
- ans.add((i,j))
- return ans
+ return ((i,j) for i in range(1,model.NumSizes()+1) for j in range(1,i+1))
-model.NumUnitsCutDomain = Set(initialize=num_units_cut_domain_rule, dimen=2)
+model.NumUnitsCutDomain = pyo.Set(initialize=num_units_cut_domain_rule, dimen=2)
#
# Variables
#
# are any products at size i produced?
-model.ProduceSizeFirstStage = Var(model.ProductSizes, domain=Boolean)
-model.ProduceSizeSecondStage = Var(model.ProductSizes, domain=Boolean)
+model.ProduceSizeFirstStage = pyo.Var(model.ProductSizes, domain=pyo.Boolean)
+model.ProduceSizeSecondStage = pyo.Var(model.ProductSizes, domain=pyo.Boolean)
# NOTE: The following (num-produced and num-cut) variables are implicitly integer
# under the normal cost objective, but with the PH cost objective, this isn't
# the case.
# the number of units at each size produced.
-model.NumProducedFirstStage = Var(model.ProductSizes, domain=NonNegativeIntegers, bounds=(0.0, model.Capacity))
-model.NumProducedSecondStage = Var(model.ProductSizes, domain=NonNegativeIntegers, bounds=(0.0, model.Capacity))
+model.NumProducedFirstStage = pyo.Var(model.ProductSizes, domain=pyo.NonNegativeIntegers, bounds=(0.0, model.Capacity))
+model.NumProducedSecondStage = pyo.Var(model.ProductSizes, domain=pyo.NonNegativeIntegers, bounds=(0.0, model.Capacity))
# the number of units of size i cut (down) to meet demand for units of size j.
-model.NumUnitsCutFirstStage = Var(model.NumUnitsCutDomain, domain=NonNegativeIntegers, bounds=(0.0, model.Capacity))
-model.NumUnitsCutSecondStage = Var(model.NumUnitsCutDomain, domain=NonNegativeIntegers, bounds=(0.0, model.Capacity))
+model.NumUnitsCutFirstStage = pyo.Var(model.NumUnitsCutDomain,
+ domain=pyo.NonNegativeIntegers,
+ bounds=(0.0, model.Capacity))
+model.NumUnitsCutSecondStage = pyo.Var(model.NumUnitsCutDomain,
+ domain=pyo.NonNegativeIntegers,
+ bounds=(0.0, model.Capacity))
# stage-specific cost variables for use in the pysp scenario tree / analysis.
-model.FirstStageCost = Var(domain=NonNegativeReals)
-model.SecondStageCost = Var(domain=NonNegativeReals)
+model.FirstStageCost = pyo.Var(domain=pyo.NonNegativeReals)
+model.SecondStageCost = pyo.Var(domain=pyo.NonNegativeReals)
#
# Constraints
@@ -97,8 +97,8 @@ def demand_satisfied_first_stage_rule(model, i):
def demand_satisfied_second_stage_rule(model, i):
return (0.0, sum([model.NumUnitsCutSecondStage[j,i] for j in model.ProductSizes if j >= i]) - model.DemandsSecondStage[i], None)
-model.DemandSatisfiedFirstStage = Constraint(model.ProductSizes, rule=demand_satisfied_first_stage_rule)
-model.DemandSatisfiedSecondStage = Constraint(model.ProductSizes, rule=demand_satisfied_second_stage_rule)
+model.DemandSatisfiedFirstStage = pyo.Constraint(model.ProductSizes, rule=demand_satisfied_first_stage_rule)
+model.DemandSatisfiedSecondStage = pyo.Constraint(model.ProductSizes, rule=demand_satisfied_second_stage_rule)
# ensure that you don't produce any units if the decision has been made to disable producion.
def enforce_production_first_stage_rule(model, i):
@@ -109,8 +109,8 @@ def enforce_production_second_stage_rule(model, i):
# The production capacity per time stage serves as a simple upper bound for "M".
return (None, model.NumProducedSecondStage[i] - model.Capacity * model.ProduceSizeSecondStage[i], 0.0)
-model.EnforceProductionBinaryFirstStage = Constraint(model.ProductSizes, rule=enforce_production_first_stage_rule)
-model.EnforceProductionBinarySecondStage = Constraint(model.ProductSizes, rule=enforce_production_second_stage_rule)
+model.EnforceProductionBinaryFirstStage = pyo.Constraint(model.ProductSizes, rule=enforce_production_first_stage_rule)
+model.EnforceProductionBinarySecondStage = pyo.Constraint(model.ProductSizes, rule=enforce_production_second_stage_rule)
# ensure that the production capacity is not exceeded for each time stage.
def enforce_capacity_first_stage_rule(model):
@@ -119,8 +119,8 @@ def enforce_capacity_first_stage_rule(model):
def enforce_capacity_second_stage_rule(model):
return (None, sum([model.NumProducedSecondStage[i] for i in model.ProductSizes]) - model.Capacity, 0.0)
-model.EnforceCapacityLimitFirstStage = Constraint(rule=enforce_capacity_first_stage_rule)
-model.EnforceCapacityLimitSecondStage = Constraint(rule=enforce_capacity_second_stage_rule)
+model.EnforceCapacityLimitFirstStage = pyo.Constraint(rule=enforce_capacity_first_stage_rule)
+model.EnforceCapacityLimitSecondStage = pyo.Constraint(rule=enforce_capacity_second_stage_rule)
# ensure that you can't generate inventory out of thin air.
def enforce_inventory_first_stage_rule(model, i):
@@ -136,8 +136,8 @@ def enforce_inventory_second_stage_rule(model, i):
- model.NumProducedFirstStage[i] - model.NumProducedSecondStage[i], \
0.0)
-model.EnforceInventoryFirstStage = Constraint(model.ProductSizes, rule=enforce_inventory_first_stage_rule)
-model.EnforceInventorySecondStage = Constraint(model.ProductSizes, rule=enforce_inventory_second_stage_rule)
+model.EnforceInventoryFirstStage = pyo.Constraint(model.ProductSizes, rule=enforce_inventory_first_stage_rule)
+model.EnforceInventorySecondStage = pyo.Constraint(model.ProductSizes, rule=enforce_inventory_second_stage_rule)
# stage-specific cost computations.
def first_stage_cost_rule(model):
@@ -148,7 +148,7 @@ def first_stage_cost_rule(model):
for (i,j) in model.NumUnitsCutDomain if i != j])
return (model.FirstStageCost - production_costs - cut_costs) == 0.0
-model.ComputeFirstStageCost = Constraint(rule=first_stage_cost_rule)
+model.ComputeFirstStageCost = pyo.Constraint(rule=first_stage_cost_rule)
def second_stage_cost_rule(model):
production_costs = sum([model.SetupCosts[i] * model.ProduceSizeSecondStage[i] + \
@@ -158,10 +158,8 @@ def second_stage_cost_rule(model):
for (i,j) in model.NumUnitsCutDomain if i != j])
return (model.SecondStageCost - production_costs - cut_costs) == 0.0
-model.ComputeSecondStageCost = Constraint(rule=second_stage_cost_rule)
+model.ComputeSecondStageCost = pyo.Constraint(rule=second_stage_cost_rule)
-#
-# PySP Auto-generated Objective
#
# minimize: sum of StageCosts
#
@@ -169,5 +167,5 @@ def second_stage_cost_rule(model):
# included here for informational purposes.
def total_cost_rule(model):
return model.FirstStageCost + model.SecondStageCost
-model.Total_Cost_Objective = Objective(rule=total_cost_rule, sense=minimize)
+model.Total_Cost_Objective = pyo.Objective(rule=total_cost_rule, sense=pyo.minimize)
diff --git a/pyomo/__init__.py b/pyomo/__init__.py
index 1d37b8d1410..7aebc635226 100644
--- a/pyomo/__init__.py
+++ b/pyomo/__init__.py
@@ -7,3 +7,6 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+
+from . import common
+from .version import __version__
diff --git a/pyomo/bilevel/plugins/solver2.py b/pyomo/bilevel/plugins/solver2.py
index 1c9714eacd5..6a80ad633cd 100644
--- a/pyomo/bilevel/plugins/solver2.py
+++ b/pyomo/bilevel/plugins/solver2.py
@@ -11,7 +11,6 @@
import time
import pyutilib.misc
import pyomo.opt
-import pyomo.common
from pyomo.core import TransformationFactory, Var, Set
diff --git a/pyomo/bilevel/plugins/solver3.py b/pyomo/bilevel/plugins/solver3.py
index 91c3a7b9c89..21e5912c198 100644
--- a/pyomo/bilevel/plugins/solver3.py
+++ b/pyomo/bilevel/plugins/solver3.py
@@ -12,7 +12,6 @@
import pyutilib.misc
import pyomo.opt
#from pyomo.bilevel.components import SubModel
-import pyomo.common
from pyomo.core import TransformationFactory, Var, Set
diff --git a/pyomo/bilevel/plugins/solver4.py b/pyomo/bilevel/plugins/solver4.py
index 443947bf01b..2ccada7f5be 100644
--- a/pyomo/bilevel/plugins/solver4.py
+++ b/pyomo/bilevel/plugins/solver4.py
@@ -11,7 +11,6 @@
import time
import pyutilib.misc
import pyomo.opt
-import pyomo.common
from pyomo.core import TransformationFactory, Var, Set
diff --git a/pyomo/bilevel/tests/test_blp.py b/pyomo/bilevel/tests/test_blp.py
index 7bc6fb1a978..6dd2ace5a74 100644
--- a/pyomo/bilevel/tests/test_blp.py
+++ b/pyomo/bilevel/tests/test_blp.py
@@ -21,6 +21,7 @@
import pyutilib.th as unittest
import pyutilib.misc
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
import pyomo.opt
import pyomo.scripting.pyomo_main as pyomo_main
from pyomo.scripting.util import cleanup
@@ -28,12 +29,6 @@
from six import iteritems
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
solvers = pyomo.opt.check_available_solvers('cplex', 'glpk', 'ipopt')
class CommonTests:
@@ -93,7 +88,7 @@ def referenceFile(self, problem, solver):
def getObjective(self, fname):
FILE = open(fname,'r')
- data = yaml.load(FILE)
+ data = yaml.load(FILE, **yaml_load_args)
FILE.close()
solutions = data.get('Solution', [])
ans = []
diff --git a/pyomo/bilevel/tests/test_linear_dual.py b/pyomo/bilevel/tests/test_linear_dual.py
index 39800c756cb..35b29a0ccfd 100644
--- a/pyomo/bilevel/tests/test_linear_dual.py
+++ b/pyomo/bilevel/tests/test_linear_dual.py
@@ -19,6 +19,7 @@
import pyutilib.th as unittest
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
import pyomo.opt
import pyomo.scripting.pyomo_main as pyomo_main
from pyomo.scripting.util import cleanup
@@ -26,12 +27,6 @@
from six import iteritems
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
solvers = pyomo.opt.check_available_solvers('cplex', 'glpk')
class CommonTests:
@@ -93,7 +88,7 @@ def referenceFile(self, problem, solver):
def getObjective(self, fname):
FILE = open(fname)
- data = yaml.load(FILE)
+ data = yaml.load(FILE, **yaml_load_args)
FILE.close()
solutions = data.get('Solution', [])
ans = []
diff --git a/pyomo/checker/tests/test_examples.py b/pyomo/checker/tests/test_examples.py
index 76d7d97d330..4f697ed45b9 100644
--- a/pyomo/checker/tests/test_examples.py
+++ b/pyomo/checker/tests/test_examples.py
@@ -10,17 +10,13 @@
import sys
import os
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
import pyutilib.th as unittest
from pyomo.checker import *
from pyomo.checker.plugins.checker import PyomoModelChecker
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
currdir = os.path.dirname(os.path.abspath(__file__))
exdir = os.path.join(currdir, "examples")
@@ -44,7 +40,8 @@ def testMethod(obj, name):
def assignTests(cls):
- defs = yaml.load(open(os.path.join(currdir, 'examples.yml'), 'r'))
+ defs = yaml.load(open(os.path.join(currdir, 'examples.yml'), 'r'),
+ **yaml_load_args)
for package in defs:
for checkerName in defs[package]:
diff --git a/pyomo/common/__init__.py b/pyomo/common/__init__.py
index c089844ba9c..df6558de10b 100644
--- a/pyomo/common/__init__.py
+++ b/pyomo/common/__init__.py
@@ -21,7 +21,7 @@
# The following will be deprecated soon
register_executable, registered_executable, unregister_executable
)
-from . import config
+from . import config, timing
from .deprecation import deprecated
from .errors import DeveloperError
from ._task import pyomo_api, PyomoAPIData, PyomoAPIFactory
diff --git a/pyomo/common/collections/__init__.py b/pyomo/common/collections/__init__.py
new file mode 100644
index 00000000000..2ba62ce0e56
--- /dev/null
+++ b/pyomo/common/collections/__init__.py
@@ -0,0 +1,11 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+from .orderedset import OrderedDict, OrderedSet
diff --git a/pyomo/common/collections/orderedset.py b/pyomo/common/collections/orderedset.py
new file mode 100644
index 00000000000..6740069deb5
--- /dev/null
+++ b/pyomo/common/collections/orderedset.py
@@ -0,0 +1,84 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import six
+from six import itervalues, iteritems
+
+if six.PY3:
+ from collections.abc import MutableSet as collections_MutableSet
+else:
+ from collections import MutableSet as collections_MutableSet
+try:
+ from collections import OrderedDict
+except:
+ from ordereddict import OrderedDict
+
+class OrderedSet(collections_MutableSet):
+ __slots__ = ('_dict')
+
+ def __init__(self, iterable=None):
+ self._dict = OrderedDict()
+ if iterable is not None:
+ self.update(iterable)
+
+ def __str__(self):
+ """String representation of the mapping."""
+ return "OrderedSet(%s)" % (', '.join(repr(x) for x in self))
+
+
+ def update(self, iterable):
+ for val in iterable:
+ self.add(val)
+
+ #
+ # This method must be defined for deepcopy/pickling
+ # because this class relies on Python ids.
+ #
+ def __setstate__(self, state):
+ self._dict = state
+
+ def __getstate__(self):
+ return self._dict
+
+ #
+ # Implement MutableSet abstract methods
+ #
+
+ def __contains__(self, val):
+ return val in self._dict
+
+ def __iter__(self):
+ return iter(self._dict)
+
+ def __len__(self):
+ return len(self._dict)
+
+ def add(self, val):
+ """Add an element."""
+ if val not in self._dict:
+ self._dict[val] = None
+
+ def discard(self, val):
+ """Remove an element. Do not raise an exception if absent."""
+ if val in self._dict:
+ del self._dict[val]
+
+ #
+ # The remaining MutableSet methods have slow default
+ # implementations.
+ #
+
+ def clear(self):
+ """Remove all elements from this set."""
+ self._dict.clear()
+
+ def remove(self, val):
+ """Remove an element. If not a member, raise a KeyError."""
+ del self._dict[val]
diff --git a/pyomo/common/config.py b/pyomo/common/config.py
index 64912988c4d..3e57b28c7ff 100644
--- a/pyomo/common/config.py
+++ b/pyomo/common/config.py
@@ -11,6 +11,7 @@
import os
import platform
+import enum
import six
from pyutilib.misc.config import ConfigBlock, ConfigList, ConfigValue
@@ -157,3 +158,13 @@ def add_docstring_list(docstring, configblock, indent_by=4):
indent_spacing=0,
width=256
).splitlines(True))
+
+
+class ConfigEnum(enum.Enum):
+ @classmethod
+ def from_enum_or_string(cls, arg):
+ if type(arg) is str:
+ return cls[arg]
+ else:
+ # Handles enum or integer inputs
+ return cls(arg)
diff --git a/pyomo/common/dependencies.py b/pyomo/common/dependencies.py
new file mode 100644
index 00000000000..7e6bf692199
--- /dev/null
+++ b/pyomo/common/dependencies.py
@@ -0,0 +1,336 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import inspect
+import importlib
+import logging
+
+class DeferredImportError(ImportError):
+ pass
+
+class ModuleUnavailable(object):
+ """Dummy object that raises a DeferredImportError upon attribute access
+
+ This object is returned by attempt_import() in lieu of the module in
+ the case that the module import fails. Any attempts to access
+ attributes on this object will raise a DeferredImportError
+ exception.
+
+ Parameters
+ ----------
+ message: str
+ The string message to return in the raised exception
+ """
+ def __init__(self, message):
+ self._error_message_ = message
+
+ def __getattr__(self, attr):
+ raise DeferredImportError(self._error_message_)
+
+ def generate_import_warning(self, logger='pyomo.common'):
+ logging.getLogger(logger).warning(
+ self._error_message_)
+
+class DeferredImportModule(object):
+ """Dummy object that serves as a module placeholder until the first time
+ getattr is called, at which point it imports the module and returns
+ the module attribute.
+
+    This object is returned by attempt_import() in lieu of the module
+    when `defer_check=True`.  The first attribute access resolves the
+    deferred import (through the associated DeferredImportIndicator)
+    and returns the attribute from the actual imported module.
+ """
+ def __init__(self, indicator):
+ self._indicator_flag = indicator
+
+ def __getattr__(self, attr):
+ self._indicator_flag.resolve()
+ return getattr(self._indicator_flag._module, attr)
+
+
+class _DeferredImportIndicatorBase(object):
+ def __bool__(self):
+ return self.__nonzero__()
+
+ def __and__(self, other):
+ return _DeferredAnd(self, other)
+
+ def __or__(self, other):
+ return _DeferredOr(self, other)
+
+ def __rand__(self, other):
+ return _DeferredAnd(other, self)
+
+ def __ror__(self, other):
+ return _DeferredOr(other, self)
+
+
+class DeferredImportIndicator(_DeferredImportIndicatorBase):
+ """Placeholder indicating if an import was successful.
+
+ This object serves as a placeholder for the Boolean indicator if a
+ deferred module import was successful. Casting this instance to
+ bool will cause the import to be attempted. The actual import logic
+ is here and not in the DeferredImportModule to reduce the number of
+ attributes on the DeferredImportModule.
+ """
+
+ def __init__(self, name, alt_names, error_message, only_catch_importerror,
+ minimum_version, original_globals, callback, importer):
+ self._names = [name]
+ if alt_names:
+ self._names += list(alt_names)
+ for _n in tuple(self._names):
+ if '.' in _n:
+ self._names.append(_n.split('.')[-1])
+ self._error_message = error_message
+ self._only_catch_importerror = only_catch_importerror
+ self._minimum_version = minimum_version
+ self._original_globals = original_globals
+ self._callback = callback
+ self._importer = importer
+ self._module = None
+ self._available = None
+
+ def resolve(self):
+ if self._module is None:
+ try:
+ # Only attempt the import once
+ self._module, self._available = attempt_import(
+ name=self._names[0],
+ error_message=self._error_message,
+ only_catch_importerror=self._only_catch_importerror,
+ minimum_version=self._minimum_version,
+ callback=self._callback,
+ importer=self._importer,
+ defer_check=False,
+ )
+ except:
+ # make sure that we cache the result
+ self._module = ModuleUnavailable(
+ "Exception raised when importing %s" % (self._names[0],))
+ self._available = False
+ raise
+
+ # Replace myself in the original globals() where I was
+ # declared
+ self.replace_self_in_globals(self._original_globals)
+
+ # Replace myself in the caller globals (to avoid calls to
+ # this method in the future)
+ _globals = inspect.currentframe().f_back.f_back.f_globals
+ self.replace_self_in_globals(_globals)
+
+ def replace_self_in_globals(self, _globals):
+ for name in self._names:
+ if ( name in _globals
+ and isinstance(_globals[name], DeferredImportModule)
+ and _globals[name]._indicator_flag is self ):
+ _globals[name] = self._module
+ for flag_name in (name+'_available', 'has_'+name, 'have_'+name):
+ if flag_name in _globals and _globals[flag_name] is self:
+ _globals[flag_name] = self._available
+
+ def __nonzero__(self):
+ self.resolve()
+ return self._available
+
+
+class _DeferredAnd(_DeferredImportIndicatorBase):
+ def __init__(self, a, b):
+ self._a = a
+ self._b = b
+
+ def __nonzero__(self):
+ return bool(self._a) and bool(self._b)
+
+
+class _DeferredOr(_DeferredImportIndicatorBase):
+ def __init__(self, a, b):
+ self._a = a
+ self._b = b
+
+ def __nonzero__(self):
+ return bool(self._a) or bool(self._b)
+
+
+try:
+ from packaging import version as _version
+ _parser = _version.parse
+except ImportError:
+ # pkg_resources is an order of magnitude slower to import than
+ # packaging. Only use it if the preferred (but optional) packaging
+ # library is not present
+ from pkg_resources import parse_version as _parser
+
+def _check_version(module, min_version):
+ version = getattr(module, '__version__', '0.0.0')
+ return _parser(min_version) <= _parser(version)
+
+
+def attempt_import(name, error_message=None, only_catch_importerror=True,
+ minimum_version=None, alt_names=None, callback=None,
+ importer=None, defer_check=True):
+
+ """Attempt to import the specified module.
+
+ This will attempt to import the specified module, returning a
+ (module, available) tuple. If the import was successful, `module`
+ will be the imported module and `available` will be True. If the
+ import results in an exception, then `module` will be an instance of
+ :py:class:`ModuleUnavailable` and `available` will be False
+
+ The following is equivalent to ``import numpy as np``:
+
+ .. doctest::
+
+ >>> from pyomo.common.dependencies import attempt_import
+ >>> numpy, numpy_available = attempt_import('numpy')
+
+ The import can be "deferred" until the first time the code either
+ attempts to access the module or checks the boolean value of the
+ available flag. This allows optional dependencies to be declared at
+ the module scope but not imported until they are actually used by
+ the module (thereby speeding up the initial package import).
+ Deferred imports are handled by two helper classes
+ (DeferredImportModule and DeferredImportIndicator). Upon actual
+ import, DeferredImportIndicator.resolve() attempts to replace those
+ objects (in both the local and original global namespaces) with the
+ imported module and boolean flag so that subsequent uses of the
+ module do not incur any overhead due to the delayed import.
+
+ Parameters
+ ----------
+ name: str
+ The name of the module to import
+
+ error_message: str, optional
+ The message for the exception raised by ModuleUnavailable
+
+ only_catch_importerror: bool, optional
+ If True (the default), exceptions other than ImportError raised
+ during module import will be reraised. If False, any exception
+ will result in returning a ModuleUnavailable object.
+
+ minimum_version: str, optional
+ The minimum acceptable module version (retrieved from
+ module.__version__)
+
+ alt_names: list, optional
+ A list of common alternate names by which to look for this
+ module in the globals() namespaces. For example, the alt_names
+ for NumPy would be ['np']
+
+ callback: function, optional
+ A function with the signature "`fcn(module, available)`" that
+ will be called after the import is first attempted.
+
+ importer: function, optional
+ A function that will perform the import and return the imported
+ module (or raise an ImportError). This is useful for cases
+ where there are several equivalent modules and you want to
+ import/return the first one that is available.
+
+ defer_check: bool, optional
+ If True (the default), then the attempted import is deferred
+ until the first use of either the module or the availability
+ flag. The method will return instances of DeferredImportModule
+ and DeferredImportIndicator.
+
+ Returns
+ -------
+ : module
+ the imported module, or an instance of
+ :py:class:`ModuleUnavailable`, or an instance of
+ :py:class:`DeferredImportModule`
+ : bool
+ Boolean indicating if the module import succeeded or an instance
+        of :py:class:`DeferredImportIndicator`
+
+ """
+ # If we are going to defer the check until later, return the
+ # deferred import module object
+ if defer_check:
+ indicator = DeferredImportIndicator(
+ name=name,
+ alt_names=alt_names,
+ error_message=error_message,
+ only_catch_importerror=only_catch_importerror,
+ minimum_version=minimum_version,
+ original_globals=inspect.currentframe().f_back.f_globals,
+ callback=callback,
+ importer=importer)
+ return DeferredImportModule(indicator), indicator
+
+ try:
+ if importer is None:
+ module = importlib.import_module(name)
+ else:
+ module = importer()
+ if minimum_version is None or _check_version(module, minimum_version):
+ if callback is not None:
+ callback(module, True)
+ return module, True
+ elif error_message:
+ version = getattr(module, '__version__', 'UNKNOWN')
+ error_message += " (version %s does not satisfy the minimum " \
+ "version %s)" % (version, minimum_version)
+ else:
+ version = getattr(module, '__version__', 'UNKNOWN')
+ error_message = "The %s module version %s does not satisfy " \
+ "the minimum version %s" % (
+ name, version, minimum_version)
+ except ImportError:
+ pass
+ except:
+ if only_catch_importerror:
+ raise
+
+ if not error_message:
+ error_message = "The %s module (an optional Pyomo dependency) " \
+ "failed to import" % (name,)
+
+ module = ModuleUnavailable(error_message)
+ if callback is not None:
+ callback(module, False)
+ return module, False
+
+#
+# Common optional dependencies used throughout Pyomo
+#
+
+yaml_load_args = {}
+def _finalize_yaml(module, available):
+ # Recent versions of PyYAML issue warnings if the Loader argument is
+ # not set
+ if available and hasattr(module, 'SafeLoader'):
+ yaml_load_args['Loader'] = module.SafeLoader
+
+def _finalize_scipy(module, available):
+ if available:
+ # Import key subpackages that we will want to assume are present
+ import scipy.sparse
+ import scipy.spatial
+ import scipy.stats
+
+def _finalize_pympler(module, available):
+ if available:
+ # Import key subpackages that we will want to assume are present
+ import pympler.muppy
+
+yaml, yaml_available = attempt_import('yaml', callback=_finalize_yaml)
+pympler, pympler_available = attempt_import(
+ 'pympler', callback=_finalize_pympler)
+numpy, numpy_available = attempt_import('numpy', alt_names=['np'])
+scipy, scipy_available = attempt_import('scipy', callback=_finalize_scipy)
+networkx, networkx_available = attempt_import('networkx', alt_names=['nx'])
+pandas, pandas_available = attempt_import('pandas')
+dill, dill_available = attempt_import('dill')
diff --git a/pyomo/common/deprecation.py b/pyomo/common/deprecation.py
index c981105813f..4ad44dc4a99 100644
--- a/pyomo/common/deprecation.py
+++ b/pyomo/common/deprecation.py
@@ -41,9 +41,14 @@ def deprecation_warning(msg, logger='pyomo.core', version=None, remove_in=None):
Args:
msg (str): the deprecation message to format
"""
- msg = _default_msg(msg, version, remove_in)
- logging.getLogger(logger).warning(
- textwrap.fill('DEPRECATED: %s' % (msg,), width=70) )
+ msg = textwrap.fill('DEPRECATED: %s' % (_default_msg(msg, version, remove_in),),
+ width=70)
+ try:
+ caller = inspect.getframeinfo(inspect.stack()[2][0])
+ msg += "\n(called from %s:%s)" % (caller.filename.strip(), caller.lineno)
+ except:
+ pass
+ logging.getLogger(logger).warning(msg)
def deprecated(msg=None, logger='pyomo.core', version=None, remove_in=None):
diff --git a/pyomo/common/download.py b/pyomo/common/download.py
index 0707c1c5206..b3cbe519b90 100644
--- a/pyomo/common/download.py
+++ b/pyomo/common/download.py
@@ -14,21 +14,29 @@
import logging
import os
import platform
+import re
import ssl
import sys
import zipfile
-from six.moves.urllib.request import urlopen
+from pyutilib.subprocess import run
from .config import PYOMO_CONFIG_DIR
+from .deprecation import deprecated
from .errors import DeveloperError
import pyomo.common
+from pyomo.common.dependencies import attempt_import
+
+request = attempt_import('six.moves.urllib.request')[0]
+distro, distro_available = attempt_import('distro')
logger = logging.getLogger('pyomo.common.download')
DownloadFactory = pyomo.common.Factory('library downloaders')
class FileDownloader(object):
+ _os_version = None
+
def __init__(self, insecure=False, cacert=None):
self._fname = None
self.target = None
@@ -41,7 +49,8 @@ def __init__(self, insecure=False, cacert=None):
% (self.cacert,))
- def get_sysinfo(self):
+ @classmethod
+ def get_sysinfo(cls):
"""Return a tuple (platform_name, bits) for the current system
Returns
@@ -57,8 +66,145 @@ def get_sysinfo(self):
bits = 64 if sys.maxsize > 2**32 else 32
return system, bits
+ @classmethod
+ def _get_distver_from_os_release(cls):
+ dist = ''
+ ver = ''
+ with open('/etc/os-release', 'rt') as FILE:
+ for line in FILE:
+ line = line.strip()
+ if not line:
+ continue
+ key,val = line.lower().split('=')
+ if key == 'id':
+ dist = val
+ elif key == 'version_id':
+ if val[0] == val[-1] and val[0] in '"\'':
+ ver = val[1:-1]
+ else:
+ ver = val
+ return cls._map_dist(dist), ver
+
+ @classmethod
+ def _get_distver_from_redhat_release(cls):
+ # RHEL6 did not include /etc/os-release
+ with open('/etc/redhat-release', 'rt') as FILE:
+ dist = FILE.readline().lower().strip()
+ ver = ''
+ for word in dist.split():
+ if re.match('^[0-9\.]+', word):
+ ver = word
+ break
+ return cls._map_dist(dist), ver
+
+ @classmethod
+ def _get_distver_from_lsb_release(cls):
+ rc, dist = run(['lsb_release', '-si'])
+ rc, ver = run(['lsb_release', '-sr'])
+ return cls._map_dist(dist.lower().strip()), ver.strip()
+
+ @classmethod
+ def _get_distver_from_distro(cls):
+ return distro.id(), distro.version(best=True)
+
+ @classmethod
+ def _map_dist(cls, dist):
+ dist = dist.lower()
+ _map = {
+ 'centos': 'centos',
+ 'redhat': 'rhel',
+ 'red hat': 'rhel', # RHEL6 reports 'red hat enterprise'
+ 'fedora': 'fedora',
+ 'debian': 'debian',
+ 'ubuntu': 'ubuntu',
+ }
+ for key in _map:
+ if key in dist:
+ return _map[key]
+ return dist
+
+ @classmethod
+ def _get_os_version(cls):
+ _os = cls.get_sysinfo()[0]
+ if _os == 'linux':
+ if distro_available:
+ dist, ver = cls._get_distver_from_distro()
+ elif os.path.exists('/etc/redhat-release'):
+ dist, ver = cls._get_distver_from_redhat_release()
+ elif run(['lsb_release'])[0] == 0:
+ dist, ver = cls._get_distver_from_lsb_release()
+ elif os.path.exists('/etc/os-release'):
+ # Note that (at least on centos), os_release is an
+ # imprecise version string
+ dist, ver = cls._get_distver_from_os_release()
+ else:
+ dist, ver = '',''
+ return dist, ver
+ elif _os == 'darwin':
+ return 'macos', platform.mac_ver()[0]
+ elif _os == 'windows':
+ return 'win', platform.win32_ver()[0]
+ else:
+ return '', ''
+
+ @classmethod
+ def get_os_version(cls, normalize=True):
+ """Return a standardized representation of the OS version
+
+ This method was designed to help identify compatible binaries,
+ and will return strings similar to:
+ - rhel6
+ - fedora24
+ - ubuntu18.04
+ - macos10.13
+ - win10
+
+ Parameters
+ ----------
+ normalize : bool, optional
+ If True (the default) returns a simplified normalized string
+ (e.g., `'rhel7'`) instead of the raw (os, version) tuple
+ (e.g., `('centos', '7.7.1908')`)
+
+ """
+ if FileDownloader._os_version is None:
+ FileDownloader._os_version = cls._get_os_version()
+
+ if not normalize:
+ return FileDownloader._os_version
+ _os, _ver = FileDownloader._os_version
+ _map = {
+ 'centos': 'rhel',
+ }
+ if _os in _map:
+ _os = _map[_os]
+
+ if _os in {'ubuntu','macos','win'}:
+ return _os + ''.join(_ver.split('.')[:2])
+ else:
+ return _os + _ver.split('.')[0]
+
+ @deprecated("get_url() is deprecated. Use get_platform_url()",
+ version='5.6.9')
def get_url(self, urlmap):
+ return self.get_platform_url(urlmap)
+
+
+ def get_platform_url(self, urlmap):
+ """Select the url for this platform
+
+ Given a `urlmap` dict that maps the platform name (from
+ `FileDownloader.get_sysinfo()`) to a platform-specific URL,
+ return the URL that matches the current platform.
+
+ Parameters
+ ----------
+ urlmap: dict
+ Map of platform name (e.g., `linux`, `windows`, `cygwin`,
+ `darwin`) to URL
+
+ """
system, bits = self.get_sysinfo()
url = urlmap.get(system, None)
if url is None:
@@ -135,25 +281,38 @@ def retrieve_url(self, url):
if self.insecure:
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE
- fetch = urlopen(url, context=ctx)
+ fetch = request.urlopen(url, context=ctx)
except AttributeError:
# Revert to pre-2.7.9 syntax
- fetch = urlopen(url)
+ fetch = request.urlopen(url)
ans = fetch.read()
logger.info(" ...downloaded %s bytes" % (len(ans),))
return ans
- def get_binary_file(self, url):
+ def get_file(self, url, binary):
if self._fname is None:
raise DeveloperError("target file name has not been initialized "
"with set_destination_filename")
- with open(self._fname, 'wb') as FILE:
+ with open(self._fname, 'wb' if binary else 'wt') as FILE:
raw_file = self.retrieve_url(url)
- FILE.write(raw_file)
+ if binary:
+ FILE.write(raw_file)
+ else:
+ FILE.write(raw_file.decode())
logger.info(" ...wrote %s bytes" % (len(raw_file),))
+ def get_binary_file(self, url):
+ """Retrieve the specified url and write as a binary file"""
+ return self.get_file(url, binary=True)
+
+
+ def get_text_file(self, url):
+ """Retrieve the specified url and write as a text file"""
+ return self.get_file(url, binary=False)
+
+
def get_binary_file_from_zip_archive(self, url, srcname):
if self._fname is None:
raise DeveloperError("target file name has not been initialized "
diff --git a/pyomo/common/fileutils.py b/pyomo/common/fileutils.py
index d47c289fd9e..d0668166882 100644
--- a/pyomo/common/fileutils.py
+++ b/pyomo/common/fileutils.py
@@ -8,6 +8,7 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+import ctypes.util
import glob
import inspect
import logging
@@ -29,11 +30,14 @@ def this_file(stack_offset=1):
# __file__ fails if script is called in different ways on Windows
# __file__ fails if someone does os.chdir() before
# sys.argv[0] also fails because it does not always contains the path
- callerFrame = inspect.stack()[stack_offset]
- frameName = callerFrame[1]
+ callerFrame = inspect.currentframe()
+ while stack_offset:
+ callerFrame = callerFrame.f_back
+ stack_offset -= 1
+ frameName = callerFrame.f_code.co_filename
if frameName and frameName[0] == '<' and frameName[-1] == '>':
return frameName
- return os.path.abspath(inspect.getfile(callerFrame[0]))
+ return os.path.abspath(inspect.getfile(callerFrame))
def this_file_dir():
@@ -267,6 +271,10 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None):
uses :py:func:find_file(), the filename and search paths may contain
wildcards.
+ If the explicit path search fails to locate a library, then this
+ returns the result from passing the basename (with 'lib' and extension
+ removed) to ctypes.util.find_library()
+
Parameters
----------
libname : str
@@ -293,6 +301,7 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None):
``allow_pathlist_deep_references=True``, so libnames containing
relative paths will be matched relative to all paths in
pathlist.
+
"""
if pathlist is None:
# Note: PYOMO_CONFIG_DIR/lib comes before LD_LIBRARY_PATH, and
@@ -308,7 +317,22 @@ def find_library(libname, cwd=True, include_PATH=True, pathlist=None):
if include_PATH:
pathlist.extend(_path())
ext = _libExt.get(_system(), None)
- return find_file(libname, cwd=cwd, ext=ext, pathlist=pathlist)
+ # Search 1: original filename (with extensions) in our paths
+ lib = find_file(libname, cwd=cwd, ext=ext, pathlist=pathlist)
+ if lib is None and not libname.startswith('lib'):
+ # Search 2: prepend 'lib' (with extensions) in our paths
+ lib = find_file('lib'+libname, cwd=cwd, ext=ext, pathlist=pathlist)
+ if lib is not None:
+ return lib
+ # Search 3: use ctypes.util.find_library (which expects 'lib' and
+ # extension to be removed from the name)
+ if libname.startswith('lib') and _system() != 'windows':
+ libname = libname[3:]
+ libname_base, ext = os.path.splitext(os.path.basename(libname))
+ if ext.lower().startswith(('.so','.dll','.dylib')):
+ return ctypes.util.find_library(libname_base)
+ else:
+ return ctypes.util.find_library(libname)
def find_executable(exename, cwd=True, include_PATH=True, pathlist=None):
diff --git a/pyomo/common/getGSL.py b/pyomo/common/getGSL.py
index f86dd188669..002b601322e 100644
--- a/pyomo/common/getGSL.py
+++ b/pyomo/common/getGSL.py
@@ -34,7 +34,7 @@ def find_GSL():
def get_gsl(downloader):
system, bits = downloader.get_sysinfo()
- url = downloader.get_url(urlmap) % (bits,)
+ url = downloader.get_platform_url(urlmap) % (bits,)
downloader.set_destination_filename(os.path.join('lib', 'amplgsl.dll'))
diff --git a/pyomo/contrib/pynumero/linalg/intrinsics.py b/pyomo/common/tests/dep_mod.py
similarity index 60%
rename from pyomo/contrib/pynumero/linalg/intrinsics.py
rename to pyomo/common/tests/dep_mod.py
index 0880390ac1e..93feb2510b7 100644
--- a/pyomo/contrib/pynumero/linalg/intrinsics.py
+++ b/pyomo/common/tests/dep_mod.py
@@ -7,20 +7,14 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from pyomo.contrib.pynumero.sparse import BlockVector
-import numpy as np
-__all__ = ['norm']
+from pyomo.common.dependencies import attempt_import
+__version__ = '1.5'
-def norm(x, ord=None):
-
- f = np.linalg.norm
- if isinstance(x, np.ndarray):
- return f(x, ord=ord)
- elif isinstance(x, BlockVector):
- flat_x = x.flatten()
- return f(flat_x, ord=ord)
- else:
- raise NotImplementedError()
+numpy, numpy_available = attempt_import('numpy', defer_check=True)
+bogus_nonexisting_module, bogus_nonexisting_module_available \
+ = attempt_import('bogus_nonexisting_module',
+ alt_names=['bogus_nem'],
+ defer_check=True)
diff --git a/pyomo/contrib/pynumero/extensions/__init__.py b/pyomo/common/tests/dep_mod_except.py
similarity index 92%
rename from pyomo/contrib/pynumero/extensions/__init__.py
rename to pyomo/common/tests/dep_mod_except.py
index cd6b0b75748..2c991485b4a 100644
--- a/pyomo/contrib/pynumero/extensions/__init__.py
+++ b/pyomo/common/tests/dep_mod_except.py
@@ -7,3 +7,5 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+
+raise ValueError("cannot import module")
diff --git a/pyomo/common/tests/test_config.py b/pyomo/common/tests/test_config.py
index 530a5afbf05..e21b6856d29 100644
--- a/pyomo/common/tests/test_config.py
+++ b/pyomo/common/tests/test_config.py
@@ -15,7 +15,7 @@
ConfigBlock, ConfigList, ConfigValue,
PositiveInt, NegativeInt, NonPositiveInt, NonNegativeInt,
PositiveFloat, NegativeFloat, NonPositiveFloat, NonNegativeFloat,
- In, Path, PathList
+ In, Path, PathList, ConfigEnum
)
class TestConfig(unittest.TestCase):
@@ -338,3 +338,15 @@ def norm(x):
c.a = ()
self.assertEqual(len(c.a), 0)
self.assertIs(type(c.a), list)
+
+ def test_ConfigEnum(self):
+ class TestEnum(ConfigEnum):
+ ITEM_ONE = 1
+ ITEM_TWO = 2
+
+ self.assertEqual(TestEnum.from_enum_or_string(1),
+ TestEnum.ITEM_ONE)
+ self.assertEqual(TestEnum.from_enum_or_string(
+ TestEnum.ITEM_TWO), TestEnum.ITEM_TWO)
+ self.assertEqual(TestEnum.from_enum_or_string('ITEM_ONE'),
+ TestEnum.ITEM_ONE)
diff --git a/pyomo/common/tests/test_dependencies.py b/pyomo/common/tests/test_dependencies.py
new file mode 100644
index 00000000000..55e1d524c08
--- /dev/null
+++ b/pyomo/common/tests/test_dependencies.py
@@ -0,0 +1,232 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import inspect
+from six import StringIO
+
+import pyutilib.th as unittest
+
+from pyomo.common.log import LoggingIntercept
+from pyomo.common.dependencies import (
+ attempt_import, ModuleUnavailable, DeferredImportModule,
+ DeferredImportIndicator, DeferredImportError,
+ _DeferredAnd, _DeferredOr
+)
+
+import pyomo.common.tests.dep_mod as dep_mod
+from pyomo.common.tests.dep_mod import (
+ numpy, numpy_available,
+ bogus_nonexisting_module as bogus_nem,
+ bogus_nonexisting_module_available as has_bogus_nem,
+)
+
+bogus, bogus_available \
+ = attempt_import('nonexisting.module.bogus', defer_check=True)
+
+class TestDependencies(unittest.TestCase):
+ def test_import_error(self):
+ module_obj, module_available = attempt_import(
+ '__there_is_no_module_named_this__',
+            'Testing import of a non-existent module',
+            defer_check=False)
+        self.assertFalse(module_available)
+        with self.assertRaisesRegex(
+                DeferredImportError, 'Testing import of a non-existent module'):
+ module_obj.try_to_call_a_method()
+
+ def test_import_success(self):
+ module_obj, module_available = attempt_import(
+ 'pyutilib','Testing import of PyUtilib', defer_check=False)
+ self.assertTrue(module_available)
+ import pyutilib
+ self.assertTrue(module_obj is pyutilib)
+
+ def test_local_deferred_import(self):
+ self.assertIs(type(bogus_available), DeferredImportIndicator)
+ self.assertIs(type(bogus), DeferredImportModule)
+ if bogus_available:
+ self.fail("Casting bogus_available to bool returned True")
+ self.assertIs(bogus_available, False)
+ # Note: this also tests the implicit alt_names for dotted imports
+ self.assertIs(type(bogus), ModuleUnavailable)
+ with self.assertRaisesRegexp(
+ DeferredImportError, "The nonexisting.module.bogus module "
+ "\(an optional Pyomo dependency\) failed to import"):
+ bogus.hello
+
+ def test_imported_deferred_import(self):
+ self.assertIs(type(has_bogus_nem), DeferredImportIndicator)
+ self.assertIs(type(bogus_nem), DeferredImportModule)
+ with self.assertRaisesRegexp(
+ DeferredImportError, "The bogus_nonexisting_module module "
+ "\(an optional Pyomo dependency\) failed to import"):
+ bogus_nem.hello
+ self.assertIs(has_bogus_nem, False)
+ self.assertIs(type(bogus_nem), ModuleUnavailable)
+ self.assertIs(dep_mod.bogus_nonexisting_module_available, False)
+ self.assertIs(type(dep_mod.bogus_nonexisting_module), ModuleUnavailable)
+
+ def test_min_version(self):
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod',
+ minimum_version='1.0',
+ defer_check=False)
+ self.assertTrue(avail)
+ self.assertTrue(inspect.ismodule(mod))
+
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod',
+ minimum_version='2.0',
+ defer_check=False)
+ self.assertFalse(avail)
+ self.assertIs(type(mod), ModuleUnavailable)
+ with self.assertRaisesRegex(
+ DeferredImportError, "The pyomo.common.tests.dep_mod module "
+ "version 1.5 does not satisfy the minimum version 2.0"):
+ mod.hello
+
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod',
+ error_message="Failed import",
+ minimum_version='2.0',
+ defer_check=False)
+ self.assertFalse(avail)
+ self.assertIs(type(mod), ModuleUnavailable)
+ with self.assertRaisesRegex(
+ DeferredImportError, "Failed import "
+ "\(version 1.5 does not satisfy the minimum version 2.0\)"):
+ mod.hello
+
+ def test_and_or(self):
+ mod0, avail0 = attempt_import('pyutilib',
+ defer_check=True)
+ mod1, avail1 = attempt_import('pyomo.common.tests.dep_mod',
+ defer_check=True)
+ mod2, avail2 = attempt_import('pyomo.common.tests.dep_mod',
+ minimum_version='2.0',
+ defer_check=True)
+
+ _and = avail0 & avail1
+ self.assertIsInstance(_and, _DeferredAnd)
+
+ _or = avail1 | avail2
+ self.assertIsInstance(_or, _DeferredOr)
+
+ # Nothing has been resolved yet
+ self.assertIsNone(avail0._available)
+ self.assertIsNone(avail1._available)
+ self.assertIsNone(avail2._available)
+
+ # Shortcut boolean evaluation only partially resolves things
+ self.assertTrue(_or)
+ self.assertIsNone(avail0._available)
+ self.assertTrue(avail1._available)
+ self.assertIsNone(avail2._available)
+
+ self.assertTrue(_and)
+ self.assertTrue(avail0._available)
+ self.assertTrue(avail1._available)
+ self.assertIsNone(avail2._available)
+
+ # Testing compound operations
+ _and_and = avail0 & avail1 & avail2
+ self.assertFalse(_and_and)
+
+ _and_or = avail0 & avail1 | avail2
+ self.assertTrue(_and_or)
+
+        # Verify operator precedence
+ _or_and = avail0 | avail2 & avail2
+ self.assertTrue(_or_and)
+ _or_and = (avail0 | avail2) & avail2
+ self.assertFalse(_or_and)
+
+ _or_or = avail0 | avail1 | avail2
+ self.assertTrue(_or_or)
+
+ # Verify rand / ror
+ _rand = True & avail1
+ self.assertIsInstance(_rand, _DeferredAnd)
+ self.assertTrue(_rand)
+
+ _ror = False | avail1
+ self.assertIsInstance(_ror, _DeferredOr)
+ self.assertTrue(_ror)
+
+
+ def test_callbacks(self):
+ ans = []
+ def _record_avail(module, avail):
+ ans.append(avail)
+
+ mod0, avail0 = attempt_import('pyutilib',
+ defer_check=True,
+ callback=_record_avail)
+ mod1, avail1 = attempt_import('pyomo.common.tests.dep_mod',
+ minimum_version='2.0',
+ defer_check=True,
+ callback=_record_avail)
+
+ self.assertEqual(ans, [])
+ self.assertTrue(avail0)
+ self.assertEqual(ans, [True])
+ self.assertFalse(avail1)
+ self.assertEqual(ans, [True,False])
+
+ def test_import_exceptions(self):
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod_except',
+ defer_check=True)
+ with self.assertRaisesRegex(ValueError, "cannot import module"):
+ bool(avail)
+ # second test will not re-trigger the exception
+ self.assertFalse(avail)
+
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod_except',
+ defer_check=True,
+ only_catch_importerror=False)
+ self.assertFalse(avail)
+ self.assertFalse(avail)
+
+ def test_generate_warning(self):
+ mod, avail = attempt_import('pyomo.common.tests.dep_mod_except',
+ defer_check=True,
+ only_catch_importerror=False)
+
+ # Test generate warning
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo.common'):
+ mod.generate_import_warning()
+ self.assertEqual(
+ log.getvalue(), "The pyomo.common.tests.dep_mod_except module "
+ "(an optional Pyomo dependency) failed to import\n")
+
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo.core.base'):
+ mod.generate_import_warning('pyomo.core.base')
+ self.assertEqual(
+ log.getvalue(), "The pyomo.common.tests.dep_mod_except module "
+ "(an optional Pyomo dependency) failed to import\n")
+
+ def test_importer(self):
+ attempted_import = []
+ def _importer():
+ attempted_import.append(True)
+ return attempt_import('pyomo.common.tests.dep_mod',
+ defer_check=False)[0]
+
+ mod, avail = attempt_import('foo',
+ importer=_importer,
+ defer_check=True)
+
+ self.assertEqual(attempted_import, [])
+ self.assertIsInstance(mod, DeferredImportModule)
+ self.assertTrue(avail)
+ self.assertEqual(attempted_import, [True])
+ self.assertIs(mod._indicator_flag._module, dep_mod)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/common/tests/test_download.py b/pyomo/common/tests/test_download.py
index 29cd2ed8852..abdb275a93e 100644
--- a/pyomo/common/tests/test_download.py
+++ b/pyomo/common/tests/test_download.py
@@ -8,18 +8,22 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+import io
import os
import platform
+import re
+import six
import shutil
import tempfile
import pyutilib.th as unittest
from pyutilib.misc import capture_output
+from pyutilib.subprocess import run
from pyomo.common import DeveloperError
from pyomo.common.config import PYOMO_CONFIG_DIR
from pyomo.common.fileutils import this_file
-from pyomo.common.download import FileDownloader
+from pyomo.common.download import FileDownloader, distro_available
class Test_FileDownloader(unittest.TestCase):
def setUp(self):
@@ -137,15 +141,84 @@ def test_get_sysinfo(self):
self.assertFalse(any(c in ans[0] for c in '.-_'))
self.assertIn(ans[1], (32,64))
- def test_get_url(self):
+ def test_get_os_version(self):
+ f = FileDownloader()
+ _os, _ver = f.get_os_version(normalize=False)
+ _norm = f.get_os_version(normalize=True)
+ #print(_os,_ver,_norm)
+ _sys = f.get_sysinfo()[0]
+ if _sys == 'linux':
+ dist, dist_ver = re.match('^([^0-9]+)(.*)', _norm).groups()
+ self.assertNotIn('.', dist_ver)
+ self.assertGreater(int(dist_ver), 0)
+ if dist == 'ubuntu':
+ self.assertEqual(dist_ver, ''.join(_ver.split('.')[:2]))
+ else:
+ self.assertEqual(dist_ver, _ver.split('.')[0])
+
+ if distro_available:
+ d, v = f._get_distver_from_distro()
+ #print(d,v)
+ self.assertEqual(_os, d)
+ self.assertEqual(_ver, v)
+ self.assertTrue(v.replace('.','').startswith(dist_ver))
+
+ if os.path.exists('/etc/redhat-release'):
+ d, v = f._get_distver_from_redhat_release()
+ #print(d,v)
+ self.assertEqual(_os, d)
+ self.assertEqual(_ver, v)
+ self.assertTrue(v.replace('.','').startswith(dist_ver))
+
+ if run(['lsb_release'])[0] == 0:
+ d, v = f._get_distver_from_lsb_release()
+ #print(d,v)
+ self.assertEqual(_os, d)
+ self.assertEqual(_ver, v)
+ self.assertTrue(v.replace('.','').startswith(dist_ver))
+
+ if os.path.exists('/etc/os-release'):
+ d, v = f._get_distver_from_os_release()
+ #print(d,v)
+ self.assertEqual(_os, d)
+ # Note that (at least on centos), os_release is an
+ # imprecise version string
+ self.assertTrue(_ver.startswith(v))
+ self.assertTrue(v.replace('.','').startswith(dist_ver))
+
+ elif _sys == 'darwin':
+ dist, dist_ver = re.match('^([^0-9]+)(.*)', _norm).groups()
+ self.assertEqual(_os, 'macos')
+ self.assertEqual(dist, 'macos')
+ self.assertNotIn('.', dist_ver)
+ self.assertGreater(int(dist_ver), 0)
+ self.assertEqual(_norm, _os+''.join(_ver.split('.')[:2]))
+ elif _sys == 'windows':
+ self.assertEqual(_os, 'win')
+ self.assertEqual(_norm, _os+''.join(_ver.split('.')[:2]))
+ else:
+ self.assertEqual(ans, '')
+
+ self.assertEqual((_os, _ver), FileDownloader._os_version)
+ # Exercise the fetch from CACHE
+ try:
+ FileDownloader._os_version, tmp \
+ = ("test", '2'), FileDownloader._os_version
+ self.assertEqual(f.get_os_version(False), ("test","2"))
+ self.assertEqual(f.get_os_version(), "test2")
+ finally:
+ FileDownloader._os_version = tmp
+
+
+ def test_get_platform_url(self):
f = FileDownloader()
urlmap = {'bogus_sys': 'bogus'}
with self.assertRaisesRegexp(
RuntimeError, "cannot infer the correct url for platform '.*'"):
- f.get_url(urlmap)
+ f.get_platform_url(urlmap)
urlmap[f.get_sysinfo()[0]] = 'correct'
- self.assertEqual(f.get_url(urlmap), 'correct')
+ self.assertEqual(f.get_platform_url(urlmap), 'correct')
def test_get_files_requires_set_destination(self):
@@ -161,3 +234,28 @@ def test_get_files_requires_set_destination(self):
with self.assertRaisesRegexp(
DeveloperError, 'target file name has not been initialized'):
f.get_gzipped_binary_file('bogus')
+
+ def test_get_test_binary_file(self):
+ tmpdir = tempfile.mkdtemp()
+ try:
+ f = FileDownloader()
+
+ # Mock retrieve_url so network connections are not necessary
+ if six.PY3:
+ f.retrieve_url = lambda url: bytes("\n", encoding='utf-8')
+ else:
+ f.retrieve_url = lambda url: str("\n")
+
+ # Binary files will preserve line endings
+ target = os.path.join(tmpdir, 'bin.txt')
+ f.set_destination_filename(target)
+ f.get_binary_file(None)
+ self.assertEqual(os.path.getsize(target), 1)
+
+ # Text files will convert line endings to the local platform
+ target = os.path.join(tmpdir, 'txt.txt')
+ f.set_destination_filename(target)
+ f.get_text_file(None)
+ self.assertEqual(os.path.getsize(target), len(os.linesep))
+ finally:
+ shutil.rmtree(tmpdir)
diff --git a/pyomo/common/tests/test_fileutils.py b/pyomo/common/tests/test_fileutils.py
index 72b53368f87..42ab3a1e6e9 100755
--- a/pyomo/common/tests/test_fileutils.py
+++ b/pyomo/common/tests/test_fileutils.py
@@ -8,6 +8,7 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+import ctypes
import logging
import os
import platform
@@ -27,6 +28,7 @@
this_file, this_file_dir, find_file, find_library, find_executable,
PathManager, _system, _path, _exeExt, _libExt, _ExecutableData,
)
+from pyomo.common.download import FileDownloader
try:
samefile = os.path.samefile
@@ -192,6 +194,26 @@ def test_find_library(self):
self.tmpdir = os.path.abspath(tempfile.mkdtemp())
os.chdir(self.tmpdir)
+ # Find a system library (before we muck with the PATH)
+ _args = {'cwd':False, 'include_PATH':False, 'pathlist':[]}
+ if FileDownloader.get_sysinfo()[0] == 'windows':
+ a = find_library('ntdll', **_args)
+ b = find_library('ntdll.dll', **_args)
+ c = find_library('foo\\bar\\ntdll.dll', **_args)
+ else:
+ a = find_library('c', **_args)
+ b = find_library('libc.so', **_args)
+ c = find_library('foo/bar/libc.so', **_args)
+ self.assertIsNotNone(a)
+ self.assertIsNotNone(b)
+ self.assertIsNotNone(c)
+ self.assertEqual(a,b)
+ self.assertEqual(a,c)
+ # Verify that the library is loadable (they are all the same
+ # file, so only check one)
+ _lib = ctypes.cdll.LoadLibrary(a)
+ self.assertIsNotNone(_lib)
+
config.PYOMO_CONFIG_DIR = self.tmpdir
config_libdir = os.path.join(self.tmpdir, 'lib')
os.mkdir(config_libdir)
@@ -242,9 +264,17 @@ def test_find_library(self):
os.path.join(pathdir, f_in_path),
find_library(f_in_path)
)
- self.assertIsNone(
- find_library(f_in_path, include_PATH=False)
- )
+ if _system() == 'windows':
+ self._check_file(
+ os.path.join(pathdir, f_in_path),
+ find_library(f_in_path, include_PATH=False)
+ )
+ else:
+ # Note that on Windows, ctypes.util.find_library *always*
+ # searches the PATH
+ self.assertIsNone(
+ find_library(f_in_path, include_PATH=False)
+ )
self._check_file(
os.path.join(pathdir, f_in_path),
find_library(f_in_path, pathlist=os.pathsep+pathdir+os.pathsep)
diff --git a/pyomo/common/tests/test_orderedset.py b/pyomo/common/tests/test_orderedset.py
new file mode 100644
index 00000000000..d43460c6c9c
--- /dev/null
+++ b/pyomo/common/tests/test_orderedset.py
@@ -0,0 +1,70 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import pickle
+import pyutilib.th as unittest
+
+from pyomo.common.collections import OrderedSet
+
+class testOrderedSet(unittest.TestCase):
+ def test_constructor(self):
+ a = OrderedSet()
+ self.assertEqual(len(a), 0)
+ self.assertEqual(list(a), [])
+ self.assertEqual(str(a), 'OrderedSet()')
+
+ ref = [1,9,'a',4,2,None]
+ a = OrderedSet(ref)
+ self.assertEqual(len(a), 6)
+ self.assertEqual(list(a), ref)
+ self.assertEqual(str(a), "OrderedSet(1, 9, 'a', 4, 2, None)")
+
+ def test_in_add(self):
+ a = OrderedSet()
+ self.assertNotIn(1, a)
+ self.assertNotIn(None, a)
+
+ a.add(None)
+ self.assertNotIn(1, a)
+ self.assertIn(None, a)
+
+ a.add(1)
+ self.assertIn(1, a)
+ self.assertIn(None, a)
+
+ a.add(0)
+ self.assertEqual(list(a), [None,1,0])
+
+ # Adding a member alrady in the set does not change the ordering
+ a.add(1)
+ self.assertEqual(list(a), [None,1,0])
+
+ def test_discard_remove_clear(self):
+ a = OrderedSet([1,3,2,4])
+ a.discard(3)
+ self.assertEqual(list(a), [1,2,4])
+ a.discard(3)
+ self.assertEqual(list(a), [1,2,4])
+
+ a.remove(2)
+ self.assertEqual(list(a), [1,4])
+ with self.assertRaisesRegex(KeyError,'2'):
+ a.remove(2)
+
+ a.clear()
+ self.assertEqual(list(a), [])
+
+ def test_pickle(self):
+ ref = [1,9,'a',4,2,None]
+ a = OrderedSet(ref)
+ b = pickle.loads(pickle.dumps(a))
+ self.assertEqual(a, b)
+ self.assertIsNot(a, b)
+ self.assertIsNot(a._dict, b._dict)
diff --git a/pyomo/common/tests/test_task.py b/pyomo/common/tests/test_task.py
index 0d432541b77..3574746da64 100644
--- a/pyomo/common/tests/test_task.py
+++ b/pyomo/common/tests/test_task.py
@@ -16,13 +16,6 @@
from six import StringIO
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
-
class TestData(unittest.TestCase):
def test_print_PyomoAPIData_string(self):
@@ -46,7 +39,6 @@ def test_print_PyomoAPIData_string(self):
y: 2""")
self.assertEqual(len(data._dirty_), 0)
- @unittest.skipIf(not yaml_available, "No YAML interface available")
def test_print_PyomoAPIData_repr(self):
#"""Print PyomoAPIData representation"""
data = PyomoAPIData()
diff --git a/pyomo/common/tests/test_timing.py b/pyomo/common/tests/test_timing.py
new file mode 100644
index 00000000000..0637831ce27
--- /dev/null
+++ b/pyomo/common/tests/test_timing.py
@@ -0,0 +1,56 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import pyutilib.th as unittest
+
+from six import StringIO
+
+from pyomo.common.log import LoggingIntercept
+from pyomo.common.timing import ConstructionTimer, report_timing
+from pyomo.environ import ConcreteModel, RangeSet, Var
+
+class TestTiming(unittest.TestCase):
+ def test_raw_construction_timer(self):
+ a = ConstructionTimer(None)
+ self.assertIn(
+ "ConstructionTimer object for NoneType (unknown); ",
+ str(a))
+
+ def test_report_timing(self):
+ # Create a set to ensure that the global sets have already been
+ # constructed (this is an issue until the new set system is
+ # merged in and the GlobalSet objects are not automatically
+ # created by pyomo.core
+ m = ConcreteModel()
+ m.x = Var([1,2])
+
+ ref = """
+ 0 seconds to construct Block ConcreteModel; 1 index total
+ 0 seconds to construct RangeSet FiniteSimpleRangeSet; 1 index total
+ 0 seconds to construct Var x; 2 indicies total
+""".strip()
+
+ os = StringIO()
+ try:
+ report_timing(os)
+ m = ConcreteModel()
+ m.r = RangeSet(2)
+ m.x = Var(m.r)
+ self.assertEqual(os.getvalue().strip(), ref)
+ finally:
+ report_timing(False)
+ buf = StringIO()
+ with LoggingIntercept(buf, 'pyomo'):
+ m = ConcreteModel()
+ m.r = RangeSet(2)
+ m.x = Var(m.r)
+ self.assertEqual(os.getvalue().strip(), ref)
+ self.assertEqual(buf.getvalue().strip(), "")
+
diff --git a/pyomo/common/timing.py b/pyomo/common/timing.py
index 0ff5f0c2f5e..792a253ca20 100644
--- a/pyomo/common/timing.py
+++ b/pyomo/common/timing.py
@@ -35,7 +35,10 @@ def report(self):
def __str__(self):
total_time = self.timer
- idx = len(self.obj.index_set())
+ try:
+ idx = len(self.obj.index_set())
+ except AttributeError:
+ idx = 1
try:
name = self.obj.name
except RuntimeError:
@@ -43,16 +46,22 @@ def __str__(self):
name = self.obj.local_name
except RuntimeError:
name = '(unknown)'
+ except AttributeError:
+ name = '(unknown)'
+ try:
+ _type = self.obj.ctype.__name__
+ except AttributeError:
+ _type = type(self.obj).__name__
try:
return self.fmt % ( 2 if total_time>=0.005 else 0,
- self.obj.type().__name__,
+ _type,
name,
idx,
'indicies' if idx > 1 else 'index',
) % total_time
except TypeError:
return "ConstructionTimer object for %s %s; %s elapsed seconds" % (
- self.obj.type().__name__,
+ _type,
name,
self.timer.toc("") )
diff --git a/pyomo/contrib/benders/benders_cuts.py b/pyomo/contrib/benders/benders_cuts.py
index 5992691896d..815fa295ea9 100644
--- a/pyomo/contrib/benders/benders_cuts.py
+++ b/pyomo/contrib/benders/benders_cuts.py
@@ -70,6 +70,7 @@
solver_dual_sign_convention['gurobi_persistent'] = -1
solver_dual_sign_convention['cplex'] = -1
solver_dual_sign_convention['cplex_direct'] = -1
+solver_dual_sign_convention['cplexdirect'] = -1
solver_dual_sign_convention['cplex_persistent'] = -1
solver_dual_sign_convention['glpk'] = -1
solver_dual_sign_convention['cbc'] = -1
@@ -150,7 +151,8 @@ def __init__(self, component):
if not numpy_available:
raise ImportError('BendersCutGenerator requires numpy.')
_BlockData.__init__(self, component)
- self.num_subproblems_by_rank = np.zeros(MPI.COMM_WORLD.Get_size())
+
+ self.num_subproblems_by_rank = 0 #np.zeros(self.comm.Get_size())
self.subproblems = list()
self.complicating_vars_maps = list()
self.master_vars = list()
@@ -160,22 +162,31 @@ def __init__(self, component):
self.subproblem_solvers = list()
self.tol = None
self.all_master_etas = list()
+ self._subproblem_ndx_map = dict() # map from ndx in self.subproblems (local) to the global subproblem ndx
- def set_input(self, master_vars, tol=1e-6):
+
+ def global_num_subproblems(self):
+ return int(self.num_subproblems_by_rank.sum())
+
+ def local_num_subproblems(self):
+ return len(self.subproblems)
+
+ def set_input(self, master_vars, tol=1e-6, comm = None):
"""
It is very important for master_vars to be in the same order for every process.
Parameters
----------
master_vars
- master_eta
tol
-
- Returns
- -------
-
"""
- self.num_subproblems_by_rank = np.zeros(MPI.COMM_WORLD.Get_size())
+ self.comm = None
+
+ if comm is not None:
+ self.comm = comm
+ else:
+ self.comm = MPI.COMM_WORLD
+ self.num_subproblems_by_rank = np.zeros(self.comm.Get_size())
del self.cuts
self.cuts = pe.ConstraintList()
self.subproblems = list()
@@ -188,17 +199,19 @@ def set_input(self, master_vars, tol=1e-6):
self.tol = tol
self.subproblem_solvers = list()
self.all_master_etas = list()
+ self._subproblem_ndx_map = dict()
def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, master_eta, subproblem_solver='gurobi_persistent', relax_subproblem_cons=False):
_rank = np.argmin(self.num_subproblems_by_rank)
self.num_subproblems_by_rank[_rank] += 1
self.all_master_etas.append(master_eta)
- if _rank == MPI.COMM_WORLD.Get_rank():
+ if _rank == self.comm.Get_rank():
self.master_etas.append(master_eta)
subproblem, complicating_vars_map = subproblem_fn(**subproblem_fn_kwargs)
self.subproblems.append(subproblem)
self.complicating_vars_maps.append(complicating_vars_map)
_setup_subproblem(subproblem, master_vars=[complicating_vars_map[i] for i in self.master_vars if i in complicating_vars_map], relax_subproblem_cons=relax_subproblem_cons)
+ self._subproblem_ndx_map[len(self.subproblems) - 1] = self.global_num_subproblems() - 1
if isinstance(subproblem_solver, str):
subproblem_solver = pe.SolverFactory(subproblem_solver)
@@ -207,15 +220,16 @@ def add_subproblem(self, subproblem_fn, subproblem_fn_kwargs, master_eta, subpro
subproblem_solver.set_instance(subproblem)
def generate_cut(self):
- coefficients = np.zeros(len(self.subproblems)*len(self.master_vars), dtype='d')
- constants = np.zeros(len(self.subproblems), dtype='d')
- eta_coeffs = np.zeros(len(self.subproblems), dtype='d')
+ coefficients = np.zeros(self.global_num_subproblems() * len(self.master_vars), dtype='d')
+ constants = np.zeros(self.global_num_subproblems(), dtype='d')
+ eta_coeffs = np.zeros(self.global_num_subproblems(), dtype='d')
- coeff_ndx = 0
- for subproblem_ndx in range(len(self.subproblems)):
- subproblem = self.subproblems[subproblem_ndx]
- complicating_vars_map = self.complicating_vars_maps[subproblem_ndx]
- master_eta = self.master_etas[subproblem_ndx]
+ for local_subproblem_ndx in range(len(self.subproblems)):
+ subproblem = self.subproblems[local_subproblem_ndx]
+ global_subproblem_ndx = self._subproblem_ndx_map[local_subproblem_ndx]
+ complicating_vars_map = self.complicating_vars_maps[local_subproblem_ndx]
+ master_eta = self.master_etas[local_subproblem_ndx]
+ coeff_ndx = global_subproblem_ndx * len(self.master_vars)
subproblem.fix_complicating_vars = pe.ConstraintList()
var_to_con_map = pe.ComponentMap()
@@ -228,9 +242,9 @@ def generate_cut(self):
subproblem.fix_eta = pe.Constraint(expr=subproblem._eta - master_eta.value == 0)
subproblem._eta.value = master_eta.value
- subproblem_solver = self.subproblem_solvers[subproblem_ndx]
+ subproblem_solver = self.subproblem_solvers[local_subproblem_ndx]
if subproblem_solver.name not in solver_dual_sign_convention:
- raise NotImplementedError('BendersCutGenerator is unaware of the dual sign convention of subproblem solver ' + self.subproblem_solver.name)
+ raise NotImplementedError('BendersCutGenerator is unaware of the dual sign convention of subproblem solver ' + subproblem_solver.name)
sign_convention = solver_dual_sign_convention[subproblem_solver.name]
if isinstance(subproblem_solver, PersistentSolver):
@@ -248,8 +262,8 @@ def generate_cut(self):
raise RuntimeError('Unable to generate cut because subproblem failed to converge.')
subproblem.solutions.load_from(res)
- constants[subproblem_ndx] = pe.value(subproblem._z)
- eta_coeffs[subproblem_ndx] = sign_convention * pe.value(subproblem.dual[subproblem.obj_con])
+ constants[global_subproblem_ndx] = pe.value(subproblem._z)
+ eta_coeffs[global_subproblem_ndx] = sign_convention * pe.value(subproblem.dual[subproblem.obj_con])
for master_var in self.master_vars:
if master_var in complicating_vars_map:
c = var_to_con_map[master_var]
@@ -264,15 +278,15 @@ def generate_cut(self):
del subproblem.fix_complicating_vars_index
del subproblem.fix_eta
- total_num_subproblems = int(np.sum(self.num_subproblems_by_rank))
+ total_num_subproblems = self.global_num_subproblems()
global_constants = np.zeros(total_num_subproblems, dtype='d')
global_coeffs = np.zeros(total_num_subproblems*len(self.master_vars), dtype='d')
global_eta_coeffs = np.zeros(total_num_subproblems, dtype='d')
- comm = MPI.COMM_WORLD
- comm.Allgatherv([constants, MPI.DOUBLE], [global_constants, MPI.DOUBLE])
- comm.Allgatherv([coefficients, MPI.DOUBLE], [global_coeffs, MPI.DOUBLE])
- comm.Allgatherv([eta_coeffs, MPI.DOUBLE], [global_eta_coeffs, MPI.DOUBLE])
+ comm = self.comm
+ comm.Allreduce([constants, MPI.DOUBLE], [global_constants, MPI.DOUBLE])
+ comm.Allreduce([eta_coeffs, MPI.DOUBLE], [global_eta_coeffs, MPI.DOUBLE])
+ comm.Allreduce([coefficients, MPI.DOUBLE], [global_coeffs, MPI.DOUBLE])
global_constants = [float(i) for i in global_constants]
global_coeffs = [float(i) for i in global_coeffs]
@@ -280,11 +294,11 @@ def generate_cut(self):
coeff_ndx = 0
cuts_added = list()
- for subproblem_ndx in range(total_num_subproblems):
- cut_expr = global_constants[subproblem_ndx]
+ for global_subproblem_ndx in range(total_num_subproblems):
+ cut_expr = global_constants[global_subproblem_ndx]
if cut_expr > self.tol:
- master_eta = self.all_master_etas[subproblem_ndx]
- cut_expr -= global_eta_coeffs[subproblem_ndx] * (master_eta - master_eta.value)
+ master_eta = self.all_master_etas[global_subproblem_ndx]
+ cut_expr -= global_eta_coeffs[global_subproblem_ndx] * (master_eta - master_eta.value)
for master_var in self.master_vars:
coeff = global_coeffs[coeff_ndx]
cut_expr -= coeff * (master_var - master_var.value)
@@ -293,4 +307,5 @@ def generate_cut(self):
cuts_added.append(new_cut)
else:
coeff_ndx += len(self.master_vars)
+
return cuts_added
diff --git a/pyomo/contrib/benders/tests/test_benders.py b/pyomo/contrib/benders/tests/test_benders.py
index 4746ae0f21c..603dea06c9b 100644
--- a/pyomo/contrib/benders/tests/test_benders.py
+++ b/pyomo/contrib/benders/tests/test_benders.py
@@ -13,9 +13,124 @@
numpy_available = False
-class TestBenders(unittest.TestCase):
+ipopt_opt = pe.SolverFactory('ipopt')
+ipopt_available = ipopt_opt.available(exception_flag=False)
+
+cplex_opt = pe.SolverFactory('cplex_direct')
+cplex_available = cplex_opt.available(exception_flag=False)
+
+
+@unittest.category('mpi', 'nightly')
+class MPITestBenders(unittest.TestCase):
+ @unittest.skipIf(not mpi4py_available, 'mpi4py is not available.')
+ @unittest.skipIf(not numpy_available, 'numpy is not available.')
+ @unittest.skipIf(not cplex_available, 'cplex is not available.')
+ def test_farmer(self):
+ class Farmer(object):
+ def __init__(self):
+ self.crops = ['WHEAT', 'CORN', 'SUGAR_BEETS']
+ self.total_acreage = 500
+ self.PriceQuota = {'WHEAT': 100000.0, 'CORN': 100000.0, 'SUGAR_BEETS': 6000.0}
+ self.SubQuotaSellingPrice = {'WHEAT': 170.0, 'CORN': 150.0, 'SUGAR_BEETS': 36.0}
+ self.SuperQuotaSellingPrice = {'WHEAT': 0.0, 'CORN': 0.0, 'SUGAR_BEETS': 10.0}
+ self.CattleFeedRequirement = {'WHEAT': 200.0, 'CORN': 240.0, 'SUGAR_BEETS': 0.0}
+ self.PurchasePrice = {'WHEAT': 238.0, 'CORN': 210.0, 'SUGAR_BEETS': 100000.0}
+ self.PlantingCostPerAcre = {'WHEAT': 150.0, 'CORN': 230.0, 'SUGAR_BEETS': 260.0}
+ self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario']
+ self.crop_yield = dict()
+ self.crop_yield['BelowAverageScenario'] = {'WHEAT': 2.0, 'CORN': 2.4, 'SUGAR_BEETS': 16.0}
+ self.crop_yield['AverageScenario'] = {'WHEAT': 2.5, 'CORN': 3.0, 'SUGAR_BEETS': 20.0}
+ self.crop_yield['AboveAverageScenario'] = {'WHEAT': 3.0, 'CORN': 3.6, 'SUGAR_BEETS': 24.0}
+ self.scenario_probabilities = dict()
+ self.scenario_probabilities['BelowAverageScenario'] = 0.3333
+ self.scenario_probabilities['AverageScenario'] = 0.3334
+ self.scenario_probabilities['AboveAverageScenario'] = 0.3333
+
+ def create_master(farmer):
+ m = pe.ConcreteModel()
+
+ m.crops = pe.Set(initialize=farmer.crops, ordered=True)
+ m.scenarios = pe.Set(initialize=farmer.scenarios, ordered=True)
+
+ m.devoted_acreage = pe.Var(m.crops, bounds=(0, farmer.total_acreage))
+ m.eta = pe.Var(m.scenarios)
+ for s in m.scenarios:
+ m.eta[s].setlb(-432000 * farmer.scenario_probabilities[s])
+
+ m.total_acreage_con = pe.Constraint(expr=sum(m.devoted_acreage.values()) <= farmer.total_acreage)
+
+ m.obj = pe.Objective(
+ expr=sum(farmer.PlantingCostPerAcre[crop] * m.devoted_acreage[crop] for crop in m.crops) + sum(
+ m.eta.values()))
+ return m
+
+ def create_subproblem(master, farmer, scenario):
+ m = pe.ConcreteModel()
+
+ m.crops = pe.Set(initialize=farmer.crops, ordered=True)
+
+ m.devoted_acreage = pe.Var(m.crops)
+ m.QuantitySubQuotaSold = pe.Var(m.crops, bounds=(0.0, None))
+ m.QuantitySuperQuotaSold = pe.Var(m.crops, bounds=(0.0, None))
+ m.QuantityPurchased = pe.Var(m.crops, bounds=(0.0, None))
+
+ def EnforceCattleFeedRequirement_rule(m, i):
+ return (farmer.CattleFeedRequirement[i] <= (farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) +
+ m.QuantityPurchased[i] - m.QuantitySubQuotaSold[i] - m.QuantitySuperQuotaSold[i])
+
+ m.EnforceCattleFeedRequirement = pe.Constraint(m.crops, rule=EnforceCattleFeedRequirement_rule)
+
+ def LimitAmountSold_rule(m, i):
+ return m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - (
+ farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0
+
+ m.LimitAmountSold = pe.Constraint(m.crops, rule=LimitAmountSold_rule)
+
+ def EnforceQuotas_rule(m, i):
+ return (0.0, m.QuantitySubQuotaSold[i], farmer.PriceQuota[i])
+
+ m.EnforceQuotas = pe.Constraint(m.crops, rule=EnforceQuotas_rule)
+
+ obj_expr = sum(farmer.PurchasePrice[crop] * m.QuantityPurchased[crop] for crop in m.crops)
+ obj_expr -= sum(farmer.SubQuotaSellingPrice[crop] * m.QuantitySubQuotaSold[crop] for crop in m.crops)
+ obj_expr -= sum(farmer.SuperQuotaSellingPrice[crop] * m.QuantitySuperQuotaSold[crop] for crop in m.crops)
+ m.obj = pe.Objective(expr=farmer.scenario_probabilities[scenario] * obj_expr)
+
+ complicating_vars_map = pe.ComponentMap()
+ for crop in m.crops:
+ complicating_vars_map[master.devoted_acreage[crop]] = m.devoted_acreage[crop]
+
+ return m, complicating_vars_map
+
+ farmer = Farmer()
+ m = create_master(farmer=farmer)
+ master_vars = list(m.devoted_acreage.values())
+ m.benders = BendersCutGenerator()
+ m.benders.set_input(master_vars=master_vars, tol=1e-8)
+ for s in farmer.scenarios:
+ subproblem_fn_kwargs = dict()
+ subproblem_fn_kwargs['master'] = m
+ subproblem_fn_kwargs['farmer'] = farmer
+ subproblem_fn_kwargs['scenario'] = s
+ m.benders.add_subproblem(subproblem_fn=create_subproblem,
+ subproblem_fn_kwargs=subproblem_fn_kwargs,
+ master_eta=m.eta[s],
+ subproblem_solver='cplex_direct')
+ opt = pe.SolverFactory('cplex_direct')
+
+ for i in range(30):
+ res = opt.solve(m, tee=False)
+ cuts_added = m.benders.generate_cut()
+ if len(cuts_added) == 0:
+ break
+
+ self.assertAlmostEqual(m.devoted_acreage['CORN'].value, 80, 7)
+ self.assertAlmostEqual(m.devoted_acreage['SUGAR_BEETS'].value, 250, 7)
+ self.assertAlmostEqual(m.devoted_acreage['WHEAT'].value, 170, 7)
+
@unittest.skipIf(not mpi4py_available, 'mpi4py is not available.')
@unittest.skipIf(not numpy_available, 'numpy is not available.')
+ @unittest.skipIf(not ipopt_available, 'ipopt is not available.')
def test_grothey(self):
def create_master():
m = pe.ConcreteModel()
@@ -58,8 +173,9 @@ def create_subproblem(master):
@unittest.skipIf(not mpi4py_available, 'mpi4py is not available.')
@unittest.skipIf(not numpy_available, 'numpy is not available.')
- def test_farmer(self):
- class Farmer(object):
+ @unittest.skipIf(not cplex_available, 'cplex is not available.')
+ def test_four_scen_farmer(self):
+ class FourScenFarmer(object):
def __init__(self):
self.crops = ['WHEAT', 'CORN', 'SUGAR_BEETS']
self.total_acreage = 500
@@ -69,15 +185,17 @@ def __init__(self):
self.CattleFeedRequirement = {'WHEAT': 200.0, 'CORN': 240.0, 'SUGAR_BEETS': 0.0}
self.PurchasePrice = {'WHEAT': 238.0, 'CORN': 210.0, 'SUGAR_BEETS': 100000.0}
self.PlantingCostPerAcre = {'WHEAT': 150.0, 'CORN': 230.0, 'SUGAR_BEETS': 260.0}
- self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario']
+ self.scenarios = ['BelowAverageScenario', 'AverageScenario', 'AboveAverageScenario', 'Scenario4']
self.crop_yield = dict()
self.crop_yield['BelowAverageScenario'] = {'WHEAT': 2.0, 'CORN': 2.4, 'SUGAR_BEETS': 16.0}
self.crop_yield['AverageScenario'] = {'WHEAT': 2.5, 'CORN': 3.0, 'SUGAR_BEETS': 20.0}
self.crop_yield['AboveAverageScenario'] = {'WHEAT': 3.0, 'CORN': 3.6, 'SUGAR_BEETS': 24.0}
+ self.crop_yield['Scenario4'] = {'WHEAT':2.0, 'CORN':3.0, 'SUGAR_BEETS':24.0}
self.scenario_probabilities = dict()
- self.scenario_probabilities['BelowAverageScenario'] = 0.3333
- self.scenario_probabilities['AverageScenario'] = 0.3334
- self.scenario_probabilities['AboveAverageScenario'] = 0.3333
+ self.scenario_probabilities['BelowAverageScenario'] = 0.25
+ self.scenario_probabilities['AverageScenario'] = 0.25
+ self.scenario_probabilities['AboveAverageScenario'] = 0.25
+ self.scenario_probabilities['Scenario4'] = 0.25
def create_master(farmer):
m = pe.ConcreteModel()
@@ -115,7 +233,7 @@ def EnforceCattleFeedRequirement_rule(m, i):
def LimitAmountSold_rule(m, i):
return m.QuantitySubQuotaSold[i] + m.QuantitySuperQuotaSold[i] - (
- farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0
+ farmer.crop_yield[scenario][i] * m.devoted_acreage[i]) <= 0.0
m.LimitAmountSold = pe.Constraint(m.crops, rule=LimitAmountSold_rule)
@@ -135,7 +253,7 @@ def EnforceQuotas_rule(m, i):
return m, complicating_vars_map
- farmer = Farmer()
+ farmer = FourScenFarmer()
m = create_master(farmer=farmer)
master_vars = list(m.devoted_acreage.values())
m.benders = BendersCutGenerator()
@@ -148,8 +266,8 @@ def EnforceQuotas_rule(m, i):
m.benders.add_subproblem(subproblem_fn=create_subproblem,
subproblem_fn_kwargs=subproblem_fn_kwargs,
master_eta=m.eta[s],
- subproblem_solver='glpk')
- opt = pe.SolverFactory('glpk')
+ subproblem_solver='cplex_direct')
+ opt = pe.SolverFactory('cplex_direct')
for i in range(30):
res = opt.solve(m, tee=False)
@@ -157,6 +275,8 @@ def EnforceQuotas_rule(m, i):
if len(cuts_added) == 0:
break
- self.assertAlmostEqual(m.devoted_acreage['CORN'].value, 80, 7)
+ self.assertAlmostEqual(m.devoted_acreage['CORN'].value ,100, 7)
self.assertAlmostEqual(m.devoted_acreage['SUGAR_BEETS'].value, 250, 7)
- self.assertAlmostEqual(m.devoted_acreage['WHEAT'].value, 170, 7)
+ self.assertAlmostEqual(m.devoted_acreage['WHEAT'].value, 150, 7)
+
+
diff --git a/pyomo/contrib/fbbt/fbbt.py b/pyomo/contrib/fbbt/fbbt.py
index b0570611df5..52e5b251ae3 100644
--- a/pyomo/contrib/fbbt/fbbt.py
+++ b/pyomo/contrib/fbbt/fbbt.py
@@ -319,7 +319,7 @@ def _prop_bnds_leaf_to_root_asin(node, bnds_dict, feasibility_tol):
assert len(node.args) == 1
arg = node.args[0]
lb1, ub1 = bnds_dict[arg]
- bnds_dict[node] = interval.asin(lb1, ub1, -interval.inf, interval.inf)
+ bnds_dict[node] = interval.asin(lb1, ub1, -interval.inf, interval.inf, feasibility_tol)
def _prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol):
@@ -339,7 +339,7 @@ def _prop_bnds_leaf_to_root_acos(node, bnds_dict, feasibility_tol):
assert len(node.args) == 1
arg = node.args[0]
lb1, ub1 = bnds_dict[arg]
- bnds_dict[node] = interval.acos(lb1, ub1, -interval.inf, interval.inf)
+ bnds_dict[node] = interval.acos(lb1, ub1, -interval.inf, interval.inf, feasibility_tol)
def _prop_bnds_leaf_to_root_atan(node, bnds_dict, feasibility_tol):
@@ -809,7 +809,7 @@ def _prop_bnds_root_to_leaf_sin(node, bnds_dict, feasibility_tol):
arg = node.args[0]
lb0, ub0 = bnds_dict[node]
lb1, ub1 = bnds_dict[arg]
- _lb1, _ub1 = interval.asin(lb0, ub0, lb1, ub1)
+ _lb1, _ub1 = interval.asin(lb0, ub0, lb1, ub1, feasibility_tol)
if _lb1 > lb1:
lb1 = _lb1
if _ub1 < ub1:
@@ -835,7 +835,7 @@ def _prop_bnds_root_to_leaf_cos(node, bnds_dict, feasibility_tol):
arg = node.args[0]
lb0, ub0 = bnds_dict[node]
lb1, ub1 = bnds_dict[arg]
- _lb1, _ub1 = interval.acos(lb0, ub0, lb1, ub1)
+ _lb1, _ub1 = interval.acos(lb0, ub0, lb1, ub1, feasibility_tol)
if _lb1 > lb1:
lb1 = _lb1
if _ub1 < ub1:
@@ -1330,7 +1330,7 @@ def _fbbt_block(m, config):
else:
var_ubs[v] = value(v.ub)
var_to_con_map[v].append(c)
- n_cons += 1
+ n_cons += 1
for _v in m.component_data_objects(ctype=Var, active=True, descend_into=True, sort=True):
if _v.is_fixed():
@@ -1358,7 +1358,7 @@ def _fbbt_block(m, config):
var_ubs[v] = vub
while len(improved_vars) > 0:
- if n_fbbt > n_cons * config.max_iter:
+ if n_fbbt >= n_cons * config.max_iter:
break
v = improved_vars.pop()
for c in var_to_con_map[v]:
@@ -1404,14 +1404,14 @@ def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibi
region is removed due to floating point arithmetic and to prevent math domain errors (a larger value
is more conservative).
max_iter: int
- Used for Blocks only (i.e., comp.type() == Block). When performing FBBT on a Block, we first perform FBBT on
+ Used for Blocks only (i.e., comp.ctype == Block). When performing FBBT on a Block, we first perform FBBT on
every constraint in the Block. We then attempt to identify which constraints to repeat FBBT on based on the
improvement in variable bounds. If the bounds on a variable improve by more than improvement_tol, then FBBT
is performed on the constraints using that Var. However, this algorithm is not guaranteed to converge, so
max_iter limits the total number of times FBBT is performed to max_iter times the number of constraints
in the Block.
improvement_tol: float
- Used for Blocks only (i.e., comp.type() == Block). When performing FBBT on a Block, we first perform FBBT on
+ Used for Blocks only (i.e., comp.ctype == Block). When performing FBBT on a Block, we first perform FBBT on
every constraint in the Block. We then attempt to identify which constraints to repeat FBBT on based on the
improvement in variable bounds. If the bounds on a variable improve by more than improvement_tol, then FBBT
is performed on the constraints using that Var.
@@ -1435,7 +1435,7 @@ def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibi
config.declare('improvement_tol', improvement_tol_config)
new_var_bounds = ComponentMap()
- if comp.type() == Constraint:
+ if comp.ctype == Constraint:
if comp.is_indexed():
for _c in comp.values():
_new_var_bounds = _fbbt_con(comp, config)
@@ -1443,7 +1443,7 @@ def fbbt(comp, deactivate_satisfied_constraints=False, integer_tol=1e-5, feasibi
else:
_new_var_bounds = _fbbt_con(comp, config)
new_var_bounds.update(_new_var_bounds)
- elif comp.type() in {Block, Disjunct}:
+ elif comp.ctype in {Block, Disjunct}:
_new_var_bounds = _fbbt_block(comp, config)
new_var_bounds.update(_new_var_bounds)
else:
@@ -1468,8 +1468,13 @@ def compute_bounds_on_expr(expr):
bnds_dict = ComponentMap()
visitor = _FBBTVisitorLeafToRoot(bnds_dict)
visitor.dfs_postorder_stack(expr)
+ lb, ub = bnds_dict[expr]
+ if lb == -interval.inf:
+ lb = None
+ if ub == interval.inf:
+ ub = None
- return bnds_dict[expr]
+ return lb, ub
class BoundsManager(object):
@@ -1477,7 +1482,7 @@ def __init__(self, comp):
self._vars = ComponentSet()
self._saved_bounds = list()
- if comp.type() == Constraint:
+ if comp.ctype == Constraint:
if comp.is_indexed():
for c in comp.values():
self._vars.update(identify_variables(c.body))
diff --git a/pyomo/contrib/fbbt/interval.py b/pyomo/contrib/fbbt/interval.py
index ee4b59f29b9..df305cfde93 100644
--- a/pyomo/contrib/fbbt/interval.py
+++ b/pyomo/contrib/fbbt/interval.py
@@ -418,7 +418,7 @@ def tan(xl, xu):
return lb, ub
-def asin(xl, xu, yl, yu):
+def asin(xl, xu, yl, yu, feasibility_tol):
"""
y = asin(x); propagate bounds from x to y
x = sin(y)
@@ -471,7 +471,7 @@ def asin(xl, xu, yl, yu):
# satisfies xl = sin(y)
lb1 = i1 + dist
lb2 = i2 + dist
- if lb1 >= yl:
+ if lb1 >= yl - feasibility_tol:
lb = lb1
else:
lb = lb2
@@ -486,7 +486,7 @@ def asin(xl, xu, yl, yu):
dist = pi / 2 - y_tmp
lb1 = i1 + dist
lb2 = i2 + dist
- if lb1 >= yl:
+ if lb1 >= yl - feasibility_tol:
lb = lb1
else:
lb = lb2
@@ -506,7 +506,7 @@ def asin(xl, xu, yl, yu):
dist = pi / 2 - y_tmp
ub1 = i1 - dist
ub2 = i2 - dist
- if ub1 <= yu:
+ if ub1 <= yu + feasibility_tol:
ub = ub1
else:
ub = ub2
@@ -521,7 +521,7 @@ def asin(xl, xu, yl, yu):
dist = y_tmp - (-pi / 2)
ub1 = i1 - dist
ub2 = i2 - dist
- if ub1 <= yu:
+ if ub1 <= yu + feasibility_tol:
ub = ub1
else:
ub = ub2
@@ -529,7 +529,7 @@ def asin(xl, xu, yl, yu):
return lb, ub
-def acos(xl, xu, yl, yu):
+def acos(xl, xu, yl, yu, feasibility_tol):
"""
y = acos(x); propagate bounds from x to y
x = cos(y)
@@ -582,7 +582,7 @@ def acos(xl, xu, yl, yu):
# satisfies xl = sin(y)
lb1 = i1 + dist
lb2 = i2 + dist
- if lb1 >= yl:
+ if lb1 >= yl - feasibility_tol:
lb = lb1
else:
lb = lb2
@@ -598,7 +598,7 @@ def acos(xl, xu, yl, yu):
dist = y_tmp
lb1 = i1 + dist
lb2 = i2 + dist
- if lb1 >= yl:
+ if lb1 >= yl - feasibility_tol:
lb = lb1
else:
lb = lb2
@@ -618,7 +618,7 @@ def acos(xl, xu, yl, yu):
dist = y_tmp
ub1 = i1 - dist
ub2 = i2 - dist
- if ub1 <= yu:
+ if ub1 <= yu + feasibility_tol:
ub = ub1
else:
ub = ub2
@@ -633,7 +633,7 @@ def acos(xl, xu, yl, yu):
dist = pi - y_tmp
ub1 = i1 - dist
ub2 = i2 - dist
- if ub1 <= yu:
+ if ub1 <= yu + feasibility_tol:
ub = ub1
else:
ub = ub2
diff --git a/pyomo/contrib/fbbt/tests/test_fbbt.py b/pyomo/contrib/fbbt/tests/test_fbbt.py
index 9afaa4b8223..8c96d26f10b 100644
--- a/pyomo/contrib/fbbt/tests/test_fbbt.py
+++ b/pyomo/contrib/fbbt/tests/test_fbbt.py
@@ -2,17 +2,12 @@
import pyomo.environ as pe
from pyomo.contrib.fbbt.fbbt import fbbt, compute_bounds_on_expr
from pyomo.contrib.fbbt import interval
+from pyomo.common.dependencies import numpy as np, numpy_available
from pyomo.common.errors import InfeasibleConstraintException
from pyomo.core.expr.numeric_expr import ProductExpression, UnaryFunctionExpression
import math
import logging
import io
-try:
- import numpy as np
- numpy_available = True
-except ImportError:
- numpy_available = False
-
class DummyExpr(ProductExpression):
pass
@@ -697,6 +692,30 @@ def test_always_feasible(self):
fbbt(m, deactivate_satisfied_constraints=True)
self.assertFalse(m.c.active)
+ def test_iteration_limit(self):
+ m = pe.ConcreteModel()
+ m.x_set = pe.Set(initialize=[0, 1, 2], ordered=True)
+ m.c_set = pe.Set(initialize=[0, 1], ordered=True)
+ m.x = pe.Var(m.x_set)
+ m.c = pe.Constraint(m.c_set)
+ m.c[0] = m.x[0] == m.x[1]
+ m.c[1] = m.x[1] == m.x[2]
+ m.x[2].setlb(-1)
+ m.x[2].setub(1)
+ fbbt(m, max_iter=1)
+ self.assertEqual(m.x[1].lb, -1)
+ self.assertEqual(m.x[1].ub, 1)
+ self.assertEqual(m.x[0].lb, None)
+ self.assertEqual(m.x[0].ub, None)
+
+ def test_inf_bounds_on_expr(self):
+ m = pe.ConcreteModel()
+ m.x = pe.Var(bounds=(-1, 1))
+ m.y = pe.Var()
+ lb, ub = compute_bounds_on_expr(m.x + m.y)
+ self.assertEqual(lb, None)
+ self.assertEqual(ub, None)
+
@unittest.skip('This test passes locally, but not on travis or appveyor. I will add an issue.')
def test_skip_unknown_expression1(self):
@@ -778,3 +797,22 @@ def test_encountered_bugs2(self):
self.assertEqual(m.x.ub, None)
self.assertEqual(m.y.lb, None)
self.assertEqual(m.y.ub, None)
+
+ def test_encountered_bugs3(self):
+ xl = 0.033689710575092756
+ xu = 0.04008169994804723
+ yl = 0.03369608678342047
+ yu = 0.04009243987444148
+
+ m = pe.ConcreteModel()
+ m.x = pe.Var(bounds=(xl, xu))
+ m.y = pe.Var(bounds=(yl, yu))
+
+ m.c = pe.Constraint(expr=m.x == pe.sin(m.y))
+
+ fbbt(m.c)
+
+ self.assertAlmostEqual(m.x.lb, xl)
+ self.assertAlmostEqual(m.x.ub, xu)
+ self.assertAlmostEqual(m.y.lb, yl)
+ self.assertAlmostEqual(m.y.ub, yu)
diff --git a/pyomo/contrib/fbbt/tests/test_interval.py b/pyomo/contrib/fbbt/tests/test_interval.py
index 5a275bdd120..0160c7163e7 100644
--- a/pyomo/contrib/fbbt/tests/test_interval.py
+++ b/pyomo/contrib/fbbt/tests/test_interval.py
@@ -1,13 +1,9 @@
-import pyutilib.th as unittest
import math
-import pyomo.contrib.fbbt.interval as interval
+import pyutilib.th as unittest
+from pyomo.common.dependencies import numpy as np, numpy_available
from pyomo.common.errors import InfeasibleConstraintException
-try:
- import numpy as np
- numpy_available = True
- np.random.seed(0)
-except ImportError:
- numpy_available = False
+import pyomo.contrib.fbbt.interval as interval
+
try:
isfinite = math.isfinite
except AttributeError:
@@ -16,6 +12,10 @@ def isfinite(x):
return not (math.isnan(x) or math.isinf(x))
class TestInterval(unittest.TestCase):
+ def setUp(self):
+ if numpy_available:
+ np.random.seed(0)
+
@unittest.skipIf(not numpy_available, 'Numpy is not available.')
def test_add(self):
xl = -2.5
@@ -252,55 +252,55 @@ def test_tan(self):
@unittest.skipIf(not numpy_available, 'Numpy is not available.')
def test_asin(self):
- yl, yu = interval.asin(-0.5, 0.5, -interval.inf, interval.inf)
+ yl, yu = interval.asin(-0.5, 0.5, -interval.inf, interval.inf, feasibility_tol=1e-8)
self.assertEqual(yl, -interval.inf)
self.assertEqual(yu, interval.inf)
- yl, yu = interval.asin(-0.5, 0.5, -math.pi, math.pi)
+ yl, yu = interval.asin(-0.5, 0.5, -math.pi, math.pi, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -math.pi, 12)
self.assertAlmostEqual(yu, math.pi, 12)
- yl, yu = interval.asin(-0.5, 0.5, -math.pi/2, math.pi/2)
+ yl, yu = interval.asin(-0.5, 0.5, -math.pi/2, math.pi/2, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.asin(-0.5))
self.assertAlmostEqual(yu, math.asin(0.5))
- yl, yu = interval.asin(-0.5, 0.5, -math.pi/2-0.1, math.pi/2+0.1)
+ yl, yu = interval.asin(-0.5, 0.5, -math.pi/2-0.1, math.pi/2+0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.asin(-0.5))
self.assertAlmostEqual(yu, math.asin(0.5))
- yl, yu = interval.asin(-0.5, 0.5, -math.pi/2+0.1, math.pi/2-0.1)
+ yl, yu = interval.asin(-0.5, 0.5, -math.pi/2+0.1, math.pi/2-0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.asin(-0.5))
self.assertAlmostEqual(yu, math.asin(0.5))
- yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi, 1.5*math.pi)
+ yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi, 1.5*math.pi, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -3.6651914291880920, 12)
self.assertAlmostEqual(yu, 3.6651914291880920, 12)
- yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi-0.1, 1.5*math.pi+0.1)
+ yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi-0.1, 1.5*math.pi+0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -3.6651914291880920, 12)
self.assertAlmostEqual(yu, 3.6651914291880920, 12)
- yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi+0.1, 1.5*math.pi-0.1)
+ yl, yu = interval.asin(-0.5, 0.5, -1.5*math.pi+0.1, 1.5*math.pi-0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -3.6651914291880920, 12)
self.assertAlmostEqual(yu, 3.6651914291880920, 12)
@unittest.skipIf(not numpy_available, 'Numpy is not available.')
def test_acos(self):
- yl, yu = interval.acos(-0.5, 0.5, -interval.inf, interval.inf)
+ yl, yu = interval.acos(-0.5, 0.5, -interval.inf, interval.inf, feasibility_tol=1e-8)
self.assertEqual(yl, -interval.inf)
self.assertEqual(yu, interval.inf)
- yl, yu = interval.acos(-0.5, 0.5, -0.5*math.pi, 0.5*math.pi)
+ yl, yu = interval.acos(-0.5, 0.5, -0.5*math.pi, 0.5*math.pi, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -0.5*math.pi, 12)
self.assertAlmostEqual(yu, 0.5*math.pi, 12)
- yl, yu = interval.acos(-0.5, 0.5, 0, math.pi)
+ yl, yu = interval.acos(-0.5, 0.5, 0, math.pi, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.acos(0.5))
self.assertAlmostEqual(yu, math.acos(-0.5))
- yl, yu = interval.acos(-0.5, 0.5, 0-0.1, math.pi+0.1)
+ yl, yu = interval.acos(-0.5, 0.5, 0-0.1, math.pi+0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.acos(0.5))
self.assertAlmostEqual(yu, math.acos(-0.5))
- yl, yu = interval.acos(-0.5, 0.5, 0+0.1, math.pi-0.1)
+ yl, yu = interval.acos(-0.5, 0.5, 0+0.1, math.pi-0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, math.acos(0.5))
self.assertAlmostEqual(yu, math.acos(-0.5))
- yl, yu = interval.acos(-0.5, 0.5, -math.pi, 0)
+ yl, yu = interval.acos(-0.5, 0.5, -math.pi, 0, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -math.acos(-0.5), 12)
self.assertAlmostEqual(yu, -math.acos(0.5), 12)
- yl, yu = interval.acos(-0.5, 0.5, -math.pi-0.1, 0+0.1)
+ yl, yu = interval.acos(-0.5, 0.5, -math.pi-0.1, 0+0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -math.acos(-0.5), 12)
self.assertAlmostEqual(yu, -math.acos(0.5), 12)
- yl, yu = interval.acos(-0.5, 0.5, -math.pi+0.1, 0-0.1)
+ yl, yu = interval.acos(-0.5, 0.5, -math.pi+0.1, 0-0.1, feasibility_tol=1e-8)
self.assertAlmostEqual(yl, -math.acos(-0.5), 12)
self.assertAlmostEqual(yu, -math.acos(0.5), 12)
diff --git a/pyomo/contrib/pynumero/cmake/third_party/HSL/README b/pyomo/contrib/fme/__init__.py
similarity index 100%
rename from pyomo/contrib/pynumero/cmake/third_party/HSL/README
rename to pyomo/contrib/fme/__init__.py
diff --git a/pyomo/contrib/fme/fourier_motzkin_elimination.py b/pyomo/contrib/fme/fourier_motzkin_elimination.py
new file mode 100644
index 00000000000..ac45e34a7d8
--- /dev/null
+++ b/pyomo/contrib/fme/fourier_motzkin_elimination.py
@@ -0,0 +1,469 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+from pyomo.core import (Var, Block, Constraint, Param, Set, Suffix, Expression,
+ Objective, SortComponents, value, ConstraintList)
+from pyomo.core.base import TransformationFactory, _VarData
+from pyomo.core.base.block import _BlockData
+from pyomo.core.base.param import _ParamData
+from pyomo.core.base.constraint import _ConstraintData
+from pyomo.core.plugins.transform.hierarchy import Transformation
+from pyomo.common.config import ConfigBlock, ConfigValue
+from pyomo.common.modeling import unique_component_name
+from pyomo.repn.standard_repn import generate_standard_repn
+from pyomo.core.kernel.component_map import ComponentMap
+from pyomo.core.kernel.component_set import ComponentSet
+from pyomo.opt import TerminationCondition
+
+import logging
+
+from six import iteritems
+import inspect
+
+logger = logging.getLogger('pyomo.contrib.fourier_motzkin_elimination')
+
+def _check_var_bounds_filter(constraint):
+ """Check if the constraint is already implied by the variable bounds"""
+ # this is one of our constraints, so we know that it is >=.
+ min_lhs = 0
+ for v, coef in iteritems(constraint['map']):
+ if coef > 0:
+ if v.lb is None:
+ return True # we don't have var bounds with which to imply the
+ # constraint...
+ min_lhs += coef*v.lb
+ elif coef < 0:
+ if v.ub is None:
+ return True # we don't have var bounds with which to imply the
+ # constraint...
+ min_lhs += coef*v.ub
+ # we do need value here since we didn't control v.lb and v.ub above.
+ if value(min_lhs) >= constraint['lower']:
+ return False # constraint implied by var bounds
+ return True
+
+def vars_to_eliminate_list(x):
+ if isinstance(x, (Var, _VarData)):
+ if not x.is_indexed():
+ return ComponentSet([x])
+ ans = ComponentSet()
+ for j in x.index_set():
+ ans.add(x[j])
+ return ans
+ elif hasattr(x, '__iter__'):
+ ans = ComponentSet()
+ for i in x:
+ ans.update(vars_to_eliminate_list(i))
+ return ans
+ else:
+ raise ValueError(
+ "Expected Var or list of Vars."
+ "\n\tRecieved %s" % type(x))
+
+@TransformationFactory.register('contrib.fourier_motzkin_elimination',
+ doc="Project out specified (continuous) "
+ "variables from a linear model.")
+class Fourier_Motzkin_Elimination_Transformation(Transformation):
+ """Project out specified variables from a linear model.
+
+ This transformation requires the following keyword argument:
+ vars_to_eliminate: A user-specified list of continuous variables to
+ project out of the model
+
+ The transformation will deactivate the original constraints of the model
+ and create a new block named "_pyomo_contrib_fme_transformation" with the
+ projected constraints. Note that this transformation will flatten the
+ structure of the original model since there is no obvious mapping between
+ the original model and the transformed one.
+
+ """
+
+ CONFIG = ConfigBlock("contrib.fourier_motzkin_elimination")
+ CONFIG.declare('vars_to_eliminate', ConfigValue(
+ default=None,
+ domain=vars_to_eliminate_list,
+ description="Continuous variable or list of continuous variables to "
+ "project out of the model",
+ doc="""
+ This specifies the list of variables to project out of the model.
+ Note that these variables must all be continuous and the model must be
+ linear."""
+ ))
+ CONFIG.declare('constraint_filtering_callback', ConfigValue(
+ default=_check_var_bounds_filter,
+ description="A callback that determines whether or not new "
+ "constraints generated by Fourier-Motzkin elimination are added "
+ "to the model",
+ doc="""
+ Specify None in order for no constraint filtering to occur during the
+ transformation.
+
+ Specify a function that accepts a constraint (represented in the >=
+ dictionary form used in this transformation) and returns a Boolean
+ indicating whether or not to add it to the model.
+ """
+ ))
+
+ def __init__(self):
+ """Initialize transformation object"""
+ super(Fourier_Motzkin_Elimination_Transformation, self).__init__()
+
+ def _apply_to(self, instance, **kwds):
+ config = self.CONFIG(kwds.pop('options', {}))
+ config.set_value(kwds)
+ vars_to_eliminate = config.vars_to_eliminate
+ self.constraint_filter = config.constraint_filtering_callback
+ if vars_to_eliminate is None:
+ raise RuntimeError("The Fourier-Motzkin Elimination transformation "
+ "requires the argument vars_to_eliminate, a "
+ "list of Vars to be projected out of the model.")
+
+ # make transformation block
+ transBlockName = unique_component_name(
+ instance,
+ '_pyomo_contrib_fme_transformation')
+ transBlock = Block()
+ instance.add_component(transBlockName, transBlock)
+ projected_constraints = transBlock.projected_constraints = \
+ ConstraintList()
+
+ # collect all of the constraints
+ # NOTE that we are ignoring deactivated constraints
+ constraints = []
+ ctypes_not_to_transform = set((Block, Param, Objective, Set, Expression,
+ Suffix))
+ for obj in instance.component_data_objects(
+ descend_into=Block,
+ sort=SortComponents.deterministic,
+ active=True):
+ if obj.ctype in ctypes_not_to_transform:
+ continue
+ elif obj.ctype is Constraint:
+ cons_list = self._process_constraint(obj)
+ constraints.extend(cons_list)
+ obj.deactivate() # the truth will be on our transformation block
+ elif obj.ctype is Var:
+ # variable bounds are constraints, but we only need them if this
+ # is a variable we are projecting out
+ if obj not in vars_to_eliminate:
+ continue
+ if obj.lb is not None:
+ constraints.append({'body': generate_standard_repn(obj),
+ 'lower': value(obj.lb),
+ 'map': ComponentMap([(obj, 1)])})
+ if obj.ub is not None:
+ constraints.append({'body': generate_standard_repn(-obj),
+ 'lower': -value(obj.ub),
+ 'map': ComponentMap([(obj, -1)])})
+ else:
+ raise RuntimeError(
+ "Found active component %s of type %s. The "
+ "Fourier-Motzkin Elimination transformation can only "
+ "handle purely algebraic models. That is, only "
+ "Sets, Params, Vars, Constraints, Expressions, Blocks, "
+ "and Objectives may be active on the model." % (obj.name,
+ obj.ctype))
+
+ new_constraints = self._fourier_motzkin_elimination(constraints,
+ vars_to_eliminate)
+
+ # put the new constraints on the transformation block
+ for cons in new_constraints:
+ if self.constraint_filter is not None:
+ try:
+ keep = self.constraint_filter(cons)
+ except:
+ logger.error("Problem calling constraint filter callback "
+ "on constraint with right-hand side %s and "
+ "body:\n%s" % (cons['lower'], cons['body']))
+ raise
+ if not keep:
+ continue
+ body = cons['body']
+ lhs = sum(coef*var for (coef, var) in zip(body.linear_coefs,
+ body.linear_vars)) + \
+ sum(coef*v1*v2 for (coef, (v1, v2)) in zip(body.quadratic_coefs,
+ body.quadratic_vars))
+ if body.nonlinear_expr is not None:
+ lhs += body.nonlinear_expr
+ lower = cons['lower']
+ if type(lhs >= lower) is bool:
+ if lhs >= lower:
+ continue
+ else:
+ # This would actually make a lot of sense in this case...
+ #projected_constraints.add(Constraint.Infeasible)
+ raise RuntimeError("Fourier-Motzkin found the model is "
+ "infeasible!")
+ else:
+ projected_constraints.add(lhs >= lower)
+
+ def _process_constraint(self, constraint):
+ """Transforms a pyomo Constraint object into a list of dictionaries
+ representing only >= constraints. That is, if the constraint has both an
+ ub and a lb, it is transformed into two constraints. Otherwise it is
+ flipped if it is <=. Each dictionary contains the keys 'lower',
+ and 'body' where, after the process, 'lower' will be a constant, and
+ 'body' will be the standard repn of the body. (The constant will be
+ moved to the RHS and we know that the upper bound is None after this).
+ """
+ body = constraint.body
+ std_repn = generate_standard_repn(body)
+ # make sure that we store the lower bound's value so that we need not
+ # worry again during the transformation
+ cons_dict = {'lower': value(constraint.lower),
+ 'body': std_repn
+ }
+ upper = value(constraint.upper)
+ constraints_to_add = [cons_dict]
+ if upper is not None:
+ # if it has both bounds
+ if cons_dict['lower'] is not None:
+ # copy the constraint and flip
+ leq_side = {'lower': -upper,
+ 'body': generate_standard_repn(-1.0*body)}
+ self._move_constant_and_add_map(leq_side)
+ constraints_to_add.append(leq_side)
+
+ # If it has only an upper bound, we just need to flip it
+ else:
+ # just flip the constraint
+ cons_dict['lower'] = -upper
+ cons_dict['body'] = generate_standard_repn(-1.0*body)
+ self._move_constant_and_add_map(cons_dict)
+
+ return constraints_to_add
+
+ def _move_constant_and_add_map(self, cons_dict):
+ """Takes constraint in dicionary form already in >= form,
+ and moves the constant to the RHS
+ """
+ body = cons_dict['body']
+ constant = value(body.constant)
+ cons_dict['lower'] -= constant
+ body.constant = 0
+
+ # store a map of vars to coefficients. We can't use this in place of
+ # standard repn because determinism, but this will save a lot of linear
+ # time searches later. Note also that we will take the value of the
+ # coeficient here so that we never have to worry about it again during
+ # the transformation.
+ cons_dict['map'] = ComponentMap(zip(body.linear_vars,
+ [value(coef) for coef in
+ body.linear_coefs]))
+
+ def _fourier_motzkin_elimination(self, constraints, vars_to_eliminate):
+ """Performs FME on the constraint list in the argument
+ (which is assumed to be all >= constraints and stored in the
+ dictionary representation), projecting out each of the variables in
+ vars_to_eliminate"""
+
+ # We only need to eliminate variables that actually appear in
+ # this set of constraints... Revise our list.
+ vars_that_appear = []
+ for cons in constraints:
+ std_repn = cons['body']
+ if not std_repn.is_linear():
+ # as long as none of vars_that_appear are in the nonlinear part,
+ # we are actually okay.
+ nonlinear_vars = ComponentSet(v for two_tuple in
+ std_repn.quadratic_vars for
+ v in two_tuple)
+ nonlinear_vars.update(v for v in std_repn.nonlinear_vars)
+ for var in nonlinear_vars:
+ if var in vars_to_eliminate:
+ raise RuntimeError("Variable %s appears in a nonlinear "
+ "constraint. The Fourier-Motzkin "
+ "Elimination transformation can only "
+ "be used to eliminate variables "
+ "which only appear linearly." %
+ var.name)
+ for var in std_repn.linear_vars:
+ if var in vars_to_eliminate:
+ vars_that_appear.append(var)
+
+ # we actually begin the recursion here
+ while vars_that_appear:
+ # first var we will project out
+ the_var = vars_that_appear.pop()
+
+ # we are 'reorganizing' the constraints, we sort based on the sign
+ # of the coefficient of the_var: This tells us whether we have
+ # the_var <= other stuff or vice versa.
+ leq_list = []
+ geq_list = []
+ waiting_list = []
+
+ for cons in constraints:
+ leaving_var_coef = cons['map'].get(the_var)
+ if leaving_var_coef is None or leaving_var_coef == 0:
+ waiting_list.append(cons)
+ continue
+
+ # we know the constraint is a >= constraint, using that
+ # assumption below.
+ # NOTE: neither of the scalar multiplications below flip the
+ # constraint. So we are sure to have only geq constraints
+ # forever, which is exactly what we want.
+ if leaving_var_coef < 0:
+ leq_list.append(
+ self._nonneg_scalar_multiply_linear_constraint(
+ cons, -1.0/leaving_var_coef))
+ else:
+ geq_list.append(
+ self._nonneg_scalar_multiply_linear_constraint(
+ cons, 1.0/leaving_var_coef))
+
+ constraints = waiting_list
+ for leq in leq_list:
+ for geq in geq_list:
+ constraints.append(self._add_linear_constraints(leq, geq))
+
+ return constraints
+
+ def _nonneg_scalar_multiply_linear_constraint(self, cons, scalar):
+ """Multiplies all coefficients and the RHS of a >= constraint by scalar.
+ There is no logic for flipping the equality, so this is just the
+ special case with a nonnegative scalar, which is all we need.
+ """
+ body = cons['body']
+ body.linear_coefs = [scalar*coef for coef in body.linear_coefs]
+ body.quadratic_coefs = [scalar*coef for coef in body.quadratic_coefs]
+ body.nonlinear_expr = scalar*body.nonlinear_expr if \
+ body.nonlinear_expr is not None else None
+ # and update the map... (It isn't lovely that I am storing this in two
+ # places...)
+ for var, coef in cons['map'].items():
+ cons['map'][var] = coef*scalar
+
+ # assume scalar >= 0 and constraint only has lower bound
+ if cons['lower'] is not None:
+ cons['lower'] *= scalar
+
+ return cons
+
+ def _add_linear_constraints(self, cons1, cons2):
+ """Adds two >= constraints"""
+ ans = {'lower': None, 'body': None, 'map': ComponentMap()}
+ cons1_body = cons1['body']
+ cons2_body = cons2['body']
+
+ # Need this to be both deterministic and to account for the fact that
+ # Vars aren't hashable.
+ all_vars = list(cons1_body.linear_vars)
+ seen = ComponentSet(all_vars)
+ for v in cons2_body.linear_vars:
+ if v not in seen:
+ all_vars.append(v)
+
+ expr = 0
+ for var in all_vars:
+ coef = cons1['map'].get(var, 0) + cons2['map'].get(var, 0)
+ ans['map'][var] = coef
+ expr += coef*var
+ # deal with nonlinear stuff if there is any
+ for cons in [cons1_body, cons2_body]:
+ if cons.nonlinear_expr is not None:
+ expr += cons.nonlinear_expr
+ expr += sum(coef*v1*v2 for (coef, (v1, v2)) in
+ zip(cons.quadratic_coefs, cons.quadratic_vars))
+
+ ans['body'] = generate_standard_repn(expr)
+
+ # upper is None and lower exists, so this gets the constant
+ ans['lower'] = cons1['lower'] + cons2['lower']
+
+ return ans
+
+ def post_process_fme_constraints(self, m, solver_factory, tolerance=0):
+ """Function that solves a sequence of LPs problems to check if
+ constraints are implied by each other. Deletes any that are.
+
+ Parameters
+ ----------------
+ m: A model, already transformed with FME. Note that if constraints
+ have been added, activated, or deactivated, we will check for
+ redundancy against the whole active part of the model. If you call
+ this straight after FME, you are only checking within the projected
+ constraints, but otherwise it is up to the user.
+ solver_factory: A SolverFactory object (constructed with a solver
+ which can solve the continuous relaxation of the
+ active constraints on the model. That is, if you
+ had nonlinear constraints unrelated to the variables
+ being projected, you need to either deactivate them or
+ provide a solver which will do the right thing.)
+ tolerance: Tolerance at which we decide a constraint is implied by the
+ others. Default is 0, meaning we remove the constraint if
+ the LP solve finds the constraint can be tight but not
+ violated. Setting this to a small positive value would
+ remove constraints more conservatively. Setting it to a
+ negative value would result in a relaxed problem.
+ """
+ # make sure m looks like what we expect
+ if not hasattr(m, "_pyomo_contrib_fme_transformation"):
+ raise RuntimeError("It looks like model %s has not been "
+ "transformed with the "
+ "fourier_motzkin_elimination transformation!"
+ % m.name)
+ transBlock = m._pyomo_contrib_fme_transformation
+ constraints = transBlock.projected_constraints
+
+ # relax integrality so that we can do this with LP solves.
+ TransformationFactory('core.relax_integer_vars').apply_to(
+ m, transform_deactivated_blocks=True)
+ # deactivate any active objectives on the model, and save what we did so
+ # we can undo it after.
+ active_objs = []
+ for obj in m.component_data_objects(Objective, descend_into=True):
+ if obj.active:
+ active_objs.append(obj)
+ obj.deactivate()
+ # add placeholder for our own objective
+ obj_name = unique_component_name(m, '_fme_post_process_obj')
+ obj = Objective(expr=0)
+ m.add_component(obj_name, obj)
+ for i in constraints:
+ # If someone wants us to ignore it and leave it in the model, we
+ # can.
+ if not constraints[i].active:
+ continue
+ # deactivate the constraint
+ constraints[i].deactivate()
+ m.del_component(obj)
+ # make objective to maximize its infeasibility
+ obj = Objective(expr=constraints[i].body - constraints[i].lower)
+ m.add_component(obj_name, obj)
+ results = solver_factory.solve(m)
+ print(results.solver.termination_condition)
+ if results.solver.termination_condition == \
+ TerminationCondition.unbounded:
+ obj_val = -float('inf')
+ elif results.solver.termination_condition != \
+ TerminationCondition.optimal:
+ raise RuntimeError("Unsuccessful subproblem solve when checking"
+ "constraint %s.\n\t"
+ "Termination Condition: %s" %
+ (constraints[i].name,
+ results.solver.termination_condition))
+ else:
+ obj_val = value(obj)
+ # if we couldn't make it infeasible, it's useless
+ if obj_val >= tolerance:
+ m.del_component(constraints[i])
+ del constraints[i]
+ else:
+ constraints[i].activate()
+
+ # clean up
+ m.del_component(obj)
+ for obj in active_objs:
+ obj.activate()
+ # undo relax integrality
+ TransformationFactory('core.relax_integer_vars').apply_to(m, undo=True)
diff --git a/pyomo/contrib/fme/plugins.py b/pyomo/contrib/fme/plugins.py
new file mode 100644
index 00000000000..73e6acc24ce
--- /dev/null
+++ b/pyomo/contrib/fme/plugins.py
@@ -0,0 +1,2 @@
+def load():
+ import pyomo.contrib.fme.fourier_motzkin_elimination
diff --git a/pyomo/contrib/fme/tests/__init__.py b/pyomo/contrib/fme/tests/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py
new file mode 100644
index 00000000000..731ff374cf9
--- /dev/null
+++ b/pyomo/contrib/fme/tests/test_fourier_motzkin_elimination.py
@@ -0,0 +1,683 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import os
+from os.path import abspath, dirname
+currdir = dirname(abspath(__file__))+os.sep
+
+import pyutilib.th as unittest
+from pyomo.common.log import LoggingIntercept
+from pyomo.core import (Var, Constraint, Param, ConcreteModel, NonNegativeReals,
+ Binary, value, Block, Objective)
+from pyomo.core.base import TransformationFactory
+from pyomo.core.expr.current import log
+from pyomo.gdp import Disjunction, Disjunct
+from pyomo.repn.standard_repn import generate_standard_repn
+from pyomo.core.kernel.component_set import ComponentSet
+from pyomo.opt import SolverFactory, check_available_solvers
+import pyomo.contrib.fme.fourier_motzkin_elimination
+
+from six import StringIO
+import logging
+import random
+
+solvers = check_available_solvers('glpk')
+
+class TestFourierMotzkinElimination(unittest.TestCase):
+ def setUp(self):
+ # will need this so we know transformation block names in the test that
+ # includes hull transformation
+ random.seed(666)
+
+ @staticmethod
+ def makeModel():
+ """
+ This is a single-level reformulation of a bilevel model.
+ We project out the dual variables to recover the reformulation in
+ the original space.
+ """
+ m = ConcreteModel()
+ m.x = Var(bounds=(0,2))
+ m.y = Var(domain=NonNegativeReals)
+ m.lamb = Var([1, 2], domain=NonNegativeReals)
+ m.M = Param([1, 2], mutable=True, default=100)
+ m.u = Var([1, 2], domain=Binary)
+
+ m.primal1 = Constraint(expr=m.x - 0.01*m.y <= 1)
+ m.dual1 = Constraint(expr=1 - m.lamb[1] - 0.01*m.lamb[2] == 0)
+
+ @m.Constraint([1, 2])
+ def bound_lambdas(m, i):
+ return m.lamb[i] <= m.u[i]*m.M[i]
+
+ m.bound_y = Constraint(expr=m.y <= 1000*(1 - m.u[1]))
+ m.dual2 = Constraint(expr=-m.x + 0.01*m.y + 1 <= (1 - m.u[2])*1000)
+
+ return m
+
+ def test_no_vars_specified(self):
+ m = self.makeModel()
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "The Fourier-Motzkin Elimination transformation "
+ "requires the argument vars_to_eliminate, a "
+ "list of Vars to be projected out of the model.",
+ TransformationFactory('contrib.fourier_motzkin_elimination').\
+ apply_to,
+ m)
+
+ unfiltered_indices = [1, 2, 3, 6]
+ filtered_indices = [1, 2, 3, 4]
+
+ def check_projected_constraints(self, m, indices):
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+ # x - 0.01y <= 1
+ cons = constraints[indices[0]]
+ self.assertEqual(value(cons.lower), -1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_linear())
+ linear_vars = body.linear_vars
+ coefs = body.linear_coefs
+ self.assertEqual(len(linear_vars), 2)
+ self.assertIs(linear_vars[0], m.x)
+ self.assertEqual(coefs[0], -1)
+ self.assertIs(linear_vars[1], m.y)
+ self.assertEqual(coefs[1], 0.01)
+
+ # y <= 1000*(1 - u_1)
+ cons = constraints[indices[1]]
+ self.assertEqual(value(cons.lower), -1000)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ linear_vars = body.linear_vars
+ coefs = body.linear_coefs
+ self.assertEqual(len(linear_vars), 2)
+ self.assertIs(linear_vars[0], m.y)
+ self.assertEqual(coefs[0], -1)
+ self.assertIs(linear_vars[1], m.u[1])
+ self.assertEqual(coefs[1], -1000)
+
+ # -x + 0.01y + 1 <= 1000*(1 - u_2)
+ cons = constraints[indices[2]]
+ self.assertEqual(value(cons.lower), -999)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ linear_vars = body.linear_vars
+ coefs = body.linear_coefs
+ self.assertEqual(len(linear_vars), 3)
+ self.assertIs(linear_vars[0], m.x)
+ self.assertEqual(coefs[0], 1)
+ self.assertIs(linear_vars[1], m.y)
+ self.assertEqual(coefs[1], -0.01)
+ self.assertIs(linear_vars[2], m.u[2])
+ self.assertEqual(coefs[2], -1000)
+
+ # u_2 + 100u_1 >= 1
+ cons = constraints[indices[3]]
+ self.assertEqual(value(cons.lower), 1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ linear_vars = body.linear_vars
+ coefs = body.linear_coefs
+ self.assertEqual(len(linear_vars), 2)
+ self.assertIs(linear_vars[1], m.u[2])
+ self.assertEqual(coefs[1], 1)
+ self.assertIs(linear_vars[0], m.u[1])
+ self.assertEqual(coefs[0], 100)
+
+ def test_transformed_constraints_indexed_var_arg(self):
+ m = self.makeModel()
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m,
+ vars_to_eliminate = m.lamb,
+ constraint_filtering_callback=None)
+ # we get some trivial constraints too, but let's check that the ones
+ # that should be there really are
+ self.check_projected_constraints(m, self.unfiltered_indices)
+
+ def test_transformed_constraints_varData_list_arg(self):
+ m = self.makeModel()
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m,
+ vars_to_eliminate = [m.lamb[1], m.lamb[2]],
+ constraint_filtering_callback=None)
+
+ self.check_projected_constraints(m, self.unfiltered_indices)
+
+ def test_transformed_constraints_indexedVar_list(self):
+ m = self.makeModel()
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m,
+ vars_to_eliminate = [m.lamb],
+ constraint_filtering_callback=None)
+
+ self.check_projected_constraints(m, self.unfiltered_indices)
+
+ def test_default_constraint_filtering(self):
+ # We will filter constraints which are trivial based on variable bounds
+ # during the transformation. This checks that we removed the constraints
+ # we expect.
+ m = self.makeModel()
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m,
+ vars_to_eliminate = m.lamb)
+
+ # we still have all the right constraints
+ self.check_projected_constraints(m, self.filtered_indices)
+ # but now we *only* have the right constraints
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+ self.assertEqual(len(constraints), 4)
+
+ def test_original_constraints_deactivated(self):
+ m = self.makeModel()
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m,
+ vars_to_eliminate = m.lamb)
+
+ self.assertFalse(m.primal1.active)
+ self.assertFalse(m.dual1.active)
+ self.assertFalse(m.dual2.active)
+ self.assertFalse(m.bound_lambdas[1].active)
+ self.assertFalse(m.bound_lambdas[2].active)
+ self.assertFalse(m.bound_y.active)
+
+ def test_infeasible_model(self):
+ m = ConcreteModel()
+ m.x = Var(bounds=(0, 10))
+ m.cons1 = Constraint(expr=m.x >= 6)
+ m.cons2 = Constraint(expr=m.x <= 2)
+
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "Fourier-Motzkin found the model is infeasible!",
+ TransformationFactory('contrib.fourier_motzkin_elimination').\
+ apply_to,
+ m,
+ vars_to_eliminate=m.x)
+
+ def test_infeasible_model_no_var_bounds(self):
+ m = ConcreteModel()
+ m.x = Var()
+ m.cons1 = Constraint(expr=m.x >= 6)
+ m.cons2 = Constraint(expr=m.x <= 2)
+
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "Fourier-Motzkin found the model is infeasible!",
+ TransformationFactory('contrib.fourier_motzkin_elimination').\
+ apply_to,
+ m,
+ vars_to_eliminate=m.x)
+
+ def test_nonlinear_error(self):
+ m = ConcreteModel()
+ m.x = Var()
+ m.cons = Constraint(expr=m.x**2 >= 2)
+ m.cons2 = Constraint(expr=m.x<= 10)
+
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "Variable x appears in a nonlinear "
+ "constraint. The Fourier-Motzkin "
+ "Elimination transformation can only "
+ "be used to eliminate variables "
+ "which only appear linearly.",
+ TransformationFactory('contrib.fourier_motzkin_elimination').\
+ apply_to,
+ m,
+ vars_to_eliminate=m.x)
+
+ def test_components_we_do_not_understand_error(self):
+ m = self.makeModel()
+ m.disj = Disjunction(expr=[m.x == 0, m.y >= 2])
+
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "Found active component %s of type %s. The "
+ "Fourier-Motzkin Elimination transformation can only "
+ "handle purely algebraic models. That is, only "
+ "Sets, Params, Vars, Constraints, Expressions, Blocks, "
+ "and Objectives may be active on the model." % (m.disj.name,
+ m.disj.type()),
+ TransformationFactory('contrib.fourier_motzkin_elimination').\
+ apply_to,
+ m,
+ vars_to_eliminate=m.x)
+
+ def test_bad_constraint_filtering_callback_error(self):
+ m = self.makeModel()
+ def not_a_callback(cons):
+ raise RuntimeError("I don't know how to do my job.")
+ fme = TransformationFactory('contrib.fourier_motzkin_elimination')
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo.contrib.fourier_motzkin_elimination',
+ logging.ERROR):
+ self.assertRaisesRegexp(
+ RuntimeError,
+ "I don't know how to do my job.",
+ fme.apply_to,
+ m,
+ vars_to_eliminate=m.x,
+ constraint_filtering_callback=not_a_callback)
+ self.assertRegexpMatches(
+ log.getvalue(),
+ "Problem calling constraint filter callback "
+ "on constraint with right-hand side -1.0 and body:*")
+
+ def test_constraint_filtering_callback_not_callable_error(self):
+ m = self.makeModel()
+ fme = TransformationFactory('contrib.fourier_motzkin_elimination')
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo.contrib.fourier_motzkin_elimination',
+ logging.ERROR):
+ self.assertRaisesRegexp(
+ TypeError,
+ "'int' object is not callable",
+ fme.apply_to,
+ m,
+ vars_to_eliminate=m.x,
+ constraint_filtering_callback=5)
+ self.assertRegexpMatches(
+ log.getvalue(),
+ "Problem calling constraint filter callback "
+ "on constraint with right-hand side -1.0 and body:*")
+
+ def test_combine_three_inequalities_and_flatten_blocks(self):
+ m = ConcreteModel()
+ m.x = Var()
+ m.y = Var()
+ m.b = Block()
+ m.b.c = Constraint(expr=m.x >= 2)
+ m.c = Constraint(expr=m.y <= m.x)
+ m.b.b2 = Block()
+ m.b.b2.c = Constraint(expr=m.y >= 4)
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m, vars_to_eliminate=m.y)
+
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+ self.assertEqual(len(constraints), 2)
+ cons = constraints[1]
+ self.assertEqual(value(cons.lower), 2)
+ self.assertIsNone(cons.upper)
+ self.assertIs(cons.body, m.x)
+
+ cons = constraints[2]
+ self.assertEqual(value(cons.lower), 4)
+ self.assertIsNone(cons.upper)
+ self.assertIs(cons.body, m.x)
+
+ def check_hull_projected_constraints(self, m, constraints, indices):
+ # p[1] >= on.ind_var
+ cons = constraints[indices[0]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 2)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.p[1])
+ self.assertEqual(body.linear_coefs[0], 1)
+ self.assertIs(body.linear_vars[1], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[1], -1)
+
+ # p[1] <= 10*on.ind_var + 10*off.ind_var
+ cons = constraints[indices[1]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 3)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[0], 10)
+ self.assertIs(body.linear_vars[1], m.off.indicator_var)
+ self.assertEqual(body.linear_coefs[1], 10)
+ self.assertIs(body.linear_vars[2], m.p[1])
+ self.assertEqual(body.linear_coefs[2], -1)
+
+ # p[1] >= time1_disjuncts[0].ind_var
+ cons = constraints[indices[2]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 2)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[1], m.time1_disjuncts[0].indicator_var)
+ self.assertEqual(body.linear_coefs[1], -1)
+ self.assertIs(body.linear_vars[0], m.p[1])
+ self.assertEqual(body.linear_coefs[0], 1)
+
+ # p[1] <= 10*time1_disjuncts[0].ind_var
+ cons = constraints[indices[3]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 2)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.time1_disjuncts[0].indicator_var)
+ self.assertEqual(body.linear_coefs[0], 10)
+ self.assertIs(body.linear_vars[1], m.p[1])
+ self.assertEqual(body.linear_coefs[1], -1)
+
+ # p[2] - p[1] <= 3*on.ind_var + 2*startup.ind_var
+ cons = constraints[indices[4]]
+ self.assertEqual(value(cons.lower), 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 4)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[3], m.p[2])
+ self.assertEqual(body.linear_coefs[3], -1)
+ self.assertIs(body.linear_vars[0], m.p[1])
+ self.assertEqual(body.linear_coefs[0], 1)
+ self.assertIs(body.linear_vars[1], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[1], 3)
+ self.assertIs(body.linear_vars[2], m.startup.indicator_var)
+ self.assertEqual(body.linear_coefs[2], 2)
+
+ # p[2] >= on.ind_var + startup.ind_var
+ cons = constraints[indices[5]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 3)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.p[2])
+ self.assertEqual(body.linear_coefs[0], 1)
+ self.assertIs(body.linear_vars[1], m.startup.indicator_var)
+ self.assertEqual(body.linear_coefs[1], -1)
+ self.assertIs(body.linear_vars[2], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[2], -1)
+
+ # p[2] <= 10*on.ind_var + 2*startup.ind_var
+ cons = constraints[indices[6]]
+ self.assertEqual(cons.lower, 0)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 3)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[0], 10)
+ self.assertIs(body.linear_vars[1], m.startup.indicator_var)
+ self.assertEqual(body.linear_coefs[1], 2)
+ self.assertIs(body.linear_vars[2], m.p[2])
+ self.assertEqual(body.linear_coefs[2], -1)
+
+ # 1 <= time1_disjuncts[0].ind_var + time1_disjuncts[1].ind_var
+ cons = constraints[indices[7]]
+ self.assertEqual(cons.lower, 1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 2)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.time1_disjuncts[0].indicator_var)
+ self.assertEqual(body.linear_coefs[0], 1)
+ self.assertIs(body.linear_vars[1], m.time1_disjuncts[1].indicator_var)
+ self.assertEqual(body.linear_coefs[1], 1)
+
+ # 1 >= time1_disjuncts[0].ind_var + time1_disjuncts[1].ind_var
+ cons = constraints[indices[8]]
+ self.assertEqual(cons.lower, -1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 2)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.time1_disjuncts[0].indicator_var)
+ self.assertEqual(body.linear_coefs[0], -1)
+ self.assertIs(body.linear_vars[1], m.time1_disjuncts[1].indicator_var)
+ self.assertEqual(body.linear_coefs[1], -1)
+
+ # 1 <= on.ind_var + startup.ind_var + off.ind_var
+ cons = constraints[indices[9]]
+ self.assertEqual(cons.lower, 1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 3)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[0], 1)
+ self.assertIs(body.linear_vars[1], m.startup.indicator_var)
+ self.assertEqual(body.linear_coefs[1], 1)
+ self.assertIs(body.linear_vars[2], m.off.indicator_var)
+ self.assertEqual(body.linear_coefs[2], 1)
+
+ # 1 >= on.ind_var + startup.ind_var + off.ind_var
+ cons = constraints[indices[10]]
+ self.assertEqual(cons.lower, -1)
+ self.assertIsNone(cons.upper)
+ body = generate_standard_repn(cons.body)
+ self.assertEqual(body.constant, 0)
+ self.assertEqual(len(body.linear_vars), 3)
+ self.assertTrue(body.is_linear())
+ self.assertIs(body.linear_vars[0], m.on.indicator_var)
+ self.assertEqual(body.linear_coefs[0], -1)
+ self.assertIs(body.linear_vars[1], m.startup.indicator_var)
+ self.assertEqual(body.linear_coefs[1], -1)
+ self.assertIs(body.linear_vars[2], m.off.indicator_var)
+ self.assertEqual(body.linear_coefs[2], -1)
+
+ def create_hull_model(self):
+ m = ConcreteModel()
+ m.p = Var([1, 2], bounds=(0, 10))
+ m.time1 = Disjunction(expr=[m.p[1] >= 1, m.p[1] == 0])
+
+ m.on = Disjunct()
+ m.on.above_min = Constraint(expr=m.p[2] >= 1)
+ m.on.ramping = Constraint(expr=m.p[2] - m.p[1] <= 3)
+ m.on.on_before = Constraint(expr=m.p[1] >= 1)
+
+ m.startup = Disjunct()
+ m.startup.startup_limit = Constraint(expr=(1, m.p[2], 2))
+ m.startup.off_before = Constraint(expr=m.p[1] == 0)
+
+ m.off = Disjunct()
+ m.off.off = Constraint(expr=m.p[2] == 0)
+ m.time2 = Disjunction(expr=[m.on, m.startup, m.off])
+
+ m.obj = Objective(expr=m.p[1] + m.p[2])
+
+ hull = TransformationFactory('gdp.hull')
+ hull.apply_to(m)
+ disaggregatedVars = ComponentSet(
+ [hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[0]),
+ hull.get_disaggregated_var(m.p[1], m.time1.disjuncts[1]),
+ hull.get_disaggregated_var(m.p[1], m.on),
+ hull.get_disaggregated_var(m.p[2], m.on),
+ hull.get_disaggregated_var(m.p[1], m.startup),
+ hull.get_disaggregated_var(m.p[2], m.startup),
+ hull.get_disaggregated_var(m.p[1], m.off),
+ hull.get_disaggregated_var(m.p[2], m.off)
+ ])
+
+ # from nose.tools import set_trace
+ # set_trace()
+ # disaggregatedVars = ComponentSet([relaxationBlocks[0].component("p[1]"),
+ # relaxationBlocks[1].component("p[1]"),
+ # relaxationBlocks[2].component("p[1]"),
+ # relaxationBlocks[2].component("p[2]"),
+ # relaxationBlocks[3].component("p[1]"),
+ # relaxationBlocks[3].component("p[2]"),
+ # relaxationBlocks[4].component("p[1]"),
+ # relaxationBlocks[4].component("p[2]")])
+
+ return m, disaggregatedVars
+
+ def test_project_disaggregated_vars(self):
+ """This is a little bit more of an integration test with GDP,
+ but also an example of why FME is 'useful.' We will give a GDP,
+ take hull relaxation, and then project out the disaggregated
+ variables."""
+ m, disaggregatedVars = self.create_hull_model()
+
+ filtered = TransformationFactory('contrib.fourier_motzkin_elimination').\
+ create_using(m, vars_to_eliminate=disaggregatedVars)
+ TransformationFactory('contrib.fourier_motzkin_elimination').apply_to(
+ m, vars_to_eliminate=disaggregatedVars,
+ constraint_filtering_callback=None)
+
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+ # we of course get tremendous amounts of garbage, but we make sure that
+ # what should be here is:
+ self.check_hull_projected_constraints(m, constraints, [22, 20, 58, 61,
+ 56, 38, 32, 1, 2,
+ 4, 5])
+ # and when we filter, it's still there.
+ constraints = filtered._pyomo_contrib_fme_transformation.\
+ projected_constraints
+ self.check_hull_projected_constraints(filtered, constraints, [6, 5, 16,
+ 17, 15,
+ 11, 8, 1,
+ 2, 3, 4])
+
+ @unittest.skipIf(not 'glpk' in solvers, 'glpk not available')
+ def test_post_processing(self):
+ m, disaggregatedVars = self.create_hull_model()
+ fme = TransformationFactory('contrib.fourier_motzkin_elimination')
+ fme.apply_to(m, vars_to_eliminate=disaggregatedVars)
+ # post-process
+ fme.post_process_fme_constraints(m, SolverFactory('glpk'))
+
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+ self.assertEqual(len(constraints), 11)
+
+ # They should be the same as the above, but now these are *all* the
+ # constraints
+ self.check_hull_projected_constraints(m, constraints, [6, 5, 16, 17,
+ 15, 11, 8, 1, 2,
+ 3, 4])
+
+ # and check that we didn't change the model
+ for disj in m.component_data_objects(Disjunct):
+ self.assertIs(disj.indicator_var.domain, Binary)
+ self.assertEqual(len([o for o in m.component_data_objects(Objective)]),
+ 1)
+ self.assertIsInstance(m.component("obj"), Objective)
+ self.assertTrue(m.obj.active)
+
+ @unittest.skipIf(not 'glpk' in solvers, 'glpk not available')
+ def test_model_with_unrelated_nonlinear_expressions(self):
+ m = ConcreteModel()
+ m.x = Var([1, 2, 3], bounds=(0,3))
+ m.y = Var()
+ m.z = Var()
+
+ @m.Constraint([1,2])
+ def cons(m, i):
+ return m.x[i] <= m.y**i
+
+ m.cons2 = Constraint(expr=m.x[1] >= m.y)
+ m.cons3 = Constraint(expr=m.x[2] >= m.z - 3)
+ # This is vacuous, but I just want something that's not quadratic
+ m.cons4 = Constraint(expr=m.x[3] <= log(m.y + 1))
+
+ fme = TransformationFactory('contrib.fourier_motzkin_elimination')
+ fme.apply_to(m, vars_to_eliminate=m.x,
+ constraint_filtering_callback=None)
+ constraints = m._pyomo_contrib_fme_transformation.projected_constraints
+
+ # 0 <= y <= 3
+ cons = constraints[6]
+ self.assertEqual(cons.lower, 0)
+ self.assertIs(cons.body, m.y)
+ cons = constraints[5]
+ self.assertEqual(cons.lower, -3)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_linear())
+ self.assertEqual(len(body.linear_vars), 1)
+ self.assertIs(body.linear_vars[0], m.y)
+ self.assertEqual(body.linear_coefs[0], -1)
+
+ # z <= y**2 + 3
+ cons = constraints[4]
+ self.assertEqual(cons.lower, -3)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_quadratic())
+ self.assertEqual(len(body.linear_vars), 1)
+ self.assertIs(body.linear_vars[0], m.z)
+ self.assertEqual(body.linear_coefs[0], -1)
+ self.assertEqual(len(body.quadratic_vars), 1)
+ self.assertEqual(body.quadratic_coefs[0], 1)
+ self.assertIs(body.quadratic_vars[0][0], m.y)
+ self.assertIs(body.quadratic_vars[0][1], m.y)
+
+ # z <= 6
+ cons = constraints[2]
+ self.assertEqual(cons.lower, -6)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_linear())
+ self.assertEqual(len(body.linear_vars), 1)
+ self.assertEqual(body.linear_coefs[0], -1)
+ self.assertIs(body.linear_vars[0], m.z)
+
+ # 0 <= ln(y+ 1)
+ cons = constraints[1]
+ self.assertEqual(cons.lower, 0)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_nonlinear())
+ self.assertFalse(body.is_quadratic())
+ self.assertEqual(len(body.linear_vars), 0)
+ self.assertEqual(body.nonlinear_expr.name, 'log')
+ self.assertEqual(len(body.nonlinear_expr.args[0].args), 2)
+ self.assertIs(body.nonlinear_expr.args[0].args[0], m.y)
+ self.assertEqual(body.nonlinear_expr.args[0].args[1], 1)
+
+ # 0 <= y**2
+ cons = constraints[3]
+ self.assertEqual(cons.lower, 0)
+ body = generate_standard_repn(cons.body)
+ self.assertTrue(body.is_quadratic())
+ self.assertEqual(len(body.quadratic_vars), 1)
+ self.assertEqual(body.quadratic_coefs[0], 1)
+ self.assertIs(body.quadratic_vars[0][0], m.y)
+ self.assertIs(body.quadratic_vars[0][1], m.y)
+
+ # check constraints valid for a selection of points (this is nonconvex,
+ # but anyway...)
+ pts = [#(sqrt(3), 6), Not numerically stable enough for this test
+ (1, 4), (3, 6), (3, 0), (0, 0), (2,6)]
+ for pt in pts:
+ m.y.fix(pt[0])
+ m.z.fix(pt[1])
+ for i in constraints:
+ self.assertLessEqual(value(constraints[i].lower),
+ value(constraints[i].body))
+ m.y.fixed = False
+ m.z.fixed = False
+
+ # check post process these are non-convex, so I don't want to deal with
+ # it... (and this is a good test that I *don't* deal with it.)
+ constraints[4].deactivate()
+ constraints[3].deactivate()
+ constraints[1].deactivate()
+ # NOTE also that some of the subproblems in this test are unbounded: We
+ # need to keep those constraints.
+ fme.post_process_fme_constraints(m, SolverFactory('glpk'))
+ # we needed all the constraints, so we kept them all
+ self.assertEqual(len(constraints), 6)
+
+ # last check that if someone activates something on the model in
+ # between, we just use it. (I struggle to imagine why you would do this
+ # because why withhold the information *during* FME, but if there's some
+ # reason, we may as well use all the information we've got.)
+ m.some_new_cons = Constraint(expr=m.y <= 2)
+ fme.post_process_fme_constraints(m, SolverFactory('glpk'))
+ # now we should have lost one constraint
+ self.assertEqual(len(constraints), 5)
+ # and it should be the y <= 3 one...
+ self.assertIsNone(dict(constraints).get(5))
diff --git a/pyomo/contrib/gdp_bounds/compute_bounds.py b/pyomo/contrib/gdp_bounds/compute_bounds.py
index 50d4d83fc9e..9bc2f0da70c 100644
--- a/pyomo/contrib/gdp_bounds/compute_bounds.py
+++ b/pyomo/contrib/gdp_bounds/compute_bounds.py
@@ -30,7 +30,7 @@ def disjunctive_obbt(model, solver):
model._disjuncts_to_process = list(model.component_data_objects(
ctype=Disjunct, active=True, descend_into=(Block, Disjunct),
descent_order=TraversalStrategy.BreadthFirstSearch))
- if model.type() == Disjunct:
+ if model.ctype == Disjunct:
model._disjuncts_to_process.insert(0, model)
linear_var_set = ComponentSet()
@@ -145,7 +145,7 @@ def fbbt_disjunct(disj, parent_bounds):
try:
new_bnds = fbbt(disj)
except InfeasibleConstraintException as e:
- if disj.type() == Disjunct:
+ if disj.ctype == Disjunct:
disj.deactivate() # simply prune the disjunct
new_bnds = parent_bounds
bnds_manager.pop_bounds()
diff --git a/pyomo/contrib/gdpbb/GDPbb.py b/pyomo/contrib/gdpbb/GDPbb.py
index 227ed365715..b436404e679 100644
--- a/pyomo/contrib/gdpbb/GDPbb.py
+++ b/pyomo/contrib/gdpbb/GDPbb.py
@@ -81,7 +81,7 @@ class GDPbbSolver(object):
@deprecated("GDPbb has been merged into GDPopt. "
"You can use the algorithm using GDPopt with strategy='LBB'.",
logger="pyomo.solvers",
- version='TBD', remove_in='TBD')
+ version='5.6.9')
def __init__(self, *args, **kwargs):
super(GDPbbSolver, self).__init__(*args, **kwargs)
diff --git a/pyomo/contrib/gdpopt/GDPopt.py b/pyomo/contrib/gdpopt/GDPopt.py
index 3be11d88380..0ccc1f7e225 100644
--- a/pyomo/contrib/gdpopt/GDPopt.py
+++ b/pyomo/contrib/gdpopt/GDPopt.py
@@ -1,6 +1,13 @@
# -*- coding: utf-8 -*-
"""Main driver module for GDPopt solver.
+20.2.28 changes:
+- bugfixes on tests
+20.1.22 changes:
+- improved subsolver time limit support for GAMS interface
+- add maxTimeLimit exit condition for GDPopt-LBB
+- add token Big M for reactivated constraints in GDPopt-LBB
+- activate fbbt for branch-and-bound nodes
20.1.15 changes:
- internal cleanup of codebase
- merge GDPbb capabilities (logic-based branch and bound)
@@ -43,7 +50,7 @@
setup_solver_environment)
from pyomo.opt.base import SolverFactory
-__version__ = (20, 1, 15) # Note: date-based version number
+__version__ = (20, 2, 28) # Note: date-based version number
@SolverFactory.register(
@@ -98,7 +105,7 @@ def solve(self, model, **kwds):
model (Block): a Pyomo model or block to be solved
"""
- config = self.CONFIG(kwds.pop('options', {}))
+ config = self.CONFIG(kwds.pop('options', {}), preserve_implicit=True)
config.set_value(kwds)
with setup_solver_environment(model, config) as solve_data:
diff --git a/pyomo/contrib/gdpopt/branch_and_bound.py b/pyomo/contrib/gdpopt/branch_and_bound.py
index 2361eac293a..70b5f063270 100644
--- a/pyomo/contrib/gdpopt/branch_and_bound.py
+++ b/pyomo/contrib/gdpopt/branch_and_bound.py
@@ -2,7 +2,9 @@
from collections import namedtuple
from heapq import heappush, heappop
-from pyomo.contrib.gdpopt.util import copy_var_list_values, SuppressInfeasibleWarning
+from pyomo.common.errors import InfeasibleConstraintException
+from pyomo.contrib.fbbt.fbbt import fbbt
+from pyomo.contrib.gdpopt.util import copy_var_list_values, SuppressInfeasibleWarning, get_main_elapsed_time
from pyomo.contrib.satsolver.satsolver import satisfiable
from pyomo.core import minimize, Suffix, Constraint, ComponentMap, TransformationFactory
from pyomo.opt import SolverFactory, SolverStatus
@@ -117,6 +119,25 @@ def _perform_branch_and_bound(solve_data):
node_data, node_model = heappop(queue)
config.logger.info("Nodes: %s LB %.10g Unbranched %s" % (
solve_data.explored_nodes, node_data.obj_lb, node_data.num_unbranched_disjunctions))
+
+ # Check time limit
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ if elapsed >= config.time_limit:
+ config.logger.info(
+ 'GDPopt-LBB unable to converge bounds '
+ 'before time limit of {} seconds. '
+ 'Elapsed: {} seconds'
+ .format(config.time_limit, elapsed))
+ no_feasible_soln = float('inf')
+ solve_data.LB = node_data.obj_lb if solve_data.objective_sense == minimize else -no_feasible_soln
+ solve_data.UB = no_feasible_soln if solve_data.objective_sense == minimize else -node_data.obj_lb
+ config.logger.info(
+ 'Final bound values: LB: {} UB: {}'.
+ format(solve_data.LB, solve_data.UB))
+ solve_data.results.solver.termination_condition = tc.maxTimeLimit
+ return True
+
+ # Handle current node
if not node_data.is_screened:
# Node has not been evaluated.
solve_data.explored_nodes += 1
@@ -177,6 +198,7 @@ def _branch_on_node(node_data, node_model, solve_data):
fixed_True_disjunct = child_unfixed_disjuncts[disjunct_index_to_fix_True]
for constr in child_model.GDPopt_utils.disjunct_to_nonlinear_constraints.get(fixed_True_disjunct, ()):
constr.activate()
+ child_model.BigM[constr] = 1 # set arbitrary BigM (ok, because we fix corresponding Y=True)
del child_model.GDPopt_utils.disjunction_to_unfixed_disjuncts[child_disjunction_to_branch]
for child_disjunct in child_unfixed_disjuncts:
@@ -206,7 +228,7 @@ def _prescreen_node(node_data, node_model, solve_data):
if node_data.node_count == 0:
config.logger.info("Root node is not satisfiable. Problem is infeasible.")
else:
- config.debug.info("SAT solver pruned node %s" % node_data.node_count)
+ config.logger.info("SAT solver pruned node %s" % node_data.node_count)
new_lb = new_ub = float('inf')
else:
# Solve model subproblem
@@ -243,7 +265,22 @@ def _solve_rnGDP_subproblem(model, solve_data):
try:
with SuppressInfeasibleWarning():
- result = SolverFactory(config.minlp_solver).solve(subproblem, **config.minlp_solver_args)
+ try:
+ fbbt(subproblem, integer_tol=config.integer_tolerance)
+ except InfeasibleConstraintException:
+ copy_var_list_values( # copy variable values, even if errored
+ from_list=subproblem.GDPopt_utils.variable_list,
+ to_list=model.GDPopt_utils.variable_list,
+ config=config, ignore_integrality=True
+ )
+ return float('inf'), float('inf')
+ minlp_args = dict(config.minlp_solver_args)
+ if config.minlp_solver == 'gams':
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ remaining = max(config.time_limit - elapsed, 1)
+ minlp_args['add_options'] = minlp_args.get('add_options', [])
+ minlp_args['add_options'].append('option reslim=%s;' % remaining)
+ result = SolverFactory(config.minlp_solver).solve(subproblem, **minlp_args)
except RuntimeError as e:
config.logger.warning(
"Solver encountered RuntimeError. Treating as infeasible. "
diff --git a/pyomo/contrib/gdpopt/cut_generation.py b/pyomo/contrib/gdpopt/cut_generation.py
index 3090211882f..6b6db57e5df 100644
--- a/pyomo/contrib/gdpopt/cut_generation.py
+++ b/pyomo/contrib/gdpopt/cut_generation.py
@@ -1,7 +1,9 @@
"""This module provides functions for cut generation."""
from __future__ import division
+from collections import namedtuple
from math import copysign, fabs
+from six import iteritems
from pyomo.contrib.gdp_bounds.info import disjunctive_bounds
from pyomo.contrib.gdpopt.util import time_code, constraints_in_True_disjuncts
from pyomo.contrib.mcpp.pyomo_mcpp import McCormick as mc, MCPP_Error
@@ -13,6 +15,8 @@
from pyomo.core.kernel.component_set import ComponentSet
from pyomo.gdp import Disjunct
+MAX_SYMBOLIC_DERIV_SIZE = 1000
+JacInfo = namedtuple('JacInfo', ['mode','vars','jac'])
def add_subproblem_cuts(subprob_result, solve_data, config):
if config.strategy == "LOA":
@@ -60,19 +64,32 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config):
"Adding OA cut for %s with dual value %s"
% (constr.name, dual_value))
- # Cache jacobians
- jacobians = GDPopt.jacobians.get(constr, None)
- if jacobians is None:
- constr_vars = list(identify_variables(constr.body, include_fixed=False))
- if len(constr_vars) >= 1000:
+ # Cache jacobian
+ jacobian = GDPopt.jacobians.get(constr, None)
+ if jacobian is None:
+ constr_vars = list(identify_variables(
+ constr.body, include_fixed=False))
+ if len(constr_vars) >= MAX_SYMBOLIC_DERIV_SIZE:
mode = differentiate.Modes.reverse_numeric
else:
mode = differentiate.Modes.sympy
+ try:
+ jac_list = differentiate(
+ constr.body, wrt_list=constr_vars, mode=mode)
+ jac_map = ComponentMap(zip(constr_vars, jac_list))
+ except:
+ if mode is differentiate.Modes.reverse_numeric:
+ raise
+ mode = differentiate.Modes.reverse_numeric
+ jac_map = ComponentMap()
+ jacobian = JacInfo(mode=mode, vars=constr_vars, jac=jac_map)
+ GDPopt.jacobians[constr] = jacobian
+ # Recompute numeric derivatives
+ if not jacobian.jac:
jac_list = differentiate(
- constr.body, wrt_list=constr_vars, mode=mode)
- jacobians = ComponentMap(zip(constr_vars, jac_list))
- GDPopt.jacobians[constr] = jacobians
+ constr.body, wrt_list=jacobian.vars, mode=jacobian.mode)
+ jacobian.jac.update(zip(jacobian.vars, jac_list))
# Create a block on which to put outer approximation cuts.
oa_utils = parent_block.component('GDPopt_OA')
@@ -92,11 +109,12 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config):
new_oa_cut = (
copysign(1, sign_adjust * dual_value) * (
value(constr.body) - rhs + sum(
- value(jacobians[var]) * (var - value(var))
- for var in jacobians)) - slack_var <= 0)
+ value(jac) * (var - value(var))
+ for var, jac in iteritems(jacobian.jac))
+ ) - slack_var <= 0)
if new_oa_cut.polynomial_degree() not in (1, 0):
- for var in jacobians:
- print(var.name, value(jacobians[var]))
+ for var, jac in iteritems(jacobian.jac):
+ print(var.name, value(jac))
oa_cuts.add(expr=new_oa_cut)
counter += 1
except ZeroDivisionError:
@@ -106,6 +124,9 @@ def add_outer_approximation_cuts(nlp_result, solve_data, config):
% (constr.name,)
)
# Simply continue on to the next constraint.
+ # Clear out the numeric Jacobian values
+ if jacobian.mode is differentiate.Modes.reverse_numeric:
+ jacobian.jac.clear()
config.logger.info('Added %s OA cuts' % counter)
diff --git a/pyomo/contrib/gdpopt/iterate.py b/pyomo/contrib/gdpopt/iterate.py
index 0c2c8cc0448..cc9e09a25c3 100644
--- a/pyomo/contrib/gdpopt/iterate.py
+++ b/pyomo/contrib/gdpopt/iterate.py
@@ -90,12 +90,13 @@ def algorithm_should_terminate(solve_data, config):
return True
# Check time limit
- if get_main_elapsed_time(solve_data.timing) >= config.time_limit:
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ if elapsed >= config.time_limit:
config.logger.info(
'GDPopt unable to converge bounds '
'before time limit of {} seconds. '
'Elapsed: {} seconds'
- .format(config.time_limit, get_main_elapsed_time(solve_data.timing)))
+ .format(config.time_limit, elapsed))
config.logger.info(
'Final bound values: LB: {} UB: {}'.
format(solve_data.LB, solve_data.UB))
diff --git a/pyomo/contrib/gdpopt/mip_solve.py b/pyomo/contrib/gdpopt/mip_solve.py
index 93171d25838..786914b34bd 100644
--- a/pyomo/contrib/gdpopt/mip_solve.py
+++ b/pyomo/contrib/gdpopt/mip_solve.py
@@ -7,7 +7,7 @@
from pyomo.common.errors import InfeasibleConstraintException
from pyomo.contrib.fbbt.fbbt import fbbt
from pyomo.contrib.gdpopt.data_class import MasterProblemResult
-from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, _DoNothing
+from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, _DoNothing, get_main_elapsed_time
from pyomo.core import (Block, Expression, Objective, TransformationFactory,
Var, minimize, value, Constraint)
from pyomo.gdp import Disjunct
@@ -75,8 +75,16 @@ def solve_linear_GDP(linear_GDP_model, solve_data, config):
try:
with SuppressInfeasibleWarning():
+ mip_args = dict(config.mip_solver_args)
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ remaining = max(config.time_limit - elapsed, 1)
+ if config.mip_solver == 'gams':
+ mip_args['add_options'] = mip_args.get('add_options', [])
+ mip_args['add_options'].append('option reslim=%s;' % remaining)
+ elif config.mip_solver == 'multisolve':
+ mip_args['time_limit'] = min(mip_args.get('time_limit', float('inf')), remaining)
results = SolverFactory(config.mip_solver).solve(
- m, **config.mip_solver_args)
+ m, **mip_args)
except RuntimeError as e:
if 'GAMS encountered an error during solve.' in str(e):
config.logger.warning("GAMS encountered an error in solve. Treating as infeasible.")
diff --git a/pyomo/contrib/gdpopt/nlp_solve.py b/pyomo/contrib/gdpopt/nlp_solve.py
index 1108d69f60c..78f4be9cfa7 100644
--- a/pyomo/contrib/gdpopt/nlp_solve.py
+++ b/pyomo/contrib/gdpopt/nlp_solve.py
@@ -7,7 +7,7 @@
from pyomo.contrib.fbbt.fbbt import fbbt
from pyomo.contrib.gdpopt.data_class import SubproblemResult
from pyomo.contrib.gdpopt.util import (SuppressInfeasibleWarning,
- is_feasible)
+ is_feasible, get_main_elapsed_time)
from pyomo.core import Constraint, TransformationFactory, minimize, value, Objective
from pyomo.core.expr import current as EXPR
from pyomo.core.kernel.component_set import ComponentSet
@@ -41,7 +41,15 @@ def solve_linear_subproblem(mip_model, solve_data, config):
if not mip_solver.available():
raise RuntimeError("MIP solver %s is not available." % config.mip_solver)
with SuppressInfeasibleWarning():
- results = mip_solver.solve(mip_model, **config.mip_solver_args)
+ mip_args = dict(config.mip_solver_args)
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ remaining = max(config.time_limit - elapsed, 1)
+ if config.mip_solver == 'gams':
+ mip_args['add_options'] = mip_args.get('add_options', [])
+ mip_args['add_options'].append('option reslim=%s;' % remaining)
+ elif config.mip_solver == 'multisolve':
+ mip_args['time_limit'] = min(mip_args.get('time_limit', float('inf')), remaining)
+ results = mip_solver.solve(mip_model, **mip_args)
subprob_result = SubproblemResult()
subprob_result.feasible = True
@@ -96,7 +104,15 @@ def solve_NLP(nlp_model, solve_data, config):
config.nlp_solver)
with SuppressInfeasibleWarning():
try:
- results = nlp_solver.solve(nlp_model, **config.nlp_solver_args)
+ nlp_args = dict(config.nlp_solver_args)
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ remaining = max(config.time_limit - elapsed, 1)
+ if config.nlp_solver == 'gams':
+ nlp_args['add_options'] = nlp_args.get('add_options', [])
+ nlp_args['add_options'].append('option reslim=%s;' % remaining)
+ elif config.nlp_solver == 'multisolve':
+ nlp_args['time_limit'] = min(nlp_args.get('time_limit', float('inf')), remaining)
+ results = nlp_solver.solve(nlp_model, **nlp_args)
except ValueError as err:
if 'Cannot load a SolverResults object with bad status: error' in str(err):
results = SolverResults()
@@ -187,7 +203,15 @@ def solve_MINLP(model, solve_data, config):
raise RuntimeError("MINLP solver %s is not available." %
config.minlp_solver)
with SuppressInfeasibleWarning():
- results = minlp_solver.solve(model, **config.minlp_solver_args)
+ minlp_args = dict(config.minlp_solver_args)
+ elapsed = get_main_elapsed_time(solve_data.timing)
+ remaining = max(config.time_limit - elapsed, 1)
+ if config.minlp_solver == 'gams':
+ minlp_args['add_options'] = minlp_args.get('add_options', [])
+ minlp_args['add_options'].append('option reslim=%s;' % remaining)
+ elif config.minlp_solver == 'multisolve':
+ minlp_args['time_limit'] = min(minlp_args.get('time_limit', float('inf')), remaining)
+ results = minlp_solver.solve(model, **minlp_args)
subprob_result = SubproblemResult()
subprob_result.feasible = True
@@ -214,6 +238,14 @@ def solve_MINLP(model, solve_data, config):
'Using potentially suboptimal feasible solution.')
else:
subprob_result.feasible = False
+ elif term_cond == tc.maxTimeLimit:
+ config.logger.info('MINLP subproblem failed to converge within time limit.')
+ if is_feasible(model, config):
+ config.logger.info(
+ 'MINLP solution is still feasible. '
+ 'Using potentially suboptimal feasible solution.')
+ else:
+ subprob_result.feasible = False
elif term_cond == tc.intermediateNonInteger:
config.logger.info(
"MINLP solver could not find feasible integer solution: %s" % results.solver.message)
diff --git a/pyomo/contrib/gdpopt/tests/test_gdpopt.py b/pyomo/contrib/gdpopt/tests/test_gdpopt.py
index 37df9d224a0..50a2f370cc2 100644
--- a/pyomo/contrib/gdpopt/tests/test_gdpopt.py
+++ b/pyomo/contrib/gdpopt/tests/test_gdpopt.py
@@ -5,16 +5,15 @@
from six import StringIO
-import pyomo.core.base.symbolic
import pyutilib.th as unittest
from pyomo.common.log import LoggingIntercept
from pyomo.contrib.gdpopt.GDPopt import GDPoptSolver
from pyomo.contrib.gdpopt.data_class import GDPoptSolveData
from pyomo.contrib.gdpopt.mip_solve import solve_linear_GDP
-from pyomo.contrib.gdpopt.util import is_feasible
+from pyomo.contrib.gdpopt.util import is_feasible, time_code
from pyomo.environ import ConcreteModel, Objective, SolverFactory, Var, value, Integers, Block, Constraint, maximize
from pyomo.gdp import Disjunct, Disjunction
-from pyutilib.misc import import_file
+from pyutilib.misc import import_file, Container
from pyomo.contrib.mcpp.pyomo_mcpp import mcpp_available
from pyomo.opt import TerminationCondition
@@ -51,7 +50,10 @@ def test_solve_linear_GDP_unbounded(self):
m.GDPopt_utils.disjunct_list = [m.d._autodisjuncts[0], m.d._autodisjuncts[1]]
output = StringIO()
with LoggingIntercept(output, 'pyomo.contrib.gdpopt', logging.WARNING):
- solve_linear_GDP(m, GDPoptSolveData(), GDPoptSolver.CONFIG(dict(mip_solver=mip_solver)))
+ solver_data = GDPoptSolveData()
+ solver_data.timing = Container()
+ with time_code(solver_data.timing, 'main', is_main_timer=True):
+ solve_linear_GDP(m, solver_data, GDPoptSolver.CONFIG(dict(mip_solver=mip_solver)))
self.assertIn("Linear GDP was unbounded. Resolving with arbitrary bound values",
output.getvalue().strip())
@@ -145,8 +147,6 @@ def test_is_feasible_function(self):
@unittest.skipIf(not LOA_solvers_available,
"Required subsolvers %s are not available"
% (LOA_solvers,))
-@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available,
- "Symbolic differentiation is not available")
class TestGDPopt(unittest.TestCase):
"""Tests for the GDPopt solver plugin."""
diff --git a/pyomo/contrib/gdpopt/util.py b/pyomo/contrib/gdpopt/util.py
index d15a2c695d6..389e96ae092 100644
--- a/pyomo/contrib/gdpopt/util.py
+++ b/pyomo/contrib/gdpopt/util.py
@@ -105,7 +105,7 @@ def presolve_lp_nlp(solve_data, config):
return False, None
-def process_objective(solve_data, config, move_linear_objective=False):
+def process_objective(solve_data, config, move_linear_objective=False, use_mcpp=True):
"""Process model objective function.
Check that the model has only 1 valid objective.
@@ -144,10 +144,11 @@ def process_objective(solve_data, config, move_linear_objective=False):
if move_linear_objective:
config.logger.info("Moving objective to constraint set.")
else:
- config.logger.info("Objective is nonlinear. Moving it to constraint set.")
+ config.logger.info(
+ "Objective is nonlinear. Moving it to constraint set.")
util_blk.objective_value = Var(domain=Reals, initialize=0)
- if mcpp_available():
+ if mcpp_available() and use_mcpp:
mc_obj = McCormick(main_obj.expr)
util_blk.objective_value.setub(mc_obj.upper())
util_blk.objective_value.setlb(mc_obj.lower())
@@ -206,8 +207,8 @@ def copy_var_list_values(from_list, to_list, config,
# Check to see if this is just a tolerance issue
if ignore_integrality \
and ('is not in domain Binary' in err_msg
- or 'is not in domain Integers' in err_msg):
- v_to.value = value(v_from, exception=False)
+ or 'is not in domain Integers' in err_msg):
+ v_to.value = value(v_from, exception=False)
elif 'is not in domain Binary' in err_msg and (
fabs(var_val - 1) <= config.integer_tolerance or
fabs(var_val) <= config.integer_tolerance):
@@ -431,8 +432,9 @@ def get_main_elapsed_time(timing_data_obj):
@deprecated(
- "'restore_logger_level()' has been deprecated in favor of the more specific "
- "'lower_logger_level_to()' function.", version='TBD', remove_in='TBD')
+ "'restore_logger_level()' has been deprecated in favor of the more "
+ "specific 'lower_logger_level_to()' function.",
+ version='5.6.9')
@contextmanager
def restore_logger_level(logger):
old_logger_level = logger.getEffectiveLevel()
diff --git a/pyomo/contrib/interior_point/__init__.py b/pyomo/contrib/interior_point/__init__.py
new file mode 100644
index 00000000000..1bc67ee9611
--- /dev/null
+++ b/pyomo/contrib/interior_point/__init__.py
@@ -0,0 +1,8 @@
+from pyomo.common.dependencies import numpy_available, scipy_available
+if not numpy_available or not scipy_available:
+ import pyutilib.th as unittest
+ raise unittest.SkipTest('numpy and scipy required for interior point')
+from .interface import BaseInteriorPointInterface, InteriorPointInterface
+from .interior_point import InteriorPointSolver, InteriorPointStatus
+from pyomo.contrib.interior_point import linalg
+from .inverse_reduced_hessian import inv_reduced_hessian_barrier
diff --git a/pyomo/contrib/interior_point/examples/ex1.py b/pyomo/contrib/interior_point/examples/ex1.py
new file mode 100644
index 00000000000..f71c5f27890
--- /dev/null
+++ b/pyomo/contrib/interior_point/examples/ex1.py
@@ -0,0 +1,28 @@
+import pyomo.environ as pe
+from pyomo.contrib.interior_point.interior_point import InteriorPointSolver
+from pyomo.contrib.interior_point.interface import InteriorPointInterface
+from pyomo.contrib.interior_point.linalg.mumps_interface import MumpsInterface
+import logging
+
+
+logging.basicConfig(level=logging.INFO)
+# Supposedly this sets the root logger's level to INFO.
+# But when linear_solver.logger logs with debug,
+# it gets propagated to a mysterious root logger with
+# level NOTSET...
+
+m = pe.ConcreteModel()
+m.x = pe.Var()
+m.y = pe.Var()
+m.obj = pe.Objective(expr=m.x**2 + m.y**2)
+m.c1 = pe.Constraint(expr=m.y == pe.exp(m.x))
+m.c2 = pe.Constraint(expr=m.y >= (m.x - 1)**2)
+interface = InteriorPointInterface(m)
+linear_solver = MumpsInterface(
+# log_filename='lin_sol.log',
+ icntl_options={11: 1}, # Set error level to 1 (most detailed)
+ )
+
+ip_solver = InteriorPointSolver(linear_solver)
+x, duals_eq, duals_ineq = ip_solver.solve(interface)
+print(x, duals_eq, duals_ineq)
diff --git a/pyomo/contrib/interior_point/interface.py b/pyomo/contrib/interior_point/interface.py
new file mode 100644
index 00000000000..13c5072554a
--- /dev/null
+++ b/pyomo/contrib/interior_point/interface.py
@@ -0,0 +1,624 @@
+from abc import ABCMeta, abstractmethod
+import six
+from pyomo.contrib.pynumero.interfaces import pyomo_nlp, ampl_nlp
+from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector
+import numpy as np
+import scipy.sparse
+from pyutilib.misc.timing import HierarchicalTimer
+
+
+class BaseInteriorPointInterface(six.with_metaclass(ABCMeta, object)):
+ @abstractmethod
+ def n_primals(self):
+ pass
+
+ @abstractmethod
+ def nnz_hessian_lag(self):
+ pass
+
+ @abstractmethod
+ def primals_lb(self):
+ pass
+
+ @abstractmethod
+ def primals_ub(self):
+ pass
+
+ @abstractmethod
+ def init_primals(self):
+ pass
+
+ @abstractmethod
+ def set_primals(self, primals):
+ pass
+
+ @abstractmethod
+ def get_primals(self):
+ pass
+
+ @abstractmethod
+ def get_obj_factor(self):
+ pass
+
+ @abstractmethod
+ def set_obj_factor(self, obj_factor):
+ pass
+
+ @abstractmethod
+ def evaluate_objective(self):
+ pass
+
+ @abstractmethod
+ def evaluate_grad_objective(self):
+ pass
+
+ @abstractmethod
+ def n_eq_constraints(self):
+ pass
+
+ @abstractmethod
+ def n_ineq_constraints(self):
+ pass
+
+ @abstractmethod
+ def nnz_jacobian_eq(self):
+ pass
+
+ @abstractmethod
+ def nnz_jacobian_ineq(self):
+ pass
+
+ @abstractmethod
+ def ineq_lb(self):
+ pass
+
+ @abstractmethod
+ def ineq_ub(self):
+ pass
+
+ @abstractmethod
+ def init_duals_eq(self):
+ pass
+
+ @abstractmethod
+ def init_duals_ineq(self):
+ pass
+
+ @abstractmethod
+ def set_duals_eq(self, duals_eq):
+ pass
+
+ @abstractmethod
+ def set_duals_ineq(self, duals_ineq):
+ pass
+
+ @abstractmethod
+ def get_duals_eq(self):
+ pass
+
+ @abstractmethod
+ def get_duals_ineq(self):
+ pass
+
+ @abstractmethod
+ def evaluate_eq_constraints(self):
+ pass
+
+ @abstractmethod
+ def evaluate_ineq_constraints(self):
+ pass
+
+ @abstractmethod
+ def evaluate_jacobian_eq(self):
+ pass
+
+ @abstractmethod
+ def evaluate_jacobian_ineq(self):
+ pass
+
+ @abstractmethod
+ def init_slacks(self):
+ pass
+
+ @abstractmethod
+ def init_duals_primals_lb(self):
+ pass
+
+ @abstractmethod
+ def init_duals_primals_ub(self):
+ pass
+
+ @abstractmethod
+ def init_duals_slacks_lb(self):
+ pass
+
+ @abstractmethod
+ def init_duals_slacks_ub(self):
+ pass
+
+ @abstractmethod
+ def set_slacks(self, slacks):
+ pass
+
+ @abstractmethod
+ def set_duals_primals_lb(self, duals):
+ pass
+
+ @abstractmethod
+ def set_duals_primals_ub(self, duals):
+ pass
+
+ @abstractmethod
+ def set_duals_slacks_lb(self, duals):
+ pass
+
+ @abstractmethod
+ def set_duals_slacks_ub(self, duals):
+ pass
+
+ @abstractmethod
+ def get_slacks(self):
+ pass
+
+ @abstractmethod
+ def get_duals_primals_lb(self):
+ pass
+
+ @abstractmethod
+ def get_duals_primals_ub(self):
+ pass
+
+ @abstractmethod
+ def get_duals_slacks_lb(self):
+ pass
+
+ @abstractmethod
+ def get_duals_slacks_ub(self):
+ pass
+
+ @abstractmethod
+ def set_barrier_parameter(self, barrier):
+ pass
+
+ @abstractmethod
+ def evaluate_primal_dual_kkt_matrix(self, timer=None):
+ pass
+
+ @abstractmethod
+ def evaluate_primal_dual_kkt_rhs(self, timer=None):
+ pass
+
+ @abstractmethod
+ def set_primal_dual_kkt_solution(self, sol):
+ pass
+
+ @abstractmethod
+ def get_delta_primals(self):
+ pass
+
+ @abstractmethod
+ def get_delta_slacks(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_eq(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_ineq(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_primals_lb(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_primals_ub(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_slacks_lb(self):
+ pass
+
+ @abstractmethod
+ def get_delta_duals_slacks_ub(self):
+ pass
+
+ def regularize_equality_gradient(self, kkt, coef, copy_kkt=True):
+ raise RuntimeError(
+ 'Equality gradient regularization is necessary but no '
+ 'function has been implemented for doing so.')
+
+ def regularize_hessian(self, kkt, coef, copy_kkt=True):
+ raise RuntimeError(
+ 'Hessian of Lagrangian regularization is necessary but no '
+ 'function has been implemented for doing so.')
+
+
+class InteriorPointInterface(BaseInteriorPointInterface):
+ def __init__(self, pyomo_model):
+ if type(pyomo_model) is str:
+ # Assume argument is the name of an nl file
+ self._nlp = ampl_nlp.AmplNLP(pyomo_model)
+ else:
+ self._nlp = pyomo_nlp.PyomoNLP(pyomo_model)
+ self._slacks = self.init_slacks()
+
+ # set the init_duals_primals_lb/ub from ipopt_zL_out, ipopt_zU_out if available
+ # need to compress them as well and initialize the duals_primals_lb/ub
+ self._init_duals_primals_lb, self._init_duals_primals_ub =\
+ self._get_full_duals_primals_bounds()
+ self._init_duals_primals_lb[np.isneginf(self._nlp.primals_lb())] = 0
+ self._init_duals_primals_ub[np.isinf(self._nlp.primals_ub())] = 0
+ self._duals_primals_lb = self._init_duals_primals_lb.copy()
+ self._duals_primals_ub = self._init_duals_primals_ub.copy()
+
+ # set the init_duals_slacks_lb/ub from the init_duals_ineq
+ # need to be compressed and set according to their sign
+        # (-) value indicates that the upper is active, while (+) indicates
+        # that the lower is active
+ self._init_duals_slacks_lb = self._nlp.init_duals_ineq().copy()
+ self._init_duals_slacks_lb[self._init_duals_slacks_lb < 0] = 0
+ self._init_duals_slacks_ub = self._nlp.init_duals_ineq().copy()
+ self._init_duals_slacks_ub[self._init_duals_slacks_ub > 0] = 0
+ self._init_duals_slacks_ub *= -1.0
+
+ self._duals_slacks_lb = self._init_duals_slacks_lb.copy()
+ self._duals_slacks_ub = self._init_duals_slacks_ub.copy()
+
+ self._delta_primals = None
+ self._delta_slacks = None
+ self._delta_duals_eq = None
+ self._delta_duals_ineq = None
+ self._barrier = None
+
+ def n_primals(self):
+ return self._nlp.n_primals()
+
+ def nnz_hessian_lag(self):
+ return self._nlp.nnz_hessian_lag()
+
+ def set_obj_factor(self, obj_factor):
+ self._nlp.set_obj_factor(obj_factor)
+
+ def get_obj_factor(self):
+ return self._nlp.get_obj_factor()
+
+ def n_eq_constraints(self):
+ return self._nlp.n_eq_constraints()
+
+ def n_ineq_constraints(self):
+ return self._nlp.n_ineq_constraints()
+
+ def nnz_jacobian_eq(self):
+ return self._nlp.nnz_jacobian_eq()
+
+ def nnz_jacobian_ineq(self):
+ return self._nlp.nnz_jacobian_ineq()
+
+ def init_primals(self):
+ primals = self._nlp.init_primals()
+ return primals
+
+ def init_slacks(self):
+ slacks = self._nlp.evaluate_ineq_constraints()
+ return slacks
+
+ def init_duals_eq(self):
+ return self._nlp.init_duals_eq()
+
+ def init_duals_ineq(self):
+ return self._nlp.init_duals_ineq()
+
+ def init_duals_primals_lb(self):
+ return self._init_duals_primals_lb
+
+ def init_duals_primals_ub(self):
+ return self._init_duals_primals_ub
+
+ def init_duals_slacks_lb(self):
+ return self._init_duals_slacks_lb
+
+ def init_duals_slacks_ub(self):
+ return self._init_duals_slacks_ub
+
+ def set_primals(self, primals):
+ self._nlp.set_primals(primals)
+
+ def set_slacks(self, slacks):
+ self._slacks = slacks
+
+ def set_duals_eq(self, duals):
+ self._nlp.set_duals_eq(duals)
+
+ def set_duals_ineq(self, duals):
+ self._nlp.set_duals_ineq(duals)
+
+ def set_duals_primals_lb(self, duals):
+ self._duals_primals_lb = duals
+
+ def set_duals_primals_ub(self, duals):
+ self._duals_primals_ub = duals
+
+ def set_duals_slacks_lb(self, duals):
+ self._duals_slacks_lb = duals
+
+ def set_duals_slacks_ub(self, duals):
+ self._duals_slacks_ub = duals
+
+ def get_primals(self):
+ return self._nlp.get_primals()
+
+ def get_slacks(self):
+ return self._slacks
+
+ def get_duals_eq(self):
+ return self._nlp.get_duals_eq()
+
+ def get_duals_ineq(self):
+ return self._nlp.get_duals_ineq()
+
+ def get_duals_primals_lb(self):
+ return self._duals_primals_lb
+
+ def get_duals_primals_ub(self):
+ return self._duals_primals_ub
+
+ def get_duals_slacks_lb(self):
+ return self._duals_slacks_lb
+
+ def get_duals_slacks_ub(self):
+ return self._duals_slacks_ub
+
+ def primals_lb(self):
+ return self._nlp.primals_lb()
+
+ def primals_ub(self):
+ return self._nlp.primals_ub()
+
+ def ineq_lb(self):
+ return self._nlp.ineq_lb()
+
+ def ineq_ub(self):
+ return self._nlp.ineq_ub()
+
+ def set_barrier_parameter(self, barrier):
+ self._barrier = barrier
+
+ def pyomo_nlp(self):
+ return self._nlp
+
+ def evaluate_primal_dual_kkt_matrix(self, timer=None):
+ if timer is None:
+ timer = HierarchicalTimer()
+ timer.start('eval hess')
+ hessian = self._nlp.evaluate_hessian_lag()
+ timer.stop('eval hess')
+ timer.start('eval jac')
+ jac_eq = self._nlp.evaluate_jacobian_eq()
+ jac_ineq = self._nlp.evaluate_jacobian_ineq()
+ timer.stop('eval jac')
+
+ duals_primals_lb = self._duals_primals_lb
+ duals_primals_ub = self._duals_primals_ub
+ duals_slacks_lb = self._duals_slacks_lb
+ duals_slacks_ub = self._duals_slacks_ub
+ primals = self._nlp.get_primals()
+
+ timer.start('hess block')
+ data = (duals_primals_lb/(primals - self._nlp.primals_lb()) +
+ duals_primals_ub/(self._nlp.primals_ub() - primals))
+ n = self._nlp.n_primals()
+ indices = np.arange(n)
+ hess_block = scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n))
+ hess_block += hessian
+ timer.stop('hess block')
+
+ timer.start('slack block')
+ data = (duals_slacks_lb/(self._slacks - self._nlp.ineq_lb()) +
+ duals_slacks_ub/(self._nlp.ineq_ub() - self._slacks))
+ n = self._nlp.n_ineq_constraints()
+ indices = np.arange(n)
+ slack_block = scipy.sparse.coo_matrix((data, (indices, indices)), shape=(n, n))
+ timer.stop('slack block')
+
+ timer.start('set block')
+ kkt = BlockMatrix(4, 4)
+ kkt.set_block(0, 0, hess_block)
+ kkt.set_block(1, 1, slack_block)
+ kkt.set_block(2, 0, jac_eq)
+ kkt.set_block(0, 2, jac_eq.transpose())
+ kkt.set_block(3, 0, jac_ineq)
+ kkt.set_block(0, 3, jac_ineq.transpose())
+ kkt.set_block(3, 1, -scipy.sparse.identity(
+ self._nlp.n_ineq_constraints(),
+ format='coo'))
+ kkt.set_block(1, 3, -scipy.sparse.identity(
+ self._nlp.n_ineq_constraints(),
+ format='coo'))
+ timer.stop('set block')
+ return kkt
+
+ def evaluate_primal_dual_kkt_rhs(self, timer=None):
+ if timer is None:
+ timer = HierarchicalTimer()
+ timer.start('eval grad obj')
+ grad_obj = self.get_obj_factor() * self.evaluate_grad_objective()
+ timer.stop('eval grad obj')
+ timer.start('eval jac')
+ jac_eq = self._nlp.evaluate_jacobian_eq()
+ jac_ineq = self._nlp.evaluate_jacobian_ineq()
+ timer.stop('eval jac')
+ timer.start('eval cons')
+ eq_resid = self._nlp.evaluate_eq_constraints()
+ ineq_resid = self._nlp.evaluate_ineq_constraints() - self._slacks
+ timer.stop('eval cons')
+
+ timer.start('grad_lag_primals')
+ grad_lag_primals = (grad_obj +
+ jac_eq.transpose() * self._nlp.get_duals_eq() +
+ jac_ineq.transpose() * self._nlp.get_duals_ineq() -
+ self._barrier / (self._nlp.get_primals() - self._nlp.primals_lb()) +
+ self._barrier / (self._nlp.primals_ub() - self._nlp.get_primals()))
+ timer.stop('grad_lag_primals')
+
+ timer.start('grad_lag_slacks')
+ grad_lag_slacks = (-self._nlp.get_duals_ineq() -
+ self._barrier / (self._slacks - self._nlp.ineq_lb()) +
+ self._barrier / (self._nlp.ineq_ub() - self._slacks))
+ timer.stop('grad_lag_slacks')
+
+ rhs = BlockVector(4)
+ rhs.set_block(0, grad_lag_primals)
+ rhs.set_block(1, grad_lag_slacks)
+ rhs.set_block(2, eq_resid)
+ rhs.set_block(3, ineq_resid)
+ rhs = -rhs
+ return rhs
+
+ def set_primal_dual_kkt_solution(self, sol):
+ self._delta_primals = sol.get_block(0)
+ self._delta_slacks = sol.get_block(1)
+ self._delta_duals_eq = sol.get_block(2)
+ self._delta_duals_ineq = sol.get_block(3)
+
+ def get_delta_primals(self):
+ return self._delta_primals
+
+ def get_delta_slacks(self):
+ return self._delta_slacks
+
+ def get_delta_duals_eq(self):
+ return self._delta_duals_eq
+
+ def get_delta_duals_ineq(self):
+ return self._delta_duals_ineq
+
+ def get_delta_duals_primals_lb(self):
+ res = (((self._barrier - self._duals_primals_lb * self._delta_primals) /
+ (self._nlp.get_primals() - self._nlp.primals_lb())) -
+ self._duals_primals_lb)
+ return res
+
+ def get_delta_duals_primals_ub(self):
+ res = (((self._barrier + self._duals_primals_ub * self._delta_primals) /
+ (self._nlp.primals_ub() - self._nlp.get_primals())) -
+ self._duals_primals_ub)
+ return res
+
+ def get_delta_duals_slacks_lb(self):
+ res = (((self._barrier - self._duals_slacks_lb * self._delta_slacks) /
+ (self._slacks - self._nlp.ineq_lb())) -
+ self._duals_slacks_lb)
+ return res
+
+ def get_delta_duals_slacks_ub(self):
+ res = (((self._barrier + self._duals_slacks_ub * self._delta_slacks) /
+ (self._nlp.ineq_ub() - self._slacks)) -
+ self._duals_slacks_ub)
+ return res
+
+ def evaluate_objective(self):
+ return self._nlp.evaluate_objective()
+
+ def evaluate_eq_constraints(self):
+ return self._nlp.evaluate_eq_constraints()
+
+ def evaluate_ineq_constraints(self):
+ return self._nlp.evaluate_ineq_constraints()
+
+ def evaluate_grad_objective(self):
+ return self._nlp.evaluate_grad_objective()
+
+ def evaluate_jacobian_eq(self):
+ return self._nlp.evaluate_jacobian_eq()
+
+ def evaluate_jacobian_ineq(self):
+ return self._nlp.evaluate_jacobian_ineq()
+
+ def regularize_equality_gradient(self, kkt, coef, copy_kkt=True):
+ # Not technically regularizing the equality gradient ...
+ # Replace this with a regularize_diagonal_block function?
+ # Then call with kkt matrix and the value of the perturbation?
+
+ # Use a constant perturbation to regularize the equality constraint
+ # gradient
+ if copy_kkt:
+ kkt = kkt.copy()
+ reg_coef = coef
+ ptb = (reg_coef *
+ scipy.sparse.identity(self._nlp.n_eq_constraints(),
+ format='coo'))
+
+ kkt.set_block(2, 2, ptb)
+ return kkt
+
+ def regularize_hessian(self, kkt, coef, copy_kkt=True):
+ if copy_kkt:
+ kkt = kkt.copy()
+
+ hess = kkt.get_block(0, 0)
+ ptb = coef * scipy.sparse.identity(self._nlp.n_primals(), format='coo')
+ hess += ptb
+ kkt.set_block(0, 0, hess)
+ return kkt
+
+ def _get_full_duals_primals_bounds(self):
+ full_duals_primals_lb = None
+ full_duals_primals_ub = None
+ # Check in case _nlp was constructed as an AmplNLP (from an nl file)
+ if (hasattr(self._nlp, 'pyomo_model') and
+ hasattr(self._nlp, 'get_pyomo_variables')):
+ pyomo_model = self._nlp.pyomo_model()
+ pyomo_variables = self._nlp.get_pyomo_variables()
+ if hasattr(pyomo_model,'ipopt_zL_out'):
+ zL_suffix = pyomo_model.ipopt_zL_out
+ full_duals_primals_lb = np.empty(self._nlp.n_primals())
+ for i,v in enumerate(pyomo_variables):
+ if v in zL_suffix:
+ full_duals_primals_lb[i] = zL_suffix[v]
+
+ if hasattr(pyomo_model,'ipopt_zU_out'):
+ zU_suffix = pyomo_model.ipopt_zU_out
+ full_duals_primals_ub = np.empty(self._nlp.n_primals())
+ for i,v in enumerate(pyomo_variables):
+ if v in zU_suffix:
+ full_duals_primals_ub[i] = zU_suffix[v]
+
+ if full_duals_primals_lb is None:
+ full_duals_primals_lb = np.ones(self._nlp.n_primals())
+
+ if full_duals_primals_ub is None:
+ full_duals_primals_ub = np.ones(self._nlp.n_primals())
+
+ return full_duals_primals_lb, full_duals_primals_ub
+
+ def load_primals_into_pyomo_model(self):
+ if not isinstance(self._nlp, pyomo_nlp.PyomoNLP):
+ raise RuntimeError('Can only load primals into a pyomo model if a pyomo model was used in the constructor.')
+
+ pyomo_variables = self._nlp.get_pyomo_variables()
+ primals = self._nlp.get_primals()
+ for i, v in enumerate(pyomo_variables):
+ v.value = primals[i]
+
+ def pyomo_model(self):
+ return self._nlp.pyomo_model()
+
+ def get_pyomo_variables(self):
+ return self._nlp.get_pyomo_variables()
+
+ def get_pyomo_constraints(self):
+ return self._nlp.get_pyomo_constraints()
+
+ def variable_names(self):
+ return self._nlp.variable_names()
+
+ def constraint_names(self):
+ return self._nlp.constraint_names()
+
+ def get_primal_indices(self, pyomo_variables):
+ return self._nlp.get_primal_indices(pyomo_variables)
+
+ def get_constraint_indices(self, pyomo_constraints):
+ return self._nlp.get_constraint_indices(pyomo_constraints)
diff --git a/pyomo/contrib/interior_point/interior_point.py b/pyomo/contrib/interior_point/interior_point.py
new file mode 100644
index 00000000000..b2cda7399c0
--- /dev/null
+++ b/pyomo/contrib/interior_point/interior_point.py
@@ -0,0 +1,677 @@
+from pyomo.contrib.pynumero.interfaces.utils import build_bounds_mask, build_compression_matrix
+import numpy as np
+import logging
+import time
+from .linalg.results import LinearSolverStatus
+from pyutilib.misc.timing import HierarchicalTimer
+import enum
+
+
+"""
+Interface Requirements
+----------------------
+1) duals_primals_lb[i] must always be 0 if primals_lb[i] is -inf
+2) duals_primals_ub[i] must always be 0 if primals_ub[i] is inf
+3) duals_slacks_lb[i] must always be 0 if ineq_lb[i] is -inf
+4) duals_slacks_ub[i] must always be 0 if ineq_ub[i] is inf
+"""
+
+
+ip_logger = logging.getLogger('interior_point')
+
+
+class InteriorPointStatus(enum.Enum):
+ optimal = 0
+ error = 1
+
+
+class LinearSolveContext(object):
+ def __init__(self,
+ interior_point_logger,
+ linear_solver_logger,
+ filename=None,
+ level=logging.INFO):
+
+ self.interior_point_logger = interior_point_logger
+ self.linear_solver_logger = linear_solver_logger
+ self.filename = filename
+
+ if filename:
+ self.handler = logging.FileHandler(filename)
+ self.handler.setLevel(level)
+
+ def __enter__(self):
+ self.linear_solver_logger.propagate = False
+ self.interior_point_logger.propagate = False
+ if self.filename:
+ self.linear_solver_logger.addHandler(self.handler)
+ self.interior_point_logger.addHandler(self.handler)
+
+
+ def __exit__(self, et, ev, tb):
+ self.linear_solver_logger.propagate = True
+ self.interior_point_logger.propagate = True
+ if self.filename:
+ self.linear_solver_logger.removeHandler(self.handler)
+ self.interior_point_logger.removeHandler(self.handler)
+
+
+# How should the RegContext work?
+# TODO: in this class, use the linear_solver_context to ...
+# Use linear_solver_logger to write iter_no and reg_coef
+#
+# Define a method for logging IP_reg_info to the linear solver log
+# Method can be called within linear_solve_context
+class FactorizationContext(object):
+ def __init__(self, logger):
+ # Any reason to pass in a logging level here?
+ # ^ So the "regularization log" can have its own outlvl
+ self.logger = logger
+
+ def __enter__(self):
+ self.logger.debug('Factorizing KKT')
+ self.log_header()
+ return self
+
+ def __exit__(self, et, ev, tb):
+ self.logger.debug('Finished factorizing KKT')
+ # Will this swallow exceptions in this context?
+
+ def log_header(self):
+ self.logger.debug('{_iter:<10}'
+ '{reg_iter:<10}'
+ '{num_realloc:<10}'
+ '{reg_coef:<10}'
+ '{neg_eig:<10}'
+ '{status:<10}'.format(
+ _iter='Iter',
+ reg_iter='reg_iter',
+ num_realloc='# realloc',
+ reg_coef='reg_coef',
+ neg_eig='neg_eig',
+ status='status'))
+
+ def log_info(self, _iter, reg_iter, num_realloc, coef, neg_eig, status):
+ self.logger.debug('{_iter:<10}'
+ '{reg_iter:<10}'
+ '{num_realloc:<10}'
+ '{reg_coef:<10.2e}'
+ '{neg_eig:<10}'
+ '{status:<10}'.format(
+ _iter=_iter,
+ reg_iter=reg_iter,
+ num_realloc=num_realloc,
+ reg_coef=coef,
+ neg_eig=str(neg_eig),
+ status=status.name))
+
+
+class InteriorPointSolver(object):
+ """
+ Class for creating interior point solvers with different options
+ """
+ def __init__(self,
+ linear_solver,
+ max_iter=100,
+ tol=1e-8,
+ linear_solver_log_filename=None,
+ max_reallocation_iterations=5,
+ reallocation_factor=2):
+ self.linear_solver = linear_solver
+ self.max_iter = max_iter
+ self.tol = tol
+ self.linear_solver_log_filename = linear_solver_log_filename
+ self.max_reallocation_iterations = max_reallocation_iterations
+ self.reallocation_factor = reallocation_factor
+ self.base_eq_reg_coef = -1e-8
+ self._barrier_parameter = 0.1
+ self._minimum_barrier_parameter = 1e-9
+ self.hess_reg_coef = 1e-4
+ self.max_reg_iter = 6
+ self.reg_factor_increase = 100
+
+ self.logger = logging.getLogger('interior_point')
+ self._iter = 0
+ self.factorization_context = FactorizationContext(self.logger)
+
+ if linear_solver_log_filename:
+ with open(linear_solver_log_filename, 'w'):
+ pass
+
+ self.linear_solver_logger = self.linear_solver.getLogger()
+ self.linear_solve_context = LinearSolveContext(self.logger,
+ self.linear_solver_logger,
+ self.linear_solver_log_filename)
+
+ def update_barrier_parameter(self):
+ self._barrier_parameter = max(self._minimum_barrier_parameter, min(0.5 * self._barrier_parameter, self._barrier_parameter ** 1.5))
+
+ def set_linear_solver(self, linear_solver):
+ """This method exists to hopefully make it easy to try the same IP
+ algorithm with different linear solvers.
+ Subclasses may have linear-solver specific methods, in which case
+ this should not be called.
+
+ Hopefully the linear solver interface can be standardized such that
+ this is not a problem. (Need a generalized method for set_options)
+ """
+ self.linear_solver = linear_solver
+
+ def set_interface(self, interface):
+ self.interface = interface
+
+ def solve(self, interface, timer=None, report_timing=False):
+ """
+ Parameters
+ ----------
+ interface: pyomo.contrib.interior_point.interface.BaseInteriorPointInterface
+ The interior point interface. This object handles the function evaluation,
+ building the KKT matrix, and building the KKT right hand side.
+ timer: HierarchicalTimer
+ report_timing: bool
+ """
+ linear_solver = self.linear_solver
+ max_iter = self.max_iter
+ tol = self.tol
+ if timer is None:
+ timer = HierarchicalTimer()
+
+ timer.start('IP solve')
+ timer.start('init')
+
+ self._barrier_parameter = 0.1
+
+ self.set_interface(interface)
+
+ t0 = time.time()
+ primals = interface.init_primals().copy()
+ slacks = interface.init_slacks().copy()
+ duals_eq = interface.init_duals_eq().copy()
+ duals_ineq = interface.init_duals_ineq().copy()
+ duals_primals_lb = interface.init_duals_primals_lb().copy()
+ duals_primals_ub = interface.init_duals_primals_ub().copy()
+ duals_slacks_lb = interface.init_duals_slacks_lb().copy()
+ duals_slacks_ub = interface.init_duals_slacks_ub().copy()
+
+ self.process_init(primals, interface.primals_lb(), interface.primals_ub())
+ self.process_init(slacks, interface.ineq_lb(), interface.ineq_ub())
+ self.process_init_duals_lb(duals_primals_lb, self.interface.primals_lb())
+ self.process_init_duals_ub(duals_primals_ub, self.interface.primals_ub())
+ self.process_init_duals_lb(duals_slacks_lb, self.interface.ineq_lb())
+ self.process_init_duals_ub(duals_slacks_ub, self.interface.ineq_ub())
+
+ interface.set_barrier_parameter(self._barrier_parameter)
+
+ alpha_primal_max = 1
+ alpha_dual_max = 1
+
+ self.logger.info('{_iter:<6}'
+ '{objective:<11}'
+ '{primal_inf:<11}'
+ '{dual_inf:<11}'
+ '{compl_inf:<11}'
+ '{barrier:<11}'
+ '{alpha_p:<11}'
+ '{alpha_d:<11}'
+ '{reg:<11}'
+ '{time:<7}'.format(_iter='Iter',
+ objective='Objective',
+ primal_inf='Prim Inf',
+ dual_inf='Dual Inf',
+ compl_inf='Comp Inf',
+ barrier='Barrier',
+ alpha_p='Prim Step',
+ alpha_d='Dual Step',
+ reg='Reg',
+ time='Time'))
+
+ reg_coef = 0
+
+ timer.stop('init')
+ status = InteriorPointStatus.error
+
+ for _iter in range(max_iter):
+ self._iter = _iter
+
+ interface.set_primals(primals)
+ interface.set_slacks(slacks)
+ interface.set_duals_eq(duals_eq)
+ interface.set_duals_ineq(duals_ineq)
+ interface.set_duals_primals_lb(duals_primals_lb)
+ interface.set_duals_primals_ub(duals_primals_ub)
+ interface.set_duals_slacks_lb(duals_slacks_lb)
+ interface.set_duals_slacks_ub(duals_slacks_ub)
+
+ timer.start('convergence check')
+ primal_inf, dual_inf, complimentarity_inf = \
+ self.check_convergence(barrier=0, timer=timer)
+ timer.stop('convergence check')
+ objective = interface.evaluate_objective()
+ self.logger.info('{_iter:<6}'
+ '{objective:<11.2e}'
+ '{primal_inf:<11.2e}'
+ '{dual_inf:<11.2e}'
+ '{compl_inf:<11.2e}'
+ '{barrier:<11.2e}'
+ '{alpha_p:<11.2e}'
+ '{alpha_d:<11.2e}'
+ '{reg:<11.2e}'
+ '{time:<7.3f}'.format(_iter=_iter,
+ objective=objective,
+ primal_inf=primal_inf,
+ dual_inf=dual_inf,
+ compl_inf=complimentarity_inf,
+ barrier=self._barrier_parameter,
+ alpha_p=alpha_primal_max,
+ alpha_d=alpha_dual_max,
+ reg=reg_coef,
+ time=time.time() - t0))
+
+ if max(primal_inf, dual_inf, complimentarity_inf) <= tol:
+ status = InteriorPointStatus.optimal
+ break
+ timer.start('convergence check')
+ primal_inf, dual_inf, complimentarity_inf = \
+ self.check_convergence(barrier=self._barrier_parameter, timer=timer)
+ timer.stop('convergence check')
+ if max(primal_inf, dual_inf, complimentarity_inf) \
+ <= 0.1 * self._barrier_parameter:
+ # This comparison is made with barrier problem infeasibility.
+ # Sometimes have trouble getting dual infeasibility low enough
+ self.update_barrier_parameter()
+
+ interface.set_barrier_parameter(self._barrier_parameter)
+ timer.start('eval')
+ timer.start('eval kkt')
+ kkt = interface.evaluate_primal_dual_kkt_matrix(timer=timer)
+ timer.stop('eval kkt')
+ timer.start('eval rhs')
+ rhs = interface.evaluate_primal_dual_kkt_rhs(timer=timer)
+ timer.stop('eval rhs')
+ timer.stop('eval')
+
+ # Factorize linear system
+ timer.start('factorize')
+ reg_coef = self.factorize(kkt=kkt, timer=timer)
+ timer.stop('factorize')
+
+ timer.start('back solve')
+ with self.linear_solve_context:
+ self.logger.info('Iter: %s' % self._iter)
+ delta = linear_solver.do_back_solve(rhs)
+ timer.stop('back solve')
+
+ interface.set_primal_dual_kkt_solution(delta)
+ timer.start('frac boundary')
+ alpha_primal_max, alpha_dual_max = \
+ self.fraction_to_the_boundary()
+ timer.stop('frac boundary')
+ delta_primals = interface.get_delta_primals()
+ delta_slacks = interface.get_delta_slacks()
+ delta_duals_eq = interface.get_delta_duals_eq()
+ delta_duals_ineq = interface.get_delta_duals_ineq()
+ delta_duals_primals_lb = interface.get_delta_duals_primals_lb()
+ delta_duals_primals_ub = interface.get_delta_duals_primals_ub()
+ delta_duals_slacks_lb = interface.get_delta_duals_slacks_lb()
+ delta_duals_slacks_ub = interface.get_delta_duals_slacks_ub()
+
+ primals += alpha_primal_max * delta_primals
+ slacks += alpha_primal_max * delta_slacks
+ duals_eq += alpha_dual_max * delta_duals_eq
+ duals_ineq += alpha_dual_max * delta_duals_ineq
+ duals_primals_lb += alpha_dual_max * delta_duals_primals_lb
+ duals_primals_ub += alpha_dual_max * delta_duals_primals_ub
+ duals_slacks_lb += alpha_dual_max * delta_duals_slacks_lb
+ duals_slacks_ub += alpha_dual_max * delta_duals_slacks_ub
+
+ timer.stop('IP solve')
+ if report_timing:
+ print(timer)
+ return status
+
+ def factorize(self, kkt, timer=None):
+ desired_n_neg_evals = (self.interface.n_eq_constraints() +
+ self.interface.n_ineq_constraints())
+ reg_iter = 0
+ with self.factorization_context as fact_con:
+ status, num_realloc = try_factorization_and_reallocation(kkt=kkt,
+ linear_solver=self.linear_solver,
+ reallocation_factor=self.reallocation_factor,
+ max_iter=self.max_reallocation_iterations,
+ timer=timer)
+ if status not in {LinearSolverStatus.successful, LinearSolverStatus.singular}:
+ raise RuntimeError('Could not factorize KKT system; linear solver status: ' + str(status))
+
+ if status == LinearSolverStatus.successful:
+ neg_eig = self.linear_solver.get_inertia()[1]
+ else:
+ neg_eig = None
+ fact_con.log_info(_iter=self._iter, reg_iter=reg_iter, num_realloc=num_realloc,
+ coef=0, neg_eig=neg_eig, status=status)
+ reg_iter += 1
+
+ if status == LinearSolverStatus.singular:
+ kkt = self.interface.regularize_equality_gradient(kkt=kkt,
+ coef=self.base_eq_reg_coef * self._barrier_parameter**0.25,
+ copy_kkt=False)
+
+ total_hess_reg_coef = self.hess_reg_coef
+ last_hess_reg_coef = 0
+
+ while neg_eig != desired_n_neg_evals or status == LinearSolverStatus.singular:
+ kkt = self.interface.regularize_hessian(kkt=kkt,
+ coef=total_hess_reg_coef - last_hess_reg_coef,
+ copy_kkt=False)
+ status, num_realloc = try_factorization_and_reallocation(kkt=kkt,
+ linear_solver=self.linear_solver,
+ reallocation_factor=self.reallocation_factor,
+ max_iter=self.max_reallocation_iterations,
+ timer=timer)
+ if status != LinearSolverStatus.successful:
+ raise RuntimeError('Could not factorize KKT system; linear solver status: ' + str(status))
+ neg_eig = self.linear_solver.get_inertia()[1]
+ fact_con.log_info(_iter=self._iter, reg_iter=reg_iter, num_realloc=num_realloc,
+ coef=total_hess_reg_coef, neg_eig=neg_eig, status=status)
+ reg_iter += 1
+ if reg_iter > self.max_reg_iter:
+ raise RuntimeError('Exceeded maximum number of regularization iterations.')
+ last_hess_reg_coef = total_hess_reg_coef
+ total_hess_reg_coef *= self.reg_factor_increase
+
+ return last_hess_reg_coef
+
+ def process_init(self, x, lb, ub):
+ process_init(x, lb, ub)
+
+ def process_init_duals_lb(self, x, lb):
+ process_init_duals_lb(x, lb)
+
+ def process_init_duals_ub(self, x, ub):
+ process_init_duals_ub(x, ub)
+
+ def check_convergence(self, barrier, timer=None):
+ """
+ Parameters
+ ----------
+ barrier: float
+ timer: HierarchicalTimer
+
+ Returns
+ -------
+ primal_inf: float
+ dual_inf: float
+ complimentarity_inf: float
+ """
+ if timer is None:
+ timer = HierarchicalTimer()
+
+ interface = self.interface
+ slacks = interface.get_slacks()
+ timer.start('grad obj')
+ grad_obj = interface.get_obj_factor() * interface.evaluate_grad_objective()
+ timer.stop('grad obj')
+ timer.start('jac eq')
+ jac_eq = interface.evaluate_jacobian_eq()
+ timer.stop('jac eq')
+ timer.start('jac ineq')
+ jac_ineq = interface.evaluate_jacobian_ineq()
+ timer.stop('jac ineq')
+ timer.start('eq cons')
+ eq_resid = interface.evaluate_eq_constraints()
+ timer.stop('eq cons')
+ timer.start('ineq cons')
+ ineq_resid = interface.evaluate_ineq_constraints() - slacks
+ timer.stop('ineq cons')
+ primals = interface.get_primals()
+ duals_eq = interface.get_duals_eq()
+ duals_ineq = interface.get_duals_ineq()
+ duals_primals_lb = interface.get_duals_primals_lb()
+ duals_primals_ub = interface.get_duals_primals_ub()
+ duals_slacks_lb = interface.get_duals_slacks_lb()
+ duals_slacks_ub = interface.get_duals_slacks_ub()
+
+ primals_lb = interface.primals_lb()
+ primals_ub = interface.primals_ub()
+ primals_lb_mod = primals_lb.copy()
+ primals_ub_mod = primals_ub.copy()
+ primals_lb_mod[np.isneginf(primals_lb)] = 0 # these entries get multiplied by 0
+ primals_ub_mod[np.isinf(primals_ub)] = 0 # these entries get multiplied by 0
+
+ ineq_lb = interface.ineq_lb()
+ ineq_ub = interface.ineq_ub()
+ ineq_lb_mod = ineq_lb.copy()
+ ineq_ub_mod = ineq_ub.copy()
+ ineq_lb_mod[np.isneginf(ineq_lb)] = 0 # these entries get multiplied by 0
+ ineq_ub_mod[np.isinf(ineq_ub)] = 0 # these entries get multiplied by 0
+
+ timer.start('grad_lag_primals')
+ grad_lag_primals = grad_obj + jac_eq.transpose() * duals_eq
+ grad_lag_primals += jac_ineq.transpose() * duals_ineq
+ grad_lag_primals -= duals_primals_lb
+ grad_lag_primals += duals_primals_ub
+ timer.stop('grad_lag_primals')
+ timer.start('grad_lag_slacks')
+ grad_lag_slacks = (-duals_ineq -
+ duals_slacks_lb +
+ duals_slacks_ub)
+ timer.stop('grad_lag_slacks')
+ timer.start('bound resids')
+ primals_lb_resid = (primals - primals_lb_mod) * duals_primals_lb - barrier
+ primals_ub_resid = (primals_ub_mod - primals) * duals_primals_ub - barrier
+ primals_lb_resid[np.isneginf(primals_lb)] = 0
+ primals_ub_resid[np.isinf(primals_ub)] = 0
+ slacks_lb_resid = (slacks - ineq_lb_mod) * duals_slacks_lb - barrier
+ slacks_ub_resid = (ineq_ub_mod - slacks) * duals_slacks_ub - barrier
+ slacks_lb_resid[np.isneginf(ineq_lb)] = 0
+ slacks_ub_resid[np.isinf(ineq_ub)] = 0
+ timer.stop('bound resids')
+
+ if eq_resid.size == 0:
+ max_eq_resid = 0
+ else:
+ max_eq_resid = np.max(np.abs(eq_resid))
+ if ineq_resid.size == 0:
+ max_ineq_resid = 0
+ else:
+ max_ineq_resid = np.max(np.abs(ineq_resid))
+ primal_inf = max(max_eq_resid, max_ineq_resid)
+
+ max_grad_lag_primals = np.max(np.abs(grad_lag_primals))
+ if grad_lag_slacks.size == 0:
+ max_grad_lag_slacks = 0
+ else:
+ max_grad_lag_slacks = np.max(np.abs(grad_lag_slacks))
+ dual_inf = max(max_grad_lag_primals, max_grad_lag_slacks)
+
+ if primals_lb_resid.size == 0:
+ max_primals_lb_resid = 0
+ else:
+ max_primals_lb_resid = np.max(np.abs(primals_lb_resid))
+ if primals_ub_resid.size == 0:
+ max_primals_ub_resid = 0
+ else:
+ max_primals_ub_resid = np.max(np.abs(primals_ub_resid))
+ if slacks_lb_resid.size == 0:
+ max_slacks_lb_resid = 0
+ else:
+ max_slacks_lb_resid = np.max(np.abs(slacks_lb_resid))
+ if slacks_ub_resid.size == 0:
+ max_slacks_ub_resid = 0
+ else:
+ max_slacks_ub_resid = np.max(np.abs(slacks_ub_resid))
+ complimentarity_inf = max(max_primals_lb_resid, max_primals_ub_resid,
+ max_slacks_lb_resid, max_slacks_ub_resid)
+
+ return primal_inf, dual_inf, complimentarity_inf
+
+ def fraction_to_the_boundary(self):
+ return fraction_to_the_boundary(self.interface, 1 - self._barrier_parameter)
+
+
+def try_factorization_and_reallocation(kkt, linear_solver, reallocation_factor, max_iter, timer=None):
+ if timer is None:
+ timer = HierarchicalTimer()
+
+ assert max_iter >= 1
+ for count in range(max_iter):
+ timer.start('symbolic')
+ """
+ Performance could be improved significantly by only performing symbolic factorization once.
+ However, we first have to make sure the nonzero structure (and ordering of row and column arrays)
+ of the KKT matrix never changes. We have not had time to test this thoroughly, yet.
+ """
+ res = linear_solver.do_symbolic_factorization(matrix=kkt, raise_on_error=False)
+ timer.stop('symbolic')
+ if res.status == LinearSolverStatus.successful:
+ timer.start('numeric')
+ res = linear_solver.do_numeric_factorization(matrix=kkt, raise_on_error=False)
+ timer.stop('numeric')
+ status = res.status
+ if status == LinearSolverStatus.not_enough_memory:
+ linear_solver.increase_memory_allocation(reallocation_factor)
+ else:
+ break
+ return status, count
+
+
+def _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl):
+ delta_x_mod = delta_x.copy()
+ delta_x_mod[delta_x_mod == 0] = 1
+ alpha = -tau * (x - xl) / delta_x_mod
+ alpha[delta_x >= 0] = np.inf
+ if alpha.size == 0:
+ return 1
+ else:
+ return min(alpha.min(), 1)
+
+
+def _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu):
+ delta_x_mod = delta_x.copy()
+ delta_x_mod[delta_x_mod == 0] = 1
+ alpha = tau * (xu - x) / delta_x_mod
+ alpha[delta_x <= 0] = np.inf
+ if alpha.size == 0:
+ return 1
+ else:
+ return min(alpha.min(), 1)
+
+
+def fraction_to_the_boundary(interface, tau):
+ """
+ Parameters
+ ----------
+ interface: pyomo.contrib.interior_point.interface.BaseInteriorPointInterface
+ tau: float
+
+ Returns
+ -------
+ alpha_primal_max: float
+ alpha_dual_max: float
+ """
+ primals = interface.get_primals()
+ slacks = interface.get_slacks()
+ duals_primals_lb = interface.get_duals_primals_lb()
+ duals_primals_ub = interface.get_duals_primals_ub()
+ duals_slacks_lb = interface.get_duals_slacks_lb()
+ duals_slacks_ub = interface.get_duals_slacks_ub()
+
+ delta_primals = interface.get_delta_primals()
+ delta_slacks = interface.get_delta_slacks()
+ delta_duals_primals_lb = interface.get_delta_duals_primals_lb()
+ delta_duals_primals_ub = interface.get_delta_duals_primals_ub()
+ delta_duals_slacks_lb = interface.get_delta_duals_slacks_lb()
+ delta_duals_slacks_ub = interface.get_delta_duals_slacks_ub()
+
+ primals_lb = interface.primals_lb()
+ primals_ub = interface.primals_ub()
+ ineq_lb = interface.ineq_lb()
+ ineq_ub = interface.ineq_ub()
+
+ alpha_primal_max_a = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=primals,
+ delta_x=delta_primals,
+ xl=primals_lb)
+ alpha_primal_max_b = _fraction_to_the_boundary_helper_ub(
+ tau=tau,
+ x=primals,
+ delta_x=delta_primals,
+ xu=primals_ub)
+ alpha_primal_max_c = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=slacks,
+ delta_x=delta_slacks,
+ xl=ineq_lb)
+ alpha_primal_max_d = _fraction_to_the_boundary_helper_ub(
+ tau=tau,
+ x=slacks,
+ delta_x=delta_slacks,
+ xu=ineq_ub)
+ alpha_primal_max = min(alpha_primal_max_a, alpha_primal_max_b,
+ alpha_primal_max_c, alpha_primal_max_d)
+
+ alpha_dual_max_a = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=duals_primals_lb,
+ delta_x=delta_duals_primals_lb,
+ xl=np.zeros(duals_primals_lb.size))
+ alpha_dual_max_b = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=duals_primals_ub,
+ delta_x=delta_duals_primals_ub,
+ xl=np.zeros(duals_primals_ub.size))
+ alpha_dual_max_c = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=duals_slacks_lb,
+ delta_x=delta_duals_slacks_lb,
+ xl=np.zeros(duals_slacks_lb.size))
+ alpha_dual_max_d = _fraction_to_the_boundary_helper_lb(
+ tau=tau,
+ x=duals_slacks_ub,
+ delta_x=delta_duals_slacks_ub,
+ xl=np.zeros(duals_slacks_ub.size))
+ alpha_dual_max = min(alpha_dual_max_a, alpha_dual_max_b,
+ alpha_dual_max_c, alpha_dual_max_d)
+
+ return alpha_primal_max, alpha_dual_max
+
+
+def process_init(x, lb, ub):
+ if np.any((ub - lb) < 0):
+ raise ValueError(
+ 'Lower bounds for variables/inequalities should not be larger than upper bounds.')
+ if np.any((ub - lb) == 0):
+ raise ValueError(
+ 'Variables and inequalities should not have equal lower and upper bounds.')
+
+ lb_mask = build_bounds_mask(lb)
+ ub_mask = build_bounds_mask(ub)
+
+ lb_only = np.logical_and(lb_mask, np.logical_not(ub_mask))
+ ub_only = np.logical_and(ub_mask, np.logical_not(lb_mask))
+ lb_and_ub = np.logical_and(lb_mask, ub_mask)
+ out_of_bounds = ((x >= ub) + (x <= lb))
+ out_of_bounds_lb_only = np.logical_and(out_of_bounds, lb_only)
+ out_of_bounds_ub_only = np.logical_and(out_of_bounds, ub_only)
+ out_of_bounds_lb_and_ub = np.logical_and(out_of_bounds, lb_and_ub)
+
+ cm = build_compression_matrix(out_of_bounds_lb_only)
+ x[out_of_bounds_lb_only] = cm * (lb + 1)
+
+ cm = build_compression_matrix(out_of_bounds_ub_only)
+ x[out_of_bounds_ub_only] = cm * (ub - 1)
+
+ del cm
+ cm1 = build_compression_matrix(lb_and_ub)
+ cm2 = build_compression_matrix(out_of_bounds_lb_and_ub)
+ x[out_of_bounds_lb_and_ub] = cm2 * (0.5 * cm1.transpose() * (cm1 * lb + cm1 * ub))
+
+
+def process_init_duals_lb(x, lb):
+ x[x <= 0] = 1
+ x[np.isneginf(lb)] = 0
+
+
+def process_init_duals_ub(x, ub):
+ x[x <= 0] = 1
+ x[np.isinf(ub)] = 0
diff --git a/pyomo/contrib/interior_point/inverse_reduced_hessian.py b/pyomo/contrib/interior_point/inverse_reduced_hessian.py
new file mode 100644
index 00000000000..e677254a2ca
--- /dev/null
+++ b/pyomo/contrib/interior_point/inverse_reduced_hessian.py
@@ -0,0 +1,124 @@
+import pyomo.environ as pyo
+from pyomo.opt import check_optimal_termination
+from pyomo.common.dependencies import attempt_import
+from .interface import InteriorPointInterface
+from .linalg.scipy_interface import ScipyInterface
+
+np, numpy_available = attempt_import('numpy', 'Interior point requires numpy', minimum_version='1.13.0')
+
+
+# Todo: This function currently uses IPOPT for the initial solve - should accept solver
+def inv_reduced_hessian_barrier(model, independent_variables, bound_tolerance=1e-6, tee=False):
+ """
+ This function computes the inverse of the reduced Hessian of a problem at the
+ solution. This function first solves the problem with Ipopt and then generates
+ the KKT system for the barrier subproblem to compute the inverse reduced hessian.
+
+ For more information on the reduced Hessian, see "Numerical Optimization", 2nd Edition
+ Nocedal and Wright, 2006.
+
+ The approach used in this method can be found in, "Computational Strategies for
+ the Optimal Operation of Large-Scale Chemical Processes", Dissertation, V. Zavala
+ 2008. See section 3.2.1.
+
+ Parameters
+ ----------
+ model : Pyomo model
+ The Pyomo model that we want to solve and analyze
+ independent_variables : list of Pyomo variables
+ This is the list of independent variables for computing the reduced hessian.
+ These variables must not be at their bounds at the solution of the
+ optimization problem.
+ bound_tolerance : float
+ The tolerance to use when checking if the variables are too close to their bound.
+ If they are too close, then the routine will exit without a reduced hessian.
+ tee : bool
+ This flag is sent to the tee option of the solver. If true, then the solver
+ log is output to the console.
+ """
+ m = model
+
+ # make sure the necessary suffixes are added
+ # so the reduced hessian kkt system is setup correctly from
+ # the ipopt solution
+ if not hasattr(m, 'ipopt_zL_out'):
+ m.ipopt_zL_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
+ if not hasattr(m, 'ipopt_zU_out'):
+ m.ipopt_zU_out = pyo.Suffix(direction=pyo.Suffix.IMPORT)
+ if not hasattr(m, 'ipopt_zL_in'):
+ m.ipopt_zL_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
+ if not hasattr(m, 'ipopt_zU_in'):
+ m.ipopt_zU_in = pyo.Suffix(direction=pyo.Suffix.EXPORT)
+ if not hasattr(m, 'dual'):
+ m.dual = pyo.Suffix(direction=pyo.Suffix.IMPORT_EXPORT)
+
+ # create the ipopt solver
+ solver = pyo.SolverFactory('ipopt')
+ # set options to prevent bounds relaxation (and 0 slacks)
+ solver.options['bound_relax_factor']=0
+ solver.options['honor_original_bounds']='no'
+ # solve the problem
+ status = solver.solve(m, tee=tee)
+ if not check_optimal_termination(status):
+ return status, None
+
+ # compute the barrier parameter
+ # ToDo: this needs to eventually come from the solver itself
+ estimated_mu = list()
+ for v in m.ipopt_zL_out:
+ if v.has_lb():
+ estimated_mu.append((pyo.value(v) - v.lb)*m.ipopt_zL_out[v])
+ for v in m.ipopt_zU_out:
+ if v.has_ub():
+ estimated_mu.append((v.ub - pyo.value(v))*m.ipopt_zU_out[v])
+ if len(estimated_mu) == 0:
+ mu = 10**-8.6
+ else:
+ mu = sum(estimated_mu)/len(estimated_mu)
+ # check to make sure these estimates were all reasonable
+ if any([abs(mu-estmu) > 1e-7 for estmu in estimated_mu]):
+ print('Warning: estimated values of mu do not seem consistent - using mu=10^(-8.6)')
+ mu = 10**-8.6
+
+ # collect the list of var data objects for the independent variables
+ ind_vardatas = list()
+ for v in independent_variables:
+ if v.is_indexed():
+ for k in v:
+ ind_vardatas.append(v[k])
+ else:
+ ind_vardatas.append(v)
+
+ # check that none of the independent variables are at their bounds
+ for v in ind_vardatas:
+ if (v.has_lb() and pyo.value(v) - v.lb <= bound_tolerance) or \
+           (v.has_ub() and v.ub - pyo.value(v) <= bound_tolerance):
+ raise ValueError("Independent variable: {} has a solution value that is near"
+ " its bound (according to tolerance). The reduced hessian"
+ " computation does not support this at this time. All"
+ " independent variables should be in their interior.".format(v))
+
+ # find the list of indices that we need to make up the reduced hessian
+ kkt_builder = InteriorPointInterface(m)
+ pyomo_nlp = kkt_builder.pyomo_nlp()
+ ind_var_indices = pyomo_nlp.get_primal_indices(ind_vardatas)
+
+ # setup the computation of the reduced hessian
+ kkt_builder.set_barrier_parameter(mu)
+ kkt = kkt_builder.evaluate_primal_dual_kkt_matrix()
+ linear_solver = ScipyInterface(compute_inertia=False)
+ linear_solver.do_symbolic_factorization(kkt)
+ linear_solver.do_numeric_factorization(kkt)
+
+ n_rh = len(ind_var_indices)
+ rhs = np.zeros(kkt.shape[0])
+ inv_red_hess = np.zeros((n_rh, n_rh))
+
+ for rhi, vari in enumerate(ind_var_indices):
+ rhs[vari] = 1
+ v = linear_solver.do_back_solve(rhs)
+ rhs[vari] = 0
+ for rhj, varj in enumerate(ind_var_indices):
+ inv_red_hess[rhi,rhj] = v[varj]
+
+ return status, inv_red_hess
diff --git a/pyomo/contrib/interior_point/linalg/__init__.py b/pyomo/contrib/interior_point/linalg/__init__.py
new file mode 100644
index 00000000000..7889ad25a78
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/__init__.py
@@ -0,0 +1,4 @@
+from .results import LinearSolverStatus
+from .scipy_interface import ScipyInterface
+from .mumps_interface import MumpsInterface
+from .ma27_interface import InteriorPointMA27Interface
diff --git a/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py b/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py
new file mode 100644
index 00000000000..b776d93a98d
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/base_linear_solver_interface.py
@@ -0,0 +1,33 @@
+from abc import ABCMeta, abstractmethod
+import six
+import logging
+
+
+class LinearSolverInterface(six.with_metaclass(ABCMeta, object)):
+ @classmethod
+ def getLoggerName(cls):
+ return 'linear_solver'
+
+ @classmethod
+ def getLogger(cls):
+ name = 'interior_point.' + cls.getLoggerName()
+ return logging.getLogger(name)
+
+ @abstractmethod
+ def do_symbolic_factorization(self, matrix, raise_on_error=True):
+ pass
+
+ @abstractmethod
+ def do_numeric_factorization(self, matrix, raise_on_error=True):
+ pass
+
+ def increase_memory_allocation(self, factor):
+        raise NotImplementedError('Should be implemented by derived classes.')
+
+ @abstractmethod
+ def do_back_solve(self, rhs):
+ pass
+
+ @abstractmethod
+ def get_inertia(self):
+ pass
diff --git a/pyomo/contrib/interior_point/linalg/ma27_interface.py b/pyomo/contrib/interior_point/linalg/ma27_interface.py
new file mode 100644
index 00000000000..78da74312f6
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/ma27_interface.py
@@ -0,0 +1,125 @@
+from .base_linear_solver_interface import LinearSolverInterface
+from .results import LinearSolverStatus, LinearSolverResults
+from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface
+from scipy.sparse import isspmatrix_coo, tril
+from pyomo.contrib.pynumero.sparse import BlockVector
+
+
+class InteriorPointMA27Interface(LinearSolverInterface):
+ @classmethod
+ def getLoggerName(cls):
+ return 'ma27'
+
+ def __init__(self, cntl_options=None, icntl_options=None, iw_factor=1.2, a_factor=2):
+ self._ma27 = MA27Interface(iw_factor=iw_factor, a_factor=a_factor)
+
+ if cntl_options is None:
+ cntl_options = dict()
+ if icntl_options is None:
+ icntl_options = dict()
+
+ for k, v in cntl_options.items():
+ self.set_cntl(k, v)
+ for k, v in icntl_options.items():
+ self.set_icntl(k, v)
+
+ self._dim = None
+ self._num_status = None
+
+ def do_symbolic_factorization(self, matrix, raise_on_error=True):
+ self._num_status = None
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ matrix = tril(matrix)
+ nrows, ncols = matrix.shape
+ if nrows != ncols:
+ raise ValueError('Matrix must be square')
+ self._dim = nrows
+
+ stat = self._ma27.do_symbolic_factorization(dim=self._dim, irn=matrix.row, icn=matrix.col)
+ res = LinearSolverResults()
+ if stat == 0:
+ res.status = LinearSolverStatus.successful
+ else:
+ if raise_on_error:
+ raise RuntimeError('Symbolic factorization was not successful; return code: ' + str(stat))
+ if stat in {-3, -4}:
+ res.status = LinearSolverStatus.not_enough_memory
+ elif stat in {-5, 3}:
+ res.status = LinearSolverStatus.singular
+ else:
+ res.status = LinearSolverStatus.error
+ return res
+
+ def do_numeric_factorization(self, matrix, raise_on_error=True):
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ matrix = tril(matrix)
+ nrows, ncols = matrix.shape
+ if nrows != ncols:
+ raise ValueError('Matrix must be square')
+ if nrows != self._dim:
+ raise ValueError('Matrix dimensions do not match the dimensions of '
+ 'the matrix used for symbolic factorization')
+
+ stat = self._ma27.do_numeric_factorization(irn=matrix.row, icn=matrix.col, dim=self._dim, entries=matrix.data)
+ res = LinearSolverResults()
+ if stat == 0:
+ res.status = LinearSolverStatus.successful
+ else:
+ if raise_on_error:
+ raise RuntimeError('Numeric factorization was not successful; return code: ' + str(stat))
+ if stat in {-3, -4}:
+ res.status = LinearSolverStatus.not_enough_memory
+ elif stat in {-5, 3}:
+ res.status = LinearSolverStatus.singular
+ else:
+ res.status = LinearSolverStatus.error
+
+ self._num_status = res.status
+
+ return res
+
+ def increase_memory_allocation(self, factor):
+ self._ma27.iw_factor *= factor
+ self._ma27.a_factor *= factor
+
+ def do_back_solve(self, rhs):
+ if isinstance(rhs, BlockVector):
+ _rhs = rhs.flatten()
+ result = _rhs
+ else:
+ result = rhs.copy()
+
+ result = self._ma27.do_backsolve(result)
+
+ if isinstance(rhs, BlockVector):
+ _result = rhs.copy_structure()
+ _result.copyfrom(result)
+ result = _result
+
+ return result
+
+ def get_inertia(self):
+ if self._num_status is None:
+ raise RuntimeError('Must call do_numeric_factorization before inertia can be computed')
+ if self._num_status != LinearSolverStatus.successful:
+ raise RuntimeError('Can only compute inertia if the numeric factorization was successful.')
+ num_negative_eigenvalues = self.get_info(15)
+ num_positive_eigenvalues = self._dim - num_negative_eigenvalues
+ return (num_positive_eigenvalues, num_negative_eigenvalues, 0)
+
+ def set_icntl(self, key, value):
+ self._ma27.set_icntl(key, value)
+
+ def set_cntl(self, key, value):
+ self._ma27.set_cntl(key, value)
+
+ def get_icntl(self, key):
+ return self._ma27.get_icntl(key)
+
+ def get_cntl(self, key):
+ return self._ma27.get_cntl(key)
+
+ def get_info(self, key):
+ return self._ma27.get_info(key)
diff --git a/pyomo/contrib/interior_point/linalg/mumps_interface.py b/pyomo/contrib/interior_point/linalg/mumps_interface.py
new file mode 100644
index 00000000000..4e977673c4c
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/mumps_interface.py
@@ -0,0 +1,219 @@
+from .base_linear_solver_interface import LinearSolverInterface
+from .results import LinearSolverStatus, LinearSolverResults
+from pyomo.common.dependencies import attempt_import
+from scipy.sparse import isspmatrix_coo, tril
+from collections import OrderedDict
+import logging
+mumps, mumps_available = attempt_import(name='pyomo.contrib.pynumero.linalg.mumps_interface',
+ error_message='pymumps is required to use the MumpsInterface')
+
+
+class MumpsInterface(LinearSolverInterface):
+
+ @classmethod
+ def getLoggerName(cls):
+ return 'mumps'
+
+ def __init__(self, par=1, comm=None, cntl_options=None, icntl_options=None):
+ self._mumps = mumps.MumpsCentralizedAssembledLinearSolver(sym=2,
+ par=par,
+ comm=comm)
+
+ if cntl_options is None:
+ cntl_options = dict()
+ if icntl_options is None:
+ icntl_options = dict()
+
+ # These options are set in order to get the correct inertia.
+ if 13 not in icntl_options:
+ icntl_options[13] = 1
+ if 24 not in icntl_options:
+ icntl_options[24] = 0
+
+ for k, v in cntl_options.items():
+ self.set_cntl(k, v)
+ for k, v in icntl_options.items():
+ self.set_icntl(k, v)
+
+ self.error_level = self.get_icntl(11)
+ self.log_error = bool(self.error_level)
+ self._dim = None
+ self.logger = self.getLogger()
+ self.log_header(include_error=self.log_error)
+ self._prev_allocation = None
+
+ def do_symbolic_factorization(self, matrix, raise_on_error=True):
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ matrix = tril(matrix)
+ nrows, ncols = matrix.shape
+ self._dim = nrows
+
+ try:
+ self._mumps.do_symbolic_factorization(matrix)
+ self._prev_allocation = self.get_infog(16)
+ except RuntimeError as err:
+ if raise_on_error:
+ raise err
+
+ stat = self.get_infog(1)
+ res = LinearSolverResults()
+ if stat == 0:
+ res.status = LinearSolverStatus.successful
+ elif stat in {-6, -10}:
+ res.status = LinearSolverStatus.singular
+ elif stat < 0:
+ res.status = LinearSolverStatus.error
+ else:
+ res.status = LinearSolverStatus.warning
+ return res
+
+ def do_numeric_factorization(self, matrix, raise_on_error=True):
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ matrix = tril(matrix)
+ try:
+ self._mumps.do_numeric_factorization(matrix)
+ except RuntimeError as err:
+ if raise_on_error:
+ raise err
+
+ stat = self.get_infog(1)
+ res = LinearSolverResults()
+ if stat == 0:
+ res.status = LinearSolverStatus.successful
+ elif stat in {-6, -10}:
+ res.status = LinearSolverStatus.singular
+ elif stat in {-8, -9}:
+ res.status = LinearSolverStatus.not_enough_memory
+ elif stat < 0:
+ res.status = LinearSolverStatus.error
+ else:
+ res.status = LinearSolverStatus.warning
+ return res
+
+ def increase_memory_allocation(self, factor):
+ # info(16) is rounded to the nearest MB, so it could be zero
+ if self._prev_allocation == 0:
+ new_allocation = 1
+ else:
+ new_allocation = factor*self._prev_allocation
+ # Here I set the memory allocation directly instead of increasing
+ # the "percent-increase-from-predicted" parameter ICNTL(14)
+ self.set_icntl(23, new_allocation)
+ self._prev_allocation = new_allocation
+ return new_allocation
+
+ def do_back_solve(self, rhs):
+ res = self._mumps.do_back_solve(rhs)
+ self.log_info()
+ return res
+
+ def get_inertia(self):
+ num_negative_eigenvalues = self.get_infog(12)
+ num_zero_eigenvalues = self.get_infog(28)
+ num_positive_eigenvalues = self._dim - num_negative_eigenvalues - num_zero_eigenvalues
+ return num_positive_eigenvalues, num_negative_eigenvalues, num_zero_eigenvalues
+
+ def get_error_info(self):
+ # Access error level contained in ICNTL(11) (Fortran indexing).
+ # Assuming this value has not changed since the solve was performed.
+ error_level = self.get_icntl(11)
+ info = OrderedDict()
+ if error_level == 0:
+ return info
+ elif error_level == 1:
+ info['||A||'] = self.get_rinfog(4)
+ info['||x||'] = self.get_rinfog(5)
+ info['Max resid'] = self.get_rinfog(6)
+ info['Max error'] = self.get_rinfog(9)
+ return info
+ elif error_level == 2:
+ info['||A||'] = self.get_rinfog(4)
+ info['||x||'] = self.get_rinfog(5)
+ info['Max resid'] = self.get_rinfog(6)
+ return info
+
+ def set_icntl(self, key, value):
+ if key == 13:
+ if value <= 0:
+ raise ValueError(
+ 'ICNTL(13) must be positive for the MumpsInterface.')
+ elif key == 24:
+ if value != 0:
+ raise ValueError(
+ 'ICNTL(24) must be 0 for the MumpsInterface.')
+ self._mumps.set_icntl(key, value)
+
+ def set_cntl(self, key, value):
+ self._mumps.set_cntl(key, value)
+
+ def get_icntl(self, key):
+ return self._mumps.get_icntl(key)
+
+ def get_cntl(self, key):
+ return self._mumps.get_cntl(key)
+
+ def get_info(self, key):
+ return self._mumps.get_info(key)
+
+ def get_infog(self, key):
+ return self._mumps.get_infog(key)
+
+ def get_rinfo(self, key):
+ return self._mumps.get_rinfo(key)
+
+ def get_rinfog(self, key):
+ return self._mumps.get_rinfog(key)
+
+ def log_header(self, include_error=True, extra_fields=None):
+ if extra_fields is None:
+ extra_fields = list()
+ header_fields = []
+ header_fields.append('Status')
+ header_fields.append('n_null')
+ header_fields.append('n_neg')
+
+ if include_error:
+ header_fields.extend(self.get_error_info().keys())
+
+ header_fields.extend(extra_fields)
+
+ # Allocate 10 spaces for integer values
+ header_string = '{0:<10}'
+ header_string += '{1:<10}'
+ header_string += '{2:<10}'
+
+ # Allocate 15 spaces for the rest, which I assume are floats
+ for i in range(4, len(header_fields)):
+ header_string += '{' + str(i) + ':<15}'
+
+ self.logger.info(header_string.format(*header_fields))
+
+ def log_info(self):
+ # Which fields to log should be specified at the instance level
+ # Any logging that should be done on an iteration-specific case
+ # should be handled by the IP solver
+ fields=[]
+ fields.append(self.get_infog(1)) # Status, 0 for success
+ fields.append(self.get_infog(28)) # Number of null pivots
+ fields.append(self.get_infog(12)) # Number of negative pivots
+
+ include_error = self.log_error
+ if include_error:
+ fields.extend(self.get_error_info().values())
+
+ extra_fields = []
+ fields.extend(extra_fields)
+
+ # Allocate 10 spaces for integer values
+ log_string = '{0:<10}'
+ log_string += '{1:<10}'
+ log_string += '{2:<10}'
+
+        # Allocate 15 spaces for the rest, which I assume are floats
+ for i in range(4, len(fields)):
+ log_string += '{' + str(i) + ':<15.3e}'
+
+ self.logger.info(log_string.format(*fields))
+
diff --git a/pyomo/contrib/interior_point/linalg/results.py b/pyomo/contrib/interior_point/linalg/results.py
new file mode 100644
index 00000000000..6cf67f1b945
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/results.py
@@ -0,0 +1,14 @@
+import enum
+
+
+class LinearSolverStatus(enum.Enum):
+ successful = 0
+ not_enough_memory = 1
+ singular = 2
+ error = 3
+ warning = 4
+
+
+class LinearSolverResults(object):
+ def __init__(self):
+ self.status = None
diff --git a/pyomo/contrib/interior_point/linalg/scipy_interface.py b/pyomo/contrib/interior_point/linalg/scipy_interface.py
new file mode 100644
index 00000000000..442452f037b
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/scipy_interface.py
@@ -0,0 +1,67 @@
+from .base_linear_solver_interface import LinearSolverInterface
+from .results import LinearSolverStatus, LinearSolverResults
+from scipy.sparse.linalg import splu
+from scipy.linalg import eigvals
+from scipy.sparse import isspmatrix_csc
+from pyomo.contrib.pynumero.sparse.block_vector import BlockVector
+import logging
+import numpy as np
+
+
+class ScipyInterface(LinearSolverInterface):
+ def __init__(self, compute_inertia=False):
+ self._lu = None
+ self._inertia = None
+ self.compute_inertia = compute_inertia
+
+ self.logger = logging.getLogger('scipy')
+ self.logger.propagate = False
+
+ def do_symbolic_factorization(self, matrix, raise_on_error=True):
+ res = LinearSolverResults()
+ res.status = LinearSolverStatus.successful
+ return res
+
+ def do_numeric_factorization(self, matrix, raise_on_error=True):
+ if not isspmatrix_csc(matrix):
+ matrix = matrix.tocsc()
+ res = LinearSolverResults()
+ try:
+ self._lu = splu(matrix)
+ res.status = LinearSolverStatus.successful
+ except RuntimeError as err:
+ if raise_on_error:
+ raise err
+ if 'Factor is exactly singular' in str(err):
+ res.status = LinearSolverStatus.singular
+ else:
+ res.status = LinearSolverStatus.error
+
+ if self.compute_inertia:
+ eig = eigvals(matrix.toarray())
+ pos_eig = np.count_nonzero((eig > 0))
+ neg_eigh = np.count_nonzero((eig < 0))
+ zero_eig = np.count_nonzero(eig == 0)
+ self._inertia = (pos_eig, neg_eigh, zero_eig)
+
+ return res
+
+ def do_back_solve(self, rhs):
+ if isinstance(rhs, BlockVector):
+ _rhs = rhs.flatten()
+ else:
+ _rhs = rhs
+
+ result = self._lu.solve(_rhs)
+
+ if isinstance(rhs, BlockVector):
+ _result = rhs.copy_structure()
+ _result.copyfrom(result)
+ result = _result
+
+ return result
+
+ def get_inertia(self):
+ if self._inertia is None:
+ raise RuntimeError('The intertia was not computed during do_numeric_factorization. Set compute_inertia to True.')
+ return self._inertia
diff --git a/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py
new file mode 100644
index 00000000000..94a11cec1a3
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/tests/test_linear_solvers.py
@@ -0,0 +1,120 @@
+import pyutilib.th as unittest
+from pyomo.common.dependencies import attempt_import
+np, np_available = attempt_import('numpy', minimum_version='1.13.0')
+scipy, scipy_available = attempt_import('scipy.sparse')
+mumps, mumps_available = attempt_import('mumps')
+if not np_available or not scipy_available:
+ raise unittest.SkipTest('numpy and scipy are needed for interior point tests')
+import numpy as np
+from scipy.sparse import coo_matrix, tril
+from pyomo.contrib import interior_point as ip
+from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface
+ma27_available = MA27Interface.available()
+
+
+def get_base_matrix(use_tril):
+ if use_tril:
+ row = [0, 1, 1, 2, 2]
+ col = [0, 0, 1, 0, 2]
+ data = [1, 7, 4, 3, 6]
+ else:
+ row = [0, 0, 0, 1, 1, 2, 2]
+ col = [0, 1, 2, 0, 1, 0, 2]
+ data = [1, 7, 3, 7, 4, 3, 6]
+ mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double)
+ return mat
+
+
+def get_base_matrix_wrong_order(use_tril):
+ if use_tril:
+ row = [1, 0, 1, 2, 2]
+ col = [0, 0, 1, 0, 2]
+ data = [7, 1, 4, 3, 6]
+ else:
+ row = [1, 0, 0, 0, 1, 2, 2]
+ col = [0, 1, 2, 0, 1, 0, 2]
+ data = [7, 7, 3, 1, 4, 3, 6]
+ mat = coo_matrix((data, (row, col)), shape=(3,3), dtype=np.double)
+ return mat
+
+
+class TestTrilBehavior(unittest.TestCase):
+ """
+ Some of the other tests in this file depend on
+ the behavior of tril that is tested in this
+ test, namely the tests in TestWrongNonzeroOrdering.
+ """
+ def test_tril_behavior(self):
+ mat = get_base_matrix(use_tril=True)
+ mat2 = tril(mat)
+ self.assertTrue(np.all(mat.row == mat2.row))
+ self.assertTrue(np.all(mat.col == mat2.col))
+ self.assertTrue(np.allclose(mat.data, mat2.data))
+
+ mat = get_base_matrix_wrong_order(use_tril=True)
+ self.assertFalse(np.all(mat.row == mat2.row))
+ self.assertFalse(np.allclose(mat.data, mat2.data))
+ mat2 = tril(mat)
+ self.assertTrue(np.all(mat.row == mat2.row))
+ self.assertTrue(np.all(mat.col == mat2.col))
+ self.assertTrue(np.allclose(mat.data, mat2.data))
+
+
+class TestLinearSolvers(unittest.TestCase):
+ def _test_linear_solvers(self, solver):
+ mat = get_base_matrix(use_tril=False)
+ zero_mat = mat.copy()
+ zero_mat.data.fill(0)
+ stat = solver.do_symbolic_factorization(zero_mat)
+ self.assertEqual(stat.status, ip.linalg.LinearSolverStatus.successful)
+ stat = solver.do_numeric_factorization(mat)
+ self.assertEqual(stat.status, ip.linalg.LinearSolverStatus.successful)
+ x_true = np.array([1, 2, 3], dtype=np.double)
+ rhs = mat * x_true
+ x = solver.do_back_solve(rhs)
+ self.assertTrue(np.allclose(x, x_true))
+ x_true = np.array([4, 2, 3], dtype=np.double)
+ rhs = mat * x_true
+ x = solver.do_back_solve(rhs)
+ self.assertTrue(np.allclose(x, x_true))
+
+ def test_scipy(self):
+ solver = ip.linalg.ScipyInterface()
+ self._test_linear_solvers(solver)
+
+ @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests')
+ def test_mumps(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_linear_solvers(solver)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is needed for interior point MA27 tests')
+ def test_ma27(self):
+ solver = ip.linalg.InteriorPointMA27Interface()
+ self._test_linear_solvers(solver)
+
+
+@unittest.skip('This does not work yet')
+class TestWrongNonzeroOrdering(unittest.TestCase):
+ def _test_solvers(self, solver, use_tril):
+ mat = get_base_matrix(use_tril=use_tril)
+ wrong_order_mat = get_base_matrix_wrong_order(use_tril=use_tril)
+ stat = solver.do_symbolic_factorization(mat)
+ stat = solver.do_numeric_factorization(wrong_order_mat)
+ x_true = np.array([1, 2, 3], dtype=np.double)
+ rhs = mat * x_true
+ x = solver.do_back_solve(rhs)
+ self.assertTrue(np.allclose(x, x_true))
+
+ def test_scipy(self):
+ solver = ip.linalg.ScipyInterface()
+ self._test_solvers(solver, use_tril=False)
+
+ @unittest.skipIf(not mumps_available, 'mumps is needed for interior point mumps tests')
+ def test_mumps(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_solvers(solver, use_tril=True)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is needed for interior point MA27 tests')
+ def test_ma27(self):
+ solver = ip.linalg.InteriorPointMA27Interface()
+ self._test_solvers(solver, use_tril=True)
diff --git a/pyomo/contrib/interior_point/linalg/tests/test_realloc.py b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py
new file mode 100644
index 00000000000..6203985dc12
--- /dev/null
+++ b/pyomo/contrib/interior_point/linalg/tests/test_realloc.py
@@ -0,0 +1,67 @@
+import pyutilib.th as unittest
+from pyomo.common.dependencies import attempt_import
+np, numpy_available = attempt_import('numpy', 'Interior point requires numpy',
+ minimum_version='1.13.0')
+scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy')
+mumps, mumps_available = attempt_import('mumps')
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest('Interior point tests require numpy and scipy')
+from scipy.sparse import coo_matrix
+import pyomo.contrib.interior_point as ip
+
+
+class TestReallocation(unittest.TestCase):
+ @unittest.skipIf(not mumps_available, 'mumps is not available')
+ def test_reallocate_memory_mumps(self):
+
+ # Create a tri-diagonal matrix with small entries on the diagonal
+ n = 10000
+ small_val = 1e-7
+ big_val = 1e2
+ irn = []
+ jcn = []
+ ent = []
+ for i in range(n-1):
+ irn.extend([i+1, i, i])
+ jcn.extend([i, i, i+1])
+ ent.extend([big_val,small_val,big_val])
+ irn.append(n-1)
+ jcn.append(n-1)
+ ent.append(small_val)
+ irn = np.array(irn)
+ jcn = np.array(jcn)
+ ent = np.array(ent)
+
+ matrix = coo_matrix((ent, (irn, jcn)), shape=(n,n))
+
+ linear_solver = ip.linalg.MumpsInterface()
+ linear_solver.do_symbolic_factorization(matrix)
+
+ predicted = linear_solver.get_infog(16)
+
+ res = linear_solver.do_numeric_factorization(matrix, raise_on_error=False)
+ self.assertEqual(res.status, ip.linalg.LinearSolverStatus.not_enough_memory)
+
+ linear_solver.do_symbolic_factorization(matrix)
+
+ factor = 2
+ linear_solver.increase_memory_allocation(factor)
+
+ res = linear_solver.do_numeric_factorization(matrix)
+ self.assertEqual(res.status, ip.linalg.LinearSolverStatus.successful)
+
+ # Expected memory allocation (MB)
+ self.assertEqual(linear_solver._prev_allocation, 6)
+
+ actual = linear_solver.get_infog(18)
+
+ # Sanity checks:
+ # Make sure actual memory usage is greater than initial guess
+ self.assertTrue(predicted < actual)
+ # Make sure memory allocation is at least as much as was used
+ self.assertTrue(actual <= linear_solver._prev_allocation)
+
+
+if __name__ == '__main__':
+ test_realloc = TestReallocation()
+ test_realloc.test_reallocate_memory_mumps()
diff --git a/pyomo/contrib/interior_point/tests/__init__.py b/pyomo/contrib/interior_point/tests/__init__.py
new file mode 100644
index 00000000000..e69de29bb2d
diff --git a/pyomo/contrib/interior_point/tests/test_interior_point.py b/pyomo/contrib/interior_point/tests/test_interior_point.py
new file mode 100644
index 00000000000..b3328d1529b
--- /dev/null
+++ b/pyomo/contrib/interior_point/tests/test_interior_point.py
@@ -0,0 +1,197 @@
+import pyutilib.th as unittest
+import pyomo.environ as pe
+from pyomo.common.dependencies import attempt_import
+
+np, numpy_availalbe = attempt_import('numpy', 'Interior point requires numpy', minimum_version='1.13.0')
+scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy')
+mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps')
+if not (numpy_availalbe and scipy_available):
+ raise unittest.SkipTest('Interior point tests require numpy and scipy')
+
+import numpy as np
+
+from pyomo.contrib.pynumero.asl import AmplInterface
+asl_available = AmplInterface.available()
+import pyomo.contrib.interior_point as ip
+from pyomo.contrib.interior_point.interior_point import (process_init,
+ process_init_duals_lb,
+ process_init_duals_ub,
+ _fraction_to_the_boundary_helper_lb,
+ _fraction_to_the_boundary_helper_ub)
+from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface
+ma27_available = MA27Interface.available()
+
+
+@unittest.skipIf(not asl_available, 'asl is not available')
+class TestSolveInteriorPoint(unittest.TestCase):
+ def _test_solve_interior_point_1(self, linear_solver):
+ m = pe.ConcreteModel()
+ m.x = pe.Var()
+ m.y = pe.Var()
+ m.obj = pe.Objective(expr=m.x**2 + m.y**2)
+ m.c1 = pe.Constraint(expr=m.y == pe.exp(m.x))
+ m.c2 = pe.Constraint(expr=m.y >= (m.x - 1)**2)
+ interface = ip.InteriorPointInterface(m)
+ ip_solver = ip.InteriorPointSolver(linear_solver)
+ status = ip_solver.solve(interface)
+ self.assertEqual(status, ip.InteriorPointStatus.optimal)
+ x = interface.get_primals()
+ duals_eq = interface.get_duals_eq()
+ duals_ineq = interface.get_duals_ineq()
+ self.assertAlmostEqual(x[0], 0)
+ self.assertAlmostEqual(x[1], 1)
+ self.assertAlmostEqual(duals_eq[0], -1-1.0/3.0)
+ self.assertAlmostEqual(duals_ineq[0], 2.0/3.0)
+ interface.load_primals_into_pyomo_model()
+ self.assertAlmostEqual(m.x.value, 0)
+ self.assertAlmostEqual(m.y.value, 1)
+
+ def _test_solve_interior_point_2(self, linear_solver):
+ m = pe.ConcreteModel()
+ m.x = pe.Var(bounds=(1, 4))
+ m.obj = pe.Objective(expr=m.x**2)
+ interface = ip.InteriorPointInterface(m)
+ ip_solver = ip.InteriorPointSolver(linear_solver)
+ status = ip_solver.solve(interface)
+ self.assertEqual(status, ip.InteriorPointStatus.optimal)
+ interface.load_primals_into_pyomo_model()
+ self.assertAlmostEqual(m.x.value, 1)
+
+ def test_ip1_scipy(self):
+ solver = ip.linalg.ScipyInterface()
+ solver.compute_inertia = True
+ self._test_solve_interior_point_1(solver)
+
+ def test_ip2_scipy(self):
+ solver = ip.linalg.ScipyInterface()
+ solver.compute_inertia = True
+ self._test_solve_interior_point_2(solver)
+
+ @unittest.skipIf(not mumps_available, 'Mumps is not available')
+ def test_ip1_mumps(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_solve_interior_point_1(solver)
+
+ @unittest.skipIf(not mumps_available, 'Mumps is not available')
+ def test_ip2_mumps(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_solve_interior_point_2(solver)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is not available')
+ def test_ip1_ma27(self):
+ solver = ip.linalg.InteriorPointMA27Interface()
+ self._test_solve_interior_point_1(solver)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is not available')
+ def test_ip2_ma27(self):
+ solver = ip.linalg.InteriorPointMA27Interface()
+ self._test_solve_interior_point_2(solver)
+
+
+class TestProcessInit(unittest.TestCase):
+ def testprocess_init(self):
+ lb = np.array([-np.inf, -np.inf, -2, -2], dtype=np.double)
+ ub = np.array([ np.inf, 2, np.inf, 2], dtype=np.double)
+
+ x = np.array([ 0, 0, 0, 0], dtype=np.double)
+ process_init(x, lb, ub)
+ self.assertTrue(np.allclose(x, np.array([0, 0, 0, 0], dtype=np.double)))
+
+ x = np.array([ -2, -2, -2, -2], dtype=np.double)
+ process_init(x, lb, ub)
+ self.assertTrue(np.allclose(x, np.array([-2, -2, -1, 0], dtype=np.double)))
+
+ x = np.array([ -3, -3, -3, -3], dtype=np.double)
+ process_init(x, lb, ub)
+ self.assertTrue(np.allclose(x, np.array([-3, -3, -1, 0], dtype=np.double)))
+
+ x = np.array([ 2, 2, 2, 2], dtype=np.double)
+ process_init(x, lb, ub)
+ self.assertTrue(np.allclose(x, np.array([2, 1, 2, 0], dtype=np.double)))
+
+ x = np.array([ 3, 3, 3, 3], dtype=np.double)
+ process_init(x, lb, ub)
+ self.assertTrue(np.allclose(x, np.array([3, 1, 3, 0], dtype=np.double)))
+
+ def testprocess_init_duals(self):
+ x = np.array([0, 0, 0, 0], dtype=np.double)
+ lb = np.array([-5, 0, -np.inf, 2], dtype=np.double)
+ process_init_duals_lb(x, lb)
+ self.assertTrue(np.allclose(x, np.array([1, 1, 0, 1], dtype=np.double)))
+
+ x = np.array([-1, -1, -1, -1], dtype=np.double)
+ process_init_duals_lb(x, lb)
+ self.assertTrue(np.allclose(x, np.array([1, 1, 0, 1], dtype=np.double)))
+
+ x = np.array([2, 2, 2, 2], dtype=np.double)
+ ub = np.array([-5, 0, np.inf, 2], dtype=np.double)
+ process_init_duals_ub(x, ub)
+ self.assertTrue(np.allclose(x, np.array([2, 2, 0, 2], dtype=np.double)))
+
+
+class TestFractionToTheBoundary(unittest.TestCase):
+ def test_fraction_to_the_boundary_helper_lb(self):
+ tau = 0.9
+ x = np.array([0, 0, 0, 0], dtype=np.double)
+ xl = np.array([-np.inf, -1, -np.inf, -1], dtype=np.double)
+
+ delta_x = np.array([-0.1, -0.1, -0.1, -0.1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([-1, -1, -1, -1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 0.9)
+
+ delta_x = np.array([-10, -10, -10, -10], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 0.09)
+
+ delta_x = np.array([1, 1, 1, 1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([-10, 1, -10, 1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([-10, -1, -10, -1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 0.9)
+
+ delta_x = np.array([1, -10, 1, -1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_lb(tau, x, delta_x, xl)
+ self.assertAlmostEqual(alpha, 0.09)
+
+ def test_fraction_to_the_boundary_helper_ub(self):
+ tau = 0.9
+ x = np.array([0, 0, 0, 0], dtype=np.double)
+ xu = np.array([np.inf, 1, np.inf, 1], dtype=np.double)
+
+ delta_x = np.array([0.1, 0.1, 0.1, 0.1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([1, 1, 1, 1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 0.9)
+
+ delta_x = np.array([10, 10, 10, 10], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 0.09)
+
+ delta_x = np.array([-1, -1, -1, -1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([10, -1, 10, -1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 1)
+
+ delta_x = np.array([10, 1, 10, 1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 0.9)
+
+ delta_x = np.array([-1, 10, -1, 1], dtype=np.double)
+ alpha = _fraction_to_the_boundary_helper_ub(tau, x, delta_x, xu)
+ self.assertAlmostEqual(alpha, 0.09)
diff --git a/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py
new file mode 100644
index 00000000000..5a894aa8bd3
--- /dev/null
+++ b/pyomo/contrib/interior_point/tests/test_inverse_reduced_hessian.py
@@ -0,0 +1,136 @@
+import pyutilib.th as unittest
+import pyomo.environ as pe
+from pyomo.opt import check_optimal_termination
+from pyomo.common.dependencies import attempt_import
+from pyomo.contrib.interior_point.inverse_reduced_hessian import inv_reduced_hessian_barrier
+
+np, numpy_available = attempt_import('numpy', 'inverse_reduced_hessian numpy',
+ minimum_version='1.13.0')
+scipy, scipy_available = attempt_import('scipy', 'inverse_reduced_hessian requires scipy')
+from pyomo.contrib.pynumero.asl import AmplInterface
+asl_available = AmplInterface.available()
+if not (numpy_available and scipy_available and asl_available):
+ raise unittest.SkipTest('inverse_reduced_hessian tests require numpy, scipy, and asl')
+from pyomo.common.dependencies import(pandas as pd, pandas_available)
+import pyomo.environ as pe
+ipopt_solver = pe.SolverFactory('ipopt')
+if not ipopt_solver.available(exception_flag=False):
+ raise unittest.SkipTest('ipopt is not available')
+
+numdiff_available = True
+try:
+ import numdifftools as nd
+except:
+ numdiff_available = False
+
+
+class TestInverseReducedHessian(unittest.TestCase):
+ # the original test
+ def test_invrh_zavala_thesis(self):
+ m = pe.ConcreteModel()
+ m.x = pe.Var([1,2,3])
+ m.obj = pe.Objective(expr=(m.x[1]-1)**2 + (m.x[2]-2)**2 + (m.x[3]-3)**2)
+ m.c1 = pe.Constraint(expr=m.x[1] + 2*m.x[2] + 3*m.x[3]==0)
+
+ status, invrh = inv_reduced_hessian_barrier(m, [m.x[2], m.x[3]])
+ expected_invrh = np.asarray([[ 0.35714286, -0.21428571],
+ [-0.21428571, 0.17857143]])
+ np.testing.assert_array_almost_equal(invrh, expected_invrh)
+
+ # test by DLW, April 2020
+ def _simple_model(self, add_constraint=False):
+ # Hardwired to have two x columns and one y
+ # if add_constraint is true, there is a binding constraint on b0
+ data = pd.DataFrame([[1, 1.1, 0.365759306],
+ [2, 1.2, 4],
+ [3, 1.3, 4.8876684],
+ [4, 1.4, 5.173455561],
+ [5, 1.5, 2.093799081],
+ [6, 1.6, 9],
+ [7, 1.7, 6.475045106],
+ [8, 1.8, 8.127111268],
+ [9, 1.9, 6],
+ [10, 1.21, 10.20642714],
+ [11, 1.22, 13.08211636],
+ [12, 1.23, 10],
+ [13, 1.24, 15.38766047],
+ [14, 1.25, 14.6587746],
+ [15, 1.26, 13.68608604],
+ [16, 1.27, 14.70707893],
+ [17, 1.28, 18.46192779],
+ [18, 1.29, 15.60649164]],
+ columns=['tofu','chard', 'y'])
+
+ model = pe.ConcreteModel()
+
+ model.b0 = pe.Var(initialize = 0)
+ model.bindexes = pe.Set(initialize=['tofu', 'chard'])
+ model.b = pe.Var(model.bindexes, initialize = 1)
+
+ # try to make trouble
+ if add_constraint:
+ model.binding_constraint = pe.Constraint(expr=model.b0>=10)
+
+ # The columns need to have unique values (or you get warnings)
+ def response_rule(m, t, c):
+ expr = m.b0 + m.b['tofu']*t + m.b['chard']*c
+ return expr
+ model.response_function = pe.Expression(data.tofu, data.chard, rule = response_rule)
+
+ def SSE_rule(m):
+ return sum((data.y[i] - m.response_function[data.tofu[i], data.chard[i]])**2\
+ for i in data.index)
+ model.SSE = pe.Objective(rule = SSE_rule, sense=pe.minimize)
+
+ return model
+
+ @unittest.skipIf(not numdiff_available, "numdiff missing")
+ @unittest.skipIf(not pandas_available, "pandas missing")
+ def test_3x3_using_linear_regression(self):
+ """ simple linear regression with two x columns, so 3x3 Hessian"""
+
+ model = self._simple_model()
+ solver = pe.SolverFactory("ipopt")
+ status = solver.solve(model)
+ self.assertTrue(check_optimal_termination(status))
+ tstar = [pe.value(model.b0),
+ pe.value(model.b['tofu']), pe.value(model.b['chard'])]
+
+ def _ndwrap(x):
+ # wrapper for numdiff call
+ model.b0.fix(x[0])
+ model.b["tofu"].fix(x[1])
+ model.b["chard"].fix(x[2])
+ rval = pe.value(model.SSE)
+ return rval
+
+ H = nd.Hessian(_ndwrap)(tstar)
+ HInv = np.linalg.inv(H)
+
+ model.b0.fixed = False
+ model.b["tofu"].fixed = False
+ model.b["chard"].fixed = False
+ status, H_inv_red_hess = inv_reduced_hessian_barrier(model,
+ [model.b0,
+ model.b["tofu"],
+ model.b["chard"]])
+ # this passes at decimal=6, BTW
+ np.testing.assert_array_almost_equal(HInv, H_inv_red_hess, decimal=3)
+
+
+ @unittest.skipIf(not numdiff_available, "numdiff missing")
+ @unittest.skipIf(not pandas_available, "pandas missing")
+ def test_with_binding_constraint(self):
+ """ there is a binding constraint"""
+
+ model = self._simple_model(add_constraint=True)
+
+ status, H_inv_red_hess = inv_reduced_hessian_barrier(model,
+ [model.b0,
+ model.b["tofu"],
+ model.b["chard"]])
+ print("test_with_binding_constraint should see an error raised.")
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/contrib/interior_point/tests/test_reg.py b/pyomo/contrib/interior_point/tests/test_reg.py
new file mode 100644
index 00000000000..fdf8c7145e5
--- /dev/null
+++ b/pyomo/contrib/interior_point/tests/test_reg.py
@@ -0,0 +1,120 @@
+import pyutilib.th as unittest
+import pyomo.environ as pe
+from pyomo.core.base import ConcreteModel, Var, Constraint, Objective
+from pyomo.common.dependencies import attempt_import
+
+np, numpy_available = attempt_import('numpy', 'Interior point requires numpy',
+ minimum_version='1.13.0')
+scipy, scipy_available = attempt_import('scipy', 'Interior point requires scipy')
+mumps, mumps_available = attempt_import('mumps', 'Interior point requires mumps')
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest('Interior point tests require numpy and scipy')
+
+from pyomo.contrib.pynumero.asl import AmplInterface
+asl_available = AmplInterface.available()
+if not asl_available:
+ raise unittest.SkipTest('Regularization tests require ASL')
+import pyomo.contrib.interior_point as ip
+from pyomo.contrib.pynumero.linalg.ma27 import MA27Interface
+ma27_available = MA27Interface.available()
+
+
+def make_model():
+ m = ConcreteModel()
+ m.x = Var([1,2,3], initialize=0)
+ m.f = Var([1,2,3], initialize=0)
+ m.F = Var(initialize=0)
+ m.f[1].fix(1)
+ m.f[2].fix(2)
+
+ m.sum_con = Constraint(expr=
+ (1 == m.x[1] + m.x[2] + m.x[3]))
+ def bilin_rule(m, i):
+ return m.F*m.x[i] == m.f[i]
+ m.bilin_con = Constraint([1,2,3], rule=bilin_rule)
+
+ m.obj = Objective(expr=m.F**2)
+
+ return m
+
+
+def make_model_2():
+ m = ConcreteModel()
+ m.x = Var(initialize=0.1, bounds=(0, 1))
+ m.y = Var(initialize=0.1, bounds=(0, 1))
+ m.obj = Objective(expr=-m.x**2 - m.y**2)
+ m.c = Constraint(expr=m.y <= pe.exp(-m.x))
+ return m
+
+
+class TestRegularization(unittest.TestCase):
+ def _test_regularization(self, linear_solver):
+ m = make_model()
+ interface = ip.InteriorPointInterface(m)
+ ip_solver = ip.InteriorPointSolver(linear_solver)
+ ip_solver.set_interface(interface)
+
+ interface.set_barrier_parameter(1e-1)
+
+ # Evaluate KKT matrix before any iterations
+ kkt = interface.evaluate_primal_dual_kkt_matrix()
+ reg_coef = ip_solver.factorize(kkt)
+
+ # Expected regularization coefficient:
+ self.assertAlmostEqual(reg_coef, 1e-4)
+
+ desired_n_neg_evals = (ip_solver.interface.n_eq_constraints() +
+ ip_solver.interface.n_ineq_constraints())
+
+ # Expected inertia:
+ n_pos_evals, n_neg_evals, n_null_evals = linear_solver.get_inertia()
+ self.assertEqual(n_null_evals, 0)
+ self.assertEqual(n_neg_evals, desired_n_neg_evals)
+
+ @unittest.skipIf(not mumps_available, 'Mumps is not available')
+ def test_mumps(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_regularization(solver)
+
+ def test_scipy(self):
+ solver = ip.linalg.ScipyInterface(compute_inertia=True)
+ self._test_regularization(solver)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is not available')
+ def test_ma27(self):
+ solver = ip.linalg.InteriorPointMA27Interface(icntl_options={1: 0, 2: 0})
+ self._test_regularization(solver)
+
+ def _test_regularization_2(self, linear_solver):
+ m = make_model_2()
+ interface = ip.InteriorPointInterface(m)
+ ip_solver = ip.InteriorPointSolver(linear_solver)
+
+ status = ip_solver.solve(interface)
+ self.assertEqual(status, ip.InteriorPointStatus.optimal)
+ interface.load_primals_into_pyomo_model()
+ self.assertAlmostEqual(m.x.value, 1)
+ self.assertAlmostEqual(m.y.value, pe.exp(-1))
+
+ @unittest.skipIf(not mumps_available, 'Mumps is not available')
+ def test_mumps_2(self):
+ solver = ip.linalg.MumpsInterface()
+ self._test_regularization_2(solver)
+
+ def test_scipy_2(self):
+ solver = ip.linalg.ScipyInterface(compute_inertia=True)
+ self._test_regularization_2(solver)
+
+ @unittest.skipIf(not ma27_available, 'MA27 is not available')
+ def test_ma27_2(self):
+ solver = ip.linalg.InteriorPointMA27Interface(icntl_options={1: 0, 2: 0})
+ self._test_regularization_2(solver)
+
+
+if __name__ == '__main__':
+ #
+ unittest.main()
+ # test_reg = TestRegularization()
+ # test_reg.test_regularize_mumps()
+ # test_reg.test_regularize_scipy()
+
diff --git a/pyomo/contrib/mcpp/build.py b/pyomo/contrib/mcpp/build.py
index 91971de7a80..310f9294c40 100644
--- a/pyomo/contrib/mcpp/build.py
+++ b/pyomo/contrib/mcpp/build.py
@@ -12,14 +12,15 @@
import shutil
import tempfile
-import distutils.core
-from distutils.command.build_ext import build_ext
-from setuptools.extension import Extension
-
from pyomo.common.config import PYOMO_CONFIG_DIR
from pyomo.common.fileutils import this_file_dir, find_dir
+from pyomo.common.download import FileDownloader
def _generate_configuration():
+ # defer the import until use (this eventually imports pkg_resources,
+ # which is slow to import)
+ from setuptools.extension import Extension
+
# Try and find MC++. Defer to the MCPP_ROOT if it is set;
# otherwise, look in common locations for a mcpp directory.
pathlist=[
@@ -67,27 +68,30 @@ def _generate_configuration():
return package_config
-class _BuildWithoutPlatformInfo(build_ext, object):
- # Python3.x puts platform information into the generated SO file
- # name, which is usually fine for python extensions, but since this
- # is not a "real" extension, we will hijack things to remove the
- # platform information from the filename so that Pyomo can more
- # easily locate it. Note that build_ext is not a new-style class in
- # Python 2.7, so we will add an explicit inheritance from object so
- # that super() works.
- def get_ext_filename(self, ext_name):
- filename = super(_BuildWithoutPlatformInfo, self).get_ext_filename(
- ext_name).split('.')
- filename = '.'.join([filename[0],filename[-1]])
- return filename
-
def build_mcpp():
+ import distutils.core
+ from distutils.command.build_ext import build_ext
+
+ class _BuildWithoutPlatformInfo(build_ext, object):
+ # Python3.x puts platform information into the generated SO file
+ # name, which is usually fine for python extensions, but since this
+ # is not a "real" extension, we will hijack things to remove the
+ # platform information from the filename so that Pyomo can more
+ # easily locate it. Note that build_ext is not a new-style class in
+ # Python 2.7, so we will add an explicit inheritance from object so
+ # that super() works.
+ def get_ext_filename(self, ext_name):
+ filename = super(_BuildWithoutPlatformInfo, self).get_ext_filename(
+ ext_name).split('.')
+ filename = '.'.join([filename[0],filename[-1]])
+ return filename
+
+ print("\n**** Building MCPP library ****")
package_config = _generate_configuration()
package_config['cmdclass'] = {'build_ext': _BuildWithoutPlatformInfo}
dist = distutils.core.Distribution(package_config)
install_dir = os.path.join(PYOMO_CONFIG_DIR, 'lib')
dist.get_command_obj('install_lib').install_dir = install_dir
- print("**** Building library ****")
try:
basedir = os.path.abspath(os.path.curdir)
tmpdir = os.path.abspath(tempfile.mkdtemp())
@@ -99,6 +103,13 @@ def build_mcpp():
os.chdir(basedir)
shutil.rmtree(tmpdir)
+class MCPPBuilder(object):
+ def __call__(self, parallel):
+ return build_mcpp()
+
+ def skip(self):
+ return FileDownloader.get_sysinfo()[0] == 'windows'
+
if __name__ == "__main__":
build_mcpp()
diff --git a/pyomo/contrib/mcpp/plugins.py b/pyomo/contrib/mcpp/plugins.py
index b7aa033d602..4b7764f29ce 100644
--- a/pyomo/contrib/mcpp/plugins.py
+++ b/pyomo/contrib/mcpp/plugins.py
@@ -11,9 +11,9 @@
from pyomo.common.download import DownloadFactory
from pyomo.common.extensions import ExtensionBuilderFactory
from .getMCPP import get_mcpp
-from .build import build_mcpp
+from .build import MCPPBuilder
def load():
DownloadFactory.register('mcpp')(get_mcpp)
- ExtensionBuilderFactory.register('mcpp')(build_mcpp)
+ ExtensionBuilderFactory.register('mcpp')(MCPPBuilder)
diff --git a/pyomo/contrib/mcpp/pyomo_mcpp.py b/pyomo/contrib/mcpp/pyomo_mcpp.py
index 983ae988c47..7cb8ab6fcfb 100644
--- a/pyomo/contrib/mcpp/pyomo_mcpp.py
+++ b/pyomo/contrib/mcpp/pyomo_mcpp.py
@@ -310,7 +310,7 @@ def exitNode(self, node, data):
return ans
- def beforeChild(self, node, child):
+ def beforeChild(self, node, child, child_idx):
if type(child) in nonpyomo_leaf_types:
# This means the child is POD
# i.e., int, float, string
@@ -322,7 +322,7 @@ def beforeChild(self, node, child):
# this is an expression node
return True, None
- def acceptChildResult(self, node, data, child_result):
+ def acceptChildResult(self, node, data, child_result, child_idx):
self.refs.add(child_result)
data.append(child_result)
return data
diff --git a/pyomo/contrib/mindtpy/MindtPy.py b/pyomo/contrib/mindtpy/MindtPy.py
index 1f490a08ce5..0cff242922a 100644
--- a/pyomo/contrib/mindtpy/MindtPy.py
+++ b/pyomo/contrib/mindtpy/MindtPy.py
@@ -93,12 +93,6 @@ class MindtPySolver(object):
"covering problem (max_binary), and fix the initial value for "
"the integer variables (initial_binary)"
))
- CONFIG.declare("integer_cuts", ConfigValue(
- default=True,
- domain=bool,
- description="Integer cuts",
- doc="Add integer cuts after finding a feasible solution to the MINLP"
- ))
CONFIG.declare("max_slack", ConfigValue(
default=1000.0,
domain=PositiveFloat,
@@ -124,7 +118,7 @@ class MindtPySolver(object):
))
CONFIG.declare("nlp_solver", ConfigValue(
default="ipopt",
- domain=In(["ipopt"]),
+ domain=In(["ipopt", "gams"]),
description="NLP subsolver name",
doc="Which NLP subsolver is going to be used for solving the nonlinear"
"subproblems"
@@ -137,7 +131,8 @@ class MindtPySolver(object):
))
CONFIG.declare("mip_solver", ConfigValue(
default="gurobi",
- domain=In(["gurobi", "cplex", "cbc", "glpk", "gams"]),
+ domain=In(["gurobi", "cplex", "cbc", "glpk", "gams",
+ "gurobi_persistent", "cplex_persistent"]),
description="MIP subsolver name",
doc="Which MIP subsolver is going to be used for solving the mixed-"
"integer master problems"
@@ -196,7 +191,7 @@ class MindtPySolver(object):
description="Tolerance on variable bounds."
))
CONFIG.declare("zero_tolerance", ConfigValue(
- default=1E-15,
+ default=1E-8,
description="Tolerance on variable equal to zero."
))
CONFIG.declare("initial_feas", ConfigValue(
@@ -220,6 +215,37 @@ class MindtPySolver(object):
"Note that 'integer_to_binary' flag needs to be used to apply it to actual integers and not just binaries.",
domain=bool
))
+ CONFIG.declare("single_tree", ConfigValue(
+ default=False,
+ description="Use single tree implementation in solving the MILP master problem.",
+ domain=bool
+ ))
+ CONFIG.declare("solution_pool", ConfigValue(
+ default=False,
+ description="Use solution pool in solving the MILP master problem.",
+ domain=bool
+ ))
+ CONFIG.declare("add_slack", ConfigValue(
+ default=False,
+ description="whether add slack variable here."
+ "slack variables here are used to deal with nonconvex MINLP",
+ domain=bool
+ ))
+ CONFIG.declare("continuous_var_bound", ConfigValue(
+ default=1e10,
+ description="default bound added to unbounded continuous variables in nonlinear constraint if single tree is activated.",
+ domain=PositiveFloat
+ ))
+ CONFIG.declare("integer_var_bound", ConfigValue(
+ default=1e9,
+ description="default bound added to unbounded integral variables in nonlinear constraint if single tree is activated.",
+ domain=PositiveFloat
+ ))
+ CONFIG.declare("cycling_check", ConfigValue(
+ default=True,
+ description="check if OA algorithm is stalled in a cycle and terminate.",
+ domain=bool
+ ))
def available(self, exception_flag=True):
"""Check if solver is available.
@@ -246,9 +272,24 @@ def solve(self, model, **kwds):
"""
config = self.CONFIG(kwds.pop('options', {}))
config.set_value(kwds)
+
+        # configuration confirmation
+ if config.single_tree:
+ config.iteration_limit = 1
+ config.add_slack = False
+ config.add_integer_cuts = False
+ config.mip_solver = 'cplex_persistent'
+ config.logger.info(
+ "Single tree implementation is activated. The defalt MIP solver is 'cplex_persistent'")
+        # if the slacks are fixed to zero, just don't add them
+ if config.max_slack == 0.0:
+ config.add_slack = False
+
solve_data = MindtPySolveData()
solve_data.results = SolverResults()
solve_data.timing = Container()
+ solve_data.curr_int_sol = []
+ solve_data.prev_int_sol = []
solve_data.original_model = model
solve_data.working_model = model.clone()
@@ -256,16 +297,15 @@ def solve(self, model, **kwds):
TransformationFactory('contrib.integer_to_binary'). \
apply_to(solve_data.working_model)
-
new_logging_level = logging.INFO if config.tee else None
with time_code(solve_data.timing, 'total', is_main_timer=True), \
- lower_logger_level_to(config.logger, new_logging_level), \
- create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
+ lower_logger_level_to(config.logger, new_logging_level), \
+ create_utility_block(solve_data.working_model, 'MindtPy_utils', solve_data):
config.logger.info("---Starting MindtPy---")
MindtPy = solve_data.working_model.MindtPy_utils
setup_results_object(solve_data, config)
- process_objective(solve_data, config)
+ process_objective(solve_data, config, use_mcpp=False)
# Save model initial values.
solve_data.initial_var_values = list(
@@ -345,7 +385,9 @@ def solve(self, model, **kwds):
# MindtPy.feas_inverse_map[n] = c
# Create slack variables for OA cuts
- lin.slack_vars = VarList(bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)
+ if config.add_slack:
+ lin.slack_vars = VarList(
+ bounds=(0, config.max_slack), initialize=0, domain=NonNegativeReals)
# Create slack variables for feasibility problem
feas.slack_var = Var(feas.constraint_set,
domain=NonNegativeReals, initialize=1)
@@ -391,6 +433,10 @@ def solve(self, model, **kwds):
solve_data.results.solver.iterations = solve_data.mip_iter
+ if config.single_tree:
+ solve_data.results.solver.num_nodes = solve_data.nlp_iter - \
+ (1 if config.init_strategy == 'rNLP' else 0)
+
return solve_data.results
#
diff --git a/pyomo/contrib/mindtpy/cut_generation.py b/pyomo/contrib/mindtpy/cut_generation.py
index 349deddce77..78f85677c0c 100644
--- a/pyomo/contrib/mindtpy/cut_generation.py
+++ b/pyomo/contrib/mindtpy/cut_generation.py
@@ -28,18 +28,17 @@ def add_objective_linearization(solve_data, config):
expr=sign_adjust * sum(
value(MindtPy.jacs[obj][id(var)]) * (var - value(var))
for var in list(EXPR.identify_variables(obj.body))) +
- value(obj.body) <= 0)
+ value(obj.body) <= 0)
MindtPy.ECP_constr_map[obj, solve_data.mip_iter] = c
def add_oa_cuts(target_model, dual_values, solve_data, config,
linearize_active=True,
linearize_violated=True,
- linearize_inactive=False,
- use_slack_var=False):
+ linearize_inactive=False):
"""Linearizes nonlinear constraints.
- For nonconvex problems, turn on 'use_slack_var'. Slack variables will
+ For nonconvex problems, turn on 'config.add_slack'. Slack variables will
always be used for nonlinear equality constraints.
"""
for (constr, dual_value) in zip(target_model.MindtPy_utils.constraint_list,
@@ -56,98 +55,99 @@ def add_oa_cuts(target_model, dual_values, solve_data, config,
rhs = ((0 if constr.upper is None else constr.upper)
+ (0 if constr.lower is None else constr.lower))
rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs
- slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add()
+ if config.add_slack:
+ slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add()
target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add(
expr=copysign(1, sign_adjust * dual_value)
- * (sum(value(jacs[constr][var]) * (var - value(var))
- for var in list(EXPR.identify_variables(constr.body)))
- + value(constr.body) - rhs)
- - slack_var <= 0)
+ * (sum(value(jacs[constr][var]) * (var - value(var))
+ for var in list(EXPR.identify_variables(constr.body)))
+ + value(constr.body) - rhs)
+ - (slack_var if config.add_slack else 0) <= 0)
else: # Inequality constraint (possibly two-sided)
if constr.has_ub() \
- and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \
+ and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \
or (linearize_violated and constr.uslack() < 0) \
or (linearize_inactive and constr.uslack() > 0):
- if use_slack_var:
+ if config.add_slack:
slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add()
target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add(
expr=(sum(value(jacs[constr][var])*(var - var.value)
- for var in constr_vars)
- - (slack_var if use_slack_var else 0)
+ for var in constr_vars) + value(constr.body)
+ - (slack_var if config.add_slack else 0)
<= constr.upper)
)
if constr.has_lb() \
- and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \
+ and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \
or (linearize_violated and constr.lslack() < 0) \
or (linearize_inactive and constr.lslack() > 0):
- if use_slack_var:
+ if config.add_slack:
slack_var = target_model.MindtPy_utils.MindtPy_linear_cuts.slack_vars.add()
target_model.MindtPy_utils.MindtPy_linear_cuts.oa_cuts.add(
expr=(sum(value(jacs[constr][var])*(var - var.value)
- for var in constr_vars)
- + (slack_var if use_slack_var else 0)
+ for var in constr_vars) + value(constr.body)
+ + (slack_var if config.add_slack else 0)
>= constr.lower)
)
-def add_oa_equality_relaxation(var_values, duals, solve_data, config, ignore_integrality=False):
- """More general case for outer approximation
-
- This method covers nonlinear inequalities g(x)<=b and g(x)>=b as well as
- equalities g(x)=b all in the same linearization call. It combines the dual
- with the objective sense to figure out how to generate the cut.
- Note that the dual sign is defined as follows (according to IPOPT):
- sgn | min | max
- -------|-----|-----
- g(x)<=b| +1 | -1
- g(x)>=b| -1 | +1
-
- Note additionally that the dual value is not strictly neccesary for inequality
- constraints, but definitely neccesary for equality constraints. For equality
- constraints the cut will always be generated so that the side with the worse objective
- function is the 'interior'.
-
- ignore_integrality: Accepts float values for discrete variables.
- Useful for cut in initial relaxation
- """
-
- m = solve_data.mip
- MindtPy = m.MindtPy_utils
- MindtPy.MindtPy_linear_cuts.nlp_iters.add(solve_data.nlp_iter)
- sign_adjust = -1 if solve_data.objective_sense == minimize else 1
-
- copy_var_list_values(from_list=var_values,
- to_list=MindtPy.variable_list,
- config=config,
- ignore_integrality=ignore_integrality)
-
- # generate new constraints
- # TODO some kind of special handling if the dual is phenomenally small?
- # TODO-romeo conditional for 'global' option, i.e. slack or no slack
- jacs = solve_data.jacobians
- for constr, dual_value in zip(MindtPy.constraint_list, duals):
- if constr.body.polynomial_degree() in (1, 0):
- continue
- rhs = ((0 if constr.upper is None else constr.upper)
- + (0 if constr.lower is None else constr.lower))
- # Properly handle equality constraints and ranged inequalities
- # TODO special handling for ranged inequalities? a <= x <= b
- rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs
- slack_var = MindtPy.MindtPy_linear_cuts.slack_vars.add()
- MindtPy.MindtPy_linear_cuts.oa_cuts.add(
- expr=copysign(1, sign_adjust * dual_value)
- * (sum(value(jacs[constr][var]) * (var - value(var))
- for var in list(EXPR.identify_variables(constr.body)))
- + value(constr.body) - rhs)
- - slack_var <= 0)
+# def add_oa_equality_relaxation(var_values, duals, solve_data, config, ignore_integrality=False):
+# """More general case for outer approximation
+
+# This method covers nonlinear inequalities g(x)<=b and g(x)>=b as well as
+# equalities g(x)=b all in the same linearization call. It combines the dual
+# with the objective sense to figure out how to generate the cut.
+# Note that the dual sign is defined as follows (according to IPOPT):
+# sgn | min | max
+# -------|-----|-----
+# g(x)<=b| +1 | -1
+# g(x)>=b| -1 | +1
+
+# Note additionally that the dual value is not strictly necessary for inequality
+# constraints, but definitely necessary for equality constraints. For equality
+# constraints the cut will always be generated so that the side with the worse objective
+# function is the 'interior'.
+
+# ignore_integrality: Accepts float values for discrete variables.
+# Useful for cut in initial relaxation
+# """
+
+# m = solve_data.mip
+# MindtPy = m.MindtPy_utils
+# MindtPy.MindtPy_linear_cuts.nlp_iters.add(solve_data.nlp_iter)
+# sign_adjust = -1 if solve_data.objective_sense == minimize else 1
+
+# copy_var_list_values(from_list=var_values,
+# to_list=MindtPy.variable_list,
+# config=config,
+# ignore_integrality=ignore_integrality)
+
+# # generate new constraints
+# # TODO some kind of special handling if the dual is phenomenally small?
+# # TODO-romeo conditional for 'global' option, i.e. slack or no slack
+# jacs = solve_data.jacobians
+# for constr, dual_value in zip(MindtPy.constraint_list, duals):
+# if constr.body.polynomial_degree() in (1, 0):
+# continue
+# rhs = ((0 if constr.upper is None else constr.upper)
+# + (0 if constr.lower is None else constr.lower))
+# # Properly handle equality constraints and ranged inequalities
+# # TODO special handling for ranged inequalities? a <= x <= b
+# rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs
+# slack_var = MindtPy.MindtPy_linear_cuts.slack_vars.add()
+# MindtPy.MindtPy_linear_cuts.oa_cuts.add(
+# expr=copysign(1, sign_adjust * dual_value)
+# * (sum(value(jacs[constr][var]) * (var - value(var))
+# for var in list(EXPR.identify_variables(constr.body)))
+# + value(constr.body) - rhs)
+# - slack_var <= 0)
def add_int_cut(var_values, solve_data, config, feasible=False):
- if not config.integer_cuts:
+ if not config.add_integer_cuts:
return
config.logger.info("Adding integer cuts")
diff --git a/pyomo/contrib/mindtpy/initialization.py b/pyomo/contrib/mindtpy/initialization.py
index 8d5d2fdabfb..3c02bf3b465 100644
--- a/pyomo/contrib/mindtpy/initialization.py
+++ b/pyomo/contrib/mindtpy/initialization.py
@@ -11,6 +11,11 @@
TransformationFactory, maximize, minimize, value, Var)
from pyomo.opt import TerminationCondition as tc
from pyomo.opt import SolverFactory
+from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
+from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem,
+ handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible,
+ handle_NLP_subproblem_other_termination)
+from pyomo.contrib.mindtpy.util import var_bound_add
def MindtPy_initialize_master(solve_data, config):
@@ -18,10 +23,13 @@ def MindtPy_initialize_master(solve_data, config):
This includes generating the initial cuts require to build the master
problem.
"""
+ # if single tree is activated, we need to add bounds for unbounded variables in nonlinear constraints to avoid unbounded master problem.
+ if config.single_tree:
+ var_bound_add(solve_data, config)
+
m = solve_data.mip = solve_data.working_model.clone()
MindtPy = m.MindtPy_utils
-
- m.dual.activate()
+ m.dual.deactivate()
if config.strategy == 'OA':
calc_jacobians(solve_data, config) # preload jacobians
@@ -53,7 +61,15 @@ def MindtPy_initialize_master(solve_data, config):
# if config.strategy == 'ECP':
# add_ecp_cut(solve_data, config)
# else:
- solve_NLP_subproblem(solve_data, config)
+
+ fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(solve_data, config)
+ if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal:
+ handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config)
+ elif fixed_nlp_result.solver.termination_condition is tc.infeasible:
+ handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config)
+ else:
+ handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
+ solve_data, config)
def init_rNLP(solve_data, config):
@@ -63,12 +79,12 @@ def init_rNLP(solve_data, config):
config.logger.info(
"NLP %s: Solve relaxed integrality" % (solve_data.nlp_iter,))
MindtPy = m.MindtPy_utils
- TransformationFactory('core.relax_integrality').apply_to(m)
+ TransformationFactory('core.relax_integer_vars').apply_to(m)
with SuppressInfeasibleWarning():
results = SolverFactory(config.nlp_solver).solve(
m, **config.nlp_solver_args)
subprob_terminate_cond = results.solver.termination_condition
- if subprob_terminate_cond is tc.optimal:
+ if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal:
main_objective = next(m.component_data_objects(Objective, active=True))
nlp_solution_values = list(v.value for v in MindtPy.variable_list)
dual_values = list(m.dual[c] for c in MindtPy.constraint_list)
@@ -82,10 +98,14 @@ def init_rNLP(solve_data, config):
% (solve_data.nlp_iter, value(main_objective.expr),
solve_data.LB, solve_data.UB))
if config.strategy == 'OA':
- copy_var_list_values(m.MindtPy_utils.variable_list,
+ copy_var_list_values(m.MindtPy_utils.variable_list,
solve_data.mip.MindtPy_utils.variable_list,
config, ignore_integrality=True)
add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
+            # TODO check if value of the binary or integer variables is 0/1 or integer value.
+ for var in solve_data.mip.component_data_objects(ctype=Var):
+ if var.is_integer():
+ var.value = int(round(var.value))
elif subprob_terminate_cond is tc.infeasible:
# TODO fail? try something else?
config.logger.info(
@@ -106,6 +126,7 @@ def init_max_binaries(solve_data, config):
"""
m = solve_data.working_model.clone()
+ m.dual.deactivate()
MindtPy = m.MindtPy_utils
solve_data.mip_subiter += 1
config.logger.info(
@@ -125,7 +146,14 @@ def init_max_binaries(solve_data, config):
getattr(m, 'ipopt_zL_out', _DoNothing()).deactivate()
getattr(m, 'ipopt_zU_out', _DoNothing()).deactivate()
- results = SolverFactory(config.mip_solver).solve(m, options=config.mip_solver_args)
+ opt = SolverFactory(config.mip_solver)
+ if isinstance(opt, PersistentSolver):
+ opt.set_instance(m)
+ mip_args = dict(config.mip_solver_args)
+ if config.mip_solver == 'gams':
+ mip_args['add_options'] = mip_args.get('add_options', [])
+ mip_args['add_options'].append('option optcr=0.0;')
+ results = opt.solve(m, **mip_args)
solve_terminate_cond = results.solver.termination_condition
if solve_terminate_cond is tc.optimal:
@@ -133,6 +161,7 @@ def init_max_binaries(solve_data, config):
MindtPy.variable_list,
solve_data.working_model.MindtPy_utils.variable_list,
config)
+
pass # good
elif solve_terminate_cond is tc.infeasible:
raise ValueError(
diff --git a/pyomo/contrib/mindtpy/iterate.py b/pyomo/contrib/mindtpy/iterate.py
index cfbe38950bb..415bc813690 100644
--- a/pyomo/contrib/mindtpy/iterate.py
+++ b/pyomo/contrib/mindtpy/iterate.py
@@ -2,30 +2,33 @@
from __future__ import division
from pyomo.contrib.mindtpy.mip_solve import (solve_OA_master,
- handle_master_mip_optimal, handle_master_mip_other_conditions)
+ handle_master_mip_optimal, handle_master_mip_other_conditions)
from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem,
- handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible,
- handle_NLP_subproblem_other_termination)
-from pyomo.core import minimize, Objective
+ handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible,
+ handle_NLP_subproblem_other_termination)
+from pyomo.core import minimize, Objective, Var
from pyomo.opt import TerminationCondition as tc
from pyomo.contrib.gdpopt.util import get_main_elapsed_time
def MindtPy_iteration_loop(solve_data, config):
working_model = solve_data.working_model
- main_objective = next(working_model.component_data_objects(Objective, active=True))
+ main_objective = next(
+ working_model.component_data_objects(Objective, active=True))
while solve_data.mip_iter < config.iteration_limit:
+
config.logger.info(
'---MindtPy Master Iteration %s---'
% solve_data.mip_iter)
- if algorithm_should_terminate(solve_data, config):
+ if algorithm_should_terminate(solve_data, config, check_cycling=False):
break
solve_data.mip_subiter = 0
# solve MILP master problem
if config.strategy == 'OA':
- master_mip, master_mip_results = solve_OA_master(solve_data, config)
+ master_mip, master_mip_results = solve_OA_master(
+ solve_data, config)
if master_mip_results.solver.termination_condition is tc.optimal:
handle_master_mip_optimal(master_mip, solve_data, config)
else:
@@ -36,59 +39,61 @@ def MindtPy_iteration_loop(solve_data, config):
else:
raise NotImplementedError()
- if algorithm_should_terminate(solve_data, config):
+ if algorithm_should_terminate(solve_data, config, check_cycling=True):
break
- # Solve NLP subproblem
- # The constraint linearization happens in the handlers
- fix_nlp, fix_nlp_result = solve_NLP_subproblem(solve_data, config)
- if fix_nlp_result.solver.termination_condition is tc.optimal:
- handle_NLP_subproblem_optimal(fix_nlp, solve_data, config)
- elif fix_nlp_result.solver.termination_condition is tc.infeasible:
- handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config)
- else:
- handle_NLP_subproblem_other_termination(fix_nlp, fix_nlp_result.solver.termination_condition,
- solve_data, config)
- # Call the NLP post-solve callback
- config.call_after_subproblem_solve(fix_nlp, solve_data)
-
- if config.strategy == 'PSC':
- # If the hybrid algorithm is not making progress, switch to OA.
- progress_required = 1E-6
- if main_objective.sense == minimize:
- log = solve_data.LB_progress
- sign_adjust = 1
+ if config.single_tree is False: # if we don't use lazy callback, i.e. LP_NLP
+ # Solve NLP subproblem
+ # The constraint linearization happens in the handlers
+ fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(
+ solve_data, config)
+ if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal:
+ handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config)
+ elif fixed_nlp_result.solver.termination_condition is tc.infeasible:
+ handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config)
else:
- log = solve_data.UB_progress
- sign_adjust = -1
- # Maximum number of iterations in which the lower (optimistic)
- # bound does not improve before switching to OA
- max_nonimprove_iter = 5
- making_progress = True
- # TODO-romeo Unneccesary for OA and LOA, right?
- for i in range(1, max_nonimprove_iter + 1):
- try:
- if (sign_adjust * log[-i]
- <= (log[-i - 1] + progress_required)
- * sign_adjust):
- making_progress = False
- else:
- making_progress = True
- break
- except IndexError:
- # Not enough history yet, keep going.
- making_progress = True
- break
- if not making_progress and (
- config.strategy == 'hPSC' or
- config.strategy == 'PSC'):
- config.logger.info(
- 'Not making enough progress for {} iterations. '
- 'Switching to OA.'.format(max_nonimprove_iter))
- config.strategy = 'OA'
-
-
-def algorithm_should_terminate(solve_data, config):
+ handle_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
+ solve_data, config)
+ # Call the NLP post-solve callback
+ config.call_after_subproblem_solve(fixed_nlp, solve_data)
+
+ # if config.strategy == 'PSC':
+ # # If the hybrid algorithm is not making progress, switch to OA.
+ # progress_required = 1E-6
+ # if main_objective.sense == minimize:
+ # log = solve_data.LB_progress
+ # sign_adjust = 1
+ # else:
+ # log = solve_data.UB_progress
+ # sign_adjust = -1
+ # # Maximum number of iterations in which the lower (optimistic)
+ # # bound does not improve before switching to OA
+ # max_nonimprove_iter = 5
+ # making_progress = True
+ # # TODO-romeo Unneccesary for OA and LOA, right?
+ # for i in range(1, max_nonimprove_iter + 1):
+ # try:
+ # if (sign_adjust * log[-i]
+ # <= (log[-i - 1] + progress_required)
+ # * sign_adjust):
+ # making_progress = False
+ # else:
+ # making_progress = True
+ # break
+ # except IndexError:
+ # # Not enough history yet, keep going.
+ # making_progress = True
+ # break
+ # if not making_progress and (
+ # config.strategy == 'hPSC' or
+ # config.strategy == 'PSC'):
+ # config.logger.info(
+ # 'Not making enough progress for {} iterations. '
+ # 'Switching to OA.'.format(max_nonimprove_iter))
+ # config.strategy = 'OA'
+
+
+def algorithm_should_terminate(solve_data, config, check_cycling):
"""Check if the algorithm should terminate.
Termination conditions based on solver options and progress.
@@ -128,6 +133,30 @@ def algorithm_should_terminate(solve_data, config):
format(solve_data.LB, solve_data.UB))
solve_data.results.solver.termination_condition = tc.maxTimeLimit
return True
+
+ # Cycling check
+ if config.cycling_check == True and solve_data.mip_iter >= 1 and check_cycling:
+ temp = []
+ for var in solve_data.mip.component_data_objects(ctype=Var):
+ if var.is_integer():
+ temp.append(int(round(var.value)))
+ solve_data.curr_int_sol = temp
+
+ if solve_data.curr_int_sol == solve_data.prev_int_sol:
+ config.logger.info(
+ 'Cycling happens after {} master iterations. '
+ 'This issue happens when the NLP subproblem violates constraint qualification. '
+ 'Convergence to optimal solution is not guaranteed.'
+ .format(solve_data.mip_iter))
+ config.logger.info(
+ 'Final bound values: LB: {} UB: {}'.
+ format(solve_data.LB, solve_data.UB))
+            # TODO determine whether solve_data.LB / solve_data.UB is inf or -inf.
+ solve_data.results.solver.termination_condition = tc.feasible
+ return True
+
+ solve_data.prev_int_sol = solve_data.curr_int_sol
+
# if not algorithm_is_making_progress(solve_data, config):
# config.logger.debug(
# 'Algorithm is not making enough progress. '
diff --git a/pyomo/contrib/mindtpy/mip_solve.py b/pyomo/contrib/mindtpy/mip_solve.py
index 7c8cd671794..7bd04930478 100644
--- a/pyomo/contrib/mindtpy/mip_solve.py
+++ b/pyomo/contrib/mindtpy/mip_solve.py
@@ -2,17 +2,36 @@
from __future__ import division
from pyomo.contrib.gdpopt.util import copy_var_list_values
-from pyomo.core import Constraint, Expression, Objective, minimize, value
+from pyomo.core import Constraint, Expression, Objective, minimize, value, Var
from pyomo.opt import TerminationCondition as tc
from pyomo.opt import SolutionStatus, SolverFactory
from pyomo.contrib.gdpopt.util import SuppressInfeasibleWarning, _DoNothing
from pyomo.contrib.gdpopt.mip_solve import distinguish_mip_infeasible_or_unbounded
+from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
+
+from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem,
+ handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible,
+ handle_NLP_subproblem_other_termination, solve_NLP_feas)
+from pyomo.contrib.mindtpy.cut_generation import (add_oa_cuts,
+ add_int_cut)
+from pyomo.contrib.gdpopt.util import copy_var_list_values, identify_variables
+from math import copysign
+from pyomo.environ import *
+from pyomo.core import Constraint, minimize, value
+from pyomo.core.expr import current as EXPR
+from math import fabs
+
+from pyomo.repn import generate_standard_repn
+
+from pyomo.common.dependencies import attempt_import
+
+single_tree, single_tree_available = attempt_import(
+ 'pyomo.contrib.mindtpy.single_tree')
def solve_OA_master(solve_data, config):
solve_data.mip_iter += 1
- master_mip = solve_data.mip.clone()
- MindtPy = master_mip.MindtPy_utils
+ MindtPy = solve_data.mip.MindtPy_utils
config.logger.info(
'MIP %s: Solve master problem.' %
(solve_data.mip_iter,))
@@ -22,40 +41,88 @@ def solve_OA_master(solve_data, config):
c.deactivate()
MindtPy.MindtPy_linear_cuts.activate()
- main_objective = next(master_mip.component_data_objects(Objective, active=True))
+ main_objective = next(
+ solve_data.mip.component_data_objects(Objective, active=True))
main_objective.deactivate()
- sign_adjust = 1 if main_objective.sense == minimize else -1
- MindtPy.MindtPy_penalty_expr = Expression(
- expr=sign_adjust * config.OA_penalty_factor * sum(
- v for v in MindtPy.MindtPy_linear_cuts.slack_vars[...]))
+ sign_adjust = 1 if main_objective.sense == minimize else - 1
+ MindtPy.del_component('MindtPy_oa_obj')
- MindtPy.MindtPy_oa_obj = Objective(
- expr=main_objective.expr + MindtPy.MindtPy_penalty_expr,
- sense=main_objective.sense)
+ if config.add_slack:
+ MindtPy.del_component('MindtPy_penalty_expr')
- # Deactivate extraneous IMPORT/EXPORT suffixes
- getattr(master_mip, 'ipopt_zL_out', _DoNothing()).deactivate()
- getattr(master_mip, 'ipopt_zU_out', _DoNothing()).deactivate()
+ MindtPy.MindtPy_penalty_expr = Expression(
+ expr=sign_adjust * config.OA_penalty_factor * sum(
+ v for v in MindtPy.MindtPy_linear_cuts.slack_vars[...]))
- # master_mip.pprint() #print oa master problem for debugging
- with SuppressInfeasibleWarning():
- master_mip_results = SolverFactory(config.mip_solver).solve(
- master_mip, **config.mip_solver_args)
- if master_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
+ MindtPy.MindtPy_oa_obj = Objective(
+ expr=main_objective.expr + MindtPy.MindtPy_penalty_expr,
+ sense=main_objective.sense)
+ else:
+ MindtPy.MindtPy_oa_obj = Objective(
+ expr=main_objective.expr,
+ sense=main_objective.sense)
+ # Deactivate extraneous IMPORT/EXPORT suffixes
+ getattr(solve_data.mip, 'ipopt_zL_out', _DoNothing()).deactivate()
+ getattr(solve_data.mip, 'ipopt_zU_out', _DoNothing()).deactivate()
+
+ masteropt = SolverFactory(config.mip_solver)
+ # determine if persistent solver is called.
+ if isinstance(masteropt, PersistentSolver):
+ masteropt.set_instance(solve_data.mip, symbolic_solver_labels=True)
+ if config.single_tree:
+ # Configuration of lazy callback
+ lazyoa = masteropt._solver_model.register_callback(
+ single_tree.LazyOACallback_cplex)
+ # pass necessary data and parameters to lazyoa
+ lazyoa.master_mip = solve_data.mip
+ lazyoa.solve_data = solve_data
+ lazyoa.config = config
+ lazyoa.opt = masteropt
+ masteropt._solver_model.set_warning_stream(None)
+ masteropt._solver_model.set_log_stream(None)
+ masteropt._solver_model.set_error_stream(None)
+ masteropt.options['timelimit'] = config.time_limit
+ mip_args = dict(config.mip_solver_args)
+ if config.mip_solver == 'gams':
+ mip_args['add_options'] = mip_args.get('add_options', [])
+ mip_args['add_options'].append('option optcr=0.0;')
+ master_mip_results = masteropt.solve(
+ solve_data.mip, **mip_args) # , tee=True)
+
+ if master_mip_results.solver.termination_condition is tc.optimal:
+ if config.single_tree:
+ if main_objective.sense == minimize:
+ solve_data.LB = max(
+ master_mip_results.problem.lower_bound, solve_data.LB)
+ solve_data.LB_progress.append(solve_data.LB)
+ else:
+ solve_data.UB = min(
+ master_mip_results.problem.upper_bound, solve_data.UB)
+ solve_data.UB_progress.append(solve_data.UB)
+
+ elif master_mip_results.solver.termination_condition is tc.infeasibleOrUnbounded:
# Linear solvers will sometimes tell me that it's infeasible or
# unbounded during presolve, but fails to distinguish. We need to
# resolve with a solver option flag on.
- master_mip_results, _ = distinguish_mip_infeasible_or_unbounded(master_mip, config)
+ master_mip_results, _ = distinguish_mip_infeasible_or_unbounded(
+ solve_data.mip, config)
- return master_mip, master_mip_results
+ return solve_data.mip, master_mip_results
-def handle_master_mip_optimal(master_mip, solve_data, config):
+def handle_master_mip_optimal(master_mip, solve_data, config, copy=True):
"""Copy the result to working model and update upper or lower bound"""
# proceed. Just need integer values
MindtPy = master_mip.MindtPy_utils
- main_objective = next(master_mip.component_data_objects(Objective, active=True))
+ main_objective = next(
+ master_mip.component_data_objects(Objective, active=True))
+ # check if the value of binary variable is valid
+ for var in MindtPy.variable_list:
+ if var.value == None and var.is_integer():
+ config.logger.warning(
+ "Integer variable {} not initialized. It is set to it's lower bound when using the initial_binary initialization method".format(var.name))
+ var.value = var.lb # nlp_var.bounds[0]
copy_var_list_values(
master_mip.MindtPy_utils.variable_list,
solve_data.working_model.MindtPy_utils.variable_list,
@@ -114,22 +181,21 @@ def handle_master_mip_other_conditions(master_mip, master_mip_results, solve_dat
def handle_master_mip_infeasible(master_mip, solve_data, config):
- config.logger.info(
- 'MILP master problem is infeasible. '
- 'Problem may have no more feasible '
- 'binary configurations.')
- if solve_data.mip_iter == 1:
- config.logger.warn(
- 'MindtPy initialization may have generated poor '
- 'quality cuts.')
- # set optimistic bound to infinity
- main_objective = next(master_mip.component_data_objects(Objective, active=True))
- if main_objective.sense == minimize:
- solve_data.LB = float('inf')
- solve_data.LB_progress.append(solve_data.UB)
- else:
- solve_data.UB = float('-inf')
- solve_data.UB_progress.append(solve_data.UB)
+ config.logger.info(
+ 'MILP master problem is infeasible. '
+ 'Problem may have no more feasible '
+ 'binary configurations.')
+ if solve_data.mip_iter == 1:
+ config.logger.warning(
+ 'MindtPy initialization may have generated poor '
+ 'quality cuts.')
+ # set optimistic bound to infinity
+ main_objective = next(
+ master_mip.component_data_objects(Objective, active=True))
+ if main_objective.sense == minimize:
+ solve_data.LB_progress.append(solve_data.LB)
+ else:
+ solve_data.UB_progress.append(solve_data.UB)
def handle_master_mip_max_timelimit(master_mip, solve_data, config):
@@ -166,8 +232,13 @@ def handle_master_mip_unbounded(master_mip, solve_data, config):
'Master MILP was unbounded. '
'Resolving with arbitrary bound values of (-{0:.10g}, {0:.10g}) on the objective. '
'You can change this bound with the option obj_bound.'.format(config.obj_bound))
- main_objective = next(master_mip.component_data_objects(Objective, active=True))
- MindtPy.objective_bound = Constraint(expr=(-config.obj_bound, main_objective.expr, config.obj_bound))
+ main_objective = next(
+ master_mip.component_data_objects(Objective, active=True))
+ MindtPy.objective_bound = Constraint(
+ expr=(-config.obj_bound, main_objective.expr, config.obj_bound))
with SuppressInfeasibleWarning():
- master_mip_results = SolverFactory(config.mip_solver).solve(
+ opt = SolverFactory(config.mip_solver)
+ if isinstance(opt, PersistentSolver):
+ opt.set_instance(master_mip)
+ master_mip_results = opt.solve(
master_mip, **config.mip_solver_args)
diff --git a/pyomo/contrib/mindtpy/nlp_solve.py b/pyomo/contrib/mindtpy/nlp_solve.py
index b5153f5ee44..a1c97f85e3e 100644
--- a/pyomo/contrib/mindtpy/nlp_solve.py
+++ b/pyomo/contrib/mindtpy/nlp_solve.py
@@ -2,11 +2,11 @@
from __future__ import division
from pyomo.contrib.mindtpy.cut_generation import (add_oa_cuts,
- add_int_cut)
+ add_int_cut)
from pyomo.contrib.mindtpy.util import add_feas_slacks
from pyomo.contrib.gdpopt.util import copy_var_list_values
from pyomo.core import (Constraint, Objective, TransformationFactory, Var,
- minimize, value)
+ minimize, value)
from pyomo.core.kernel.component_map import ComponentMap
from pyomo.opt import TerminationCondition as tc
from pyomo.opt import SolverFactory
@@ -16,7 +16,7 @@
def solve_NLP_subproblem(solve_data, config):
""" Solves fixed NLP with fixed working model binaries
- Sets up local working model `fix_nlp`
+ Sets up local working model `fixed_nlp`
Fixes binaries
Sets continuous variables to initial var values
Precomputes dual values
@@ -26,55 +26,76 @@ def solve_NLP_subproblem(solve_data, config):
Returns the fixed-NLP model and the solver results
"""
- fix_nlp = solve_data.working_model.clone()
- MindtPy = fix_nlp.MindtPy_utils
- main_objective = next(fix_nlp.component_data_objects(Objective, active=True))
+ fixed_nlp = solve_data.working_model.clone()
+ MindtPy = fixed_nlp.MindtPy_utils
solve_data.nlp_iter += 1
config.logger.info('NLP %s: Solve subproblem for fixed binaries.'
% (solve_data.nlp_iter,))
# Set up NLP
- TransformationFactory('core.fix_discrete').apply_to(fix_nlp)
-
- # restore original variable values
- for nlp_var, orig_val in zip(
- MindtPy.variable_list,
- solve_data.initial_var_values):
- if not nlp_var.fixed and not nlp_var.is_binary():
- nlp_var.value = orig_val
+ TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)
MindtPy.MindtPy_linear_cuts.deactivate()
- fix_nlp.tmp_duals = ComponentMap()
- for c in fix_nlp.component_data_objects(ctype=Constraint, active=True,
- descend_into=True):
- rhs = ((0 if c.upper is None else c.upper)
- + (0 if c.lower is None else c.lower))
- sign_adjust = 1 if value(c.upper) is None else -1
- fix_nlp.tmp_duals[c] = sign_adjust * max(0,
- sign_adjust * (rhs - value(c.body)))
- # TODO check sign_adjust
+ fixed_nlp.tmp_duals = ComponentMap()
+    # tmp_duals are the value of the dual variables stored before using deactivate trivial constraints
+ # The values of the duals are computed as follows: (Complementary Slackness)
+ #
+ # | constraint | c_geq | status at x1 | tmp_dual (violation) |
+ # |------------|-------|--------------|----------------------|
+ # | g(x) <= b | -1 | g(x1) <= b | 0 |
+ # | g(x) <= b | -1 | g(x1) > b | g(x1) - b |
+ # | g(x) >= b | +1 | g(x1) >= b | 0 |
+ # | g(x) >= b | +1 | g(x1) < b | b - g(x1) |
+ evaluation_error = False
+ for c in fixed_nlp.component_data_objects(ctype=Constraint, active=True,
+ descend_into=True):
+ # We prefer to include the upper bound as the right hand side since we are
+ # considering c by default a (hopefully) convex function, which would make
+ # c >= lb a nonconvex inequality which we wouldn't like to add linearizations
+ # if we don't have to
+ rhs = c.upper if c.has_ub() else c.lower
+ c_geq = -1 if c.has_ub() else 1
+ # c_leq = 1 if c.has_ub else -1
+ try:
+ fixed_nlp.tmp_duals[c] = c_geq * max(
+ 0, c_geq*(rhs - value(c.body)))
+ except (ValueError, OverflowError) as error:
+ fixed_nlp.tmp_duals[c] = None
+ evaluation_error = True
+ if evaluation_error:
+ for nlp_var, orig_val in zip(
+ MindtPy.variable_list,
+ solve_data.initial_var_values):
+ if not nlp_var.fixed and not nlp_var.is_binary():
+ nlp_var.value = orig_val
+ # fixed_nlp.tmp_duals[c] = c_leq * max(
+ # 0, c_leq*(value(c.body) - rhs))
+ # TODO: change logic to c_leq based on benchmarking
+
TransformationFactory('contrib.deactivate_trivial_constraints')\
- .apply_to(fix_nlp, tmp=True, ignore_infeasible=True)
+ .apply_to(fixed_nlp, tmp=True, ignore_infeasible=True)
# Solve the NLP
with SuppressInfeasibleWarning():
results = SolverFactory(config.nlp_solver).solve(
- fix_nlp, **config.nlp_solver_args)
- return fix_nlp, results
+ fixed_nlp, **config.nlp_solver_args)
+ return fixed_nlp, results
-def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config):
+def handle_NLP_subproblem_optimal(fixed_nlp, solve_data, config):
"""Copies result to working model, updates bound, adds OA and integer cut,
stores best solution if new one is best"""
copy_var_list_values(
- fix_nlp.MindtPy_utils.variable_list,
+ fixed_nlp.MindtPy_utils.variable_list,
solve_data.working_model.MindtPy_utils.variable_list,
config)
- for c in fix_nlp.tmp_duals:
- if fix_nlp.dual.get(c, None) is None:
- fix_nlp.dual[c] = fix_nlp.tmp_duals[c]
- dual_values = list(fix_nlp.dual[c] for c in fix_nlp.MindtPy_utils.constraint_list)
-
- main_objective = next(fix_nlp.component_data_objects(Objective, active=True))
+ for c in fixed_nlp.tmp_duals:
+ if fixed_nlp.dual.get(c, None) is None:
+ fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
+ dual_values = list(fixed_nlp.dual[c]
+ for c in fixed_nlp.MindtPy_utils.constraint_list)
+
+ main_objective = next(
+ fixed_nlp.component_data_objects(Objective, active=True))
if main_objective.sense == minimize:
solve_data.UB = min(value(main_objective.expr), solve_data.UB)
solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
@@ -91,11 +112,11 @@ def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config):
solve_data.LB, solve_data.UB))
if solve_data.solution_improved:
- solve_data.best_solution_found = fix_nlp.clone()
+ solve_data.best_solution_found = fixed_nlp.clone()
# Add the linear cut
if config.strategy == 'OA':
- copy_var_list_values(fix_nlp.MindtPy_utils.variable_list,
+ copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
solve_data.mip.MindtPy_utils.variable_list,
config)
add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
@@ -108,14 +129,14 @@ def handle_NLP_subproblem_optimal(fix_nlp, solve_data, config):
# ConstraintList, which is not activated by default. However, it
# may be activated as needed in certain situations or for certain
# values of option flags.
- var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list)
+ var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
if config.add_integer_cuts:
add_int_cut(var_values, solve_data, config, feasible=True)
- config.call_after_subproblem_feasible(fix_nlp, solve_data)
+ config.call_after_subproblem_feasible(fixed_nlp, solve_data)
-def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config):
+def handle_NLP_subproblem_infeasible(fixed_nlp, solve_data, config):
"""Solve feasibility problem, add cut according to strategy.
The solution of the feasibility problem is copied to the working model.
@@ -123,27 +144,27 @@ def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config):
# TODO try something else? Reinitialize with different initial
# value?
config.logger.info('NLP subproblem was locally infeasible.')
- for c in fix_nlp.component_data_objects(ctype=Constraint):
- rhs = ((0 if c.upper is None else c.upper)
- + (0 if c.lower is None else c.lower))
- sign_adjust = 1 if value(c.upper) is None else -1
- fix_nlp.dual[c] = (sign_adjust
- * max(0, sign_adjust * (rhs - value(c.body))))
- dual_values = list(fix_nlp.dual[c] for c in fix_nlp.MindtPy_utils.constraint_list)
-
- if config.strategy == 'PSC' or config.strategy == 'GBD':
- for var in fix_nlp.component_data_objects(ctype=Var, descend_into=True):
- fix_nlp.ipopt_zL_out[var] = 0
- fix_nlp.ipopt_zU_out[var] = 0
- if var.ub is not None and abs(var.ub - value(var)) < config.bound_tolerance:
- fix_nlp.ipopt_zL_out[var] = 1
- elif var.lb is not None and abs(value(var) - var.lb) < config.bound_tolerance:
- fix_nlp.ipopt_zU_out[var] = -1
-
- elif config.strategy == 'OA':
+ for c in fixed_nlp.component_data_objects(ctype=Constraint):
+ rhs = c.upper if c. has_ub() else c.lower
+ c_geq = -1 if c.has_ub() else 1
+ fixed_nlp.dual[c] = (c_geq
+ * max(0, c_geq * (rhs - value(c.body))))
+ dual_values = list(fixed_nlp.dual[c]
+ for c in fixed_nlp.MindtPy_utils.constraint_list)
+
+ # if config.strategy == 'PSC' or config.strategy == 'GBD':
+ # for var in fixed_nlp.component_data_objects(ctype=Var, descend_into=True):
+ # fixed_nlp.ipopt_zL_out[var] = 0
+ # fixed_nlp.ipopt_zU_out[var] = 0
+ # if var.has_ub() and abs(var.ub - value(var)) < config.bound_tolerance:
+ # fixed_nlp.ipopt_zL_out[var] = 1
+ # elif var.has_lb() and abs(value(var) - var.lb) < config.bound_tolerance:
+ # fixed_nlp.ipopt_zU_out[var] = -1
+
+ if config.strategy == 'OA':
config.logger.info('Solving feasibility problem')
if config.initial_feas:
- # add_feas_slacks(fix_nlp, solve_data)
+ # add_feas_slacks(fixed_nlp, solve_data)
# config.initial_feas = False
feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config)
copy_var_list_values(feas_NLP.MindtPy_utils.variable_list,
@@ -151,21 +172,24 @@ def handle_NLP_subproblem_infeasible(fix_nlp, solve_data, config):
config)
add_oa_cuts(solve_data.mip, dual_values, solve_data, config)
# Add an integer cut to exclude this discrete option
- var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list)
+ var_values = list(v.value for v in fixed_nlp.MindtPy_utils.variable_list)
if config.add_integer_cuts:
- add_int_cut(var_values, solve_data, config) # excludes current discrete option
+ # excludes current discrete option
+ add_int_cut(var_values, solve_data, config)
-def handle_NLP_subproblem_other_termination(fix_nlp, termination_condition,
+def handle_NLP_subproblem_other_termination(fixed_nlp, termination_condition,
solve_data, config):
"""Case that fix-NLP is neither optimal nor infeasible (i.e. max_iterations)"""
if termination_condition is tc.maxIterations:
# TODO try something else? Reinitialize with different initial value?
config.logger.info(
'NLP subproblem failed to converge within iteration limit.')
- var_values = list(v.value for v in fix_nlp.MindtPy_utils.variable_list)
+ var_values = list(
+ v.value for v in fixed_nlp.MindtPy_utils.variable_list)
if config.add_integer_cuts:
- add_int_cut(var_values, solve_data, config) # excludes current discrete option
+ # excludes current discrete option
+ add_int_cut(var_values, solve_data, config)
else:
raise ValueError(
'MindtPy unable to handle NLP subproblem termination '
@@ -177,26 +201,26 @@ def solve_NLP_feas(solve_data, config):
Returns: Result values and dual values
"""
- fix_nlp = solve_data.working_model.clone()
- add_feas_slacks(fix_nlp)
- MindtPy = fix_nlp.MindtPy_utils
- next(fix_nlp.component_data_objects(Objective, active=True)).deactivate()
- for constr in fix_nlp.component_data_objects(
+ fixed_nlp = solve_data.working_model.clone()
+ add_feas_slacks(fixed_nlp)
+ MindtPy = fixed_nlp.MindtPy_utils
+ next(fixed_nlp.component_data_objects(Objective, active=True)).deactivate()
+ for constr in fixed_nlp.component_data_objects(
ctype=Constraint, active=True, descend_into=True):
- if constr.body.polynomial_degree() not in [0,1]:
+ if constr.body.polynomial_degree() not in [0, 1]:
constr.deactivate()
MindtPy.MindtPy_feas.activate()
MindtPy.MindtPy_feas_obj = Objective(
expr=sum(s for s in MindtPy.MindtPy_feas.slack_var[...]),
sense=minimize)
- TransformationFactory('core.fix_discrete').apply_to(fix_nlp)
+ TransformationFactory('core.fix_integer_vars').apply_to(fixed_nlp)
with SuppressInfeasibleWarning():
feas_soln = SolverFactory(config.nlp_solver).solve(
- fix_nlp, **config.nlp_solver_args)
+ fixed_nlp, **config.nlp_solver_args)
subprob_terminate_cond = feas_soln.solver.termination_condition
- if subprob_terminate_cond is tc.optimal:
+ if subprob_terminate_cond is tc.optimal or subprob_terminate_cond is tc.locallyOptimal:
copy_var_list_values(
MindtPy.variable_list,
solve_data.working_model.MindtPy_utils.variable_list,
@@ -212,17 +236,14 @@ def solve_NLP_feas(solve_data, config):
var_values = [v.value for v in MindtPy.variable_list]
duals = [0 for _ in MindtPy.constraint_list]
- for i, constr in enumerate(MindtPy.constraint_list):
- # TODO rhs only works if constr.upper and constr.lower do not both have values.
- # Sometimes you might have 1 <= expr <= 1. This would give an incorrect rhs of 2.
- rhs = ((0 if constr.upper is None else constr.upper)
- + (0 if constr.lower is None else constr.lower))
- sign_adjust = 1 if value(constr.upper) is None else -1
- duals[i] = sign_adjust * max(
- 0, sign_adjust * (rhs - value(constr.body)))
+ for i, c in enumerate(MindtPy.constraint_list):
+ rhs = c.upper if c. has_ub() else c.lower
+ c_geq = -1 if c.has_ub() else 1
+ duals[i] = c_geq * max(
+ 0, c_geq * (rhs - value(c.body)))
if value(MindtPy.MindtPy_feas_obj.expr) == 0:
raise ValueError(
'Problem is not feasible, check NLP solver')
- return fix_nlp, feas_soln
+ return fixed_nlp, feas_soln
diff --git a/pyomo/contrib/mindtpy/single_tree.py b/pyomo/contrib/mindtpy/single_tree.py
new file mode 100644
index 00000000000..6dd0508bd6b
--- /dev/null
+++ b/pyomo/contrib/mindtpy/single_tree.py
@@ -0,0 +1,240 @@
+from __future__ import division
+
+
+from pyomo.core import Constraint, Expression, Objective, minimize, value, Var
+from pyomo.opt import TerminationCondition as tc
+from pyomo.contrib.mindtpy.nlp_solve import (solve_NLP_subproblem,
+ handle_NLP_subproblem_optimal, handle_NLP_subproblem_infeasible,
+ handle_NLP_subproblem_other_termination, solve_NLP_feas)
+from pyomo.contrib.gdpopt.util import copy_var_list_values, identify_variables
+from math import copysign
+from pyomo.environ import *
+from pyomo.core.expr import current as EXPR
+from math import fabs
+from pyomo.repn import generate_standard_repn
+import logging
+from pyomo.common.dependencies import attempt_import
+import cplex
+from cplex.callbacks import LazyConstraintCallback
+
+
+class LazyOACallback_cplex(LazyConstraintCallback):
+ """Inherent class in Cplex to call Lazy callback."""
+
+ def copy_lazy_var_list_values(self, opt, from_list, to_list, config,
+ skip_stale=False, skip_fixed=True,
+ ignore_integrality=False):
+ """Copy variable values from one list to another.
+
+        Rounds to Binary/Integer if necessary
+        Sets to zero for NonNegativeReals if necessary
+ """
+ for v_from, v_to in zip(from_list, to_list):
+ if skip_stale and v_from.stale:
+ continue # Skip stale variable values.
+ if skip_fixed and v_to.is_fixed():
+ continue # Skip fixed variables.
+ try:
+ v_val = self.get_values(
+ opt._pyomo_var_to_solver_var_map[v_from])
+ v_to.set_value(v_val)
+ if skip_stale:
+ v_to.stale = False
+ except ValueError:
+ # Snap the value to the bounds
+ if v_to.has_lb() and v_val < v_to.lb and v_to.lb - v_val <= config.zero_tolerance:
+ v_to.set_value(v_to.lb)
+ elif v_to.has_ub() and v_val > v_to.ub and v_val - v_to.ub <= config.zero_tolerance:
+ v_to.set_value(v_to.ub)
+ # ... or the nearest integer
+ elif v_to.is_integer():
+ rounded_val = int(round(v_val))
+ if (ignore_integrality or fabs(v_val - rounded_val) <= config.integer_tolerance) \
+ and rounded_val in v_to.domain:
+ v_to.set_value(rounded_val)
+ else:
+ raise
+
+ def add_lazy_oa_cuts(self, target_model, dual_values, solve_data, config, opt,
+ linearize_active=True,
+ linearize_violated=True,
+ linearize_inactive=False):
+ """Add oa_cuts through Cplex inherent function self.add()"""
+
+ for (constr, dual_value) in zip(target_model.MindtPy_utils.constraint_list,
+ dual_values):
+ if constr.body.polynomial_degree() in (0, 1):
+ continue
+
+ constr_vars = list(identify_variables(constr.body))
+ jacs = solve_data.jacobians
+
+ # Equality constraint (makes the problem nonconvex)
+ if constr.has_ub() and constr.has_lb() and constr.upper == constr.lower:
+ sign_adjust = -1 if solve_data.objective_sense == minimize else 1
+ rhs = ((0 if constr.upper is None else constr.upper)
+ + (0 if constr.lower is None else constr.lower))
+ rhs = constr.lower if constr.has_lb() and constr.has_ub() else rhs
+
+ # since the cplex requires the lazy cuts in cplex type, we need to transform the pyomo expression into cplex expression
+ pyomo_expr = copysign(1, sign_adjust * dual_value) * (sum(value(jacs[constr][var]) * (
+ var - value(var)) for var in list(EXPR.identify_variables(constr.body))) + value(constr.body) - rhs)
+ cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr)
+ cplex_rhs = -generate_standard_repn(pyomo_expr).constant
+ self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
+ sense="L",
+ rhs=cplex_rhs)
+ else: # Inequality constraint (possibly two-sided)
+ if constr.has_ub() \
+ and (linearize_active and abs(constr.uslack()) < config.zero_tolerance) \
+ or (linearize_violated and constr.uslack() < 0) \
+ or (linearize_inactive and constr.uslack() > 0):
+
+ pyomo_expr = sum(
+ value(jacs[constr][var])*(var - var.value) for var in constr_vars) + value(constr.body)
+ cplex_rhs = -generate_standard_repn(pyomo_expr).constant
+ cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr)
+ self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
+ sense="L",
+ rhs=constr.upper.value+cplex_rhs)
+ if constr.has_lb() \
+ and (linearize_active and abs(constr.lslack()) < config.zero_tolerance) \
+ or (linearize_violated and constr.lslack() < 0) \
+ or (linearize_inactive and constr.lslack() > 0):
+ pyomo_expr = sum(value(jacs[constr][var]) * (var - self.get_values(
+ opt._pyomo_var_to_solver_var_map[var])) for var in constr_vars) + value(constr.body)
+ cplex_rhs = -generate_standard_repn(pyomo_expr).constant
+ cplex_expr, _ = opt._get_expr_from_pyomo_expr(pyomo_expr)
+ self.add(constraint=cplex.SparsePair(ind=cplex_expr.variables, val=cplex_expr.coefficients),
+ sense="G",
+ rhs=constr.lower.value + cplex_rhs)
+
+ def handle_lazy_master_mip_feasible_sol(self, master_mip, solve_data, config, opt):
+ """ This function is called during the branch and bound of master mip, more exactly when a feasible solution is found and LazyCallback is activated.
+ Copy the result to working model and update upper or lower bound
+ In LP-NLP, upper or lower bound are updated during solving the master problem
+ """
+ # proceed. Just need integer values
+ MindtPy = master_mip.MindtPy_utils
+ main_objective = next(
+ master_mip.component_data_objects(Objective, active=True))
+
+ # this value copy is useful since we need to fix subproblem based on the solution of the master problem
+ self.copy_lazy_var_list_values(opt,
+ master_mip.MindtPy_utils.variable_list,
+ solve_data.working_model.MindtPy_utils.variable_list,
+ config)
+ config.logger.info(
+ 'MIP %s: OBJ: %s LB: %s UB: %s'
+ % (solve_data.mip_iter, value(MindtPy.MindtPy_oa_obj.expr),
+ solve_data.LB, solve_data.UB))
+
+ def handle_lazy_NLP_subproblem_optimal(self, fixed_nlp, solve_data, config, opt):
+ """Copies result to mip(explaination see below), updates bound, adds OA and integer cut,
+ stores best solution if new one is best"""
+ for c in fixed_nlp.tmp_duals:
+ if fixed_nlp.dual.get(c, None) is None:
+ fixed_nlp.dual[c] = fixed_nlp.tmp_duals[c]
+ dual_values = list(fixed_nlp.dual[c]
+ for c in fixed_nlp.MindtPy_utils.constraint_list)
+
+ main_objective = next(
+ fixed_nlp.component_data_objects(Objective, active=True))
+ if main_objective.sense == minimize:
+ solve_data.UB = min(value(main_objective.expr), solve_data.UB)
+ solve_data.solution_improved = solve_data.UB < solve_data.UB_progress[-1]
+ solve_data.UB_progress.append(solve_data.UB)
+ else:
+ solve_data.LB = max(value(main_objective.expr), solve_data.LB)
+ solve_data.solution_improved = solve_data.LB > solve_data.LB_progress[-1]
+ solve_data.LB_progress.append(solve_data.LB)
+
+ config.logger.info(
+ 'NLP {}: OBJ: {} LB: {} UB: {}'
+ .format(solve_data.nlp_iter,
+ value(main_objective.expr),
+ solve_data.LB, solve_data.UB))
+
+ if solve_data.solution_improved:
+ solve_data.best_solution_found = fixed_nlp.clone()
+
+ if config.strategy == 'OA':
+ # In OA algorithm, OA cuts are generated based on the solution of the subproblem
+ # We need to first copy the value of variables from the subproblem and then add cuts
+ # since value(constr.body), value(jacs[constr][var]), value(var) are used in self.add_lazy_oa_cuts()
+ copy_var_list_values(fixed_nlp.MindtPy_utils.variable_list,
+ solve_data.mip.MindtPy_utils.variable_list,
+ config)
+ self.add_lazy_oa_cuts(
+ solve_data.mip, dual_values, solve_data, config, opt)
+
+ def handle_lazy_NLP_subproblem_infeasible(self, fixed_nlp, solve_data, config, opt):
+ """Solve feasibility problem, add cut according to strategy.
+
+ The solution of the feasibility problem is copied to the working model.
+ """
+ # TODO try something else? Reinitialize with different initial
+ # value?
+ config.logger.info('NLP subproblem was locally infeasible.')
+ for c in fixed_nlp.component_data_objects(ctype=Constraint):
+ rhs = ((0 if c.upper is None else c.upper)
+ + (0 if c.lower is None else c.lower))
+ sign_adjust = 1 if value(c.upper) is None else -1
+ fixed_nlp.dual[c] = (sign_adjust
+ * max(0, sign_adjust * (rhs - value(c.body))))
+ dual_values = list(fixed_nlp.dual[c]
+ for c in fixed_nlp.MindtPy_utils.constraint_list)
+
+ if config.strategy == 'OA':
+ config.logger.info('Solving feasibility problem')
+ if config.initial_feas:
+ # config.initial_feas = False
+ feas_NLP, feas_NLP_results = solve_NLP_feas(solve_data, config)
+ # In OA algorithm, OA cuts are generated based on the solution of the subproblem
+ # We need to first copy the value of variables from the subproblem and then add cuts
+ copy_var_list_values(feas_NLP.MindtPy_utils.variable_list,
+ solve_data.mip.MindtPy_utils.variable_list,
+ config)
+ self.add_lazy_oa_cuts(
+ solve_data.mip, dual_values, solve_data, config, opt)
+
+ def handle_lazy_NLP_subproblem_other_termination(self, fixed_nlp, termination_condition,
+ solve_data, config):
+ """Case that fix-NLP is neither optimal nor infeasible (i.e. max_iterations)"""
+ if termination_condition is tc.maxIterations:
+ # TODO try something else? Reinitialize with different initial value?
+ config.logger.info(
+ 'NLP subproblem failed to converge within iteration limit.')
+ var_values = list(
+ v.value for v in fixed_nlp.MindtPy_utils.variable_list)
+ else:
+ raise ValueError(
+ 'MindtPy unable to handle NLP subproblem termination '
+ 'condition of {}'.format(termination_condition))
+
+ def __call__(self):
+ solve_data = self.solve_data
+ config = self.config
+ opt = self.opt
+ master_mip = self.master_mip
+ cpx = opt._solver_model # Cplex model
+
+ self.handle_lazy_master_mip_feasible_sol(
+ master_mip, solve_data, config, opt)
+
+ # solve subproblem
+ # Solve NLP subproblem
+ # The constraint linearization happens in the handlers
+ fixed_nlp, fixed_nlp_result = solve_NLP_subproblem(solve_data, config)
+
+ # add oa cuts
+ if fixed_nlp_result.solver.termination_condition is tc.optimal or fixed_nlp_result.solver.termination_condition is tc.locallyOptimal:
+ self.handle_lazy_NLP_subproblem_optimal(
+ fixed_nlp, solve_data, config, opt)
+ elif fixed_nlp_result.solver.termination_condition is tc.infeasible:
+ self.handle_lazy_NLP_subproblem_infeasible(
+ fixed_nlp, solve_data, config, opt)
+ else:
+ self.handle_lazy_NLP_subproblem_other_termination(fixed_nlp, fixed_nlp_result.solver.termination_condition,
+ solve_data, config)
+
diff --git a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py
index 454a035c051..b91a1a264ce 100644
--- a/pyomo/contrib/mindtpy/tests/MINLP2_simple.py
+++ b/pyomo/contrib/mindtpy/tests/MINLP2_simple.py
@@ -54,7 +54,7 @@ def __init__(self, *args, **kwargs):
# DISCRETE VARIABLES
Y = m.Y = Var(J, domain=Binary, initialize=initY)
# CONTINUOUS VARIABLES
- X = m.X = Var(I, domain=NonNegativeReals, initialize=initX)
+ X = m.X = Var(I, domain=NonNegativeReals, initialize=initX, bounds=(0, 2))
"""Constraint definitions"""
# CONSTRAINTS
diff --git a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py
index f335ca7614d..5d0151e2926 100644
--- a/pyomo/contrib/mindtpy/tests/MINLP3_simple.py
+++ b/pyomo/contrib/mindtpy/tests/MINLP3_simple.py
@@ -47,7 +47,7 @@ def __init__(self, *args, **kwargs):
# DISCRETE VARIABLES
Y = m.Y = Var(J, domain=Binary, initialize=initY)
# CONTINUOUS VARIABLES
- X = m.X = Var(I, domain=Reals, initialize=initX)
+ X = m.X = Var(I, domain=Reals, initialize=initX, bounds=(-1, 50))
"""Constraint definitions"""
# CONSTRAINTS
diff --git a/pyomo/contrib/mindtpy/tests/alan.py b/pyomo/contrib/mindtpy/tests/alan.py
deleted file mode 100644
index 7348e535362..00000000000
--- a/pyomo/contrib/mindtpy/tests/alan.py
+++ /dev/null
@@ -1,51 +0,0 @@
-# MINLP written by GAMS Convert from alan.gms instance in MINLPLib (http://www.minlplib.org/alan.html)
-# Original problem appearing in Manne, Alan S, GAMS/MINOS: Three examples, Tech. Rep.,
-# Department of Operations Research, Stanford University, 1986.
-#
-# Equation counts
-# Total E G L N X C B
-# 8 3 0 5 0 0 0 0
-#
-# Variable counts
-# x b i s1s s2s sc si
-# Total cont binary integer sos1 sos2 scont sint
-# 9 5 4 0 0 0 0 0
-# FX 0 0 0 0 0 0 0 0
-#
-# Nonzero counts
-# Total const NL DLL
-# 24 21 3 0
-#
-# Reformulation has removed 1 variable and 1 equation
-
-
-from pyomo.environ import *
-
-model = m = ConcreteModel()
-
-m.x1 = Var(within=Reals, bounds=(0, None), initialize=0.302884615384618)
-m.x2 = Var(within=Reals, bounds=(0, None), initialize=0.0865384615384593)
-m.x3 = Var(within=Reals, bounds=(0, None), initialize=0.504807692307693)
-m.x4 = Var(within=Reals, bounds=(0, None), initialize=0.10576923076923)
-m.b6 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b7 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b8 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b9 = Var(within=Binary, bounds=(0, 1), initialize=0)
-
-m.obj = Objective(
- expr=m.x1 * (4 * m.x1 + 3 * m.x2 - m.x3) + m.x2 * (3 * m.x1 + 6 * m.x2 + m.x3) + m.x3 * (m.x2 - m.x1 + 10 * m.x3)
- , sense=minimize)
-
-m.c1 = Constraint(expr=m.x1 + m.x2 + m.x3 + m.x4 == 1)
-
-m.c2 = Constraint(expr=8 * m.x1 + 9 * m.x2 + 12 * m.x3 + 7 * m.x4 == 10)
-
-m.c4 = Constraint(expr=m.x1 - m.b6 <= 0)
-
-m.c5 = Constraint(expr=m.x2 - m.b7 <= 0)
-
-m.c6 = Constraint(expr=m.x3 - m.b8 <= 0)
-
-m.c7 = Constraint(expr=m.x4 - m.b9 <= 0)
-
-m.c8 = Constraint(expr=m.b6 + m.b7 + m.b8 + m.b9 <= 3)
diff --git a/pyomo/contrib/mindtpy/tests/batchdes.py b/pyomo/contrib/mindtpy/tests/batchdes.py
deleted file mode 100644
index 49b270cf15c..00000000000
--- a/pyomo/contrib/mindtpy/tests/batchdes.py
+++ /dev/null
@@ -1,85 +0,0 @@
-# MINLP written by GAMS Convert from batchdes.gms instance in MINLPLib (http://www.minlplib.org/batchdes.html)
-# Original problem appearing in Kocis, Gary R and Grossmann, I E, Global Optimization of Nonconvex MINLP
-# Problems in Process Synthesis, Industrial and Engineering Chemistry Research, 27:8, 1988, 1407-1421.
-#
-# Equation counts
-# Total E G L N X C B
-# 20 7 12 1 0 0 0 0
-#
-# Variable counts
-# x b i s1s s2s sc si
-# Total cont binary integer sos1 sos2 scont sint
-# 20 11 9 0 0 0 0 0
-# FX 0 0 0 0 0 0 0 0
-#
-# Nonzero counts
-# Total const NL DLL
-# 53 43 10 0
-#
-# Reformulation has removed 1 variable and 1 equation
-
-
-from pyomo.environ import *
-
-model = m = ConcreteModel()
-
-m.b1 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b2 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b3 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b4 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b5 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b6 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b7 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b8 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b9 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.x10 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=6.70502272492805)
-m.x11 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=7.11048783303622)
-m.x12 = Var(within=Reals, bounds=(5.52146091786225, 7.82404601085629), initialize=7.30700912709102)
-m.x13 = Var(within=Reals, bounds=(5.40367788220586, 6.4377516497364), initialize=5.92071476597113)
-m.x14 = Var(within=Reals, bounds=(4.60517018598809, 6.03228654162824), initialize=5.31872836380816)
-m.x15 = Var(within=Reals, bounds=(1.89711998488588, 2.99573227355399), initialize=1.89711998488588)
-m.x16 = Var(within=Reals, bounds=(1.38629436111989, 2.484906649788), initialize=1.38629436111989)
-m.x17 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0)
-m.x18 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0)
-m.x19 = Var(within=Reals, bounds=(0, 1.09861228866811), initialize=0)
-
-m.obj = Objective(expr=250 * exp(0.6 * m.x10 + m.x17) + 500 * exp(0.6 * m.x11 + m.x18) + 340 * exp(0.6 * m.x12 + m.x19)
- , sense=minimize)
-
-m.c1 = Constraint(expr=m.x10 - m.x13 >= 0.693147180559945)
-
-m.c2 = Constraint(expr=m.x11 - m.x13 >= 1.09861228866811)
-
-m.c3 = Constraint(expr=m.x12 - m.x13 >= 1.38629436111989)
-
-m.c4 = Constraint(expr=m.x10 - m.x14 >= 1.38629436111989)
-
-m.c5 = Constraint(expr=m.x11 - m.x14 >= 1.79175946922805)
-
-m.c6 = Constraint(expr=m.x12 - m.x14 >= 1.09861228866811)
-
-m.c7 = Constraint(expr=m.x15 + m.x17 >= 2.07944154167984)
-
-m.c8 = Constraint(expr=m.x15 + m.x18 >= 2.99573227355399)
-
-m.c9 = Constraint(expr=m.x15 + m.x19 >= 1.38629436111989)
-
-m.c10 = Constraint(expr=m.x16 + m.x17 >= 2.30258509299405)
-
-m.c11 = Constraint(expr=m.x16 + m.x18 >= 2.484906649788)
-
-m.c12 = Constraint(expr=m.x16 + m.x19 >= 1.09861228866811)
-
-m.c13 = Constraint(expr=200000 * exp(m.x15 - m.x13) + 150000 * exp(m.x16 - m.x14) <= 6000)
-
-m.c14 = Constraint(expr=- 0.693147180559945 * m.b4 - 1.09861228866811 * m.b7 + m.x17 == 0)
-
-m.c15 = Constraint(expr=- 0.693147180559945 * m.b5 - 1.09861228866811 * m.b8 + m.x18 == 0)
-
-m.c16 = Constraint(expr=- 0.693147180559945 * m.b6 - 1.09861228866811 * m.b9 + m.x19 == 0)
-
-m.c17 = Constraint(expr=m.b1 + m.b4 + m.b7 == 1)
-
-m.c18 = Constraint(expr=m.b2 + m.b5 + m.b8 == 1)
-
-m.c19 = Constraint(expr=m.b3 + m.b6 + m.b9 == 1)
diff --git a/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py
new file mode 100644
index 00000000000..3b14090b6ce
--- /dev/null
+++ b/pyomo/contrib/mindtpy/tests/constraint_qualification_example.py
@@ -0,0 +1,30 @@
+""" Example of constraint qualification.
+
+The expected optimal solution value is 3.
+
+ Problem type: convex MINLP
+ size: 1 binary variable
+ 1 continuous variables
+ 2 constraints
+
+"""
+from __future__ import division
+
+from six import iteritems
+
+from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals,
+ Objective, Param, RangeSet, Var, exp, minimize, log)
+
+
+class ConstraintQualificationExample(ConcreteModel):
+
+ def __init__(self, *args, **kwargs):
+ """Create the problem."""
+ kwargs.setdefault('name', 'ConstraintQualificationExample')
+ super(ConstraintQualificationExample, self).__init__(*args, **kwargs)
+ model = self
+ model.x = Var(bounds=(1.0, 10.0), initialize=5.0)
+ model.y = Var(within=Binary)
+ model.c1 = Constraint(expr=(model.x-3.0)**2 <= 50.0*(1-model.y))
+ model.c2 = Constraint(expr=model.x*log(model.x)+5.0 <= 50.0*(model.y))
+ model.objective = Objective(expr=model.x, sense=minimize)
diff --git a/pyomo/contrib/mindtpy/tests/example_PSE.py b/pyomo/contrib/mindtpy/tests/example_PSE.py
deleted file mode 100644
index c5ca498e0e1..00000000000
--- a/pyomo/contrib/mindtpy/tests/example_PSE.py
+++ /dev/null
@@ -1,13 +0,0 @@
-from pyomo.environ import SolverFactory
-import time
-from pyomo.contrib.mindtpy.tests.flay03m import *
-# from pyomo.contrib.mindtpy.tests.eight_process_problem import EightProcessFlowsheet
-# model = EightProcessFlowsheet()
-# with SolverFactory('mindtpy') as opt:
-with SolverFactory('mindtpy') as opt:
- print('\n Solving problem with Outer Approximation')
- start = time.time()
- # opt.solve(model, strategy='OA', init_strategy = 'rNLP')
- opt.solve(model)
-# model.pprint()
- print(time.time()-start)
\ No newline at end of file
diff --git a/pyomo/contrib/mindtpy/tests/flay03m.py b/pyomo/contrib/mindtpy/tests/flay03m.py
deleted file mode 100644
index 5a4e201f7a0..00000000000
--- a/pyomo/contrib/mindtpy/tests/flay03m.py
+++ /dev/null
@@ -1,101 +0,0 @@
-# MINLP written by GAMS Convert from flay03m.gms instance in MINLPLib (http://www.minlplib.org/flay03m.html)
-# Original problem appearing in Sawaya, Nicolas W, Reformulations, relaxations and cutting planes
-# for generalized disjunctive programming, PhD thesis, Carnegie Mellon University, 2006.
-#
-# Equation counts
-# Total E G L N X C B
-# 25 4 6 15 0 0 0 0
-#
-# Variable counts
-# x b i s1s s2s sc si
-# Total cont binary integer sos1 sos2 scont sint
-# 27 15 12 0 0 0 0 0
-# FX 0 0 0 0 0 0 0 0
-#
-# Nonzero counts
-# Total const NL DLL
-# 87 84 3 0
-#
-# Reformulation has removed 1 variable and 1 equation
-
-
-from pyomo.environ import *
-
-model = m = ConcreteModel()
-
-m.x1 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x2 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x3 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x4 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x5 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x6 = Var(within=Reals, bounds=(0, 29), initialize=0)
-m.x7 = Var(within=Reals, bounds=(1, 40), initialize=1)
-m.x8 = Var(within=Reals, bounds=(1, 50), initialize=1)
-m.x9 = Var(within=Reals, bounds=(1, 60), initialize=1)
-m.x10 = Var(within=Reals, bounds=(1, 40), initialize=1)
-m.x11 = Var(within=Reals, bounds=(1, 50), initialize=1)
-m.x12 = Var(within=Reals, bounds=(1, 60), initialize=1)
-m.x13 = Var(within=Reals, bounds=(0, 30), initialize=0)
-m.x14 = Var(within=Reals, bounds=(0, 30), initialize=0)
-m.b15 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b16 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b17 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b18 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b19 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b20 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b21 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b22 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b23 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b24 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b25 = Var(within=Binary, bounds=(0, 1), initialize=0)
-m.b26 = Var(within=Binary, bounds=(0, 1), initialize=0)
-
-m.obj = Objective(expr=2 * m.x13 + 2 * m.x14, sense=minimize)
-
-m.c2 = Constraint(expr=- m.x1 - m.x7 + m.x13 >= 0)
-
-m.c3 = Constraint(expr=- m.x2 - m.x8 + m.x13 >= 0)
-
-m.c4 = Constraint(expr=- m.x3 - m.x9 + m.x13 >= 0)
-
-m.c5 = Constraint(expr=- m.x4 - m.x10 + m.x14 >= 0)
-
-m.c6 = Constraint(expr=- m.x5 - m.x11 + m.x14 >= 0)
-
-m.c7 = Constraint(expr=- m.x6 - m.x12 + m.x14 >= 0)
-
-m.c8 = Constraint(expr=40 / m.x10 - m.x7 <= 0)
-
-m.c9 = Constraint(expr=50 / m.x11 - m.x8 <= 0)
-
-m.c10 = Constraint(expr=60 / m.x12 - m.x9 <= 0)
-
-m.c11 = Constraint(expr=m.x1 - m.x2 + m.x7 + 69 * m.b15 <= 69)
-
-m.c12 = Constraint(expr=m.x1 - m.x3 + m.x7 + 69 * m.b16 <= 69)
-
-m.c13 = Constraint(expr=m.x2 - m.x3 + m.x8 + 79 * m.b17 <= 79)
-
-m.c14 = Constraint(expr=- m.x1 + m.x2 + m.x8 + 79 * m.b18 <= 79)
-
-m.c15 = Constraint(expr=- m.x1 + m.x3 + m.x9 + 89 * m.b19 <= 89)
-
-m.c16 = Constraint(expr=- m.x2 + m.x3 + m.x9 + 89 * m.b20 <= 89)
-
-m.c17 = Constraint(expr=m.x4 - m.x5 + m.x10 + 69 * m.b21 <= 69)
-
-m.c18 = Constraint(expr=m.x4 - m.x6 + m.x10 + 69 * m.b22 <= 69)
-
-m.c19 = Constraint(expr=m.x5 - m.x6 + m.x11 + 79 * m.b23 <= 79)
-
-m.c20 = Constraint(expr=- m.x4 + m.x5 + m.x11 + 79 * m.b24 <= 79)
-
-m.c21 = Constraint(expr=- m.x4 + m.x6 + m.x12 + 89 * m.b25 <= 89)
-
-m.c22 = Constraint(expr=- m.x5 + m.x6 + m.x12 + 89 * m.b26 <= 89)
-
-m.c23 = Constraint(expr=m.b15 + m.b18 + m.b21 + m.b24 == 1)
-
-m.c24 = Constraint(expr=m.b16 + m.b19 + m.b22 + m.b25 == 1)
-
-m.c25 = Constraint(expr=m.b17 + m.b20 + m.b23 + m.b26 == 1)
diff --git a/pyomo/contrib/mindtpy/tests/from_proposal.py b/pyomo/contrib/mindtpy/tests/from_proposal.py
index 797915f620e..517a5cdf49e 100644
--- a/pyomo/contrib/mindtpy/tests/from_proposal.py
+++ b/pyomo/contrib/mindtpy/tests/from_proposal.py
@@ -22,4 +22,4 @@ def __init__(self, *args, **kwargs):
m.c3 = Constraint(expr=m.y - 10*sqrt(m.x+0.1) <= 0)
m.c4 = Constraint(expr=-m.x-m.y <= -5)
- m.obj = Objective(expr=m.x - m.y / 4.5 +2, sense=minimize)
+ m.obj = Objective(expr=m.x - m.y / 4.5 + 2, sense=minimize)
diff --git a/pyomo/contrib/mindtpy/tests/online_doc_example.py b/pyomo/contrib/mindtpy/tests/online_doc_example.py
new file mode 100644
index 00000000000..a7199eadffa
--- /dev/null
+++ b/pyomo/contrib/mindtpy/tests/online_doc_example.py
@@ -0,0 +1,31 @@
+""" Example in the online doc.
+
+The expected optimal solution value is 2.438447187191098.
+
+ Problem type: convex MINLP
+ size: 1 binary variable
+ 1 continuous variables
+ 2 constraints
+
+"""
+from __future__ import division
+
+from six import iteritems
+
+from pyomo.environ import (Binary, ConcreteModel, Constraint, Reals,
+ Objective, Param, RangeSet, Var, exp, minimize, log)
+
+
+class OnlineDocExample(ConcreteModel):
+
+ def __init__(self, *args, **kwargs):
+ """Create the problem."""
+ kwargs.setdefault('name', 'OnlineDocExample')
+ super(OnlineDocExample, self).__init__(*args, **kwargs)
+ model = self
+ model.x = Var(bounds=(1.0, 10.0), initialize=5.0)
+ model.y = Var(within=Binary)
+ model.c1 = Constraint(expr=(model.x-4.0)**2 -
+ model.x <= 50.0*(1-model.y))
+ model.c2 = Constraint(expr=model.x*log(model.x) + 5 <= 50.0*(model.y))
+ model.objective = Objective(expr=model.x, sense=minimize)
diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy.py b/pyomo/contrib/mindtpy/tests/test_mindtpy.py
index a28482c1765..9479039a59d 100644
--- a/pyomo/contrib/mindtpy/tests/test_mindtpy.py
+++ b/pyomo/contrib/mindtpy/tests/test_mindtpy.py
@@ -1,6 +1,5 @@
"""Tests for the MINDT solver plugin."""
from math import fabs
-
import pyomo.core.base.symbolic
import pyutilib.th as unittest
from pyomo.contrib.mindtpy.tests.eight_process_problem import \
@@ -9,9 +8,17 @@
from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2
from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3
from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel
+from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample
+from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample
from pyomo.environ import SolverFactory, value
+from pyomo.environ import *
+from pyomo.solvers.tests.models.LP_unbounded import LP_unbounded
+from pyomo.solvers.tests.models.QCP_simple import QCP_simple
+from pyomo.solvers.tests.models.MIQCP_simple import MIQCP_simple
+from pyomo.opt import TerminationCondition
required_solvers = ('ipopt', 'glpk')
+# required_solvers = ('gams', 'gams')
if all(SolverFactory(s).available() for s in required_solvers):
subsolvers_available = True
else:
@@ -30,29 +37,30 @@ def test_OA_8PP(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = EightProcessFlowsheet()
- print('\n Solving problem with Outer Approximation')
- opt.solve(model, strategy='OA',
- init_strategy='rNLP',
- mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0])
+ print('\n Solving 8PP problem with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='rNLP',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ bound_tolerance=1E-5)
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(fabs(value(model.cost.expr) - 68) <= 1E-2)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 68, places=1)
def test_OA_8PP_init_max_binary(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = EightProcessFlowsheet()
- print('\n Solving problem with Outer Approximation')
- opt.solve(model, strategy='OA',
- init_strategy='max_binary',
- mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0])
+ print('\n Solving 8PP problem with Outer Approximation(max_binary)')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='max_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0])
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(fabs(value(model.cost.expr) - 68) <= 1E-2)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 68, places=1)
# def test_PSC(self):
# """Test the partial surrogate cuts decomposition algorithm."""
@@ -97,75 +105,214 @@ def test_OA_MINLP_simple(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = SimpleMINLP()
- print('\n Solving problem with Outer Approximation')
- opt.solve(model, strategy='OA', init_strategy='initial_binary',
- mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0],
- obj_bound=10)
-
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(abs(value(model.cost.expr) - 3.5) <= 1E-2)
+ print('\n Solving MINLP_simple problem with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2)
def test_OA_MINLP2_simple(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = SimpleMINLP2()
- print('\n Solving problem with Outer Approximation')
- opt.solve(model, strategy='OA', init_strategy='initial_binary',
- mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0],
- obj_bound=10)
-
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(abs(value(model.cost.expr) - 6.00976) <= 1E-2)
+ print('\n Solving MINLP2_simple problem with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 6.00976, places=2)
def test_OA_MINLP3_simple(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = SimpleMINLP3()
- print('\n Solving problem with Outer Approximation')
- opt.solve(model, strategy='OA', init_strategy='initial_binary',
- mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0],
- obj_bound=10)
-
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(abs(value(model.cost.expr) - (-5.512)) <= 1E-2)
+ print('\n Solving MINLP3_simple problem with Outer Approximation')
+ results = opt.solve(model, strategy='OA', init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), -5.512, places=2)
def test_OA_Proposal(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = ProposalModel()
- print('\n Solving problem with Outer Approximation')
+ print('\n Solving Proposal problem with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0])
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2)
+
+ def test_OA_Proposal_with_int_cuts(self):
+ """Test the outer approximation decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = ProposalModel()
+ print('\n Solving Proposal problem with Outer Approximation(integer cuts)')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ add_integer_cuts=True,
+ integer_to_binary=True # if we use lazy callback, we cannot set integer_to_binary True
+ )
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2)
+
+ def test_OA_ConstraintQualificationExample(self):
+ with SolverFactory('mindtpy') as opt:
+ model = ConstraintQualificationExample()
+ print('\n Solving Constraint Qualification Example with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0]
+ )
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.objective.expr), 3, places=2)
+
+ def test_OA_ConstraintQualificationExample_integer_cut(self):
+ with SolverFactory('mindtpy') as opt:
+ model = ConstraintQualificationExample()
+ print(
+ '\n Solving Constraint Qualification Example with Outer Approximation(integer cut)')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ add_integer_cuts=True
+ )
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.feasible)
+ self.assertAlmostEqual(value(model.objective.expr), 3, places=2)
+
+ def test_OA_OnlineDocExample(self):
+ with SolverFactory('mindtpy') as opt:
+ model = OnlineDocExample()
+ print('\n Solving Online Doc Example with Outer Approximation')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0]
+ )
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(
+ value(model.objective.expr), 2.438447, places=2)
+
+ # the following tests are used to improve code coverage
+
+ def test_iteration_limit(self):
+ with SolverFactory('mindtpy') as opt:
+ model = ConstraintQualificationExample()
+ print('\n test iteration_limit to improve code coverage')
opt.solve(model, strategy='OA',
+ iteration_limit=1,
mip_solver=required_solvers[1],
- nlp_solver=required_solvers[0])
+ nlp_solver=required_solvers[0]
+ )
+ # self.assertAlmostEqual(value(model.objective.expr), 3, places=2)
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertTrue(abs(value(model.obj.expr) - 0.66555) <= 1E-2)
+ def test_time_limit(self):
+ with SolverFactory('mindtpy') as opt:
+ model = ConstraintQualificationExample()
+ print('\n test time_limit to improve code coverage')
+ opt.solve(model, strategy='OA',
+ time_limit=1,
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0]
+ )
+ def test_LP_case(self):
+ with SolverFactory('mindtpy') as opt:
+ m_class = LP_unbounded()
+ m_class._generate_model()
+ model = m_class.model
+ print('\n Solving LP case with Outer Approximation')
+ opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ )
- def test_OA_Proposal_with_int_cuts(self):
+ def test_QCP_case(self):
+ with SolverFactory('mindtpy') as opt:
+ m_class = QCP_simple()
+ m_class._generate_model()
+ model = m_class.model
+ print('\n Solving QCP case with Outer Approximation')
+ opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ )
+
+ def test_maximize_obj(self):
"""Test the outer approximation decomposition algorithm."""
with SolverFactory('mindtpy') as opt:
model = ProposalModel()
- print('\n Solving problem with Outer Approximation')
+ model.obj.sense = maximize
+ print('\n test maximize case to improve code coverage')
opt.solve(model, strategy='OA',
mip_solver=required_solvers[1],
nlp_solver=required_solvers[0],
- add_integer_cuts=True,
- integer_to_binary=True)
+ # mip_solver_args={'timelimit': 0.9}
+ )
+ self.assertAlmostEqual(value(model.obj.expr), 14.83, places=1)
- # self.assertIs(results.solver.termination_condition,
- # TerminationCondition.optimal)
- self.assertAlmostEquals(value(model.obj.expr), 0.66555, places=2)
+ def test_rNLP_add_slack(self):
+ """Test the outer approximation decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = EightProcessFlowsheet()
+ print(
+ '\n Test rNLP initialize strategy and add_slack to improve code coverage')
+ opt.solve(model, strategy='OA',
+ init_strategy='rNLP',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ bound_tolerance=1E-5,
+ add_slack=True)
+ self.assertAlmostEqual(value(model.cost.expr), 68, places=1)
+
+ def test_initial_binary_add_slack(self):
+ """Test the outer approximation decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = SimpleMINLP()
+ print(
+ '\n Test initial_binary initialize strategy and add_slack to improve code coverage')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10,
+ add_slack=True)
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2)
+
+ # def test_OA_OnlineDocExample4(self):
+ # with SolverFactory('mindtpy') as opt:
+ # m = ConcreteModel()
+ # m.x = Var(within=Binary)
+ # m.y = Var(within=Reals)
+ # m.o = Objective(expr=m.x*m.y)
+ # print('\n Solving problem with Outer Approximation')
+ # opt.solve(m, strategy='OA',
+ # mip_solver=required_solvers[1],
+ # nlp_solver=required_solvers[0],
+ # )
# def test_PSC(self):
# """Test the partial surrogate cuts decomposition algorithm."""
diff --git a/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py
new file mode 100644
index 00000000000..6cf764b0b37
--- /dev/null
+++ b/pyomo/contrib/mindtpy/tests/test_mindtpy_lp_nlp.py
@@ -0,0 +1,171 @@
+"""Tests for the MINDT solver plugin."""
+from math import fabs
+import pyomo.core.base.symbolic
+import pyutilib.th as unittest
+from pyomo.contrib.mindtpy.tests.eight_process_problem import \
+ EightProcessFlowsheet
+from pyomo.contrib.mindtpy.tests.MINLP_simple import SimpleMINLP as SimpleMINLP
+from pyomo.contrib.mindtpy.tests.MINLP2_simple import SimpleMINLP as SimpleMINLP2
+from pyomo.contrib.mindtpy.tests.MINLP3_simple import SimpleMINLP as SimpleMINLP3
+from pyomo.contrib.mindtpy.tests.from_proposal import ProposalModel
+from pyomo.contrib.mindtpy.tests.constraint_qualification_example import ConstraintQualificationExample
+from pyomo.contrib.mindtpy.tests.online_doc_example import OnlineDocExample
+from pyomo.environ import SolverFactory, value
+from pyomo.opt import TerminationCondition
+
+required_solvers = ('ipopt', 'cplex_persistent')
+if all(SolverFactory(s).available(False) for s in required_solvers):
+ subsolvers_available = True
+else:
+ subsolvers_available = False
+
+
+@unittest.skipIf(not subsolvers_available,
+ "Required subsolvers %s are not available"
+ % (required_solvers,))
+@unittest.skipIf(not pyomo.core.base.symbolic.differentiate_available,
+ "Symbolic differentiation is not available")
+class TestMindtPy(unittest.TestCase):
+ """Tests for the MindtPy solver plugin."""
+
+ # lazy callback tests
+
+ def test_lazy_OA_8PP(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = EightProcessFlowsheet()
+ print('\n Solving 8PP problem with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='rNLP',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ bound_tolerance=1E-5,
+ single_tree=True)
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 68, places=1)
+
+ def test_lazy_OA_8PP_init_max_binary(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = EightProcessFlowsheet()
+ print('\n Solving 8PP_init_max_binary problem with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='max_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ single_tree=True)
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 68, places=1)
+
+ def test_lazy_OA_MINLP_simple(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = SimpleMINLP()
+ print('\n Solving MINLP_simple problem with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10,
+ single_tree=True)
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 3.5, places=2)
+
+ def test_lazy_OA_MINLP2_simple(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = SimpleMINLP2()
+ print('\n Solving MINLP2_simple problem with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ single_tree=True,
+ bound_tolerance=1E-2)
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), 6.00976, places=2)
+
+ def test_lazy_OA_MINLP3_simple(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = SimpleMINLP3()
+ print('\n Solving MINLP3_simple problem with LP/NLP')
+ results = opt.solve(model, strategy='OA', init_strategy='initial_binary',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ obj_bound=10,
+ single_tree=True)
+ # TODO: fix the bug of bound here
+ # self.assertIs(results.solver.termination_condition,
+ # TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.cost.expr), -5.512, places=2)
+
+ def test_lazy_OA_Proposal(self):
+ """Test the LP/NLP decomposition algorithm."""
+ with SolverFactory('mindtpy') as opt:
+ model = ProposalModel()
+ print('\n Solving Proposal problem with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ single_tree=True)
+
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(value(model.obj.expr), 0.66555, places=2)
+
+ def test_lazy_OA_ConstraintQualificationExample(self):
+ with SolverFactory('mindtpy') as opt:
+ model = ConstraintQualificationExample()
+ print('\n Solving ConstraintQualificationExample with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ single_tree=True
+ )
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.maxIterations)
+ self.assertAlmostEqual(value(model.objective.expr), 3, places=2)
+
+ def test_OA_OnlineDocExample(self):
+ with SolverFactory('mindtpy') as opt:
+ model = OnlineDocExample()
+ print('\n Solving OnlineDocExample with LP/NLP')
+ results = opt.solve(model, strategy='OA',
+ mip_solver=required_solvers[1],
+ nlp_solver=required_solvers[0],
+ single_tree=True
+ )
+ self.assertIs(results.solver.termination_condition,
+ TerminationCondition.optimal)
+ self.assertAlmostEqual(
+ value(model.objective.expr), 2.438447, places=2)
+
+ # TODO fix the bug with integer_to_binary
+ # def test_OA_Proposal_with_int_cuts(self):
+ # """Test the outer approximation decomposition algorithm."""
+ # with SolverFactory('mindtpy') as opt:
+ # model = ProposalModel()
+ # print('\n Solving problem with Outer Approximation')
+ # opt.solve(model, strategy='OA',
+ # mip_solver=required_solvers[1],
+ # nlp_solver=required_solvers[0],
+ # add_integer_cuts=True,
+ # integer_to_binary=True, # if we use lazy callback, we cannot set integer_to_binary True
+ # lazy_callback=True,
+ # iteration_limit=1)
+
+ # # self.assertIs(results.solver.termination_condition,
+ # # TerminationCondition.optimal)
+ # self.assertAlmostEquals(value(model.obj.expr), 0.66555, places=2)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/pyomo/contrib/mindtpy/util.py b/pyomo/contrib/mindtpy/util.py
index fc239399608..d8c7c6c852f 100644
--- a/pyomo/contrib/mindtpy/util.py
+++ b/pyomo/contrib/mindtpy/util.py
@@ -13,6 +13,7 @@
from pyomo.core.kernel.component_set import ComponentSet
from pyomo.opt import SolverFactory
from pyomo.opt.results import ProblemSense
+from pyomo.solvers.plugins.solvers.persistent_solver import PersistentSolver
class MindtPySolveData(object):
@@ -40,19 +41,24 @@ def model_is_valid(solve_data, config):
prob.number_of_integer_variables == 0 and
prob.number_of_disjunctions == 0):
config.logger.info('Problem has no discrete decisions.')
- if len(MindtPy.working_nonlinear_constraints) > 0:
+ obj = next(m.component_data_objects(ctype=Objective, active=True))
+ if (any(c.body.polynomial_degree() not in (1, 0) for c in MindtPy.constraint_list) or
+ obj.expr.polynomial_degree() not in (1, 0)):
config.logger.info(
"Your model is an NLP (nonlinear program). "
- "Using NLP solver %s to solve." % config.nlp)
- SolverFactory(config.nlp).solve(
- solve_data.original_model, **config.nlp_options)
+ "Using NLP solver %s to solve." % config.nlp_solver)
+ SolverFactory(config.nlp_solver).solve(
+ solve_data.original_model, **config.nlp_solver_args)
return False
else:
config.logger.info(
"Your model is an LP (linear program). "
- "Using LP solver %s to solve." % config.mip)
- SolverFactory(config.mip).solve(
- solve_data.original_model, **config.mip_options)
+ "Using LP solver %s to solve." % config.mip_solver)
+ mipopt = SolverFactory(config.mip_solver)
+ if isinstance(mipopt, PersistentSolver):
+ mipopt.set_instance(solve_data.original_model)
+
+ mipopt.solve(solve_data.original_model, **config.mip_solver_args)
return False
if not hasattr(m, 'dual'): # Set up dual value reporting
@@ -72,7 +78,8 @@ def calc_jacobians(solve_data, config):
if c.body.polynomial_degree() in (1, 0):
continue # skip linear constraints
vars_in_constr = list(EXPR.identify_variables(c.body))
- jac_list = differentiate(c.body, wrt_list=vars_in_constr, mode=differentiate.Modes.sympy)
+ jac_list = differentiate(
+ c.body, wrt_list=vars_in_constr, mode=differentiate.Modes.sympy)
solve_data.jacobians[c] = ComponentMap(
(var, jac_wrt_var)
for var, jac_wrt_var in zip(vars_in_constr, jac_list))
@@ -82,10 +89,31 @@ def add_feas_slacks(m):
MindtPy = m.MindtPy_utils
# generate new constraints
for i, constr in enumerate(MindtPy.constraint_list, 1):
- rhs = ((0 if constr.upper is None else constr.upper) +
- (0 if constr.lower is None else constr.lower))
- c = MindtPy.MindtPy_feas.feas_constraints.add(
- constr.body - rhs
- <= MindtPy.MindtPy_feas.slack_var[i])
+ if constr.body.polynomial_degree() not in [0, 1]:
+ rhs = constr.upper if constr.has_ub() else constr.lower
+ c = MindtPy.MindtPy_feas.feas_constraints.add(
+ constr.body - rhs
+ <= MindtPy.MindtPy_feas.slack_var[i])
+def var_bound_add(solve_data, config):
+ """This function will add bound for variables in nonlinear constraints if they are not bounded.
+ This is to avoid an unbound master problem in the LP/NLP algorithm.
+ """
+ m = solve_data.working_model
+ MindtPy = m.MindtPy_utils
+ for c in MindtPy.constraint_list:
+ if c.body.polynomial_degree() not in (1, 0):
+ for var in list(EXPR.identify_variables(c.body)):
+ if var.has_lb() and var.has_ub():
+ continue
+ elif not var.has_lb():
+ if var.is_integer():
+ var.setlb(-config.integer_var_bound - 1)
+ else:
+ var.setlb(-config.continuous_var_bound - 1)
+ elif not var.has_ub():
+ if var.is_integer():
+ var.setub(config.integer_var_bound)
+ else:
+ var.setub(config.continuous_var_bound)
diff --git a/pyomo/contrib/parmest/__init__.py b/pyomo/contrib/parmest/__init__.py
index 8b137891791..cd6b0b75748 100644
--- a/pyomo/contrib/parmest/__init__.py
+++ b/pyomo/contrib/parmest/__init__.py
@@ -1 +1,9 @@
-
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
diff --git a/pyomo/contrib/parmest/examples/__init__.py b/pyomo/contrib/parmest/examples/__init__.py
index 8d1c8b69c3f..6b39dd18d6a 100644
--- a/pyomo/contrib/parmest/examples/__init__.py
+++ b/pyomo/contrib/parmest/examples/__init__.py
@@ -1 +1,10 @@
-
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
diff --git a/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py b/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py
new file mode 100644
index 00000000000..30f96f35017
--- /dev/null
+++ b/pyomo/contrib/parmest/examples/reactor_design/datarec_example.py
@@ -0,0 +1,87 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import numpy as np
+import pandas as pd
+import matplotlib.pylab as plt
+import pyomo.contrib.parmest.parmest as parmest
+from reactor_design import reactor_design_model
+
+plt.close('all')
+
+np.random.seed(1234)
+
+def reactor_design_model_for_datarec(data):
+
+ # Unfix inlet concentration for data rec
+ model = reactor_design_model(data)
+ model.caf.fixed = False
+
+ return model
+
+### Generate data based on real sv, caf, ca, cb, cc, and cd
+theta_real = {'k1': 5.0/6.0,
+ 'k2': 5.0/3.0,
+ 'k3': 1.0/6000.0}
+sv_real = 1.05
+caf_real = 10000
+ca_real = 3458.4
+cb_real = 1060.8
+cc_real = 1683.9
+cd_real = 1898.5
+
+data = pd.DataFrame()
+ndata = 200
+# Normal distribution, mean = 3400, std = 500
+data['ca'] = 500 * np.random.randn(ndata) + 3400
+# Random distribution between 500 and 1500
+data['cb'] = np.random.rand(ndata)*1000+500
+# Lognormal distribution
+data['cc'] = np.random.lognormal(np.log(1600),0.25,ndata)
+# Triangular distribution between 1000 and 2000
+data['cd'] = np.random.triangular(1000,1800,3000,size=ndata)
+
+data['sv'] = sv_real
+data['caf'] = caf_real
+
+data_std = data.std()
+
+# Define sum of squared error objective function for data rec
+def SSE(model, data):
+ expr = ((float(data['ca']) - model.ca)/float(data_std['ca']))**2 + \
+ ((float(data['cb']) - model.cb)/float(data_std['cb']))**2 + \
+ ((float(data['cc']) - model.cc)/float(data_std['cc']))**2 + \
+ ((float(data['cd']) - model.cd)/float(data_std['cd']))**2
+ return expr
+
+### Data reconciliation
+
+theta_names = [] # no variables to estimate, use initialized values
+
+pest = parmest.Estimator(reactor_design_model_for_datarec, data, theta_names, SSE)
+obj, theta, data_rec = pest.theta_est(return_values=['ca', 'cb', 'cc', 'cd', 'caf'])
+print(obj)
+print(theta)
+
+parmest.grouped_boxplot(data[['ca', 'cb', 'cc', 'cd']],
+ data_rec[['ca', 'cb', 'cc', 'cd']],
+ group_names=['Data', 'Data Rec'])
+
+
+### Parameter estimation using reconciled data
+
+theta_names = ['k1', 'k2', 'k3']
+data_rec['sv'] = data['sv']
+
+pest = parmest.Estimator(reactor_design_model, data_rec, theta_names, SSE)
+obj, theta = pest.theta_est()
+print(obj)
+print(theta)
+print(theta_real)
diff --git a/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py b/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py
new file mode 100644
index 00000000000..105ccbcd5d0
--- /dev/null
+++ b/pyomo/contrib/parmest/examples/reactor_design/leaveNout_example.py
@@ -0,0 +1,77 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import numpy as np
+import pandas as pd
+import pyomo.contrib.parmest.parmest as parmest
+from reactor_design import reactor_design_model
+
+### Parameter estimation
+
+# Vars to estimate
+theta_names = ['k1', 'k2', 'k3']
+
+# Data
+data = pd.read_excel('reactor_data.xlsx')
+
+# Create more data for the example
+df_std = data.std().to_frame().transpose()
+df_rand = pd.DataFrame(np.random.normal(size=100))
+df_sample = data.sample(100, replace=True).reset_index(drop=True)
+data = df_sample + df_rand.dot(df_std)/10
+
+# Sum of squared error function
+def SSE(model, data):
+ expr = (float(data['ca']) - model.ca)**2 + \
+ (float(data['cb']) - model.cb)**2 + \
+ (float(data['cc']) - model.cc)**2 + \
+ (float(data['cd']) - model.cd)**2
+ return expr
+
+pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE)
+obj, theta = pest.theta_est()
+print(obj)
+print(theta)
+
+### Parameter estimation with 'leave-N-out'
+# Example use case: For each combination of data where one data point is left
+# out, estimate theta
+lNo_theta = pest.theta_est_leaveNout(1)
+print(lNo_theta.head())
+
+parmest.pairwise_plot(lNo_theta, theta)
+
+### Leave one out/bootstrap analysis
+# Example use case: leave 50 data points out, run 75 bootstrap samples with the
+# remaining points, determine if the theta estimate using the points left out
+# is inside or outside an alpha region based on the bootstrap samples, repeat
+# 10 times. Results are stored as a list of tuples, see API docs for information.
+lNo = 50
+lNo_samples = 10
+bootstrap_samples = 75
+dist = 'MVN'
+alphas = [0.7, 0.8, 0.9]
+
+results = pest.leaveNout_bootstrap_test(lNo, lNo_samples, bootstrap_samples,
+ dist, alphas, seed=524)
+
+# Plot results for a single value of alpha
+alpha = 0.8
+for i in range(lNo_samples):
+ theta_est_N = results[i][1]
+ bootstrap_results = results[i][2]
+ parmest.pairwise_plot(bootstrap_results, theta_est_N, alpha, ['MVN'],
+ title= 'Alpha: '+ str(alpha) + ', '+ \
+ str(theta_est_N.loc[0,alpha]))
+
+# Extract the percent of points that are within the alpha region
+r = [results[i][1].loc[0,alpha] for i in range(lNo_samples)]
+percent_true = sum(r)/len(r)
+print(percent_true)
diff --git a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_multisensor.py b/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py
similarity index 52%
rename from pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_multisensor.py
rename to pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py
index 01b95dfb51d..c97bd46ddf9 100644
--- a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_multisensor.py
+++ b/pyomo/contrib/parmest/examples/reactor_design/multisensor_data_example.py
@@ -1,6 +1,14 @@
-import numpy as np
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import pandas as pd
-from itertools import product
import pyomo.contrib.parmest.parmest as parmest
from reactor_design import reactor_design_model
@@ -27,27 +35,3 @@ def SSE_multisensor(model, data):
obj, theta = pest.theta_est()
print(obj)
print(theta)
-
-### Parameter estimation with bootstrap resampling
-
-bootstrap_theta = pest.theta_est_bootstrap(50)
-print(bootstrap_theta.head())
-
-parmest.pairwise_plot(bootstrap_theta)
-parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'])
-
-### Likelihood ratio test
-
-k1 = [0.83]
-k2 = np.arange(1.48, 1.79, 0.05) # Only vary k2 and k3 in this example
-k3 = np.arange(0.000155, 0.000185, 0.000005)
-theta_vals = pd.DataFrame(list(product(k1, k2, k3)), columns=theta_names)
-
-obj_at_theta = pest.objective_at_theta(theta_vals)
-print(obj_at_theta.head())
-
-LR = pest.likelihood_ratio_test(obj_at_theta, obj, [0.8, 0.85, 0.9, 0.95])
-print(LR.head())
-
-theta_slice = {'k1': 0.83, 'k2': theta['k2'], 'k3': theta['k3']}
-parmest.pairwise_plot(LR, theta_slice, 0.8)
diff --git a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest.py b/pyomo/contrib/parmest/examples/reactor_design/parmest_example.py
similarity index 61%
rename from pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest.py
rename to pyomo/contrib/parmest/examples/reactor_design/parmest_example.py
index 1609d35d902..f2f9324c258 100644
--- a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest.py
+++ b/pyomo/contrib/parmest/examples/reactor_design/parmest_example.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import numpy as np
import pandas as pd
from itertools import product
@@ -30,8 +40,9 @@ def SSE(model, data):
bootstrap_theta = pest.theta_est_bootstrap(50)
print(bootstrap_theta.head())
-parmest.pairwise_plot(bootstrap_theta)
-parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'])
+parmest.pairwise_plot(bootstrap_theta, title='Bootstrap theta estimates')
+parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'],
+ title='Bootstrap theta with confidence regions')
### Likelihood ratio test
@@ -46,4 +57,5 @@ def SSE(model, data):
LR = pest.likelihood_ratio_test(obj_at_theta, obj, [0.8, 0.85, 0.9, 0.95])
print(LR.head())
-parmest.pairwise_plot(LR, theta, 0.8)
+parmest.pairwise_plot(LR, theta, 0.8,
+ title='LR results within 80% confidence region')
diff --git a/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py b/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py
index 9e6c68746ba..c90da3ab820 100644
--- a/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py
+++ b/pyomo/contrib/parmest/examples/reactor_design/reactor_design.py
@@ -1,3 +1,12 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
"""
Continuously stirred tank reactor model, based on
pyomo\examples\doc\pyomobook\nonlinear-ch\react_design\ReactorDesign.py
@@ -20,10 +29,12 @@ def reactor_design_model(data):
model.k3.fixed = True
# Inlet concentration of A, gmol/m^3
- model.caf = float(data['caf'])
-
+ model.caf = Var(initialize = float(data['caf']), within=PositiveReals)
+ model.caf.fixed = True
+
# Space velocity (flowrate/volume)
- model.sv = float(data['sv'])
+ model.sv = Var(initialize = float(data['sv']), within=PositiveReals)
+ model.sv.fixed = True
# Outlet concentration of each component
model.ca = Var(initialize = 5000.0, within=PositiveReals)
diff --git a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_timeseries.py b/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py
similarity index 66%
rename from pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_timeseries.py
rename to pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py
index e3cbc6488bc..2be16356485 100644
--- a/pyomo/contrib/parmest/examples/reactor_design/reactor_design_parmest_timeseries.py
+++ b/pyomo/contrib/parmest/examples/reactor_design/timeseries_data_example.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import pandas as pd
import pyomo.contrib.parmest.parmest as parmest
from reactor_design import reactor_design_model
diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py b/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py
index 8d1c8b69c3f..d9f70706c29 100644
--- a/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py
+++ b/pyomo/contrib/parmest/examples/rooney_biegler/__init__.py
@@ -1 +1,9 @@
-
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_parmest.py b/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py
similarity index 58%
rename from pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_parmest.py
rename to pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py
index 864dce94e13..19438444aaf 100644
--- a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler_parmest.py
+++ b/pyomo/contrib/parmest/examples/rooney_biegler/parmest_example.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import numpy as np
import pandas as pd
from itertools import product
@@ -19,7 +29,10 @@ def SSE(model, data):
expr = sum((data.y[i] - model.response_function[data.hour[i]])**2 for i in data.index)
return expr
-pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE)
+
+solver_options = {"max_iter": 6000} # not really needed in this case
+
+pest = parmest.Estimator(rooney_biegler_model, data, theta_names, SSE, solver_options)
obj, theta = pest.theta_est()
print(obj)
print(theta)
@@ -29,7 +42,9 @@ def SSE(model, data):
bootstrap_theta = pest.theta_est_bootstrap(50, seed=4581)
print(bootstrap_theta.head())
-parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'])
+parmest.pairwise_plot(bootstrap_theta, title='Bootstrap theta')
+parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'],
+ title='Bootstrap theta with confidence regions')
### Likelihood ratio test
@@ -43,4 +58,5 @@ def SSE(model, data):
LR = pest.likelihood_ratio_test(obj_at_theta, obj, [0.8, 0.85, 0.9, 0.95])
print(LR.head())
-parmest.pairwise_plot(LR, theta, 0.8)
+parmest.pairwise_plot(LR, theta, 0.8,
+ title='LR results within 80% confidence region')
diff --git a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py
index 40f56a56b15..72a60799bf4 100644
--- a/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py
+++ b/pyomo/contrib/parmest/examples/rooney_biegler/rooney_biegler.py
@@ -1,3 +1,12 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
"""
Rooney Biegler model, based on Rooney, W. C. and Biegler, L. T. (2001). Design for
model parameter uncertainty using nonlinear confidence regions. AIChE Journal,
@@ -26,7 +35,8 @@ def SSE_rule(m):
if __name__ == '__main__':
- data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],[4,16.0],[5,15.6],[6,19.8]],
+ # These were taken from Table A1.4 in Bates and Watts (1988).
+ data = pd.DataFrame(data=[[1,8.3],[2,10.3],[3,19.0],[4,16.0],[5,15.6],[7,19.8]],
columns=['hour', 'y'])
model = rooney_biegler_model(data)
diff --git a/pyomo/contrib/parmest/examples/semibatch/__init__.py b/pyomo/contrib/parmest/examples/semibatch/__init__.py
index 8d1c8b69c3f..5296dafcc78 100644
--- a/pyomo/contrib/parmest/examples/semibatch/__init__.py
+++ b/pyomo/contrib/parmest/examples/semibatch/__init__.py
@@ -1 +1,10 @@
-
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
diff --git a/pyomo/contrib/parmest/examples/semibatch/semibatch_parmest.py b/pyomo/contrib/parmest/examples/semibatch/parmest_example.py
similarity index 62%
rename from pyomo/contrib/parmest/examples/semibatch/semibatch_parmest.py
rename to pyomo/contrib/parmest/examples/semibatch/parmest_example.py
index 4ec2863673d..57f42d068f9 100644
--- a/pyomo/contrib/parmest/examples/semibatch/semibatch_parmest.py
+++ b/pyomo/contrib/parmest/examples/semibatch/parmest_example.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import numpy as np
import pandas as pd
from itertools import product
@@ -31,7 +41,9 @@
bootstrap_theta = pest.theta_est_bootstrap(50)
print(bootstrap_theta.head())
-parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'])
+parmest.pairwise_plot(bootstrap_theta, title='Bootstrap theta estimates')
+parmest.pairwise_plot(bootstrap_theta, theta, 0.8, ['MVN', 'KDE', 'Rect'],
+ title='Bootstrap theta with confidence regions')
### Likelihood ratio test
@@ -48,4 +60,5 @@
print(LR.head())
theta_slice = {'k1': 19, 'k2': theta['k2'], 'E1': 30524, 'E2': theta['E2']}
-parmest.pairwise_plot(LR, theta_slice, 0.8)
+parmest.pairwise_plot(LR, theta_slice, 0.8,
+ title='LR results within 80% confidence region')
diff --git a/pyomo/contrib/parmest/examples/semibatch/semibatch_parmest_parallel.py b/pyomo/contrib/parmest/examples/semibatch/parmest_parallel_example.py
similarity index 70%
rename from pyomo/contrib/parmest/examples/semibatch/semibatch_parmest_parallel.py
rename to pyomo/contrib/parmest/examples/semibatch/parmest_parallel_example.py
index 151c29211cc..c7bafd59ef6 100644
--- a/pyomo/contrib/parmest/examples/semibatch/semibatch_parmest_parallel.py
+++ b/pyomo/contrib/parmest/examples/semibatch/parmest_parallel_example.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
"""
The following script can be used to run semibatch parameter estimation in
parallel and save results to files for later analysis and graphics.
diff --git a/pyomo/contrib/parmest/examples/semibatch/scencreate.py b/pyomo/contrib/parmest/examples/semibatch/scencreate.py
new file mode 100644
index 00000000000..61a21e530e1
--- /dev/null
+++ b/pyomo/contrib/parmest/examples/semibatch/scencreate.py
@@ -0,0 +1,42 @@
+# scenario creation example; DLW March 2020
+
+import os
+import json
+import pyomo.contrib.parmest.parmest as parmest
+from pyomo.contrib.parmest.examples.semibatch.semibatch import generate_model
+import pyomo.contrib.parmest.scenariocreator as sc
+
+def main(dirname):
+ """ dirname gives the location of the experiment input files"""
+ # Semibatch Vars to estimate in parmest
+ theta_names = ['k1', 'k2', 'E1', 'E2']
+
+ # Semibatch data: list of dictionaries
+ data = []
+ for exp_num in range(10):
+ fname = os.path.join(dirname, 'exp'+str(exp_num+1)+'.out')
+ with open(fname,'r') as infile:
+ d = json.load(infile)
+ data.append(d)
+
+ pest = parmest.Estimator(generate_model, data, theta_names)
+
+ scenmaker = sc.ScenarioCreator(pest, "ipopt")
+
+ ofile = "delme_exp.csv"
+ print("Make one scenario per experiment and write to {}".format(ofile))
+ experimentscens = sc.ScenarioSet("Experiments")
+ scenmaker.ScenariosFromExperiments(experimentscens)
+ ###experimentscens.write_csv(ofile)
+
+ numtomake = 3
+ print("\nUse the bootstrap to make {} scenarios and print.".format(numtomake))
+ bootscens = sc.ScenarioSet("Bootstrap")
+ scenmaker.ScenariosFromBoostrap(bootscens, numtomake)
+ for s in bootscens.ScensIterator():
+ print("{}, {}".format(s.name, s.probability))
+ for n,v in s.ThetaVals.items():
+ print(" {}={}".format(n, v))
+
+if __name__ == "__main__":
+ main(".")
diff --git a/pyomo/contrib/parmest/examples/semibatch/semibatch.py b/pyomo/contrib/parmest/examples/semibatch/semibatch.py
index 466e244d5f5..7c267e65bff 100644
--- a/pyomo/contrib/parmest/examples/semibatch/semibatch.py
+++ b/pyomo/contrib/parmest/examples/semibatch/semibatch.py
@@ -1,3 +1,12 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
"""
Semibatch model, based on Nicholson et al. (2018). pyomo.dae: A modeling and
automatic discretization framework for optimization with di
diff --git a/pyomo/contrib/parmest/graphics.py b/pyomo/contrib/parmest/graphics.py
index bb8c95d7712..c54a01ae995 100644
--- a/pyomo/contrib/parmest/graphics.py
+++ b/pyomo/contrib/parmest/graphics.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
try:
import numpy as np
import pandas as pd
@@ -15,8 +25,8 @@
def _get_variables(ax,columns):
sps = ax.get_subplotspec()
- nx = sps.get_geometry()[0]
- ny = sps.get_geometry()[1]
+ nx = sps.get_geometry()[1]
+ ny = sps.get_geometry()[0]
cell = sps.get_geometry()[2]
xloc = int(np.mod(cell,nx))
yloc = int(np.mod((cell-xloc)/nx, ny))
@@ -81,20 +91,19 @@ def _add_scatter(x,y,color,label,columns,theta_star):
ax.scatter(theta_star[xvar], theta_star[yvar], c=color, s=35)
-def _add_rectangle_CI(x,y,color,label,columns,alpha):
+def _add_rectangle_CI(x,y,color,label,columns,lower_bound,upper_bound):
ax = plt.gca()
xvar, yvar, loc = _get_variables(ax,columns)
-
- tval = stats.t.ppf(1-(1-alpha)/2, len(x)-1) # Two-tail
- xm = x.mean()
- ym = y.mean()
- xs = x.std()
- ys = y.std()
-
- ax.plot([xm-tval*xs, xm+tval*xs], [ym-tval*ys, ym-tval*ys], color=color)
- ax.plot([xm+tval*xs, xm+tval*xs], [ym-tval*ys, ym+tval*ys], color=color)
- ax.plot([xm+tval*xs, xm-tval*xs], [ym+tval*ys, ym+tval*ys], color=color)
- ax.plot([xm-tval*xs, xm-tval*xs], [ym+tval*ys, ym-tval*ys], color=color)
+
+ xmin = lower_bound[xvar]
+ ymin = lower_bound[yvar]
+ xmax = upper_bound[xvar]
+ ymax = upper_bound[yvar]
+
+ ax.plot([xmin, xmax], [ymin, ymin], color=color)
+ ax.plot([xmax, xmax], [ymin, ymax], color=color)
+ ax.plot([xmax, xmin], [ymax, ymax], color=color)
+ ax.plot([xmin, xmin], [ymax, ymin], color=color)
def _add_scipy_dist_CI(x,y,color,label,columns,ncells,alpha,dist,theta_star):
@@ -136,6 +145,7 @@ def _add_scipy_dist_CI(x,y,color,label,columns,ncells,alpha,dist,theta_star):
def _add_obj_contour(x,y,color,label,columns,data,theta_star):
ax = plt.gca()
xvar, yvar, loc = _get_variables(ax,columns)
+
try:
X, Y, Z = _get_data_slice(xvar,yvar,columns,data,theta_star)
@@ -158,8 +168,11 @@ def _add_LR_contour(x,y,color,label,columns,data,theta_star,threshold):
plt.tricontour(triang,Z,[threshold], colors='r')
-def _set_axis_limits(g, axis_limits, theta_vals):
+def _set_axis_limits(g, axis_limits, theta_vals, theta_star):
+ if theta_star is not None:
+ theta_vals = theta_vals.append(theta_star, ignore_index=True)
+
if axis_limits is None:
axis_limits = {}
for col in theta_vals.columns:
@@ -175,32 +188,39 @@ def _set_axis_limits(g, axis_limits, theta_vals):
ax.set_xlim(axis_limits[xvar])
else: # on diagonal
ax.set_xlim(axis_limits[xvar])
-
+
def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
- axis_limits=None, add_obj_contour=True,
- add_legend=True, filename=None, return_scipy_distributions=False):
+ axis_limits=None, title=None, add_obj_contour=True,
+ add_legend=True, filename=None):
"""
- Plot pairwise relationship for theta values, and optionally confidence
- intervals and results from likelihood ratio tests
+ Plot pairwise relationship for theta values, and optionally alpha-level
+ confidence intervals and objective value contours
Parameters
----------
theta_values: DataFrame, columns = variable names and (optionally) 'obj' and alpha values
- Theta values and (optionally) an objective value and results from the
- likelihood ratio test
- theta_star: dict, keys = variable names, optional
+ Theta values and (optionally) an objective value and results from
+ leaveNout_bootstrap_test, likelihood_ratio_test, or
+ confidence_region_test
+ theta_star: dict or Series, keys = variable names, optional
Theta* (or other individual values of theta, also used to
slice higher dimensional contour intervals in 2D)
alpha: float, optional
- Confidence interval value
+ Confidence interval value, if an alpha value is given and the
+ distributions list is empty, the data will be filtered by True/False
+ values using the column name whose value equals alpha (see results from
+ leaveNout_bootstrap_test, likelihood_ratio_test, or
+ confidence_region_test)
distributions: list of strings, optional
- Statistical distribution used for confidence intervals,
+ Statistical distribution used to define a confidence region,
options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde, and
'Rect' for rectangular.
Confidence interval is a 2D slice, using linear interpolation at theta*.
axis_limits: dict, optional
Axis limits in the format {variable: [min, max]}
+ title: string, optional
+ Plot title
add_obj_contour: bool, optional
Add a contour plot using the column 'obj' in theta_values.
Contour plot is a 2D slice, using linear interpolation at theta*.
@@ -208,30 +228,36 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
Add a legend to the plot
filename: string, optional
Filename used to save the figure
- return_scipy_distributions: bool, optional
- Return the scipy distributions for MVN and KDE
-
- Returns
- ----------
- (mvn_dist, kde_dist): tuple
- If return_scipy_distributions = True, return the MVN and KDE scipy
- distributions
"""
-
+ assert isinstance(theta_values, pd.DataFrame)
+ assert isinstance(theta_star, (type(None), dict, pd.Series, pd.DataFrame))
+ assert isinstance(alpha, (type(None), int, float))
+ assert isinstance(distributions, list)
+ assert set(distributions).issubset(set(['MVN', 'KDE', 'Rect']))
+ assert isinstance(axis_limits, (type(None), dict))
+ assert isinstance(title, (type(None), str))
+ assert isinstance(add_obj_contour, bool)
+ assert isinstance(filename, (type(None), str))
+
if len(theta_values) == 0:
return('Empty data')
if isinstance(theta_star, dict):
theta_star = pd.Series(theta_star)
-
- theta_names = [col for col in theta_values.columns if (col not in ['obj']) and (not isinstance(col, float))]
+ if isinstance(theta_star, pd.DataFrame):
+ theta_star = theta_star.loc[0,:]
+
+ theta_names = [col for col in theta_values.columns if (col not in ['obj'])
+ and (not isinstance(col, float)) and (not isinstance(col, int))]
- filter_data_by_alpha = False
- if (alpha is not None) and (alpha in theta_values.columns):
- filter_data_by_alpha = True
+ # Filter data by alpha
+ if (alpha in theta_values.columns) and (len(distributions) == 0):
thetas = theta_values.loc[theta_values[alpha] == True, theta_names]
else:
thetas = theta_values[theta_names]
+ if theta_star is not None:
+ theta_star = theta_star[theta_names]
+
legend_elements = []
g = sns.PairGrid(thetas)
@@ -242,7 +268,7 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
# Plot filled contours using all theta values based on obj
if 'obj' in theta_values.columns and add_obj_contour:
g.map_offdiag(_add_obj_contour, columns=theta_names, data=theta_values,
- theta_star=theta_star)
+ theta_star=theta_star)
# Plot thetas
g.map_offdiag(plt.scatter, s=10)
@@ -252,6 +278,7 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
# Plot theta*
if theta_star is not None:
g.map_offdiag(_add_scatter, color='k', columns=theta_names, theta_star=theta_star)
+
legend_elements.append(Line2D([0], [0], marker='o', color='w', label='theta*',
markerfacecolor='k', markersize=6))
@@ -260,43 +287,38 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
if (alpha is not None) and (len(distributions) > 0):
if theta_star is None:
- print('theta* not defined, condifence interval slice is at mean value of theta')
+ print("""theta_star is not defined, confidence region slice will be
+ plotted at the mean value of theta""")
theta_star = thetas.mean()
-
- if filter_data_by_alpha:
- alpha = 1 # Data is already filtered by alpha
mvn_dist = None
kde_dist = None
for i, dist in enumerate(distributions):
if dist == 'Rect':
+ lb, ub = fit_rect_dist(thetas, alpha)
g.map_offdiag(_add_rectangle_CI, color=colors[i], columns=theta_names,
- alpha=alpha)
+ lower_bound=lb, upper_bound=ub)
legend_elements.append(Line2D([0], [0], color=colors[i], lw=1, label=dist))
elif dist == 'MVN':
- mvn_dist = stats.multivariate_normal(thetas.mean(),
- thetas.cov(), allow_singular=True)
+ mvn_dist = fit_mvn_dist(thetas)
Z = mvn_dist.pdf(thetas)
- score = stats.scoreatpercentile(Z.transpose(), (1-alpha)*100)
+ score = stats.scoreatpercentile(Z, (1-alpha)*100)
g.map_offdiag(_add_scipy_dist_CI, color=colors[i], columns=theta_names,
ncells=100, alpha=score, dist=mvn_dist,
theta_star=theta_star)
legend_elements.append(Line2D([0], [0], color=colors[i], lw=1, label=dist))
elif dist == 'KDE':
- kde_dist = stats.gaussian_kde(thetas.transpose().values)
+ kde_dist = fit_kde_dist(thetas)
Z = kde_dist.pdf(thetas.transpose())
- score = stats.scoreatpercentile(Z.transpose(), (1-alpha)*100)
+ score = stats.scoreatpercentile(Z, (1-alpha)*100)
g.map_offdiag(_add_scipy_dist_CI, color=colors[i], columns=theta_names,
ncells=100, alpha=score, dist=kde_dist,
theta_star=theta_star)
legend_elements.append(Line2D([0], [0], color=colors[i], lw=1, label=dist))
- else:
- print('Invalid distribution')
-
- _set_axis_limits(g, axis_limits, thetas)
+ _set_axis_limits(g, axis_limits, thetas, theta_star)
for ax in g.axes.flatten():
ax.ticklabel_format(style='sci', scilimits=(-2,2), axis='both')
@@ -305,12 +327,208 @@ def pairwise_plot(theta_values, theta_star=None, alpha=None, distributions=[],
xvar, yvar, loc = _get_variables(ax, theta_names)
if loc == (len(theta_names)-1,0):
ax.legend(handles=legend_elements, loc='best', prop={'size': 8})
+ if title:
+ g.fig.subplots_adjust(top=0.9)
+ g.fig.suptitle(title)
+
+ # Work in progress
+ # Plot lower triangle graphics in separate figures, useful for presentations
+ lower_triangle_only = False
+ if lower_triangle_only:
+ for ax in g.axes.flatten():
+ xvar, yvar, (xloc, yloc) = _get_variables(ax, theta_names)
+ if xloc < yloc: # lower triangle
+ ax.remove()
+
+ ax.set_xlabel(xvar)
+ ax.set_ylabel(yvar)
+
+ fig = plt.figure()
+ ax.figure=fig
+ fig.axes.append(ax)
+ fig.add_axes(ax)
+
+ f, dummy = plt.subplots()
+ bbox = dummy.get_position()
+ ax.set_position(bbox)
+ dummy.remove()
+ plt.close(f)
+ ax.tick_params(reset=True)
+
+ if add_legend:
+ ax.legend(handles=legend_elements, loc='best', prop={'size': 8})
+
+ plt.close(g.fig)
+
if filename is None:
plt.show()
else:
plt.savefig(filename)
plt.close()
- if return_scipy_distributions:
- return mvn_dist, kde_dist
+
+def fit_rect_dist(theta_values, alpha):
+ """
+ Fit an alpha-level rectangular distribution to theta values
+
+ Parameters
+ ----------
+ theta_values: DataFrame, columns = variable names
+ Theta values
+    alpha: float
+ Confidence interval value
+
+ Returns
+ ---------
+ tuple containing lower bound and upper bound for each variable
+ """
+ assert isinstance(theta_values, pd.DataFrame)
+ assert isinstance(alpha, (int, float))
+
+ tval = stats.t.ppf(1-(1-alpha)/2, len(theta_values)-1) # Two-tail
+ m = theta_values.mean()
+ s = theta_values.std()
+ lower_bound = m-tval*s
+ upper_bound = m+tval*s
+
+ return lower_bound, upper_bound
+
+def fit_mvn_dist(theta_values):
+ """
+ Fit a multivariate normal distribution to theta values
+
+ Parameters
+ ----------
+ theta_values: DataFrame, columns = variable names
+ Theta values
+
+ Returns
+ ---------
+ scipy.stats.multivariate_normal distribution
+ """
+ assert isinstance(theta_values, pd.DataFrame)
+
+ dist = stats.multivariate_normal(theta_values.mean(),
+ theta_values.cov(), allow_singular=True)
+ return dist
+
+def fit_kde_dist(theta_values):
+ """
+ Fit a Gaussian kernel-density distribution to theta values
+
+ Parameters
+ ----------
+ theta_values: DataFrame, columns = variable names
+ Theta values
+
+ Returns
+ ---------
+ scipy.stats.gaussian_kde distribution
+ """
+ assert isinstance(theta_values, pd.DataFrame)
+
+ dist = stats.gaussian_kde(theta_values.transpose().values)
+
+ return dist
+
+def _get_grouped_data(data1, data2, normalize, group_names):
+ if normalize:
+ data_median = data1.median()
+ data_std = data1.std()
+ data1 = (data1 - data_median)/data_std
+ data2 = (data2 - data_median)/data_std
+
+ # Combine data1 and data2 to create a grouped histogram
+ data = pd.concat({group_names[0]: data1,
+ group_names[1]: data2})
+ data.reset_index(level=0, inplace=True)
+ data.rename(columns={'level_0': 'set'}, inplace=True)
+
+ data = data.melt(id_vars='set', value_vars=data1.columns, var_name='columns')
+
+ return data
+
+def grouped_boxplot(data1, data2, normalize=False, group_names=['data1', 'data2'],
+ filename=None):
+ """
+ Plot a grouped boxplot to compare two datasets
+
+ The datasets can be normalized by the median and standard deviation of data1.
+
+ Parameters
+ ----------
+ data1: DataFrame, columns = variable names
+ Data set
+ data2: DataFrame, columns = variable names
+ Data set
+ normalize : bool, optional
+ Normalize both datasets by the median and standard deviation of data1
+ group_names : list, optional
+ Names used in the legend
+ filename: string, optional
+ Filename used to save the figure
+ """
+ assert isinstance(data1, pd.DataFrame)
+ assert isinstance(data2, pd.DataFrame)
+ assert isinstance(normalize, bool)
+ assert isinstance(group_names, list)
+ assert isinstance(filename, (type(None), str))
+
+ data = _get_grouped_data(data1, data2, normalize, group_names)
+
+ plt.figure()
+ sns.boxplot(data=data, hue='set', y='value', x='columns',
+ order=data1.columns)
+
+ plt.gca().legend().set_title('')
+ plt.gca().set_xlabel('')
+ plt.gca().set_ylabel('')
+
+ if filename is None:
+ plt.show()
+ else:
+ plt.savefig(filename)
+ plt.close()
+
+def grouped_violinplot(data1, data2, normalize=False, group_names=['data1', 'data2'],
+ filename=None):
+ """
+ Plot a grouped violinplot to compare two datasets
+
+ The datasets can be normalized by the median and standard deviation of data1.
+
+ Parameters
+ ----------
+ data1: DataFrame, columns = variable names
+ Data set
+ data2: DataFrame, columns = variable names
+ Data set
+ normalize : bool, optional
+ Normalize both datasets by the median and standard deviation of data1
+ group_names : list, optional
+ Names used in the legend
+ filename: string, optional
+ Filename used to save the figure
+ """
+ assert isinstance(data1, pd.DataFrame)
+ assert isinstance(data2, pd.DataFrame)
+ assert isinstance(normalize, bool)
+ assert isinstance(group_names, list)
+ assert isinstance(filename, (type(None), str))
+
+ data = _get_grouped_data(data1, data2, normalize, group_names)
+
+ plt.figure()
+ sns.violinplot(data=data, hue='set', y='value', x='columns',
+ order=data1.columns, split=True)
+
+ plt.gca().legend().set_title('')
+ plt.gca().set_xlabel('')
+ plt.gca().set_ylabel('')
+
+ if filename is None:
+ plt.show()
+ else:
+ plt.savefig(filename)
+ plt.close()
diff --git a/pyomo/contrib/parmest/ipopt_solver_wrapper.py b/pyomo/contrib/parmest/ipopt_solver_wrapper.py
index e72534302f7..f5758397989 100644
--- a/pyomo/contrib/parmest/ipopt_solver_wrapper.py
+++ b/pyomo/contrib/parmest/ipopt_solver_wrapper.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import pyutilib.services
from pyomo.opt import TerminationCondition
diff --git a/pyomo/contrib/parmest/mpi_utils.py b/pyomo/contrib/parmest/mpi_utils.py
index 54b0f0a866b..7183461f443 100644
--- a/pyomo/contrib/parmest/mpi_utils.py
+++ b/pyomo/contrib/parmest/mpi_utils.py
@@ -1,3 +1,13 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
from collections import OrderedDict
import importlib
"""
diff --git a/pyomo/contrib/parmest/parmest.py b/pyomo/contrib/parmest/parmest.py
index 286e7cce683..0de5a6646d8 100644
--- a/pyomo/contrib/parmest/parmest.py
+++ b/pyomo/contrib/parmest/parmest.py
@@ -1,23 +1,36 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
import re
import importlib as im
import types
import json
-try:
- import numpy as np
- import pandas as pd
- from scipy import stats
- parmest_available = True
-except ImportError:
- parmest_available = False
+from itertools import combinations
+
+from pyomo.common.dependencies import (
+ numpy as np, numpy_available,
+ pandas as pd, pandas_available,
+ scipy, scipy_available,
+)
+parmest_available = numpy_available & pandas_available & scipy_available
import pyomo.environ as pyo
import pyomo.pysp.util.rapper as st
from pyomo.pysp.scenariotree.tree_structure_model import CreateAbstractScenarioTreeModel
from pyomo.opt import SolverFactory
+from pyomo.environ import Block
import pyomo.contrib.parmest.mpi_utils as mpiu
import pyomo.contrib.parmest.ipopt_solver_wrapper as ipopt_solver_wrapper
-from pyomo.contrib.parmest.graphics import pairwise_plot
+from pyomo.contrib.parmest.graphics import pairwise_plot, grouped_boxplot, grouped_violinplot, \
+ fit_rect_dist, fit_mvn_dist, fit_kde_dist
__version__ = 0.1
@@ -227,14 +240,13 @@ def _treemaker(scenlist):
"""
num_scenarios = len(scenlist)
- m = CreateAbstractScenarioTreeModel()
+ m = CreateAbstractScenarioTreeModel().create_instance()
m.Stages.add('Stage1')
m.Stages.add('Stage2')
m.Nodes.add('RootNode')
for i in scenlist:
m.Nodes.add('LeafNode_Experiment'+str(i))
m.Scenarios.add('Experiment'+str(i))
- m = m.create_instance()
m.NodeStage['RootNode'] = 'Stage1'
m.ConditionalProbability['RootNode'] = 1.0
for node in m.Nodes:
@@ -250,17 +262,17 @@ def _treemaker(scenlist):
def group_data(data, groupby_column_name, use_mean=None):
"""
- Group data by experiment/scenario
+ Group data by scenario
Parameters
----------
data: DataFrame
Data
groupby_column_name: strings
- Name of data column which contains experiment/scenario numbers
+ Name of data column which contains scenario numbers
use_mean: list of column names or None, optional
Name of data columns which should be reduced to a single value per
- experiment/scenario by taking the mean
+ scenario by taking the mean
Returns
----------
@@ -293,8 +305,7 @@ def __call__(self, model):
class Estimator(object):
"""
- Parameter estimation class. Provides methods for parameter estimation,
- bootstrap resampling, and likelihood ratio test.
+ Parameter estimation class
Parameters
----------
@@ -305,10 +316,10 @@ class Estimator(object):
Data that is used to build an instance of the Pyomo model and build
the objective function
theta_names: list of strings
- List of Vars to estimate
+ List of Var names to estimate
obj_function: function, optional
Function used to formulate parameter estimation objective, generally
- sum of squared error between measurments and model variables.
+ sum of squared error between measurements and model variables.
If no function is specified, the model is used
"as is" and should be defined with a "FirstStateCost" and
"SecondStageCost" expression that are used to build an objective
@@ -316,17 +327,25 @@ class Estimator(object):
tee: bool, optional
Indicates that ef solver output should be teed
diagnostic_mode: bool, optional
- if True, print diagnostics from the solver
+ If True, print diagnostics from the solver
+ solver_options: dict, optional
+ Provides options to the solver (also the name of an attribute)
"""
def __init__(self, model_function, data, theta_names, obj_function=None,
- tee=False, diagnostic_mode=False):
+ tee=False, diagnostic_mode=False, solver_options=None):
self.model_function = model_function
self.callback_data = data
- self.theta_names = theta_names
+
+ if len(theta_names) == 0:
+ self.theta_names = ['parmest_dummy_var']
+ else:
+ self.theta_names = theta_names
+
self.obj_function = obj_function
self.tee = tee
self.diagnostic_mode = diagnostic_mode
+ self.solver_options = solver_options
self._second_stage_cost_exp = "SecondStageCost"
self._numbers_list = list(range(len(data)))
@@ -339,13 +358,16 @@ def _create_parmest_model(self, data):
from pyomo.core import Objective
model = self.model_function(data)
-
+
+ if (len(self.theta_names) == 1) and (self.theta_names[0] == 'parmest_dummy_var'):
+ model.parmest_dummy_var = pyo.Var(initialize = 1.0)
+
for theta in self.theta_names:
try:
var_validate = eval('model.'+theta)
var_validate.fixed = False
except:
- print(theta +'is not a variable')
+ print(theta +' is not a variable')
if self.obj_function:
for obj in model.component_objects(Objective):
@@ -392,7 +414,8 @@ def _instance_creation_callback(self, experiment_number=None, cb_data=None):
return model
- def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", bootlist=None):
+ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt",
+ return_values=[], bootlist=None):
"""
Set up all thetas as first stage Vars, return resulting theta
values as well as the objective function value.
@@ -430,10 +453,11 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", bootlist=None):
stsolver = st.StochSolver(fsfile = "pyomo.contrib.parmest.parmest",
fsfct = "_pysp_instance_creation_callback",
tree_model = tree_model)
+
if solver == "ef_ipopt":
- sopts = {}
- sopts['max_iter'] = 6000
- ef_sol = stsolver.solve_ef('ipopt', sopts=sopts, tee=self.tee)
+ ef_sol = stsolver.solve_ef('ipopt',
+ sopts=self.solver_options,
+ tee=self.tee)
if self.diagnostic_mode:
print(' Solver termination condition = ',
str(ef_sol.solver.termination_condition))
@@ -444,6 +468,21 @@ def _Q_opt(self, ThetaVals=None, solver="ef_ipopt", bootlist=None):
thetavals[name] = solval
objval = stsolver.root_E_obj()
+
+ if len(return_values) > 0:
+ var_values = []
+ for exp_i in stsolver.ef_instance.component_objects(Block, descend_into=False):
+ vals = {}
+ for var in return_values:
+ exp_i_var = eval('exp_i.'+ str(var))
+ temp = [_.value for _ in exp_i_var.itervalues()]
+ if len(temp) == 1:
+ vals[var] = temp[0]
+ else:
+ vals[var] = temp
+ var_values.append(vals)
+ var_values = pd.DataFrame(var_values)
+ return objval, thetavals, var_values
return objval, thetavals
@@ -598,155 +637,283 @@ def _Q_at_theta(self, thetavals):
objval = pyo.value(objobject)
totobj += objval
retval = totobj / len(self._numbers_list) # -1??
- return retval, thetavals, WorstStatus
+ return retval, thetavals, WorstStatus
- def _Estimate_Hessian(self, thetavals, epsilon=1e-1):
- """
- Unused, Crude estimate of the Hessian of Q at thetavals
-
- Parameters
- ----------
- thetavals: dict
- A dictionary of values for theta
-
- Return
- ------
- FirstDeriv: dict
- Dictionary of scaled first differences
- HessianDict: dict
- Matrix (in dicionary form) of Hessian values
- """
+ def _get_sample_list(self, samplesize, num_samples, replacement=True):
- def firstdiffer(tvals, tstr):
- tvals[tstr] = tvals[tstr] - epsilon / 2
- lval, foo, w = self.Q_at_theta(tvals)
- tvals[tstr] = tvals[tstr] + epsilon / 2
- rval, foo, w = self.Q_at_theta(tvals)
- tvals[tstr] = thetavals[tstr]
- return rval - lval
-
- # make a working copy of thetavals and get the Hessian dict started
- tvals = {}
- Hessian = {}
- for tstr in thetavals:
- tvals[tstr] = thetavals[tstr]
- Hessian[tstr] = {}
-
- # get "basline" first differences
- firstdiffs = {}
- for tstr in tvals:
- # TBD, dlw jan 2018: check for bounds on theta
- print("Debug firstdiffs for ",tstr)
- firstdiffs[tstr] = firstdiffer(tvals, tstr)
-
- # now get the second differences
- # as of Jan 2018, do not assume symmetry so it can be "checked."
- for firstdim in tvals:
- for seconddim in tvals:
- print("Debug H for ",firstdim,seconddim)
- tvals[seconddim] = thetavals[seconddim] + epsilon
- d2 = firstdiffer(tvals, firstdim)
- Hessian[firstdim][seconddim] = \
- (d2 - firstdiffs[firstdim]) / (epsilon * epsilon)
- tvals[seconddim] = thetavals[seconddim]
-
- FirstDeriv = {}
- for tstr in thetavals:
- FirstDeriv[tstr] = firstdiffs[tstr] / epsilon
-
- return FirstDeriv, Hessian
+ samplelist = list()
+
+ if num_samples is None:
+ # This could get very large
+ for i, l in enumerate(combinations(self._numbers_list, samplesize)):
+ samplelist.append((i, np.sort(l)))
+ else:
+ for i in range(num_samples):
+ attempts = 0
+ unique_samples = 0 # check for duplicates in each sample
+ duplicate = False # check for duplicates between samples
+ while (unique_samples <= len(self.theta_names)) and (not duplicate):
+ sample = np.random.choice(self._numbers_list,
+ samplesize,
+ replace=replacement)
+ sample = np.sort(sample).tolist()
+ unique_samples = len(np.unique(sample))
+ if sample in samplelist:
+ duplicate = True
+
+ attempts += 1
+ if attempts > num_samples: # arbitrary timeout limit
+ raise RuntimeError("""Internal error: timeout constructing
+ a sample, the dim of theta may be too
+ close to the samplesize""")
+ samplelist.append((i, sample))
+
+ return samplelist
- def theta_est(self, solver="ef_ipopt", bootlist=None):
+ def theta_est(self, solver="ef_ipopt", return_values=[], bootlist=None):
"""
- Run parameter estimation using all data
+ Parameter estimation using all scenarios in the data
Parameters
----------
solver: string, optional
"ef_ipopt" or "k_aug". Default is "ef_ipopt".
-
+ return_values: list, optional
+ List of Variable names used to return values from the model
+ bootlist: list, optional
+ List of bootstrap sample numbers, used internally when calling theta_est_bootstrap
+
Returns
-------
objectiveval: float
The objective function value
thetavals: dict
A dictionary of all values for theta
+ variable values: pd.DataFrame
+ Variable values for each variable name in return_values (only for ef_ipopt)
Hessian: dict
A dictionary of dictionaries for the Hessian.
- The Hessian is not returned if the solver is ef.
+ The Hessian is not returned if the solver is ef_ipopt.
"""
- return self._Q_opt(solver=solver, bootlist=bootlist)
+ assert isinstance(solver, str)
+ assert isinstance(return_values, list)
+ assert isinstance(bootlist, (type(None), list))
+
+ return self._Q_opt(solver=solver, return_values=return_values,
+ bootlist=bootlist)
- def theta_est_bootstrap(self, N, samplesize=None, replacement=True, seed=None, return_samples=False):
+ def theta_est_bootstrap(self, bootstrap_samples, samplesize=None,
+ replacement=True, seed=None, return_samples=False):
"""
- Run parameter estimation using N bootstap samples
+ Parameter estimation using bootstrap resampling of the data
Parameters
----------
- N: int
+ bootstrap_samples: int
Number of bootstrap samples to draw from the data
samplesize: int or None, optional
- Sample size, if None samplesize will be set to the number of experiments
+ Size of each bootstrap sample. If samplesize=None, samplesize will be
+ set to the number of samples in the data
replacement: bool, optional
Sample with or without replacement
seed: int or None, optional
- Set the random seed
+ Random seed
return_samples: bool, optional
- Return a list of experiment numbers used in each bootstrap estimation
+ Return a list of sample numbers used in each bootstrap estimation
Returns
-------
bootstrap_theta: DataFrame
- Theta values for each bootstrap sample and (if return_samples = True)
+ Theta values for each sample and (if return_samples = True)
the sample numbers used in each estimation
"""
- bootstrap_theta = list()
+ assert isinstance(bootstrap_samples, int)
+ assert isinstance(samplesize, (type(None), int))
+ assert isinstance(replacement, bool)
+ assert isinstance(seed, (type(None), int))
+ assert isinstance(return_samples, bool)
if samplesize is None:
samplesize = len(self._numbers_list)
+
if seed is not None:
np.random.seed(seed)
+
+ global_list = self._get_sample_list(samplesize, bootstrap_samples,
+ replacement)
+
+ task_mgr = mpiu.ParallelTaskManager(bootstrap_samples)
+ local_list = task_mgr.global_to_local_data(global_list)
+
+ # Reset numbers_list
+ self._numbers_list = list(range(samplesize))
+
+ bootstrap_theta = list()
+ for idx, sample in local_list:
+ objval, thetavals = self.theta_est(bootlist=list(sample))
+ thetavals['samples'] = sample
+ bootstrap_theta.append(thetavals)
- task_mgr = mpiu.ParallelTaskManager(N)
- global_bootlist = list()
- for i in range(N):
- j = unique_samples = 0
- while unique_samples <= len(self.theta_names):
- bootlist = np.random.choice(self._numbers_list,
- samplesize,
- replace=replacement)
- unique_samples = len(np.unique(bootlist))
- j += 1
- if j > N: # arbitrary timeout limit
- raise RuntimeError("Internal error: timeout in bootstrap"+\
- " constructing a sample; possible hint:"+\
- " the dim of theta may be too close to N")
- global_bootlist.append((i, bootlist))
-
- local_bootlist = task_mgr.global_to_local_data(global_bootlist)
-
- for idx, bootlist in local_bootlist:
- #print('Bootstrap Run Number: ', idx + 1, ' out of ', N)
- objval, thetavals = self.theta_est(bootlist=bootlist)
- thetavals['samples'] = bootlist
- bootstrap_theta.append(thetavals)#, ignore_index=True)
+ # Reset numbers_list (back to original)
+ self._numbers_list = list(range(len(self.callback_data)))
global_bootstrap_theta = task_mgr.allgather_global_data(bootstrap_theta)
- bootstrap_theta = pd.DataFrame(global_bootstrap_theta)
- #bootstrap_theta.set_index('samples', inplace=True)
+ bootstrap_theta = pd.DataFrame(global_bootstrap_theta)
if not return_samples:
del bootstrap_theta['samples']
-
+
return bootstrap_theta
+ def theta_est_leaveNout(self, lNo, lNo_samples=None, seed=None,
+ return_samples=False):
+ """
+ Parameter estimation where N data points are left out of each sample
+
+ Parameters
+ ----------
+ lNo: int
+ Number of data points to leave out for parameter estimation
+    lNo_samples: int or None, optional
+ Number of leave-N-out samples. If lNo_samples=None, the maximum
+ number of combinations will be used
+ seed: int or None, optional
+ Random seed
+ return_samples: bool, optional
+ Return a list of sample numbers that were left out
+
+ Returns
+ -------
+ lNo_theta: DataFrame
+ Theta values for each sample and (if return_samples = True)
+ the sample numbers left out of each estimation
+ """
+ assert isinstance(lNo, int)
+ assert isinstance(lNo_samples, (type(None), int))
+ assert isinstance(seed, (type(None), int))
+ assert isinstance(return_samples, bool)
+
+ samplesize = len(self._numbers_list)-lNo
+
+ if seed is not None:
+ np.random.seed(seed)
+
+ global_list = self._get_sample_list(samplesize, lNo_samples, replacement=False)
+
+ task_mgr = mpiu.ParallelTaskManager(len(global_list))
+ local_list = task_mgr.global_to_local_data(global_list)
+
+ # Reset numbers_list
+ self._numbers_list = list(range(samplesize))
+
+ lNo_theta = list()
+ for idx, sample in local_list:
+ objval, thetavals = self.theta_est(bootlist=list(sample))
+ lNo_s = list(set(range(len(self.callback_data))) - set(sample))
+ thetavals['lNo'] = np.sort(lNo_s)
+ lNo_theta.append(thetavals)
+
+ # Reset numbers_list (back to original)
+ self._numbers_list = list(range(len(self.callback_data)))
+
+ global_bootstrap_theta = task_mgr.allgather_global_data(lNo_theta)
+ lNo_theta = pd.DataFrame(global_bootstrap_theta)
+
+ if not return_samples:
+ del lNo_theta['lNo']
+
+ return lNo_theta
+
+
+ def leaveNout_bootstrap_test(self, lNo, lNo_samples, bootstrap_samples,
+ distribution, alphas, seed=None):
+ """
+ Leave-N-out bootstrap test to compare theta values where N data points are
+ left out to a bootstrap analysis using the remaining data,
+ results indicate if theta is within a confidence region
+ determined by the bootstrap analysis
+
+ Parameters
+ ----------
+ lNo: int
+ Number of data points to leave out for parameter estimation
+    lNo_samples: int or None, optional
+ Leave-N-out sample size. If lNo_samples=None, the maximum number
+ of combinations will be used
+    bootstrap_samples: int
+ Bootstrap sample size
+ distribution: string
+ Statistical distribution used to define a confidence region,
+ options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde,
+ and 'Rect' for rectangular.
+ alphas: list
+ List of alpha values used to determine if theta values are inside
+ or outside the region.
+ seed: int or None, optional
+ Random seed
+
+ Returns
+ ----------
+ List of tuples with one entry per lNo_sample:
+
+ * The first item in each tuple is the list of N samples that are left
+ out.
+ * The second item in each tuple is a DataFrame of theta estimated using
+ the N samples.
+ * The third item in each tuple is a DataFrame containing results from
+ the bootstrap analysis using the remaining samples.
+
+ For each DataFrame a column is added for each value of alpha which
+ indicates if the theta estimate is in (True) or out (False) of the
+ alpha region for a given distribution (based on the bootstrap results)
+ """
+ assert isinstance(lNo, int)
+ assert isinstance(lNo_samples, (type(None), int))
+ assert isinstance(bootstrap_samples, int)
+ assert distribution in ['Rect', 'MVN', 'KDE']
+ assert isinstance(alphas, list)
+ assert isinstance(seed, (type(None), int))
+
+ if seed is not None:
+ np.random.seed(seed)
+
+ data = self.callback_data.copy()
+
+ global_list = self._get_sample_list(lNo, lNo_samples, replacement=False)
+
+ results = []
+ for idx, sample in global_list:
+
+ # Reset callback_data and numbers_list
+ self.callback_data = data.loc[sample,:]
+ self._numbers_list = self.callback_data.index
+ obj, theta = self.theta_est()
+
+ # Reset callback_data and numbers_list
+ self.callback_data = data.drop(index=sample)
+ self._numbers_list = self.callback_data.index
+ bootstrap_theta = self.theta_est_bootstrap(bootstrap_samples)
+
+ training, test = self.confidence_region_test(bootstrap_theta,
+ distribution=distribution, alphas=alphas,
+ test_theta_values=theta)
+
+ results.append((sample, test, training))
+
+ # Reset callback_data and numbers_list (back to original)
+ self.callback_data = data
+ self._numbers_list = self.callback_data.index
+
+ return results
+
+
def objective_at_theta(self, theta_values):
"""
- Compute the objective over a range of theta values
+ Objective value for each theta
Parameters
----------
@@ -756,9 +923,11 @@ def objective_at_theta(self, theta_values):
Returns
-------
obj_at_theta: DataFrame
- Objective values for each theta value (infeasible solutions are
+ Objective value for each theta (infeasible solutions are
omitted).
"""
+ assert isinstance(theta_values, pd.DataFrame)
+
# for parallel code we need to use lists and dicts in the loop
theta_names = theta_values.columns
all_thetas = theta_values.to_dict('records')
@@ -776,42 +945,46 @@ def objective_at_theta(self, theta_values):
global_all_obj = task_mgr.allgather_global_data(all_obj)
dfcols = list(theta_names) + ['obj']
obj_at_theta = pd.DataFrame(data=global_all_obj, columns=dfcols)
-
+
return obj_at_theta
- def likelihood_ratio_test(self, obj_at_theta, obj_value, alpha,
+ def likelihood_ratio_test(self, obj_at_theta, obj_value, alphas,
return_thresholds=False):
"""
- Compute the likelihood ratio for each value of alpha
+ Likelihood ratio test to identify theta values within a confidence
+ region using the :math:`\chi^2` distribution
Parameters
----------
obj_at_theta: DataFrame, columns = theta_names + 'obj'
Objective values for each theta value (returned by
objective_at_theta)
-
- obj_value: float
+ obj_value: int or float
Objective value from parameter estimation using all data
-
- alpha: list
+ alphas: list
List of alpha values to use in the chi2 test
-
return_thresholds: bool, optional
Return the threshold value for each alpha
Returns
-------
LR: DataFrame
- Objective values for each theta value along wit True or False for
+ Objective values for each theta value along with True or False for
+ each alpha
thresholds: dictionary
If return_threshold = True, the thresholds are also returned.
"""
+ assert isinstance(obj_at_theta, pd.DataFrame)
+ assert isinstance(obj_value, (int, float))
+ assert isinstance(alphas, list)
+ assert isinstance(return_thresholds, bool)
+
LR = obj_at_theta.copy()
S = len(self.callback_data)
thresholds = {}
- for a in alpha:
- chi2_val = stats.chi2.ppf(a, 2)
+ for a in alphas:
+ chi2_val = scipy.stats.chi2.ppf(a, 2)
thresholds[a] = obj_value * ((chi2_val / (S - 2)) + 1)
LR[a] = LR['obj'] < thresholds[a]
@@ -819,3 +992,87 @@ def likelihood_ratio_test(self, obj_at_theta, obj_value, alpha,
return LR, thresholds
else:
return LR
+
+ def confidence_region_test(self, theta_values, distribution, alphas,
+ test_theta_values=None):
+ """
+ Confidence region test to determine if theta values are within a
+ rectangular, multivariate normal, or Gaussian kernel density distribution
+ for a range of alpha values
+
+ Parameters
+ ----------
+ theta_values: DataFrame, columns = theta_names
+ Theta values used to generate a confidence region
+ (generally returned by theta_est_bootstrap)
+ distribution: string
+ Statistical distribution used to define a confidence region,
+ options = 'MVN' for multivariate_normal, 'KDE' for gaussian_kde,
+ and 'Rect' for rectangular.
+ alphas: list
+ List of alpha values used to determine if theta values are inside
+ or outside the region.
+ test_theta_values: dictionary or DataFrame, keys/columns = theta_names, optional
+ Additional theta values that are compared to the confidence region
+ to determine if they are inside or outside.
+
+ Returns
+ -------
+ training_results: DataFrame
+ Theta value used to generate the confidence region along with True
+ (inside) or False (outside) for each alpha
+ test_results: DataFrame
+ If test_theta_values is not None, returns test theta value along
+ with True (inside) or False (outside) for each alpha
+ """
+ assert isinstance(theta_values, pd.DataFrame)
+ assert distribution in ['Rect', 'MVN', 'KDE']
+ assert isinstance(alphas, list)
+ assert isinstance(test_theta_values, (type(None), dict, pd.DataFrame))
+
+ if isinstance(test_theta_values, dict):
+ test_theta_values = pd.Series(test_theta_values).to_frame().transpose()
+
+ training_results = theta_values.copy()
+
+ if test_theta_values is not None:
+ test_result = test_theta_values.copy()
+
+ for a in alphas:
+
+ if distribution == 'Rect':
+ lb, ub = fit_rect_dist(theta_values, a)
+ training_results[a] = ((theta_values > lb).all(axis=1) & \
+ (theta_values < ub).all(axis=1))
+
+ if test_theta_values is not None:
+ # use upper and lower bound from the training set
+ test_result[a] = ((test_theta_values > lb).all(axis=1) & \
+ (test_theta_values < ub).all(axis=1))
+
+ elif distribution == 'MVN':
+ dist = fit_mvn_dist(theta_values)
+ Z = dist.pdf(theta_values)
+ score = scipy.stats.scoreatpercentile(Z, (1-a)*100)
+ training_results[a] = (Z >= score)
+
+ if test_theta_values is not None:
+ # use score from the training set
+ Z = dist.pdf(test_theta_values)
+ test_result[a] = (Z >= score)
+
+ elif distribution == 'KDE':
+ dist = fit_kde_dist(theta_values)
+ Z = dist.pdf(theta_values.transpose())
+ score = scipy.stats.scoreatpercentile(Z, (1-a)*100)
+ training_results[a] = (Z >= score)
+
+ if test_theta_values is not None:
+ # use score from the training set
+ Z = dist.pdf(test_theta_values.transpose())
+ test_result[a] = (Z >= score)
+
+ if test_theta_values is not None:
+ return training_results, test_result
+ else:
+ return training_results
diff --git a/pyomo/contrib/parmest/scenariocreator.py b/pyomo/contrib/parmest/scenariocreator.py
new file mode 100644
index 00000000000..46e946c555f
--- /dev/null
+++ b/pyomo/contrib/parmest/scenariocreator.py
@@ -0,0 +1,160 @@
+# scenariocreator.py - Classes to create and deliver scenarios using parmest
+# DLW March 2020
+
+import json
+import pyomo.contrib.parmest.parmest as parmest
+import pyomo.environ as pyo
+
+
+class ScenarioSet(object):
+ """
+ Class to hold scenario sets
+
+ Args:
+ name (str): name of the set (might be "")
+
+ """
+
+ def __init__(self, name):
+ # Note: If there was a use-case, the list could be a dataframe.
+ self._scens = list() # use a df instead?
+ self.name = name # might be ""
+
+
+ def _firstscen(self):
+ # Return the first scenario for testing and to get Theta names.
+ assert(len(self._scens) > 0)
+ return self._scens[0]
+
+
+ def ScensIterator(self):
+ """ Usage: for scenario in ScensIterator()"""
+ return iter(self._scens)
+
+
+ def ScenarioNumber(self, scennum):
+ """ Returns the scenario with the given, zero-based number"""
+ return self._scens[scennum]
+
+
+ def addone(self, scen):
+ """ Add a scenario to the set
+
+ Args:
+ scen (ParmestScen): the scenario to add
+ """
+ assert(isinstance(self._scens, list))
+ self._scens.append(scen)
+
+
+ def append_bootstrap(self, bootstrap_theta):
+ """ Append a bootstrap theta df to the scenario set; equally likely
+
+ Args:
+ bootstrap_theta (dataframe): created by the bootstrap
+ Note: this can be cleaned up a lot when the list becomes a df,
+ which is why I put it in the ScenarioSet class.
+ """
+ assert(len(bootstrap_theta) > 0)
+ prob = 1. / len(bootstrap_theta)
+
+ # dict of ThetaVal dicts
+ dfdict = bootstrap_theta.to_dict(orient='index')
+
+ for index, ThetaVals in dfdict.items():
+ name = "Boostrap"+str(index)
+ self.addone(ParmestScen(name, ThetaVals, prob))
+
+
+ def write_csv(self, filename):
+ """ write a csv file with the scenarios in the set
+
+ Args:
+ filename (str): full path and full name of file
+ """
+ if len(self._scens) == 0:
+ print ("Empty scenario set, not writing file={}".format(filename))
+ return
+ with open(filename, "w") as f:
+ f.write("Name,Probability")
+ for n in self._firstscen().ThetaVals.keys():
+ f.write(",{}".format(n))
+ f.write('\n')
+ for s in self.ScensIterator():
+ f.write("{},{}".format(s.name, s.probability))
+ for v in s.ThetaVals.values():
+ f.write(",{}".format(v))
+ f.write('\n')
+
+
+class ParmestScen(object):
+ """ A little container for scenarios; the Args are the attributes.
+
+ Args:
+ name (str): name for reporting; might be ""
+ ThetaVals (dict): ThetaVals[name]=val
+ probability (float): probability of occurrence "near" these ThetaVals
+ """
+
+ def __init__(self, name, ThetaVals, probability):
+ self.name = name
+ assert(isinstance(ThetaVals, dict))
+ self.ThetaVals = ThetaVals
+ self.probability = probability
+
+############################################################
+
+
+class ScenarioCreator(object):
+ """ Create scenarios from parmest.
+
+ Args:
+ pest (Estimator): the parmest object
+ solvername (str): name of the solver (e.g. "ipopt")
+
+ """
+
+ def __init__(self, pest, solvername):
+ self.pest = pest
+ self.solvername = solvername
+ self.experiment_numbers = pest._numbers_list
+
+
+ def ScenariosFromExperiments(self, addtoSet):
+ """Creates new self.Scenarios list using the experiments only.
+
+ Args:
+ addtoSet (ScenarioSet): the scenarios will be added to this set
+ Returns:
+ a ScenarioSet
+ """
+
+ assert(isinstance(addtoSet, ScenarioSet))
+ prob = 1. / len(self.pest._numbers_list)
+ for exp_num in self.pest._numbers_list:
+ ##print("Experiment number=", exp_num)
+ model = self.pest._instance_creation_callback(exp_num,
+ self.pest.callback_data)
+ opt = pyo.SolverFactory(self.solvername)
+ results = opt.solve(model) # solves and updates model
+ ## pyo.check_termination_optimal(results)
+ ThetaVals = dict()
+ for theta in self.pest.theta_names:
+ tvar = eval('model.'+theta)
+ tval = pyo.value(tvar)
+ ##print(" theta, tval=", tvar, tval)
+ ThetaVals[theta] = tval
+ addtoSet.addone(ParmestScen("ExpScen"+str(exp_num), ThetaVals, prob))
+
+ def ScenariosFromBoostrap(self, addtoSet, numtomake, seed=None):
+ """Creates new self.Scenarios list using bootstrap theta values.
+
+ Args:
+ addtoSet (ScenarioSet): the scenarios will be added to this set
+ numtomake (int) : number of scenarios to create
+ """
+
+ assert(isinstance(addtoSet, ScenarioSet))
+
+ bootstrap_thetas = self.pest.theta_est_bootstrap(numtomake, seed=seed)
+ addtoSet.append_bootstrap(bootstrap_thetas)
diff --git a/pyomo/contrib/parmest/tests/__init__.py b/pyomo/contrib/parmest/tests/__init__.py
index 8b137891791..6b39dd18d6a 100644
--- a/pyomo/contrib/parmest/tests/__init__.py
+++ b/pyomo/contrib/parmest/tests/__init__.py
@@ -1 +1,10 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
diff --git a/pyomo/contrib/parmest/tests/test_parmest.py b/pyomo/contrib/parmest/tests/test_parmest.py
index d13f37b8db6..7e58909f878 100644
--- a/pyomo/contrib/parmest/tests/test_parmest.py
+++ b/pyomo/contrib/parmest/tests/test_parmest.py
@@ -1,15 +1,28 @@
-# the matpolotlib stuff is to avoid $DISPLAY errors on Travis (DLW Oct 2018)
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
try:
import matplotlib
matplotlib.use('Agg')
except:
pass
-try:
- import numpy as np
- import pandas as pd
- imports_not_present = False
-except:
- imports_not_present = True
+from pyomo.common.dependencies import (
+ numpy as np, numpy_available,
+ pandas as pd, pandas_available,
+ scipy, scipy_available,
+)
+imports_present = numpy_available & pandas_available & scipy_available
+
+import platform
+is_osx = platform.mac_ver()[0] != ''
+
import pyutilib.th as unittest
import tempfile
import sys
@@ -17,6 +30,7 @@
import shutil
import glob
import subprocess
+import sys
from itertools import product
import pyomo.contrib.parmest.parmest as parmest
@@ -35,7 +49,7 @@ def setUp(self):
self.instance.IDX = pyo.Set(initialize=['a', 'b', 'c'])
self.instance.x = pyo.Var(self.instance.IDX, initialize=1134)
# TBD add a block
- if not imports_not_present:
+ if imports_present:
np.random.seed(1134)
def tearDown(self):
@@ -88,29 +102,21 @@ def test_bootstrap(self):
del theta_est['samples']
- filename = os.path.abspath(os.path.join(testdir, 'pairwise_bootstrap.png'))
- if os.path.isfile(filename):
- os.remove(filename)
- parmest.pairwise_plot(theta_est, filename=filename)
- #self.assertTrue(os.path.isfile(filename))
-
- filename = os.path.abspath(os.path.join(testdir, 'pairwise_bootstrap_theta.png'))
- if os.path.isfile(filename):
- os.remove(filename)
- parmest.pairwise_plot(theta_est, thetavals, filename=filename)
- #self.assertTrue(os.path.isfile(filename))
-
- filename = os.path.abspath(os.path.join(testdir, 'pairwise_bootstrap_theta_CI.png'))
- if os.path.isfile(filename):
- os.remove(filename)
- parmest.pairwise_plot(theta_est, thetavals, 0.8, ['MVN', 'KDE', 'Rect'],
- filename=filename)
- #self.assertTrue(os.path.isfile(filename))
+ # apply confidence region test
+ CR = self.pest.confidence_region_test(theta_est, 'MVN', [0.5, 0.75, 1.0])
+
+ self.assertTrue(set(CR.columns) >= set([0.5, 0.75, 1.0]))
+ self.assertTrue(CR[0.5].sum() == 5)
+ self.assertTrue(CR[0.75].sum() == 7)
+ self.assertTrue(CR[1.0].sum() == 10) # all true
+
+ parmest.pairwise_plot(theta_est)
+ parmest.pairwise_plot(theta_est, thetavals)
+ parmest.pairwise_plot(theta_est, thetavals, 0.8, ['MVN', 'KDE', 'Rect'])
@unittest.skipIf(not graphics.imports_available,
"parmest.graphics imports are unavailable")
def test_likelihood_ratio(self):
- # tbd: write the plot file(s) to a temp dir and delete in cleanup
objval, thetavals = self.pest.theta_est()
asym = np.arange(10, 30, 2)
@@ -119,16 +125,32 @@ def test_likelihood_ratio(self):
obj_at_theta = self.pest.objective_at_theta(theta_vals)
- LR = self.pest.likelihood_ratio_test(obj_at_theta, objval, [0.8, 0.85, 0.9, 0.95])
+ LR = self.pest.likelihood_ratio_test(obj_at_theta, objval, [0.8, 0.9, 1.0])
- self.assertTrue(set(LR.columns) >= set([0.8, 0.85, 0.9, 0.95]))
+ self.assertTrue(set(LR.columns) >= set([0.8, 0.9, 1.0]))
+ self.assertTrue(LR[0.8].sum() == 7)
+ self.assertTrue(LR[0.9].sum() == 11)
+ self.assertTrue(LR[1.0].sum() == 60) # all true
- filename = os.path.abspath(os.path.join(testdir, 'pairwise_LR_plot.png'))
- if os.path.isfile(filename):
- os.remove(filename)
- parmest.pairwise_plot(LR, thetavals, 0.8, filename=filename)
- #self.assertTrue(os.path.isfile(filename))
+ parmest.pairwise_plot(LR, thetavals, 0.8)
+ def test_leaveNout(self):
+ lNo_theta = self.pest.theta_est_leaveNout(1)
+ self.assertTrue(lNo_theta.shape == (6,2))
+
+ results = self.pest.leaveNout_bootstrap_test(1, None, 3, 'Rect', [0.5, 1.0])
+ self.assertTrue(len(results) == 6) # 6 lNo samples
+ i = 1
+ samples = results[i][0] # list of N samples that are left out
+ lno_theta = results[i][1]
+ bootstrap_theta = results[i][2]
+ self.assertTrue(samples == [1]) # sample 1 was left out
+ self.assertTrue(lno_theta.shape[0] == 1) # lno estimate for sample 1
+ self.assertTrue(set(lno_theta.columns) >= set([0.5, 1.0]))
+ self.assertTrue(lno_theta[1.0].sum() == 1) # all true
+ self.assertTrue(bootstrap_theta.shape[0] == 3) # bootstrap for sample 1
+ self.assertTrue(bootstrap_theta[1.0].sum() == 3) # all true
+
def test_diagnostic_mode(self):
self.pest.diagnostic_mode = True
@@ -152,10 +174,10 @@ def test_rb_main(self):
"rooney_biegler" + os.sep + "rooney_biegler.py"
rbpath = os.path.abspath(rbpath) # paranoia strikes deep...
if sys.version_info >= (3,5):
- ret = subprocess.run(["python", rbpath])
+ ret = subprocess.run([sys.executable, rbpath])
retcode = ret.returncode
else:
- retcode = subprocess.call(["python", rbpath])
+ retcode = subprocess.call([sys.executable, rbpath])
assert(retcode == 0)
@unittest.skip("Presently having trouble with mpiexec on appveyor")
@@ -168,7 +190,7 @@ def test_parallel_parmest(self):
rbpath = parmestpath + os.sep + "examples" + os.sep + \
"rooney_biegler" + os.sep + "rooney_biegler_parmest.py"
rbpath = os.path.abspath(rbpath) # paranoia strikes deep...
- rlist = ["mpiexec", "--allow-run-as-root", "-n", "2", "python", rbpath]
+ rlist = ["mpiexec", "--allow-run-as-root", "-n", "2", sys.executable, rbpath]
if sys.version_info >= (3,5):
ret = subprocess.run(rlist)
retcode = ret.returncode
@@ -183,7 +205,7 @@ def test_theta_k_aug_for_Hessian(self):
self.assertAlmostEqual(objval, 4.4675, places=2)
-@unittest.skipIf(imports_not_present, "Cannot test parmest: required dependencies are missing")
+@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing")
@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
class parmest_object_Tester_reactor_design(unittest.TestCase):
@@ -220,8 +242,11 @@ def SSE(model, data):
(float(data['cc']) - model.cc)**2 + \
(float(data['cd']) - model.cd)**2
return expr
+
+ solver_options = {"max_iter": 6000}
- self.pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE)
+ self.pest = parmest.Estimator(reactor_design_model, data,
+ theta_names, SSE, solver_options)
def test_theta_est(self):
objval, thetavals = self.pest.theta_est()
@@ -230,6 +255,26 @@ def test_theta_est(self):
self.assertAlmostEqual(thetavals['k2'], 5.0/3.0, places=4)
self.assertAlmostEqual(thetavals['k3'], 1.0/6000.0, places=7)
-
+@unittest.skipIf(not parmest.parmest_available,
+ "Cannot test parmest: required dependencies are missing")
+@unittest.skipIf(not graphics.imports_available,
+ "parmest.graphics imports are unavailable")
+@unittest.skipIf(is_osx, "Disabling graphics tests on OSX due to issue in Matplotlib, see Pyomo PR #1337")
+class parmest_graphics(unittest.TestCase):
+
+ def setUp(self):
+ self.A = pd.DataFrame(np.random.randint(0,100,size=(100,4)), columns=list('ABCD'))
+ self.B = pd.DataFrame(np.random.randint(0,100,size=(100,4)), columns=list('ABCD'))
+
+ def test_pairwise_plot(self):
+ parmest.pairwise_plot(self.A, alpha=0.8, distributions=['Rect', 'MVN', 'KDE'])
+
+ def test_grouped_boxplot(self):
+ parmest.grouped_boxplot(self.A, self.B, normalize=True,
+ group_names=['A', 'B'])
+
+ def test_grouped_violinplot(self):
+ parmest.grouped_violinplot(self.A, self.B)
+
if __name__ == '__main__':
unittest.main()
diff --git a/pyomo/contrib/parmest/tests/test_scenariocreator.py b/pyomo/contrib/parmest/tests/test_scenariocreator.py
new file mode 100644
index 00000000000..5a0aa43ecab
--- /dev/null
+++ b/pyomo/contrib/parmest/tests/test_scenariocreator.py
@@ -0,0 +1,146 @@
+# the matplotlib stuff is to avoid $DISPLAY errors on Travis (DLW Oct 2018)
+try:
+ import matplotlib
+ matplotlib.use('Agg')
+except:
+ pass
+from pyomo.common.dependencies import (
+ numpy as np, numpy_available,
+ pandas as pd, pandas_available,
+ scipy, scipy_available,
+)
+imports_present = numpy_available & pandas_available & scipy_available
+
+uuid_available = True
+try:
+ import uuid
+except:
+ uuid_available = False
+
+import pyutilib.th as unittest
+import os
+import sys
+
+import pyomo.contrib.parmest.parmest as parmest
+import pyomo.contrib.parmest.scenariocreator as sc
+import pyomo.contrib.parmest.graphics as graphics
+import pyomo.contrib.parmest as parmestbase
+import pyomo.environ as pyo
+import pyomo.contrib.parmest.examples.semibatch.scencreate as sbc
+
+from pyomo.opt import SolverFactory
+ipopt_available = SolverFactory('ipopt').available()
+
+testdir = os.path.dirname(os.path.abspath(__file__))
+
+
+@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing")
+@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
+class pamest_Scenario_creator_reactor_design(unittest.TestCase):
+
+ def setUp(self):
+ from pyomo.contrib.parmest.examples.reactor_design.reactor_design import reactor_design_model
+
+ # Data from the design
+ data = pd.DataFrame(data=[[1.05, 10000, 3458.4, 1060.8, 1683.9, 1898.5],
+ [1.10, 10000, 3535.1, 1064.8, 1613.3, 1893.4],
+ [1.15, 10000, 3609.1, 1067.8, 1547.5, 1887.8],
+ [1.20, 10000, 3680.7, 1070.0, 1486.1, 1881.6],
+ [1.25, 10000, 3750.0, 1071.4, 1428.6, 1875.0],
+ [1.30, 10000, 3817.1, 1072.2, 1374.6, 1868.0],
+ [1.35, 10000, 3882.2, 1072.4, 1324.0, 1860.7],
+ [1.40, 10000, 3945.4, 1072.1, 1276.3, 1853.1],
+ [1.45, 10000, 4006.7, 1071.3, 1231.4, 1845.3],
+ [1.50, 10000, 4066.4, 1070.1, 1189.0, 1837.3],
+ [1.55, 10000, 4124.4, 1068.5, 1148.9, 1829.1],
+ [1.60, 10000, 4180.9, 1066.5, 1111.0, 1820.8],
+ [1.65, 10000, 4235.9, 1064.3, 1075.0, 1812.4],
+ [1.70, 10000, 4289.5, 1061.8, 1040.9, 1803.9],
+ [1.75, 10000, 4341.8, 1059.0, 1008.5, 1795.3],
+ [1.80, 10000, 4392.8, 1056.0, 977.7, 1786.7],
+ [1.85, 10000, 4442.6, 1052.8, 948.4, 1778.1],
+ [1.90, 10000, 4491.3, 1049.4, 920.5, 1769.4],
+ [1.95, 10000, 4538.8, 1045.8, 893.9, 1760.8]],
+ columns=['sv', 'caf', 'ca', 'cb', 'cc', 'cd'])
+
+ theta_names = ['k1', 'k2', 'k3']
+
+ def SSE(model, data):
+ expr = (float(data['ca']) - model.ca)**2 + \
+ (float(data['cb']) - model.cb)**2 + \
+ (float(data['cc']) - model.cc)**2 + \
+ (float(data['cd']) - model.cd)**2
+ return expr
+
+ self.pest = parmest.Estimator(reactor_design_model, data, theta_names, SSE)
+
+ def test_scen_from_exps(self):
+ scenmaker = sc.ScenarioCreator(self.pest, "ipopt")
+ experimentscens = sc.ScenarioSet("Experiments")
+ scenmaker.ScenariosFromExperiments(experimentscens)
+ experimentscens.write_csv("delme_exp_csv.csv")
+ df = pd.read_csv("delme_exp_csv.csv")
+ os.remove("delme_exp_csv.csv")
+ # March '20: all reactor_design experiments have the same theta values!
+ k1val = df.loc[5].at["k1"]
+ self.assertAlmostEqual(k1val, 5.0/6.0, places=2)
+ tval = experimentscens.ScenarioNumber(0).ThetaVals["k1"]
+ self.assertAlmostEqual(tval, 5.0/6.0, places=2)
+
+
+ @unittest.skipIf(not uuid_available, "The uuid module is not available")
+ def test_no_csv_if_empty(self):
+ # low level test of scenario sets
+ # verify that nothing is written, but no errors with empty set
+
+ emptyset = sc.ScenarioSet("empty")
+ tfile = uuid.uuid4().hex+".csv"
+ emptyset.write_csv(tfile)
+ self.assertFalse(os.path.exists(tfile),
+ "ScenarioSet wrote csv in spite of empty set")
+
+
+
+
+@unittest.skipIf(not imports_present, "Cannot test parmest: required dependencies are missing")
+@unittest.skipIf(not ipopt_available, "The 'ipopt' command is not available")
+class pamest_Scenario_creator_semibatch(unittest.TestCase):
+
+ def setUp(self):
+ import pyomo.contrib.parmest.examples.semibatch.semibatch as sb
+ import json
+
+ # Vars to estimate in parmest
+ theta_names = ['k1', 'k2', 'E1', 'E2']
+
+ self.fbase = os.path.join(testdir,"..","examples","semibatch")
+ # Data, list of dictionaries
+ data = []
+ for exp_num in range(10):
+ fname = "exp"+str(exp_num+1)+".out"
+ fullname = os.path.join(self.fbase, fname)
+ with open(fullname,'r') as infile:
+ d = json.load(infile)
+ data.append(d)
+
+ # Note, the model already includes a 'SecondStageCost' expression
+ # for the sum of squared error that will be used in parameter estimation
+
+ self.pest = parmest.Estimator(sb.generate_model, data, theta_names)
+
+
+ def test_semibatch_bootstrap(self):
+
+ scenmaker = sc.ScenarioCreator(self.pest, "ipopt")
+ bootscens = sc.ScenarioSet("Bootstrap")
+ numtomake = 2
+ scenmaker.ScenariosFromBoostrap(bootscens, numtomake, seed=1134)
+ tval = bootscens.ScenarioNumber(0).ThetaVals["k1"]
+ self.assertAlmostEqual(tval, 20.64, places=1)
+
+ def test_semibatch_example(self):
+ # this is referenced in the documentation so at least look for smoke
+ sbc.main(self.fbase)
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py
index 4fbdbc6d8fc..56de3cf8399 100644
--- a/pyomo/contrib/preprocessing/plugins/constraint_tightener.py
+++ b/pyomo/contrib/preprocessing/plugins/constraint_tightener.py
@@ -1,6 +1,8 @@
import logging
-import textwrap
+from six.moves import zip
+
+from pyomo.common import deprecated
from pyomo.core import Constraint, value, TransformationFactory
from pyomo.core.plugins.transform.hierarchy import IsomorphicTransformation
from pyomo.repn.standard_repn import generate_standard_repn
@@ -8,8 +10,9 @@
logger = logging.getLogger('pyomo.contrib.preprocessing')
-@TransformationFactory.register('core.tighten_constraints_from_vars',
- doc="Tightens upper and lower bound on linear constraints.")
+@TransformationFactory.register(
+ 'core.tighten_constraints_from_vars',
+ doc="Tightens upper and lower bound on linear constraints.")
class TightenContraintFromVars(IsomorphicTransformation):
"""Tightens upper and lower bound on constraints based on variable bounds.
@@ -20,8 +23,17 @@ class TightenContraintFromVars(IsomorphicTransformation):
"""
- def _apply_to(self, instance):
- for constr in instance.component_data_objects(
+ @deprecated(
+ "Use of the constraint tightener transformation is deprecated. "
+ "Its functionality may be partially replicated using "
+ "`pyomo.contrib.fbbt.compute_bounds_on_expr(constraint.body)`.",
+ version='TBD', remove_in='TBD')
+ def __init__(self):
+ super(TightenContraintFromVars, self).__init__()
+
+ def _apply_to(self, model):
+ """Apply the transformation."""
+ for constr in model.component_data_objects(
ctype=Constraint, active=True, descend_into=True):
repn = generate_standard_repn(constr.body)
if not repn.is_linear():
@@ -31,33 +43,34 @@ def _apply_to(self, instance):
LB = UB = 0
if repn.constant:
LB = UB = repn.constant
+
# loop through each coefficent and variable pair
- for i, coef in enumerate(repn.linear_coefs):
- # TODO: Rounding issues
+ for var, coef in zip(repn.linear_vars, repn.linear_coefs):
# Calculate bounds using interval arithmetic
if coef >= 0:
- if repn.linear_vars[i].has_ub():
- UB = UB + coef * value(repn.linear_vars[i].ub)
+ if var.has_ub():
+ UB = UB + coef * value(var.ub)
else:
UB = float('Inf')
- if repn.linear_vars[i].has_lb():
- LB = LB + coef * value(repn.linear_vars[i].lb)
+ if var.has_lb():
+ LB = LB + coef * value(var.lb)
else:
LB = float('-Inf')
else:
# coef is negative, so signs switch
- if repn.linear_vars[i].has_lb():
- UB = UB + coef * value(repn.linear_vars[i].lb)
- else:
- LB = float('-Inf')
- if repn.linear_vars[i].has_ub():
- LB = LB + coef * value(repn.linear_vars[i].ub)
+ if var.has_lb():
+ UB = UB + coef * value(var.lb)
else:
UB = float('Inf')
+ if var.has_ub():
+ LB = LB + coef * value(var.ub)
+ else:
+ LB = float('-Inf')
# if inferred bound is tighter, replace bound
new_ub = min(value(constr.upper), UB) if constr.has_ub() else UB
new_lb = max(value(constr.lower), LB) if constr.has_lb() else LB
+
constr.set_value((new_lb, constr.body, new_ub))
if UB < LB:
diff --git a/pyomo/contrib/preprocessing/plugins/induced_linearity.py b/pyomo/contrib/preprocessing/plugins/induced_linearity.py
index 48420a2bc58..f353cd1bab5 100644
--- a/pyomo/contrib/preprocessing/plugins/induced_linearity.py
+++ b/pyomo/contrib/preprocessing/plugins/induced_linearity.py
@@ -88,7 +88,7 @@ def _process_container(blk, config):
if not hasattr(blk, '_induced_linearity_info'):
blk._induced_linearity_info = Block()
else:
- assert blk._induced_linearity_info.type() == Block
+ assert blk._induced_linearity_info.ctype == Block
eff_discr_vars = detect_effectively_discrete_vars(
blk, config.equality_tolerance)
# TODO will need to go through this for each disjunct, since it does
@@ -185,7 +185,7 @@ def prune_possible_values(block_scope, possible_values, config):
Constraint, active=True, descend_into=(Block, Disjunct)):
if constr.body.polynomial_degree() not in (1, 0):
constr.deactivate()
- if block_scope.type() == Disjunct:
+ if block_scope.ctype == Disjunct:
disj = tmp_clone_blk._tmp_block_scope[0]
disj.indicator_var.fix(1)
TransformationFactory('gdp.bigm').apply_to(model)
@@ -224,7 +224,7 @@ def _process_bilinear_constraints(block, v1, v2, var_values, bilinear_constrs):
.replace('[', '').replace(']', ''))
block._induced_linearity_info.add_component(unique_name, blk)
# TODO think about not using floats as indices in a set
- blk.valid_values = Set(initialize=var_values)
+ blk.valid_values = Set(initialize=sorted(var_values))
blk.x_active = Var(blk.valid_values, domain=Binary, initialize=1)
blk.v_increment = Var(
blk.valid_values, domain=v2.domain,
diff --git a/pyomo/contrib/preprocessing/plugins/strip_bounds.py b/pyomo/contrib/preprocessing/plugins/strip_bounds.py
index 5fbd33b530b..244fef6d0b5 100644
--- a/pyomo/contrib/preprocessing/plugins/strip_bounds.py
+++ b/pyomo/contrib/preprocessing/plugins/strip_bounds.py
@@ -4,7 +4,7 @@
from pyomo.core.base.plugin import TransformationFactory
from pyomo.core.base.var import Var
from pyomo.core.kernel.component_map import ComponentMap
-from pyomo.core.kernel.set_types import Reals
+from pyomo.core.base.set_types import Reals
from pyomo.core.plugins.transform.hierarchy import NonIsomorphicTransformation
from pyomo.common.config import ConfigBlock, ConfigValue, add_docstring_list
diff --git a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py
index 00c94932859..2855518cbd3 100644
--- a/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py
+++ b/pyomo/contrib/preprocessing/tests/test_constraint_tightener.py
@@ -80,6 +80,22 @@ def test_unbounded_one_direction(self):
self.assertEqual(value(m.c1.upper), -1)
self.assertFalse(m.c1.has_lb())
+ def test_negative_coeff(self):
+ """Unbounded in one direction with negative coefficient"""
+ m = ConcreteModel()
+ m.v1 = Var(initialize=7, bounds=(1, float('inf')))
+ m.v2 = Var(initialize=2, bounds=(2, 5))
+ m.v3 = Var(initialize=6, bounds=(6, 9))
+ m.v4 = Var(initialize=1, bounds=(1, 1))
+ m.c1 = Constraint(expr=2 * m.v2 + m.v3 + m.v4 - m.v1 <= 50)
+
+ self.assertEqual(value(m.c1.upper), 50)
+ self.assertTrue(m.c1.has_ub())
+ self.assertFalse(m.c1.has_lb())
+ TransformationFactory('core.tighten_constraints_from_vars').apply_to(m)
+ self.assertEqual(value(m.c1.upper), 19)
+ self.assertFalse(m.c1.has_lb())
+
def test_ignore_nonlinear(self):
m = ConcreteModel()
m.v1 = Var()
diff --git a/pyomo/contrib/pynumero/README.md b/pyomo/contrib/pynumero/README.md
new file mode 100644
index 00000000000..0d165dbc39c
--- /dev/null
+++ b/pyomo/contrib/pynumero/README.md
@@ -0,0 +1,73 @@
+PyNumero
+========
+
+PyNumero: A high-level Python framework for rapid development of
+nonlinear optimization algorithms without large sacrifices on
+computational performance.
+
+PyNumero dramatically reduces the time required to prototype new NLP
+algorithms and parallel decomposition approaches with minimal
+performance penalties.
+
+PyNumero libraries
+==================
+
+PyNumero relies on C/C++ extensions for expensive computing operations.
+
+If you installed Pyomo using conda (from conda-forge), then you can
+obtain precompiled versions of the redistributable interfaces
+(pynumero_ASL) using conda. Through Pyomo 5.6.9 these libraries are
+available by installing the `pynumero_libraries` package from
+conda-forge. Beginning in Pyomo 5.7, the redistributable pynumero
+libraries (pynumero_ASL) are included in the pyomo conda-forge package.
+
+If you are not using conda or want to build the nonredistributable
+interfaces (pynumero_MA27, pynumero_MA57), you can build the extensions
+locally one of three ways:
+
+1. By running the `build.py` Python script in this directory. This
+script will automatically drive the `cmake` build harness to compile the
+libraries and install them into your local Pyomo configuration
+directory. Cmake options may be specified in the command. For example,
+
+ python build.py -DBUILD_ASL=ON
+
+If you have compiled Ipopt, and you would like to link against the
+libraries built with Ipopt, you can. For example,
+
+ python build.py -DBUILD_ASL=ON -DBUILD_MA27=ON -DIPOPT_DIR=/lib/
+
+If you do so, you will likely need to update an environment variable
+for the path to shared libraries. For example, on Linux,
+
+ export LD_LIBRARY_PATH=/lib/
+
+2. By running `pyomo build-extensions`. This will build all registered
+Pyomo binary extensions, including PyNumero (using the `build.py` script
+from option 1).
+
+3. By manually running cmake to build the libraries. You will need to
+ensure that the libraries are then installed into a location that Pyomo
+(and PyNumero) can find them (e.g., in the Pyomo configuration
+`lib` directory, in a common system location, or in a location included in
+the LD_LIBRARY_PATH environment variable).
+
+Prerequisites
+-------------
+
+1. `pynumero_ASL`:
+ - cmake
+ - a C/C++ compiler
+ - ASL library and headers (optionally, the build harness can
+ automatically check out and build AMPL/MP from GitHub to obtain
+ this library)
+
+2. `pynumero_MA27`:
+ - cmake
+ - a C/C++ compiler
+ - MA27 library, COIN-HSL Archive, or COIN-HSL Full
+
+3. `pynumero_MA57`:
+ - cmake
+ - a C/C++ compiler
+ - MA57 library or COIN-HSL Full
diff --git a/pyomo/contrib/pynumero/__init__.py b/pyomo/contrib/pynumero/__init__.py
index 87142be00b0..2358e8f6cf9 100644
--- a/pyomo/contrib/pynumero/__init__.py
+++ b/pyomo/contrib/pynumero/__init__.py
@@ -7,39 +7,5 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-try:
- import numpy as np
- # Note: sparse.BlockVector leverages the __array__ufunc__ interface
- # released in numpy 1.13
- numpy_available = np.lib.NumpyVersion(np.__version__) >= '1.13.0'
- if not numpy_available:
- import pyomo.common # ...to set up the logger
- import logging
- logging.getLogger('pyomo.contrib.pynumero').warn(
- "Pynumero requires numpy>=1.13.0; found %s" % (np.__version__,))
-except ImportError:
- numpy_available = False
-try:
- import scipy
- scipy_available = True
-except ImportError:
- scipy_available = False
- import pyomo.common # ...to set up the logger
- import logging
- logging.getLogger('pyomo.contrib.pynumero').warn(
- "Scipy not available. Install scipy before using pynumero")
-
-if numpy_available:
- from .sparse.intrinsic import *
-else:
- # In general, generating output in __init__.py is undesirable, as
- # many __init__.py get imported automatically by pyomo.environ.
- # Fortunately, at the moment, pynumero doesn't implement any
- # plugins, so pyomo.environ ignores it. When we start implementing
- # general solvers in pynumero we will want to remove / move this
- # warning somewhere deeper in the code.
- import pyomo.common # ...to set up the logger
- import logging
- logging.getLogger('pyomo.contrib.pynumero').warn(
- "Numpy not available. Install numpy>=1.13.0 before using pynumero")
+from .intrinsic import norm, allclose, where, isin, intersect1d, setdiff1d
diff --git a/pyomo/contrib/pynumero/algorithms/__init__.py b/pyomo/contrib/pynumero/algorithms/__init__.py
index f8ffb764677..6b39dd18d6a 100644
--- a/pyomo/contrib/pynumero/algorithms/__init__.py
+++ b/pyomo/contrib/pynumero/algorithms/__init__.py
@@ -8,5 +8,3 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from .. import numpy_available
-
diff --git a/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py b/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py
index da98a50ce5a..90c7fa9ed55 100644
--- a/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py
+++ b/pyomo/contrib/pynumero/algorithms/solvers/cyipopt_solver.py
@@ -33,7 +33,7 @@
@six.add_metaclass(abc.ABCMeta)
-class CyIpoptProblemInterface(abc.ABC):
+class CyIpoptProblemInterface(object):
@abc.abstractmethod
def x_init(self):
"""Return the initial values for x as a numpy ndarray
diff --git a/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py b/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py
new file mode 100644
index 00000000000..13533f1de05
--- /dev/null
+++ b/pyomo/contrib/pynumero/algorithms/solvers/pyomo_ext_cyipopt.py
@@ -0,0 +1,336 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import numpy as np
+import six
+import abc
+from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptProblemInterface
+from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
+from pyomo.contrib.pynumero.sparse.block_vector import BlockVector
+from pyomo.environ import Var, Constraint, value
+from pyomo.core.base.var import _VarData
+from pyomo.common.modeling import unique_component_name
+
+"""
+This module is used for interfacing a multi-input / multi-output external
+evaluation code with a Pyomo model and then solve the coupled model
+with CyIpopt.
+
+To use this interface:
+ * inherit from ExternalInputOutputModel and implement the necessary methods
+ (This provides methods to set the input values, evaluate the output values,
+ and evaluate the jacobian of the outputs with respect to the inputs.)
+ * create a PyomoExternalCyIpoptProblem object, giving it your pyomo model, an
+ instance of the derived ExternalInputOutputModel, a list of the Pyomo variables
+ that map to the inputs of the external model, and a list of the Pyomo variables
+ that map to the outputs from the external model.
+ * The standard CyIpopt solver interface can be called using the PyomoExternalCyIpoptProblem
+
+See the PyNumero tests for this interface to see an example of use.
+
+Todo:
+ * Currently, you cannot "fix" a pyomo variable that corresponds to an input or output
+ and you must use a constraint instead (this is because Pyomo removes fixed variables
+ before sending them to the solver)
+ * Remove the dummy variable and constraint once Pyomo supports non-removal of certain
+ variables
+"""
+@six.add_metaclass(abc.ABCMeta)
+class ExternalInputOutputModel(object):
+ """
+ This is the base class for building external input output models
+ for use with Pyomo and CyIpopt
+ """
+ def __init__(self):
+ pass
+
+ @abc.abstractmethod
+ def set_inputs(self, input_values):
+ """
+ This method is called by the solver to set the current values
+ for the input variables. The derived class must cache these if
+        necessary for any subsequent calls to evaluate_outputs or
+ evaluate_derivatives.
+ """
+ pass
+
+ @abc.abstractmethod
+ def evaluate_outputs(self):
+ """
+ Compute the outputs from the model (using the values
+ set in input_values) and return as a numpy array
+ """
+ pass
+
+ @abc.abstractmethod
+ def evaluate_derivatives(self):
+ """
+ Compute the derivatives of the outputs with respect
+ to the inputs (using the values set in input_values).
+ This should be a dense matrix with the rows in
+ the order of the output variables and the cols in
+ the order of the input variables.
+ """
+ pass
+
+ # ToDo: Hessians not yet handled
+
+class PyomoExternalCyIpoptProblem(CyIpoptProblemInterface):
+ def __init__(self, pyomo_model, ex_input_output_model, inputs, outputs):
+ """
+ Create an instance of this class to pass as a problem to CyIpopt.
+
+ Parameters
+ ----------
+ pyomo_model : ConcreteModel
+ The ConcreteModel representing the Pyomo part of the problem. This
+ model must contain Pyomo variables for the inputs and the outputs.
+
+ ex_input_output_model : ExternalInputOutputModel
+ An instance of a derived class (from ExternalInputOutputModel) that provides
+ the methods to compute the outputs and the derivatives.
+
+ inputs : list of Pyomo variables (_VarData)
+ The Pyomo model needs to have variables to represent the inputs to the
+ external model. This is the list of those input variables in the order
+ that corresponds to the input_values vector provided in the set_inputs call.
+
+ outputs : list of Pyomo variables (_VarData)
+ The Pyomo model needs to have variables to represent the outputs from the
+ external model. This is the list of those output variables in the order
+ that corresponds to the numpy array returned from the evaluate_outputs call.
+ """
+ self._pyomo_model = pyomo_model
+ self._ex_io_model = ex_input_output_model
+
+ # verify that the inputs and outputs were passed correctly
+ self._inputs = [v for v in inputs]
+ for v in self._inputs:
+ if not isinstance(v, _VarData):
+ raise RuntimeError('Argument inputs passed to PyomoExternalCyIpoptProblem must be'
+ ' a list of VarData objects. Note: if you have an indexed variable, pass'
+ ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).')
+
+ self._outputs = [v for v in outputs]
+ for v in self._outputs:
+ if not isinstance(v, _VarData):
+ raise RuntimeError('Argument outputs passed to PyomoExternalCyIpoptProblem must be'
+ ' a list of VarData objects. Note: if you have an indexed variable, pass'
+ ' each index as a separate entry in the list (e.g., inputs=[m.x[1], m.x[2]]).')
+
+ # we need to add a dummy variable and constraint to the pyomo_nlp
+ # to make sure it does not remove variables that do not
+ # appear in the pyomo part of the model - also ensure unique name in case model
+ # is used in more than one instance of this class
+ # ToDo: Improve this by convincing Pyomo not to remove the inputs and outputs
+ dummy_var_name = unique_component_name(self._pyomo_model, '_dummy_variable_CyIpoptPyomoExNLP')
+ dummy_var = Var()
+ setattr(self._pyomo_model, dummy_var_name, dummy_var)
+ dummy_con_name = unique_component_name(self._pyomo_model, '_dummy_constraint_CyIpoptPyomoExNLP')
+ dummy_con = Constraint(
+ expr = getattr(self._pyomo_model, dummy_var_name) == \
+ sum(v for v in self._inputs) + sum(v for v in self._outputs)
+ )
+ setattr(self._pyomo_model, dummy_con_name, dummy_con)
+
+ # initialize the dummy var to the right hand side
+ dummy_var_value = 0
+ for v in self._inputs:
+ if v.value is not None:
+ dummy_var_value += value(v)
+ for v in self._outputs:
+ if v.value is not None:
+ dummy_var_value += value(v)
+ dummy_var.value = dummy_var_value
+
+ # make an nlp interface from the pyomo model
+ self._pyomo_nlp = PyomoNLP(self._pyomo_model)
+
+ # create initial value vectors for primals and duals
+ init_primals = self._pyomo_nlp.init_primals()
+ init_duals_pyomo = self._pyomo_nlp.init_duals()
+ if np.any(np.isnan(init_duals_pyomo)):
+ # set initial values to 1 for any entries that we don't get
+ # (typically, all are set, or none are set)
+ init_duals_pyomo[np.isnan(init_duals_pyomo)] = 1.0
+ init_duals_ex = np.ones(len(self._outputs), dtype=np.float64)
+ init_duals = BlockVector(2)
+ init_duals.set_block(0, init_duals_pyomo)
+ init_duals.set_block(1, init_duals_ex)
+
+ # build the map from inputs and outputs to the full x vector
+ self._input_columns = self._pyomo_nlp.get_primal_indices(self._inputs)
+ #self._input_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64)
+ #self._input_x_mask[self._input_columns] = 1.0
+ self._output_columns = self._pyomo_nlp.get_primal_indices(self._outputs)
+ #self._output_x_mask = np.zeros(self._pyomo_nlp.n_primals(), dtype=np.float64)
+ #self._output_x_mask[self._output_columns] = 1.0
+
+ # create caches for primals and duals
+ self._cached_primals = init_primals.copy()
+ self._cached_duals = init_duals.clone(copy=True)
+ self._cached_obj_factor = 1.0
+
+ # set the initial values for the pyomo primals and duals
+ self._pyomo_nlp.set_primals(self._cached_primals)
+ self._pyomo_nlp.set_duals(self._cached_duals.get_block(0))
+ # set the initial values for the external inputs
+ ex_inputs = self._ex_io_inputs_from_full_primals(self._cached_primals)
+ self._ex_io_model.set_inputs(ex_inputs)
+
+ # create the lower and upper bounds for the complete problem
+ pyomo_nlp_con_lb = self._pyomo_nlp.constraints_lb()
+ ex_con_lb = np.zeros(len(self._outputs), dtype=np.float64)
+ self._gL = np.concatenate((pyomo_nlp_con_lb, ex_con_lb))
+ pyomo_nlp_con_ub = self._pyomo_nlp.constraints_ub()
+ ex_con_ub = np.zeros(len(self._outputs), dtype=np.float64)
+ self._gU = np.concatenate((pyomo_nlp_con_ub, ex_con_ub))
+
+ ### setup the jacobian structures
+ self._jac_pyomo = self._pyomo_nlp.evaluate_jacobian()
+
+ # We will be mapping the dense external jacobian (doutputs/dinputs)
+ # to the correct columns from the full x vector
+ ex_start_row = self._pyomo_nlp.n_constraints()
+
+ jac_ex = self._ex_io_model.evaluate_derivatives()
+
+ # the jacobian returned from the external model is in the
+ # space of the external model only. We need to shift
+ # the rows down and shift the columns appropriately
+ jac_ex_irows = np.copy(jac_ex.row)
+ jac_ex_irows += ex_start_row
+ jac_ex_jcols = np.copy(jac_ex.col)
+ for z,col in enumerate(jac_ex_jcols):
+ jac_ex_jcols[z] = self._input_columns[col]
+ jac_ex_data = np.copy(jac_ex.data)
+
+ # CDL: this code was for the dense version of evaluate_derivatives
+ # for i in range(len(self._outputs)):
+ # for j in range(len(self._inputs)):
+ # jac_ex_irows.append(ex_start_row + i)
+ # jac_ex_jcols.append(self._input_columns[j])
+ # jac_ex_data.append(jac_ex[i,j])
+
+ jac_ex_output_irows = list()
+ jac_ex_output_jcols = list()
+ jac_ex_output_data = list()
+
+ # add the jac for output variables from the extra equations
+ for i in range(len(self._outputs)):
+ jac_ex_output_irows.append(ex_start_row + i)
+ jac_ex_output_jcols.append(self._output_columns[i])
+ jac_ex_output_data.append(-1.0)
+
+ self._full_jac_irows = np.concatenate((self._jac_pyomo.row, jac_ex_irows, jac_ex_output_irows))
+ self._full_jac_jcols = np.concatenate((self._jac_pyomo.col, jac_ex_jcols, jac_ex_output_jcols))
+ self._full_jac_data = np.concatenate((self._jac_pyomo.data, jac_ex_data, jac_ex_output_data))
+
+ # currently, this interface does not do anything with Hessians
+
+ def load_x_into_pyomo(self, primals):
+ """
+ Use this method to load a numpy array of values into the corresponding
+ Pyomo variables (e.g., the solution from CyIpopt)
+
+ Parameters
+ ----------
+ primals : numpy array
+ The array of values that will be given to the Pyomo variables. The
+ order of this array is the same as the order in the PyomoNLP created
+ internally.
+ """
+ pyomo_variables = self._pyomo_nlp.get_pyomo_variables()
+ for i,v in enumerate(primals):
+ pyomo_variables[i].set_value(v)
+
+ def _set_primals_if_necessary(self, primals):
+ if not np.array_equal(primals, self._cached_primals):
+ self._pyomo_nlp.set_primals(primals)
+ ex_inputs = self._ex_io_inputs_from_full_primals(primals)
+ self._ex_io_model.set_inputs(ex_inputs)
+ self._cached_primals = primals.copy()
+
+ def _set_duals_if_necessary(self, duals):
+ if not np.array_equal(duals, self._cached_duals):
+ self._cached_duals.copy_from(duals)
+ self._pyomo_nlp.set_duals(self._cached_duals.get_block(0))
+
+ def _set_obj_factor_if_necessary(self, obj_factor):
+ if obj_factor != self._cached_obj_factor:
+ self._pyomo_nlp.set_obj_factor(obj_factor)
+ self._cached_obj_factor = obj_factor
+
+ def x_init(self):
+ return self._pyomo_nlp.init_primals()
+
+ def x_lb(self):
+ return self._pyomo_nlp.primals_lb()
+
+ def x_ub(self):
+ return self._pyomo_nlp.primals_ub()
+
+ def g_lb(self):
+ return self._gL.copy()
+
+ def g_ub(self):
+ return self._gU.copy()
+
+ def objective(self, primals):
+ self._set_primals_if_necessary(primals)
+ return self._pyomo_nlp.evaluate_objective()
+
+ def gradient(self, primals):
+ self._set_primals_if_necessary(primals)
+ return self._pyomo_nlp.evaluate_grad_objective()
+
+ def constraints(self, primals):
+ self._set_primals_if_necessary(primals)
+ pyomo_constraints = self._pyomo_nlp.evaluate_constraints()
+ ex_io_outputs = self._ex_io_model.evaluate_outputs()
+ ex_io_constraints = ex_io_outputs - self._ex_io_outputs_from_full_primals(primals)
+ constraints = BlockVector(2)
+ constraints.set_block(0, pyomo_constraints)
+ constraints.set_block(1, ex_io_constraints)
+ return constraints.flatten()
+
+ def jacobianstructure(self):
+ return self._full_jac_irows, self._full_jac_jcols
+
+ def jacobian(self, primals):
+ self._set_primals_if_necessary(primals)
+ self._pyomo_nlp.evaluate_jacobian(out=self._jac_pyomo)
+ pyomo_data = self._jac_pyomo.data
+ ex_io_deriv = self._ex_io_model.evaluate_derivatives()
+ # CDL: dense version: ex_io_deriv = self._ex_io_model.evaluate_derivatives().flatten('C')
+ self._full_jac_data[0:len(pyomo_data)] = pyomo_data
+ self._full_jac_data[len(pyomo_data):len(pyomo_data)+len(ex_io_deriv.data)] = ex_io_deriv.data
+ # CDL: dense version: self._full_jac_data[len(pyomo_data):len(pyomo_data)+len(ex_io_deriv)] = ex_io_deriv
+
+ # the -1s for the output variables should still be here
+ return self._full_jac_data
+
+ def hessianstructure(self):
+ return np.zeros(0), np.zeros(0)
+ #raise NotImplementedError('No Hessians for now')
+
+ def hessian(self, x, y, obj_factor):
+ raise NotImplementedError('No Hessians for now')
+
+ def _ex_io_inputs_from_full_primals(self, primals):
+ return primals[self._input_columns]
+ #return np.compress(self._input_x_mask, primals)
+
+ def _ex_io_outputs_from_full_primals(self, primals):
+ return primals[self._output_columns]
+ #return np.compress(self._output_x_mask, primals)
+
+
+
+
diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py
index 184cb1e4c04..dfdc612082d 100644
--- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py
+++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_interfaces.py
@@ -11,19 +11,17 @@
import pyutilib.th as unittest
import pyomo.environ as pyo
-from pyomo.contrib.pynumero import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse as spa, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-import scipy.sparse as spa
-import numpy as np
-
-from pyomo.contrib.pynumero.extensions.asl import AmplInterface
+from pyomo.contrib.pynumero.asl import AmplInterface
if not AmplInterface.available():
raise unittest.SkipTest(
"Pynumero needs the ASL extension to run CyIpoptSolver tests")
-import scipy.sparse as sp
from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
try:
@@ -103,10 +101,10 @@ def test_model1(self):
# test jacobian
expected = np.asarray([[8.0, 0, 1.0],[0.0, 8.0, 1.0]])
- spexpected = sp.coo_matrix(expected).todense()
+ spexpected = spa.coo_matrix(expected).todense()
rows, cols = cynlp.jacobianstructure()
values = cynlp.jacobian(x)
- jac = sp.coo_matrix((values, (rows,cols)), shape=(len(constraints), len(x))).todense()
+ jac = spa.coo_matrix((values, (rows,cols)), shape=(len(constraints), len(x))).todense()
self.assertTrue(np.allclose(spexpected, jac))
# test hessian
@@ -114,6 +112,6 @@ def test_model1(self):
y.fill(1.0)
rows, cols = cynlp.hessianstructure()
values = cynlp.hessian(x, y, obj_factor=1.0)
- hess_lower = sp.coo_matrix((values, (rows,cols)), shape=(len(x), len(x))).todense()
+ hess_lower = spa.coo_matrix((values, (rows,cols)), shape=(len(x), len(x))).todense()
expected_hess_lower = np.asarray([[-286.0, 0.0, 0.0], [0.0, 4.0, 0.0], [-144.0, 0.0, 192.0]], dtype=np.float64)
self.assertTrue(np.allclose(expected_hess_lower, hess_lower))
diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py
index c24afbf6539..2f9a09ed8ff 100644
--- a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py
+++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_cyipopt_solver.py
@@ -11,14 +11,13 @@
import pyutilib.th as unittest
import pyomo.environ as pyo
-from pyomo.contrib.pynumero import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse as spa, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-import scipy.sparse as spa
-import numpy as np
-
-from pyomo.contrib.pynumero.extensions.asl import AmplInterface
+from pyomo.contrib.pynumero.asl import AmplInterface
if not AmplInterface.available():
raise unittest.SkipTest(
"Pynumero needs the ASL extension to run CyIpoptSolver tests")
@@ -30,8 +29,9 @@
except ImportError:
raise unittest.SkipTest("Pynumero needs cyipopt to run CyIpoptSolver tests")
-from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver, CyIpoptNLP
-
+from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import (
+ CyIpoptSolver, CyIpoptNLP
+)
def create_model1():
m = pyo.ConcreteModel()
diff --git a/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py
new file mode 100644
index 00000000000..ac67cbeab09
--- /dev/null
+++ b/pyomo/contrib/pynumero/algorithms/solvers/tests/test_pyomo_ext_cyipopt.py
@@ -0,0 +1,146 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import pyutilib.th as unittest
+import pyomo.environ as pyo
+
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse as spa, scipy_available
+)
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
+
+from pyomo.contrib.pynumero.asl import AmplInterface
+if not AmplInterface.available():
+ raise unittest.SkipTest(
+ "Pynumero needs the ASL extension to run CyIpoptSolver tests")
+
+try:
+ import ipopt
+except ImportError:
+ raise unittest.SkipTest("Pynumero needs cyipopt to run CyIpoptSolver tests")
+
+from pyomo.contrib.pynumero.algorithms.solvers.pyomo_ext_cyipopt import ExternalInputOutputModel, PyomoExternalCyIpoptProblem
+from pyomo.contrib.pynumero.algorithms.solvers.cyipopt_solver import CyIpoptSolver
+
+class PressureDropModel(ExternalInputOutputModel):
+ def __init__(self):
+ self._Pin = None
+ self._c1 = None
+ self._c2 = None
+ self._F = None
+
+ def set_inputs(self, input_values):
+ assert len(input_values) == 4
+ self._Pin = input_values[0]
+ self._c1 = input_values[1]
+ self._c2 = input_values[2]
+ self._F = input_values[3]
+
+ def evaluate_outputs(self):
+ P1 = self._Pin - self._c1*self._F**2
+ P2 = P1 - self._c2*self._F**2
+ return np.asarray([P1, P2], dtype=np.float64)
+
+ def evaluate_derivatives(self):
+ jac = [[1, -self._F**2, 0, -2*self._c1*self._F],
+ [1, -self._F**2, -self._F**2, -2*self._F*(self._c1 + self._c2)]]
+ jac = np.asarray(jac, dtype=np.float64)
+ return spa.coo_matrix(jac)
+
+class TestExternalInputOutputModel(unittest.TestCase):
+
+ def test_interface(self):
+ # weird, this is really a test of the test class above
+ # but we could add code later, so...
+ iom = PressureDropModel()
+ iom.set_inputs(np.ones(4))
+ o = iom.evaluate_outputs()
+ expected_o = np.asarray([0.0, -1.0], dtype=np.float64)
+ self.assertTrue(np.array_equal(o, expected_o))
+
+ jac = iom.evaluate_derivatives()
+ expected_jac = np.asarray([[1, -1, 0, -2], [1, -1, -1, -4]], dtype=np.float64)
+ self.assertTrue(np.array_equal(jac.todense(), expected_jac))
+
+ def test_pyomo_external_model(self):
+ m = pyo.ConcreteModel()
+ m.Pin = pyo.Var(initialize=100, bounds=(0,None))
+ m.c1 = pyo.Var(initialize=1.0, bounds=(0,None))
+ m.c2 = pyo.Var(initialize=1.0, bounds=(0,None))
+ m.F = pyo.Var(initialize=10, bounds=(0,None))
+
+ m.P1 = pyo.Var()
+ m.P2 = pyo.Var()
+
+ m.F_con = pyo.Constraint(expr = m.F == 10)
+ m.Pin_con = pyo.Constraint(expr = m.Pin == 100)
+
+ # simple parameter estimation test
+ m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2)
+
+ cyipopt_problem = \
+ PyomoExternalCyIpoptProblem(m,
+ PressureDropModel(),
+ [m.Pin, m.c1, m.c2, m.F],
+ [m.P1, m.P2]
+ )
+
+ # check that the dummy variable is initialized
+ expected_dummy_var_value = pyo.value(m.Pin) + pyo.value(m.c1) + pyo.value(m.c2) + pyo.value(m.F) \
+ + 0 + 0
+ # + pyo.value(m.P1) + pyo.value(m.P2) # not initialized - therefore should use zero
+ self.assertAlmostEqual(pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value)
+
+ # solve the problem
+ solver = CyIpoptSolver(cyipopt_problem, {'hessian_approximation':'limited-memory'})
+ x, info = solver.solve(tee=False)
+ cyipopt_problem.load_x_into_pyomo(x)
+ self.assertAlmostEqual(pyo.value(m.c1), 0.1, places=5)
+ self.assertAlmostEqual(pyo.value(m.c2), 0.5, places=5)
+
+ def test_pyomo_external_model_dummy_var_initialization(self):
+ m = pyo.ConcreteModel()
+ m.Pin = pyo.Var(initialize=100, bounds=(0,None))
+ m.c1 = pyo.Var(initialize=1.0, bounds=(0,None))
+ m.c2 = pyo.Var(initialize=1.0, bounds=(0,None))
+ m.F = pyo.Var(initialize=10, bounds=(0,None))
+
+ m.P1 = pyo.Var(initialize=75.0)
+ m.P2 = pyo.Var(initialize=50.0)
+
+ m.F_con = pyo.Constraint(expr = m.F == 10)
+ m.Pin_con = pyo.Constraint(expr = m.Pin == 100)
+
+ # simple parameter estimation test
+ m.obj = pyo.Objective(expr= (m.P1 - 90)**2 + (m.P2 - 40)**2)
+
+ cyipopt_problem = \
+ PyomoExternalCyIpoptProblem(m,
+ PressureDropModel(),
+ [m.Pin, m.c1, m.c2, m.F],
+ [m.P1, m.P2]
+ )
+
+ # check that the dummy variable is initialized
+ expected_dummy_var_value = pyo.value(m.Pin) + pyo.value(m.c1) + pyo.value(m.c2) + pyo.value(m.F) \
+ + pyo.value(m.P1) + pyo.value(m.P2)
+ self.assertAlmostEqual(pyo.value(m._dummy_variable_CyIpoptPyomoExNLP), expected_dummy_var_value)
+ # check that the dummy constraint is satisfied
+ self.assertAlmostEqual(pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body),pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.lower))
+ self.assertAlmostEqual(pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.body),pyo.value(m._dummy_constraint_CyIpoptPyomoExNLP.upper))
+
+ # solve the problem
+ solver = CyIpoptSolver(cyipopt_problem, {'hessian_approximation':'limited-memory'})
+ x, info = solver.solve(tee=False)
+ cyipopt_problem.load_x_into_pyomo(x)
+ self.assertAlmostEqual(pyo.value(m.c1), 0.1, places=5)
+ self.assertAlmostEqual(pyo.value(m.c2), 0.5, places=5)
+
diff --git a/pyomo/contrib/pynumero/extensions/asl.py b/pyomo/contrib/pynumero/asl.py
similarity index 99%
rename from pyomo/contrib/pynumero/extensions/asl.py
rename to pyomo/contrib/pynumero/asl.py
index 6cdab41a06e..a14223f5b98 100644
--- a/pyomo/contrib/pynumero/extensions/asl.py
+++ b/pyomo/contrib/pynumero/asl.py
@@ -7,8 +7,7 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from pyomo.contrib.pynumero.extensions.utils import find_pynumero_library
-from pkg_resources import resource_filename
+from pyomo.common.fileutils import find_library
import numpy.ctypeslib as npct
import numpy as np
import platform
@@ -16,12 +15,17 @@
import sys
import os
+class _NotSet:
+ pass
class AmplInterface(object):
- libname = find_pynumero_library('pynumero_ASL')
+ libname = _NotSet
+
@classmethod
def available(cls):
+ if cls.libname is _NotSet:
+ cls.libname = find_library('pynumero_ASL')
if cls.libname is None:
return False
return os.path.exists(cls.libname)
diff --git a/pyomo/contrib/pynumero/build.py b/pyomo/contrib/pynumero/build.py
new file mode 100644
index 00000000000..d6391f8a213
--- /dev/null
+++ b/pyomo/contrib/pynumero/build.py
@@ -0,0 +1,112 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import errno
+import os
+import shutil
+import stat
+import sys
+import tempfile
+
+from pyomo.common import config
+from pyomo.common.fileutils import this_file_dir, find_executable
+
+def handleReadonly(function, path, excinfo):
+ excvalue = excinfo[1]
+ if excvalue.errno == errno.EACCES:
+ os.chmod(path, stat.S_IRWXU| stat.S_IRWXG| stat.S_IRWXO) # 0777
+ function(path)
+ else:
+ raise
+
+def build_pynumero(user_args=[], parallel=None):
+ import distutils.core
+ from setuptools import Extension
+ from distutils.command.build_ext import build_ext
+
+ class _CMakeBuild(build_ext, object):
+ def run(self):
+ project_dir = self.extensions[0].project_dir
+
+ cmake_config = 'Debug' if self.debug else 'Release'
+ cmake_args = [
+ '-DCMAKE_INSTALL_PREFIX=' + config.PYOMO_CONFIG_DIR,
+ '-DBUILD_AMPLMP_IF_NEEDED=ON',
+ #'-DCMAKE_BUILD_TYPE=' + cmake_config,
+ ] + user_args
+
+ try:
+ # Redirect all stderr to stdout (to prevent powershell
+ # from inadvertently failing builds)
+ sys.stderr.flush()
+ sys.stdout.flush()
+ old_stderr = os.dup(sys.stderr.fileno())
+ os.dup2(sys.stdout.fileno(), sys.stderr.fileno())
+ old_environ = dict(os.environ)
+ if parallel:
+ # --parallel was only added in cmake 3.12. Use an
+ # environment variable so that we don't have to bump
+ # the minimum cmake version.
+ os.environ['CMAKE_BUILD_PARALLEL_LEVEL'] = str(parallel)
+
+ cmake = find_executable('cmake')
+ if cmake is None:
+ raise IOError("cmake not found in the system PATH")
+ self.spawn([cmake, project_dir] + cmake_args)
+ if not self.dry_run:
+ # Skip build and go straight to install: the build
+ # harness should take care of dependencies and this
+ # will prevent repeated builds in MSVS
+ #
+ #self.spawn(['cmake', '--build', '.',
+ # '--config', cmake_config])
+ self.spawn([cmake, '--build', '.',
+ '--target', 'install',
+ '--config', cmake_config])
+ finally:
+ # Restore stderr
+ sys.stderr.flush()
+ sys.stdout.flush()
+ os.dup2(old_stderr, sys.stderr.fileno())
+ os.environ = old_environ
+
+ class CMakeExtension(Extension, object):
+ def __init__(self, name):
+ # don't invoke the original build_ext for this special extension
+ super(CMakeExtension, self).__init__(name, sources=[])
+ self.project_dir = os.path.join(this_file_dir(), name)
+
+ sys.stdout.write("\n**** Building PyNumero libraries ****\n")
+ package_config = {
+ 'name': 'pynumero_libraries',
+ 'packages': [],
+ 'ext_modules': [CMakeExtension("src")],
+ 'cmdclass': {'build_ext': _CMakeBuild},
+ }
+ dist = distutils.core.Distribution(package_config)
+ try:
+ basedir = os.path.abspath(os.path.curdir)
+ tmpdir = os.path.abspath(tempfile.mkdtemp())
+ os.chdir(tmpdir)
+ dist.run_command('build_ext')
+ install_dir = os.path.join(config.PYOMO_CONFIG_DIR, 'lib')
+ finally:
+ os.chdir(basedir)
+ shutil.rmtree(tmpdir, onerror=handleReadonly)
+ sys.stdout.write("Installed PyNumero libraries to %s\n" % ( install_dir, ))
+
+
+class PyNumeroBuilder(object):
+ def __call__(self, parallel):
+ return build_pynumero(parallel=parallel)
+
+if __name__ == "__main__":
+ build_pynumero(sys.argv[1:])
+
diff --git a/pyomo/contrib/pynumero/cmake/CMakeLists.txt b/pyomo/contrib/pynumero/cmake/CMakeLists.txt
deleted file mode 100644
index e0c10b54083..00000000000
--- a/pyomo/contrib/pynumero/cmake/CMakeLists.txt
+++ /dev/null
@@ -1,95 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-
-
-cmake_minimum_required(VERSION 3.2)
-
-PROJECT( Pynumero )
-
-set(CMAKE_BUILD_TYPE release)
-
-##################### Checks for compiler #####################
-include(CheckCXXCompilerFlag)
-CHECK_CXX_COMPILER_FLAG("-std=c++11" COMPILER_SUPPORTS_CXX11)
-
-if(COMPILER_SUPPORTS_CXX11)
- set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++11")
-#elseif(COMPILER_SUPPORTS_CXX0X)
-# set(CMAKE_CXX_FLAGS "${CMAKE_CXX_FLAGS} -std=c++0x")
-else()
- message(STATUS "The compiler ${CMAKE_CXX_COMPILER} has no C++11 support. Please use a different C++ compiler.")
- if ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Clang")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 3.1)
- message(FATAL_ERROR "CLANG version must be at least 3.1!")
- endif()
- elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "GNU")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 4.7)
- message(FATAL_ERROR "GCC version must be at least 4.7!")
- endif()
- elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "Intel")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
- message(FATAL_ERROR "ICC version must be at least 12.0!")
- endif()
- elseif ("${CMAKE_CXX_COMPILER_ID}" STREQUAL "MSVC")
- if (CMAKE_CXX_COMPILER_VERSION VERSION_LESS 12.0)
- message(FATAL_ERROR "MSVC version must be at least 12.0!")
- endif()
- endif()
-endif()
-
-# to find our dependencies
-set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/")
-
-if(WIN32)
-set(CMAKE_CXX_FLAGS_DEBUG "-g -O3")
-set(CMAKE_CXX_FLAGS_RELEASE "-O2 -static-libstdc++")
-endif()
-
-option(STATIC_LINK "STATIC_LINK" OFF)
-
-# check fpr windows
-if(MSVC OR MSYS OR MINGW)
- set(CMAKE_CXX_FLAGS_DEBUG "-g -O3")
- if(${STATIC_LINK})
- set(CMAKE_CXX_FLAGS_RELEASE "-O2 -static-libstdc++")
- else()
- set(CMAKE_CXX_FLAGS_RELEASE "-O2")
- endif()
-# check for apple
-elseif(APPLE)
- set(CMAKE_CXX_FLAGS_DEBUG "-g -O3 -fpermissive")
- if(${STATIC_LINK})
- set(CMAKE_CXX_FLAGS_RELEASE "-O2 -static-libstdc++ -fpermissive")
- else()
- set(CMAKE_CXX_FLAGS_RELEASE "-O2 -fpermissive")
- endif()
-# check for linux
-else()
- set(CMAKE_CXX_FLAGS_DEBUG "-g -O3")
- if(${STATIC_LINK})
- set(CMAKE_CXX_FLAGS_RELEASE "-O2 -static-libstdc++")
- else()
- set(CMAKE_CXX_FLAGS_RELEASE "-O2")
- endif()
-endif()
-
-
-option(BUILD_ASL "BUILD_ASL" ON)
-#option(BUILD_HSL "BUILD_HSL" OFF)
-
-if(${BUILD_ASL})
-add_subdirectory(asl_interface)
-endif()
-
-#if(${BUILD_HSL})
-#add_subdirectory(hsl_interface)
-#endif()
-
-add_subdirectory(tests)
diff --git a/pyomo/contrib/pynumero/cmake/README.md b/pyomo/contrib/pynumero/cmake/README.md
deleted file mode 100644
index b90dc71516a..00000000000
--- a/pyomo/contrib/pynumero/cmake/README.md
+++ /dev/null
@@ -1,38 +0,0 @@
-PyNumero libraries
-==================
-
-Pynumero relies on C/C++ extensions for expensive computing operations. This folder contain the C/C++ code to build the libraires.
-
-Instructions:
-
-# if conda is not available
-cd third_party/ASL
-./get.ASL
-cd solvers
-./configurehere
-make # remove -DNo_dtoa from cflags in makefile
-cd ../../
-mkdir build
-cd build
-cmake ..
-make
-cp asl_interface/libpynumero_ASL* ../../extensions/lib/
-cp sparse_utils/libpynumero_SPARSE* ../../extensions/lib/
-
-# if conda is available and want to link to ASL in ampl-mp
-conda install -c conda-forge ampl-mp
-mkdir build
-cd build
-cmake .. -DMP_PATH=
-make
-cp asl_interface/libpynumero_ASL* ../../extensions/lib/
-cp sparse_utils/libpynumero_SPARSE* ../../extensions/lib/
-
-# if conda available and do not want to compile
-conda install -c conda-forge pynumero_libraries
-
-# Note: by default libraries are linked dynamically to stdlib. To link statically enable option -DSTATIC_LINK=ON
-
-
-
-
diff --git a/pyomo/contrib/pynumero/cmake/asl_interface/CMakeLists.txt b/pyomo/contrib/pynumero/cmake/asl_interface/CMakeLists.txt
deleted file mode 100644
index 2e5586bb049..00000000000
--- a/pyomo/contrib/pynumero/cmake/asl_interface/CMakeLists.txt
+++ /dev/null
@@ -1,67 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-
-cmake_minimum_required(VERSION 3.2)
-
-#set(USE_ASL_PATH "" CACHE FILEPATH "Set the path to the ASL solvers directory containing a compiled amplsolver.a library.")
-set(MP_PATH "" CACHE FILEPATH "Set the path to the ampl-mp package to link against ampl-mp asl library")
-
-# set the default directory for ASL
-set(DEFAULT_ASL_PATH "${PROJECT_SOURCE_DIR}/third_party/ASL/solvers")
-
-if(APPLE)
- set(ASL_SOURCES
- src/AmplInterface.cpp
- src/AssertUtils.hpp
- )
-else()
- set(ASL_SOURCES
- src/AmplInterface.cpp
- src/AssertUtils.hpp
- )
-endif()
-
-if (MP_PATH)
- ADD_LIBRARY( pynumero_ASL SHARED ${ASL_SOURCES})
-
- FIND_LIBRARY(ASL_LIB
- NAMES asl libdasl
- HINTS "${MP_PATH}/lib/"
- )
-
- set(ASL_INCLUDE_DIRS "${MP_PATH}/include/asl")
- set( ASL_LIBRARIES "${ASL_LIB}" )
- set( WITH_AMPL true)
-
-elseif(DEFAULT_ASL_PATH)
- ADD_LIBRARY( pynumero_ASL SHARED ${ASL_SOURCES})
- set(ASL_INCLUDE_DIRS "${DEFAULT_ASL_PATH}")
- set( ASL_LIBRARIES "${DEFAULT_ASL_PATH}/amplsolver.a" )
- set( WITH_AMPL true)
-else()
- MESSAGE( STATUS "*** ASL (AMPL) support not included - set USE_ASL_PATH to compile in ASL support." )
- set( ASL_INCLUDE_DIRS "" )
- set( ASL_LIBRARIES "" )
- set( WITH_AMPL false)
-endif()
-
-
-if (${WITH_AMPL})
- MESSAGE("-- ASL_INCLUDE_DIRS ${ASL_INCLUDE_DIRS}")
- MESSAGE("-- ASL_LIBRARIES ${ASL_LIBRARIES}")
- TARGET_INCLUDE_DIRECTORIES( pynumero_ASL PUBLIC ${ASL_INCLUDE_DIRS} )
- TARGET_LINK_LIBRARIES( pynumero_ASL ${ASL_LIBRARIES} ${CMAKE_DL_LIBS})
- INSTALL(TARGETS pynumero_ASL
- DESTINATION lib
- )
- INSTALL(DIRECTORY src
- DESTINATION include
- )
-endif()
diff --git a/pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.cpp b/pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.cpp
deleted file mode 100644
index a9f1503a470..00000000000
--- a/pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.cpp
+++ /dev/null
@@ -1,540 +0,0 @@
-/**___________________________________________________________________________
- *
- * Pyomo: Python Optimization Modeling Objects
- * Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
- * Under the terms of Contract DE-NA0003525 with National Technology and
- * Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
- * rights in this software.
- * This software is distributed under the 3-clause BSD License.
- * ___________________________________________________________________________
-**/
-
-#include "AmplInterface.hpp"
-#include "AssertUtils.hpp"
-#include "asl_pfgh.h"
-#include "getstub.h"
-
-#include
-
-AmplInterface::AmplInterface()
- :
- _p_asl(NULL), // pointer to the ASL struct
- _obj_direction(1), // minimize by default
- nnz_hes_lag_(-1) // cache this since sphsetup called only once
-{
-}
-
-char* new_char_p_from_std_str(std::string str)
-{
- char* ret = new char[str.length() + 1];
- strcpy(ret, str.c_str());
- return ret;
-}
-
-void AmplInterface::initialize(const char *nlfilename)
-{
- // The includes from the Ampl Solver Library
- // have a number of macros that expand to include
- // the local variable "asl".
- // For example:
- // #define X0 asl->i.X0_
- // Therefore, in many of these methods, you will
- // often see the assignment the asl pointer followed
- // by calls to the macros from the ASL.
-
- // TODO: add possible options later
- std::vector options;
-
- std::string cp_nlfilename(nlfilename);
-
- // translate options to command input
- std::vector arguments;
- arguments.push_back("pynumero");
- arguments.push_back(cp_nlfilename);
- for (const auto &opt : options) {
- arguments.push_back(opt);
- }
-
- std::vector argv;
-
- for (const auto &arg : arguments)
- argv.push_back((char *) arg.data());
- argv.push_back(nullptr);
-
- // Allocate memory for the asl structure
- ASL_pfgh *asl = (ASL_pfgh *) ASL_alloc(ASL_read_pfgh);
- _p_asl = asl; // store this pointer to write back to "asl" when necessary
- _ASSERT_(_p_asl);
-
- // Create the Option_Info structure - see getstub.h (more entries than in hooking.pdf)
- // ToDo: should allow many of these to be passed in to initialize (so different solvers
- // can set them appropriately).
- oi = new Option_Info;
- oi->sname = new_char_p_from_std_str("solver_exe_name_not_set");
- oi->bsname = new_char_p_from_std_str("Solver_name_not_set");
- oi->opname = new_char_p_from_std_str("solver_options_env_var_not_set");
- oi->keywds = NULL;
- oi->n_keywds = 0;
- oi->flags = 0;
- oi->version = NULL;
- oi->usage = NULL;
- oi->kwf = NULL;
- oi->feq = NULL;
- oi->options = NULL;
- oi->n_options = 0;
- oi->driver_date = 0;
- oi->wantsol = 0;
- oi->nS = 0;
- oi->S = NULL;
- oi->uinfo = NULL;
- oi->asl = NULL;
- oi->eqsign = NULL;
- oi->n_badopts = 0;
- oi->option_echo = 0;
- oi->nnl = 0;
-
- // read the options and get the name of the .nl file (stub)
- char *stub = getstops(argv.data(), oi);
-
- delete[] oi->sname;
- oi->sname = NULL;
- delete[] oi->bsname;
- oi->bsname = NULL;
- delete[] oi->opname;
- oi->opname = NULL;
- // this pointer may need to be stored for the call to write_sol
- //delete oi;
-
- FILE *nl = this->open_nl(asl, stub);
- _ASSERT_(nl != NULL);
-
- // want initial values for the variables and the
- // multipliers
- want_xpi0 = 1 | 2;
- // allocate space in the ASL structure for the initial values
- X0 = new double[n_var];
- havex0 = new char[n_var];
- pi0 = new double[n_con];
- havepi0 = new char[n_con];
-
- _ASSERT_EXIT_(n_var > 0, "Problem does not have any continuous variables");
- _ASSERT_EXIT_(nbv == 0 && niv == 0, "PyNumero does not support discrete variables");
- _ASSERT_EXIT_(nwv == 0 && nlnc == 0 && lnc == 0,
- "PyNumero does not support network constraints");
- _ASSERT_EXIT_(n_cc == 0, "PyNumero does not support complementarities");
-
- // call ASL to parse the nl file
- int retcode = pfgh_read(nl, ASL_findgroups);
- _ASSERT_EXIT_(retcode == ASL_readerr_none,
- "Error reading the ASL .nl file");
-
- // determine maximization or minimization
- _ASSERT_EXIT_(n_obj == 1, "PyNumero supports single objective problems only");
- _obj_direction = 1;
- if (objtype[0] != 0) {
- _obj_direction = -1;
- }
-
- // see comments in https://github.com/ampl/mp/blob/master/src/asl/solvers/changes
- // void hesset(int flags, int obj, int nnobj, int con, int nncon)
- // tells AMPL which objectives and constraints to include when building the
- // Hessian structure. Seems like:
- // obj is the obj. number to start,
- // nnobj is the number past that to include
- // con is the constraint number to start
- // nncon is the number past that to include
- // we only support single objective problems
- hesset(1, 0, 1, 0, nlc);
-
- // setup the structure for the Hessian of the Lagrangian
- nnz_hes_lag_ = sphsetup(-1, 1, 1, 1); // num obj, factor on obj, flag to indicate if multipliers supplied, and flag for upper triangular
-}
-
-AmplInterface::~AmplInterface() {
- ASL_pfgh *asl = _p_asl;
- delete[] X0;
- X0 = NULL;
- delete[] havex0;
- havex0 = NULL;
- delete[] pi0;
- pi0 = NULL;
- delete[] havepi0;
- havepi0 = NULL;
- delete oi;
-
- if (asl) {
- ASL *p_asl_to_free = (ASL *) _p_asl;
- ASL_free(&p_asl_to_free);
- _p_asl = NULL;
- }
-}
-
-int AmplInterface::get_n_vars() const {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- int n_x;
- n_x = n_var;
- return n_x;
-}
-
-int AmplInterface::get_n_constraints() const {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- int n_c;
- n_c = n_con;
- return n_c;
-}
-
-int AmplInterface::get_nnz_jac_g() const {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- int nnz_jac_g;
- nnz_jac_g = nzc;
- return nnz_jac_g;
-}
-
-int AmplInterface::get_nnz_hessian_lag() const {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(asl);
- int nnz_hes_lag;
- nnz_hes_lag = nnz_hes_lag_;
- return nnz_hes_lag;
-}
-
-void AmplInterface::get_lower_bounds_x(double *invec, int n) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(n == n_var);
- for (int i = 0; i < n; i++) {
- invec[i] = LUv[2 * i];
- }
-}
-
-void AmplInterface::get_upper_bounds_x(double *invec, int n) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(n == n_var);
-
- for (int i = 0; i < n; i++) {
- invec[i] = LUv[2 * i + 1];
- }
-}
-
-void AmplInterface::get_lower_bounds_g(double *invec, int m) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(m == n_con);
- for (int i = 0; i < m; i++) {
- invec[i] = LUrhs[2 * i];
- }
-}
-
-void AmplInterface::get_upper_bounds_g(double *invec, int m) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(m == n_con);
-
- for (int i = 0; i < m; i++) {
- invec[i] = LUrhs[2 * i + 1];
- }
-}
-
-void AmplInterface::get_init_x(double *invec, int n) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(n == n_var);
-
- for (int i = 0; i < n; i++) {
- if (havex0[i]) {
- invec[i] = X0[i];
- } else {
- invec[i] = 0.0;
- }
- }
-}
-
-void AmplInterface::get_init_multipliers(double *invec, int n) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
-
- // get dual starting point
- if (n_con == 0) { return; } // unconstrained problem or do not want to use the exist dual values
- _ASSERT_(n == n_con);
-
- for (int i = 0; i < n; i++) {
- if (havepi0[i]) {
- invec[i] = pi0[i];
- } else {
- invec[i] = 0.0;
- }
- }
-}
-
-bool AmplInterface::eval_f(double *const_x, int nx, double& f) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(n_obj == 1 && "AMPL problem must have a single objective function");
-
- fint nerror = 1;
- double retval = objval(obj_no, (double *) const_x, &nerror);
-
- if (nerror != 0) {
- return false;
- }
- f = _obj_direction * retval;
- return true;
-
-}
-
-bool AmplInterface::eval_deriv_f(double *const_x, double *deriv_f, int nx) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(n_obj == 1 && "AMPL problem must have a single objective function");
-
- fint nerror = 1;
- objgrd(obj_no, (double *) const_x, deriv_f, &nerror);
-
- if (nerror != 0) {
- return false;
- }
-
- if (_obj_direction == -1) {
- for (int i = 0; i < nx; i++) {
- deriv_f[i] *= -1.0;
- }
- }
- return true;
-}
-
-bool AmplInterface::eval_g(double *const_x, int nx, double *g, int ng) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(nx == n_var);
- _ASSERT_(ng == n_con);
-
- fint nerror = 1;
- conval((double *) const_x, g, &nerror);
- if (nerror != 0) {
- return false;
- }
- return true;
-}
-
-void AmplInterface::struct_jac_g(int *irow, int *jcol, int nnz_jac_g) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(nnz_jac_g == nzc);
- _ASSERT_(irow && jcol);
-
- // get the non zero structure of the Jacobian of g wrt x
- for (int i = 0; i < n_con; i++) {
- for (cgrad *cg = Cgrad[i]; cg; cg = cg->next) {
- irow[cg->goff] = i + 1;
- jcol[cg->goff] = cg->varno + 1;
- }
- }
-}
-
-bool AmplInterface::eval_jac_g(double *const_x, int nx, double *jac_g_values, int nnz_jac_g) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(nx == n_var);
- _ASSERT_(nnz_jac_g == nzc);
- _ASSERT_(jac_g_values);
-
- fint nerror = 1;
- jacval((double *) const_x, jac_g_values, &nerror);
- if (nerror != 0) {
- return false;
- }
- return true;
-}
-
-void AmplInterface::struct_hes_lag(int *irow, int *jcol, int nnz_hes_lag) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(nnz_hes_lag_ == nnz_hes_lag);
-
- int idx = 0;
- for (int i = 0; i < n_var; i++) {
- for (int j = sputinfo->hcolstarts[i]; j < sputinfo->hcolstarts[i + 1]; j++) {
- irow[idx] = i + 1;
- jcol[idx] = sputinfo->hrownos[j] + 1;
- idx++;
- }
- }
-}
-
-bool AmplInterface::eval_hes_lag(double *const_x,
- int nx,
- double *const_lam,
- int nc,
- double *hes_lag,
- int nnz_hes_lag,
- double obj_factor) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(_p_asl);
- _ASSERT_(nx == n_var);
- _ASSERT_(nc == n_con);
- _ASSERT_(n_obj == 1);
- _ASSERT_(nnz_hes_lag_ == nnz_hes_lag);
-
- double OW = _obj_direction * obj_factor;
- sphes(hes_lag, -1, &OW, (double *) const_lam);
- return true;
-}
-
-void AmplInterface::finalize_solution(int ampl_solve_result_num, char* msg, double *const_x, int nx, double *const_lam, int nc) {
- ASL_pfgh *asl = _p_asl;
- _ASSERT_(asl);
- _ASSERT_(const_x && const_lam);
-
- // set the AMPL solver status'
- _ASSERT_MSG_(ampl_solve_result_num >= 0 && ampl_solve_result_num < 600,
- "ampl_solve_result_num must be between 0 and 599 in AmplInterface::finalize_solution");
-
- write_sol(msg, const_cast(const_x), const_cast(const_lam), 0);
-}
-
-AmplInterfaceFile::AmplInterfaceFile()
- : AmplInterface()
-{}
-
-FILE* AmplInterfaceFile::open_nl(ASL_pfgh *asl, char* stub)
-{
- _ASSERT_EXIT_(stub, "No .nl file was specified.");
- return jac0dim(stub, (int) strlen(stub));
-}
-
-AmplInterfaceStr::AmplInterfaceStr(char* nl, size_t size)
- : AmplInterface(),
- nl_content(nl),
- nl_size(size)
-{}
-
-// THIS METHOD IS DIABLED FOR NOW
-FILE* AmplInterfaceStr::open_nl(ASL_pfgh *asl, char* stub)
-{
- // Ignore the stub and use the cached NL file content
- //#if defined(__APPLE__) && defined(__MACH__)
- //FILE* nl = fmemopen(this->nl_content, this->nl_size, "rb");
- //return jac0dim_FILE(nl);
- return NULL;
- // #elif defined(_WIN32)
- //return NULL;
- //#else
- //FILE* nl = fmemopen(this->nl_content, this->nl_size, "rb");
- //return jac0dim_FILE(nl);
- //return NULL;
- //#endif
-
-}
-
-
-extern "C"
-{
- AmplInterface *EXTERNAL_AmplInterface_new_file(char *nlfilename) {
- AmplInterface* ans = new AmplInterfaceFile();
- ans->initialize(nlfilename);
- return ans;
- }
-
- AmplInterface *EXTERNAL_AmplInterface_new_str(char *nl, size_t size) {
- AmplInterface* ans = new AmplInterfaceStr(nl, size);
- ans->initialize("membuf.nl");
- return ans;
- }
-
- AmplInterface *EXTERNAL_AmplInterface_new(char *nlfilename) {
- return EXTERNAL_AmplInterface_new_file(nlfilename);
- }
-
- int EXTERNAL_AmplInterface_n_vars(AmplInterface *p_ai) {
- return p_ai->get_n_vars();
- }
-
- int EXTERNAL_AmplInterface_n_constraints(AmplInterface *p_ai) {
- return p_ai->get_n_constraints();
- }
-
- int EXTERNAL_AmplInterface_nnz_jac_g(AmplInterface *p_ai) {
- return p_ai->get_nnz_jac_g();
- }
-
- int EXTERNAL_AmplInterface_nnz_hessian_lag(AmplInterface *p_ai) {
- return p_ai->get_nnz_hessian_lag();
- }
-
- void EXTERNAL_AmplInterface_x_lower_bounds(AmplInterface *p_ai, double *invec, int n) {
- p_ai->get_lower_bounds_x(invec, n);
- }
-
- void EXTERNAL_AmplInterface_x_upper_bounds(AmplInterface *p_ai, double *invec, int n) {
- p_ai->get_upper_bounds_x(invec, n);
- }
-
- void EXTERNAL_AmplInterface_g_lower_bounds(AmplInterface *p_ai, double *invec, int m) {
- p_ai->get_lower_bounds_g(invec, m);
- }
-
- void EXTERNAL_AmplInterface_g_upper_bounds(AmplInterface *p_ai, double *invec, int m) {
- p_ai->get_upper_bounds_g(invec, m);
- }
-
- void EXTERNAL_AmplInterface_get_init_x(AmplInterface *p_ai, double *invec, int n) {
- p_ai->get_init_x(invec, n);
- }
-
- void EXTERNAL_AmplInterface_get_init_multipliers(AmplInterface *p_ai, double *invec, int n) {
- p_ai->get_init_multipliers(invec, n);
- }
-
- bool EXTERNAL_AmplInterface_eval_f(AmplInterface *p_ai, double *invec, int n, double& f) {
- return p_ai->eval_f(invec, n, f);
- }
-
- bool EXTERNAL_AmplInterface_eval_deriv_f(AmplInterface *p_ai, double *const_x, double *deriv_f, int nx) {
- return p_ai->eval_deriv_f(const_x, deriv_f, nx);
- }
-
- bool EXTERNAL_AmplInterface_eval_g(AmplInterface *p_ai, double *const_x, int nx, double *g, int ng) {
- return p_ai->eval_g(const_x, nx, g, ng);
- }
-
- void EXTERNAL_AmplInterface_struct_jac_g(AmplInterface *p_ai, int *irow, int *jcol, int nnz_jac_g) {
- p_ai->struct_jac_g(irow, jcol, nnz_jac_g);
- }
-
- bool EXTERNAL_AmplInterface_eval_jac_g(AmplInterface *p_ai, double *const_x, int nx, double *jac_g_values,
- int nnz_jac_g) {
- return p_ai->eval_jac_g(const_x, nx, jac_g_values, nnz_jac_g);
- }
-
- void EXTERNAL_AmplInterface_struct_hes_lag(AmplInterface *p_ai, int *irow, int *jcol,
- int nnz_hes_lag) {
- p_ai->struct_hes_lag(irow, jcol, nnz_hes_lag);
- }
-
- bool EXTERNAL_AmplInterface_eval_hes_lag(AmplInterface *p_ai, double *const_x, int nx,
- double *const_lam, int nc, double *hes_lag,
- int nnz_hes_lag, double obj_factor) {
- return p_ai->eval_hes_lag(const_x, nx, const_lam, nc, hes_lag, nnz_hes_lag, obj_factor);
- }
-
- void EXTERNAL_AmplInterface_finalize_solution(AmplInterface *p_ai,
- int ampl_solve_result_num,
- char* msg,
- double *const_x, int nx,
- double *const_lam, int nc) {
- p_ai->finalize_solution(ampl_solve_result_num, msg,
- const_x, nx, const_lam, nc);
- }
-
- void EXTERNAL_AmplInterface_free_memory(AmplInterface *p_ai) {
- p_ai->~AmplInterface();
- }
-
- void EXTERNAL_AmplInterface_dummy(AmplInterface *p_ai) {
- std::cout<<"hola\n";
- }
-
-}
diff --git a/pyomo/contrib/pynumero/cmake/tests/src/simple_test.cpp b/pyomo/contrib/pynumero/cmake/tests/src/simple_test.cpp
deleted file mode 100644
index d6d4ed1e1fe..00000000000
--- a/pyomo/contrib/pynumero/cmake/tests/src/simple_test.cpp
+++ /dev/null
@@ -1,21 +0,0 @@
-/**___________________________________________________________________________
- *
- * Pyomo: Python Optimization Modeling Objects
- * Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
- * Under the terms of Contract DE-NA0003525 with National Technology and
- * Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
- * rights in this software.
- * This software is distributed under the 3-clause BSD License.
- * ___________________________________________________________________________
-**/
-#include
-#include "AmplInterface.hpp"
-
-int main()
-{
- AmplInterface* ans = new AmplInterfaceFile();
- ans->initialize("simple_nlp.nl");
- delete ans;
- std::cout << "Done\n";
- return 0;
-}
diff --git a/pyomo/contrib/pynumero/cmake/third_party/ASL/README b/pyomo/contrib/pynumero/cmake/third_party/ASL/README
deleted file mode 100644
index 0520f7009e2..00000000000
--- a/pyomo/contrib/pynumero/cmake/third_party/ASL/README
+++ /dev/null
@@ -1,16 +0,0 @@
-PyNumero relies on a patched version of the ASL that supports direct
-memory transfer of the NL file. The included getASL.sh script automates
-the process of fetching and patching the ASL source.
-
-Quick installations for fetching and building the ASL:
-
- ./getASL.sh
- cd solvers
- ./configurehere
- make
-
-If your networking environment prevents the secure download of the ASL
-from GitHub, you can run the download "insecurely" with:
-
- ./getASL.sh --insecure
-
diff --git a/pyomo/contrib/pynumero/cmake/third_party/ASL/asl.patch b/pyomo/contrib/pynumero/cmake/third_party/ASL/asl.patch
deleted file mode 100644
index 2cf56d5c3fc..00000000000
--- a/pyomo/contrib/pynumero/cmake/third_party/ASL/asl.patch
+++ /dev/null
@@ -1,55 +0,0 @@
-diff --git a/src/asl/solvers/asl.h b/src/asl/solvers/asl.h
-index 1394f64..4c4b4af 100644
---- a/asl.h
-+++ b/asl.h
-@@ -1015,6 +1015,7 @@ QPinfo {
- extern void introuble_ASL(ASL*, const char *who, real a, int jv);
- extern void introuble2_ASL(ASL*, const char *who, real a, real b, int jv);
- extern FILE *jac0dim_ASL(ASL*, const char *stub, ftnlen stub_len);
-+ extern FILE *jac0dim_FILE_ASL(ASL*, FILE* nl);
- extern int jac1dim_ASL(ASL*, const char *stub, fint *M, fint *N, fint *NO,
- fint *NZ, fint *MXROW, fint *MXCOL, ftnlen stub_len);
- extern int jac2dim_ASL (ASL*, const char *stub, fint *M, fint *N, fint *NO,
-@@ -1136,6 +1137,7 @@ extern void set_max_dtoa_threads(unsigned int);
- #define getenv getenv_ASL
- #define int_catch(f,v) intcatch_ASL((ASL*)asl,f,v)
- #define jac0dim(stub,len) jac0dim_ASL((ASL*)asl,stub,len)
-+#define jac0dim_FILE(nl) jac0dim_FILE_ASL((ASL*)asl,nl)
- #define jac1dim(s,m,n,no,nz,mxr,mxc,L) jac1dim_ASL((ASL*)asl,s,m,n,no,nz,mxr,mxc,L)
- #define jac2dim(s,m,n,no,nz,mxr,mxc,L) jac2dim_ASL((ASL*)asl,s,m,n,no,nz,mxr,mxc,L)
- #define jacdim(stub,M,N,NO,NZ,MXR,MXC,len) jac_dim_ASL((ASL*)asl,stub,M,N,NO,NZ,MXR,MXC,len)
-diff --git a/src/asl/solvers/jac0dim.c b/src/asl/solvers/jac0dim.c
-index 0bdf3eb..a88648c 100644
---- a/jac0dim.c
-+++ b/jac0dim.c
-@@ -99,10 +99,8 @@ read2(EdRead *R, int *x, int *y)
- jac0dim_ASL(ASL *asl, const char *stub, ftnlen stub_len)
- {
- FILE *nl;
-- int i, k, nlv;
-- char *s, *se;
-- const char *opfmt;
-- EdRead ER, *R;
-+ int i;
-+ char *s;
-
- if (!asl)
- badasl_ASL(asl,0,"jac0dim");
-@@ -130,6 +128,17 @@ jac0dim_ASL(ASL *asl, const char *stub, ftnlen stub_len)
- fprintf(Stderr, "can't open %s\n", filename);
- exit(1);
- }
-+ return jac0dim_FILE_ASL(asl, nl);
-+}
-+
-+FILE *
-+jac0dim_FILE_ASL(ASL *asl, FILE *nl)
-+{
-+ int i, k, nlv;
-+ char *s, *se;
-+ const char *opfmt;
-+ EdRead ER, *R;
-+
- R = EdReadInit_ASL(&ER, asl, nl, 0);
- R->Line = 0;
- s = read_line(R);
diff --git a/pyomo/contrib/pynumero/cmake/third_party/ASL/getASL.sh b/pyomo/contrib/pynumero/cmake/third_party/ASL/getASL.sh
deleted file mode 100755
index 2da4dc45968..00000000000
--- a/pyomo/contrib/pynumero/cmake/third_party/ASL/getASL.sh
+++ /dev/null
@@ -1,107 +0,0 @@
-#!/usr/bin/env bash
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-
-if test "$1" = "--insecure"; then
- DOWNLOADERS=( "wget --no-check-certificate" "curl --insecure -L -O" )
-else
- DOWNLOADERS=( "wget" "curl -L -O" )
-fi
-# Insecure wget: --no-check-certificate
-# Insecure curl: --insecure
-
-ASL=1.3.0
-TARGET=https://github.com/ampl/mp/archive/$ASL.tar.gz
-
-DOWNLOAD=
-DOWNLOADERS=( "wget" "curl -L -O" )
-for test_cmd in "${DOWNLOADERS[@]}"; do
- echo $test_cmd
- $test_cmd --help > /dev/null 2>&1
- if test $? -eq 0; then
- DOWNLOAD="$test_cmd"
- break
- fi
-done
-if test -z "$DOWNLOAD"; then
- echo "ERROR: no downloader found. Tried:"
- for test_cmd in "${DOWNLOADERS[@]}"; do
- echo " $test_cmd"
- done
- exit 1
-fi
-
-ROOT_DIR=`dirname $0`
-TGZ_FILE=`basename $TARGET`
-
-UNPACK_DIR="$ROOT_DIR/tmp-getASL"
-if test -e $UNPACK_DIR; then
- echo "Temporary directory ($UNPACK_DIR) exists!"
- echo "Cowardly refusing to overwrite."
- exit 1
-fi
-FINAL_DIR="$ROOT_DIR/solvers"
-if test -e $FINAL_DIR; then
- echo "Final installation directory ($FINAL_DIR) exists!"
- echo "Cowardly refusing to overwrite."
- exit 1
-fi
-
-function fail() {
- MSG="$1"
- shift
- while test -n "$1"; do
- popd
- shift
- done
- rm -rf "$UNPACK_DIR"
- rm -rf "$FINAL_DIR"
- echo ""
- echo "$MSG"
- echo ""
- exit 1
-}
-
-mkdir "$UNPACK_DIR" || fail "Could not create temporary dir ($UNPACK_DIR)"
-pushd "$UNPACK_DIR" || fail "Could not move to temporary dir ($UNPACK_DIR)"
-
-echo "Downloading $TARGET"
-$DOWNLOAD $TARGET
-if test $? -eq 0; then
- echo "Download complete."
-else
- fail "Download failed." 1
-fi
-
-tar -xzf $ASL.tar.gz || fail "Extracting archive failed" 1
-mv */src/asl/solvers . || fail "Did not locate ASL solvers directory" 1
-pushd solvers || fail "pushd failed"
-
-echo "Updating CFLAGS"
-
-mv makefile.u makefile.u.orig || fail "moving makefile failed" 2 1
-sed -e 's/CFLAGS = /CFLAGS = -DNo_dtoa -fPIC /g' makefile.u.orig > makefile.u \
- || fail "Updating CFLAGS failed" 2 1
-
-echo "Patching ASL"
-patch < ../../asl.patch || fail "patching ASL failed" 2 1
-
-popd || fail "popd failed" 2 1
-popd || fail "popd failed" 1
-
-mv "$UNPACK_DIR/solvers" "$FINAL_DIR" \
- || fail "Cound move ASL to final dir ($FINAL_DIR)"
-
-echo "Deleting the temporary directory"
-rm -rf "$UNPACK_DIR"
-
-echo " "
-echo "Done downloading the source code for ASL."
-echo " "
diff --git a/pyomo/contrib/pynumero/dependencies.py b/pyomo/contrib/pynumero/dependencies.py
new file mode 100644
index 00000000000..f794e238762
--- /dev/null
+++ b/pyomo/contrib/pynumero/dependencies.py
@@ -0,0 +1,30 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+from pyomo.common.dependencies import attempt_import, scipy, scipy_available
+
+# Note: sparse.BlockVector leverages the __array__ufunc__ interface
+# released in numpy 1.13
+numpy, numpy_available = attempt_import(
+ 'numpy',
+ 'Pynumero requires the optional Pyomo dependency "numpy"',
+ minimum_version='1.13.0',
+ defer_check=False)
+
+scipy_sparse, scipy_sparse_available = attempt_import(
+ 'scipy.sparse',
+ 'Pynumero requires the optional Pyomo dependency "scipy"',
+ defer_check=False)
+
+if not numpy_available:
+ numpy.generate_import_warning('pyomo.contrib.pynumero')
+
+if not scipy_available:
+ scipy_sparse.generate_import_warning('pyomo.contrib.pynumero')
diff --git a/pyomo/contrib/pynumero/examples/mumps_example.py b/pyomo/contrib/pynumero/examples/mumps_example.py
new file mode 100644
index 00000000000..13b7a77b73d
--- /dev/null
+++ b/pyomo/contrib/pynumero/examples/mumps_example.py
@@ -0,0 +1,54 @@
+import numpy as np
+import scipy.sparse as sp
+from scipy.linalg import hilbert
+from pyomo.contrib.pynumero.linalg.mumps_solver import MumpsCentralizedAssembledLinearSolver
+
+# create the matrix and the right hand sides
+N = 1000
+A = sp.coo_matrix(hilbert(N) + np.identity(N)) # a well-condition, symmetric, positive-definite matrix with off-diagonal entries
+true_x1 = np.arange(N)
+true_x2 = np.array(list(reversed(np.arange(N))))
+b1 = A * true_x1
+b2 = A * true_x2
+
+# solve
+solver = MumpsCentralizedAssembledLinearSolver()
+x1 = solver.solve(A, b1)
+x2 = solver.solve(A, b2)
+assert np.allclose(x1, true_x1)
+assert np.allclose(x2, true_x2)
+
+# only perform factorization once
+solver = MumpsCentralizedAssembledLinearSolver()
+solver.do_symbolic_factorization(A)
+solver.do_numeric_factorization(A)
+x1 = solver.do_back_solve(b1)
+x2 = solver.do_back_solve(b2)
+assert np.allclose(x1, true_x1)
+assert np.allclose(x2, true_x2)
+
+# Tell Mumps the matrix is symmetric
+# Note that the answer will be incorrect if both the lower
+# and upper portions of the matrix are given.
+solver = MumpsCentralizedAssembledLinearSolver(sym=2)
+A_lower_triangular = sp.tril(A)
+x1 = solver.solve(A_lower_triangular, b1)
+assert np.allclose(x1, true_x1)
+
+# Tell Mumps the matrix is symmetric and positive-definite
+solver = MumpsCentralizedAssembledLinearSolver(sym=1)
+A_lower_triangular = sp.tril(A)
+x1 = solver.solve(A_lower_triangular, b1)
+assert np.allclose(x1, true_x1)
+
+# Set options
+solver = MumpsCentralizedAssembledLinearSolver(icntl_options={11: 2}) # compute error stats
+solver.set_cntl(2, 1e-4) # set the stopping criteria for iterative refinement
+solver.set_icntl(10, 5) # set the maximum number of iterations for iterative refinement to 5
+solver.solve(A, b1)
+assert np.allclose(x1, true_x1)
+
+
+# Get information after the solve
+print('Number of iterations of iterative refinement performed: ', solver.get_infog(15))
+print('scaled residual: ', solver.get_rinfog(6))
diff --git a/pyomo/contrib/pynumero/examples/structured/nlp_compositions.py b/pyomo/contrib/pynumero/examples/structured/nlp_compositions.py
index b70fe151d02..a553aaa5777 100644
--- a/pyomo/contrib/pynumero/examples/structured/nlp_compositions.py
+++ b/pyomo/contrib/pynumero/examples/structured/nlp_compositions.py
@@ -9,11 +9,8 @@
# ___________________________________________________________________________
from pyomo.contrib.pynumero.interfaces.nlp import NLP
from pyomo.contrib.pynumero.sparse import (BlockMatrix,
- BlockSymMatrix,
- BlockVector,
- empty_matrix)
+ BlockVector)
from collections import OrderedDict
-import pyomo.environ as aml
import numpy as np
import pyomo.contrib.pynumero as pn
from scipy.sparse import coo_matrix, csr_matrix, identity
@@ -128,8 +125,8 @@ def _initialize_nlp_components(self, *args, **kwargs):
self._AB_coo = BlockMatrix(self.nblocks+1, self.nblocks+1)
nb = self.nblocks
for i in range(nb):
- self._AB_coo[i, i] = self._AB_csr[i, i].tocoo()
- self._AB_coo[nb, nb] = self._AB_csr[nb, nb]
+ self._AB_coo.set_block(i, i, self._AB_csr.get_block(i, i).tocoo())
+ self._AB_coo.set_block(nb, nb, self._AB_csr.get_block(nb, nb))
def _make_unmutable_caches(self):
# no need for caches here
@@ -139,24 +136,30 @@ def _create_vectors(self):
# Note: This method requires the complicated vars nz to be defined beforehand
- # init values
- self._init_x = BlockVector([nlp.x_init() for nlp in self._nlps] +
- [np.zeros(self.nz, dtype=np.double)])
-
- self._init_y = BlockVector([nlp.y_init() for nlp in self._nlps] +
- [np.zeros(self.nz, dtype=np.double) for i in range(self.nblocks)])
-
- # lower and upper bounds
-
- self._lower_x = BlockVector([nlp.xl() for nlp in self._nlps] +
- [np.full(self.nz, -np.inf, dtype=np.double)])
- self._upper_x = BlockVector([nlp.xu() for nlp in self._nlps] +
- [np.full(self.nz, np.inf, dtype=np.double)])
-
- self._lower_g = BlockVector([nlp.gl() for nlp in self._nlps] +
- [np.zeros(self.nz, dtype=np.double) for i in range(self.nblocks)])
- self._upper_g = BlockVector([nlp.gu() for nlp in self._nlps] +
- [np.zeros(self.nz, dtype=np.double) for i in range(self.nblocks)])
+ # init values and lower and upper bounds
+ self._init_x = BlockVector(len(self._nlps) + 1)
+ self._init_y = BlockVector(len(self._nlps) + self.nblocks)
+ self._lower_x = BlockVector(len(self._nlps) + 1)
+ self._upper_x = BlockVector(len(self._nlps) + 1)
+ self._lower_g = BlockVector(len(self._nlps) + self.nblocks)
+ self._upper_g = BlockVector(len(self._nlps) + self.nblocks)
+ ndx = 0
+ for nlp in self._nlps:
+ self._init_x.set_block(ndx, nlp.x_init())
+ self._init_y.set_block(ndx, nlp.y_init())
+ self._lower_x.set_block(ndx, nlp.xl())
+ self._upper_x.set_block(ndx, nlp.xu())
+ self._lower_g.set_block(ndx, nlp.gl())
+ self._upper_g.set_block(ndx, nlp.gu())
+ ndx += 1
+ self._init_x.set_block(ndx, np.zeros(self.nz, dtype=np.double))
+ self._lower_x.set_block(ndx, np.full(self.nz, -np.inf, dtype=np.double))
+ self._upper_x.set_block(ndx, np.full(self.nz, np.inf, dtype=np.double))
+ for i in range(self.nblocks):
+ self._init_y.set_block(ndx, np.zeros(self.nz, dtype=np.double))
+ self._lower_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
+ self._upper_g.set_block(ndx, np.zeros(self.nz, dtype=np.double))
+ ndx += 1
# define x maps and masks
self._lower_x_mask = np.isfinite(self._lower_x)
@@ -182,11 +185,15 @@ def _create_vectors(self):
self._upper_d_mask = pn.isin(self._d_map, self._upper_g_map)
# remove empty vectors at the end of lower and upper d
- self._lower_d_mask = \
- BlockVector([self._lower_d_mask[i] for i in range(self.nblocks)])
+ _lower_d_mask = BlockVector(self.nblocks)
+ for i in range(self.nblocks):
+ _lower_d_mask.set_block(i, self._lower_d_mask.get_block(i))
+ self._lower_d_mask = _lower_d_mask
- self._upper_d_mask = \
- BlockVector([self._upper_d_mask[i] for i in range(self.nblocks)])
+ _upper_d_mask = BlockVector(self.nblocks)
+ for i in range(self.nblocks):
+ _upper_d_mask.set_block(i, self._upper_d_mask.get_block(i))
+ self._upper_d_mask = _upper_d_mask
# define lower and upper d maps
self._lower_d_map = pn.where(self._lower_d_mask)[0]
@@ -197,8 +204,13 @@ def _create_vectors(self):
self._upper_d = np.compress(self._d_mask, self._upper_g)
# remove empty vectors at the end of lower and upper d
- self._lower_d = BlockVector([self._lower_d[i] for i in range(self.nblocks)])
- self._upper_d = BlockVector([self._upper_d[i] for i in range(self.nblocks)])
+ _lower_d = BlockVector(self.nblocks)
+ _upper_d = BlockVector(self.nblocks)
+ for i in range(self.nblocks):
+ _lower_d.set_block(i, self._lower_d.get_block(i))
+ _upper_d.set_block(i, self._upper_d.get_block(i))
+ self._lower_d = _lower_d
+ self._upper_d = _upper_d
def _create_jacobian_structures(self):
@@ -209,7 +221,7 @@ def _create_jacobian_structures(self):
jac_g = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
xi = nlp.x_init()
- jac_g[sid, sid] = nlp.jacobian_g(xi)
+ jac_g.set_block(sid, sid, nlp.jacobian_g(xi))
# coupling matrices Ai
scenario_vids = self._zid_to_vid[sid]
@@ -232,7 +244,7 @@ def _create_jacobian_structures(self):
jac_c = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
xi = nlp.x_init()
- jac_c[sid, sid] = nlp.jacobian_c(xi)
+ jac_c.set_block(sid, sid, nlp.jacobian_c(xi))
# coupling matrices Ai
scenario_vids = self._zid_to_vid[sid]
@@ -255,7 +267,7 @@ def _create_jacobian_structures(self):
jac_d = BlockMatrix(self.nblocks, self.nblocks)
for sid, nlp in enumerate(self._nlps):
xi = nlp.x_init()
- jac_d[sid, sid] = nlp.jacobian_d(xi)
+ jac_d.set_block(sid, sid, nlp.jacobian_d(xi))
self._internal_jacobian_d = jac_d
flat_jac_d = jac_d.tocoo()
self._irows_jac_d = flat_jac_d.row
@@ -269,13 +281,13 @@ def _create_hessian_structure(self):
# Note: This method requires the complicated vars map to be
# created beforehand
- hess_lag = BlockSymMatrix(self.nblocks + 1)
+ hess_lag = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
xi = nlp.x_init()
yi = nlp.y_init()
- hess_lag[sid, sid] = nlp.hessian_lag(xi, yi)
+ hess_lag.set_block(sid, sid, nlp.hessian_lag(xi, yi))
- hess_lag[self.nblocks, self.nblocks] = empty_matrix(self.nz, self.nz)
+ hess_lag[self.nblocks, self.nblocks] = coo_matrix((self.nz, self.nz))
flat_hess = hess_lag.tocoo()
self._irows_hess = flat_hess.row
@@ -387,12 +399,12 @@ def objective(self, x, **kwargs):
"""
if isinstance(x, BlockVector):
- return sum(self._nlps[i].objective(x[i]) for i in range(self.nblocks))
+ return sum(self._nlps[i].objective(x.get_block(i)) for i in range(self.nblocks))
elif isinstance(x, np.ndarray):
block_x = self.create_vector_x()
block_x.copyfrom(x)
x_ = block_x
- return sum(self._nlps[i].objective(x_[i]) for i in range(self.nblocks))
+ return sum(self._nlps[i].objective(x_.get_block(i)) for i in range(self.nblocks))
else:
raise NotImplementedError("x must be a numpy array or a BlockVector")
@@ -424,7 +436,7 @@ def grad_objective(self, x, out=None, **kwargs):
assert x.size == self.nx
assert x.nblocks == self.nblocks + 1
for i in range(self.nblocks):
- self._nlps[i].grad_objective(x[i], out=df[i])
+ self._nlps[i].grad_objective(x.get_block(i), out=df.get_block(i))
return df
elif isinstance(x, np.ndarray):
assert x.size == self.nx
@@ -432,7 +444,7 @@ def grad_objective(self, x, out=None, **kwargs):
block_x.copyfrom(x)
x_ = block_x
for i in range(self.nblocks):
- self._nlps[i].grad_objective(x_[i], out=df[i])
+ self._nlps[i].grad_objective(x_.get_block(i), out=df.get_block(i))
return df
else:
raise NotImplementedError("x must be a numpy array or a BlockVector")
@@ -466,11 +478,11 @@ def evaluate_g(self, x, out=None, **kwargs):
assert x.nblocks == self.nblocks + 1
for sid in range(self.nblocks):
# evaluate gi
- self._nlps[sid].evaluate_g(x[sid], out=res[sid])
+ self._nlps[sid].evaluate_g(x.get_block(sid), out=res.get_block(sid))
# evaluate coupling Ax-z
- A = self._AB_csr[sid, sid]
- res[sid + self.nblocks] = A * x[sid] - x[self.nblocks]
+ A = self._AB_csr.get_block(sid, sid)
+ res[sid + self.nblocks] = A * x.get_block(sid) - x[self.nblocks]
return res
elif isinstance(x, np.ndarray):
assert x.size == self.nx
@@ -478,10 +490,10 @@ def evaluate_g(self, x, out=None, **kwargs):
block_x.copyfrom(x) # this is expensive
x_ = block_x
for sid in range(self.nblocks):
- self._nlps[sid].evaluate_g(x_[sid], out=res[sid])
+ self._nlps[sid].evaluate_g(x_.get_block(sid), out=res.get_block(sid))
# evaluate coupling Ax-z
- A = self._AB_csr[sid, sid]
- res[sid + self.nblocks] = A * x_[sid] - x_[self.nblocks]
+ A = self._AB_csr.get_block(sid, sid)
+ res[sid + self.nblocks] = A * x_.get_block(sid) - x_[self.nblocks]
return res
else:
raise NotImplementedError("x must be a numpy array or a BlockVector")
@@ -521,16 +533,16 @@ def evaluate_c(self, x, out=None, **kwargs):
if out is None:
return g
for bid, blk in enumerate(g):
- out[bid] = blk
+ out.set_block(bid, blk)
return out
if isinstance(x, BlockVector):
assert x.size == self.nx
assert x.nblocks == self.nblocks + 1
for sid in range(self.nblocks):
- self._nlps[sid].evaluate_c(x[sid], out=res[sid])
- A = self._AB_csr[sid, sid]
- res[sid + self.nblocks] = A * x[sid] - x[self.nblocks]
+ self._nlps[sid].evaluate_c(x.get_block(sid), out=res.get_block(sid))
+ A = self._AB_csr.get_block(sid, sid)
+ res[sid + self.nblocks] = A * x.get_block(sid) - x[self.nblocks]
return res
elif isinstance(x, np.ndarray):
assert x.size == self.nx
@@ -538,9 +550,9 @@ def evaluate_c(self, x, out=None, **kwargs):
block_x.copyfrom(x)
x_ = block_x
for sid in range(self.nblocks):
- self._nlps[sid].evaluate_c(x_[sid], out=res[sid])
- A = self._AB_csr[sid, sid]
- res[sid + self.nblocks] = A * x_[sid] - x_[self.nblocks]
+ self._nlps[sid].evaluate_c(x_.get_block(sid), out=res.get_block(sid))
+ A = self._AB_csr.get_block(sid, sid)
+ res[sid + self.nblocks] = A * x_.get_block(sid) - x_[self.nblocks]
return res
else:
raise NotImplementedError('x must be a numpy array or a BlockVector')
@@ -577,16 +589,16 @@ def evaluate_d(self, x, out=None, **kwargs):
assert evaluated_g.size == self.ng
d = evaluated_g.compress(self._d_mask)
if out is None:
- return BlockVector([d[j] for j in range(self.nblocks)])
+ return BlockVector([d.get_block(j) for j in range(self.nblocks)])
for bid in range(self.nblocks):
- out[bid] = d[bid]
+ out.set_block(bid, d.get_block(bid))
return out
if isinstance(x, BlockVector):
assert x.size == self.nx
assert x.nblocks == self.nblocks + 1
for sid in range(self.nblocks):
- self._nlps[sid].evaluate_d(x[sid], out=res[sid])
+ self._nlps[sid].evaluate_d(x.get_block(sid), out=res.get_block(sid))
return res
elif isinstance(x, np.ndarray):
assert x.size == self.nx
@@ -594,7 +606,7 @@ def evaluate_d(self, x, out=None, **kwargs):
block_x.copyfrom(x)
x_ = block_x
for sid in range(self.nblocks):
- self._nlps[sid].evaluate_d(x_[sid], out=res[sid])
+ self._nlps[sid].evaluate_d(x_.get_block(sid), out=res.get_block(sid))
return res
else:
raise NotImplementedError("x must be a numpy array or a BlockVector")
@@ -629,10 +641,10 @@ def jacobian_g(self, x, out=None, **kwargs):
if out is None:
jac_g = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- jac_g[sid, sid] = nlp.jacobian_g(xi)
+ xi = x_.get_block(sid)
+ jac_g.set_block(sid, sid, nlp.jacobian_g(xi))
# coupling matrices Ai
- jac_g[sid + self.nblocks, sid] = self._AB_coo[sid, sid]
+ jac_g[sid + self.nblocks, sid] = self._AB_coo.get_block(sid, sid)
# coupling matrices Bi
jac_g[sid + self.nblocks, self.nblocks] = -identity(self.nz)
return jac_g
@@ -641,12 +653,12 @@ def jacobian_g(self, x, out=None, **kwargs):
assert out.bshape == (2 * self.nblocks, self.nblocks + 1), "Block shape mismatch"
jac_g = out
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- nlp.jacobian_g(xi, out=jac_g[sid, sid])
+ xi = x_.get_block(sid)
+ nlp.jacobian_g(xi, out=jac_g.get_block(sid, sid))
Ai = jac_g[sid + self.nblocks, sid]
- assert Ai.shape == self._AB_coo[sid, sid].shape, \
+ assert Ai.shape == self._AB_coo.get_block(sid, sid).shape, \
'Block {} mismatch shape'.format((sid + self.nblocks, sid))
- assert Ai.nnz == self._AB_coo[sid, sid].nnz, \
+ assert Ai.nnz == self._AB_coo.get_block(sid, sid).nnz, \
'Block {} mismatch nnz'.format((sid + self.nblocks, sid))
Bi = jac_g[sid + self.nblocks, self.nblocks]
assert Bi.shape == (self.nz, self.nz), \
@@ -685,10 +697,10 @@ def jacobian_c(self, x, out=None, **kwargs):
if out is None:
jac_c = BlockMatrix(2 * self.nblocks, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- jac_c[sid, sid] = nlp.jacobian_c(xi)
+ xi = x_.get_block(sid)
+ jac_c.set_block(sid, sid, nlp.jacobian_c(xi))
# coupling matrices Ai
- jac_c[sid + self.nblocks, sid] = self._AB_coo[sid, sid]
+ jac_c[sid + self.nblocks, sid] = self._AB_coo.get_block(sid, sid)
# coupling matrices Bi
jac_c[sid + self.nblocks, self.nblocks] = -identity(self.nz)
return jac_c
@@ -697,12 +709,12 @@ def jacobian_c(self, x, out=None, **kwargs):
assert out.bshape == (2 * self.nblocks, self.nblocks + 1), "Block shape mismatch"
jac_c = out
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- nlp.jacobian_c(xi, out=jac_c[sid, sid])
+ xi = x_.get_block(sid)
+ nlp.jacobian_c(xi, out=jac_c.get_block(sid, sid))
Ai = jac_c[sid + self.nblocks, sid]
- assert Ai.shape == self._AB_coo[sid, sid].shape, \
+ assert Ai.shape == self._AB_coo.get_block(sid, sid).shape, \
'Block {} mismatch shape'.format((sid + self.nblocks, sid))
- assert Ai.nnz == self._AB_coo[sid, sid].nnz, \
+ assert Ai.nnz == self._AB_coo.get_block(sid, sid).nnz, \
'Block {} mismatch nnz'.format((sid + self.nblocks, sid))
Bi = jac_c[sid + self.nblocks, self.nblocks]
assert Bi.shape == (self.nz, self.nz), \
@@ -741,16 +753,16 @@ def jacobian_d(self, x, out=None, **kwargs):
if out is None:
jac_d = BlockMatrix(self.nblocks, self.nblocks)
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- jac_d[sid, sid] = nlp.jacobian_d(xi)
+ xi = x_.get_block(sid)
+ jac_d.set_block(sid, sid, nlp.jacobian_d(xi))
return jac_d
else:
assert isinstance(out, BlockMatrix), 'out must be a BlockMatrix'
assert out.bshape == (self.nblocks, self.nblocks), 'Block shape mismatch'
jac_d = out
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- nlp.jacobian_d(xi, out=jac_d[sid, sid])
+ xi = x_.get_block(sid)
+ nlp.jacobian_d(xi, out=jac_d.get_block(sid, sid))
return jac_d
def hessian_lag(self, x, y, out=None, **kwargs):
@@ -803,26 +815,26 @@ def hessian_lag(self, x, y, out=None, **kwargs):
raise NotImplementedError('Input vector format not recognized')
if out is None:
- hess_lag = BlockSymMatrix(self.nblocks + 1)
+ hess_lag = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- yi = y_[sid]
- hess_lag[sid, sid] = nlp.hessian_lag(xi, yi, eval_f_c=eval_f_c)
+ xi = x_.get_block(sid)
+ yi = y_.get_block(sid)
+ hess_lag.set_block(sid, sid, nlp.hessian_lag(xi, yi, eval_f_c=eval_f_c))
- hess_lag[self.nblocks, self.nblocks] = empty_matrix(self.nz, self.nz)
+ hess_lag[self.nblocks, self.nblocks] = coo_matrix((self.nz, self.nz))
return hess_lag
else:
- assert isinstance(out, BlockSymMatrix), \
- 'out must be a BlockSymMatrix'
+ assert isinstance(out, BlockMatrix), \
+ 'out must be a BlockMatrix'
assert out.bshape == (self.nblocks + 1, self.nblocks + 1), \
'Block shape mismatch'
hess_lag = out
for sid, nlp in enumerate(self._nlps):
- xi = x_[sid]
- yi = y_[sid]
+ xi = x_.get_block(sid)
+ yi = y_.get_block(sid)
nlp.hessian_lag(xi,
yi,
- out=hess_lag[sid, sid],
+ out=hess_lag.get_block(sid, sid),
eval_f_c=eval_f_c)
Hz = hess_lag[self.nblocks, self.nblocks]
@@ -894,30 +906,30 @@ def expansion_matrix_xl(self):
Pxl = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
- Pxl[sid, sid] = nlp.expansion_matrix_xl()
- Pxl[self.nblocks, self.nblocks] = empty_matrix(self.nz, 0)
+ Pxl.set_block(sid, sid, nlp.expansion_matrix_xl())
+ Pxl[self.nblocks, self.nblocks] = coo_matrix((self.nz, 0))
return Pxl
def expansion_matrix_xu(self):
Pxu = BlockMatrix(self.nblocks + 1, self.nblocks + 1)
for sid, nlp in enumerate(self._nlps):
- Pxu[sid, sid] = nlp.expansion_matrix_xu()
- Pxu[self.nblocks, self.nblocks] = empty_matrix(self.nz, 0)
+ Pxu.set_block(sid, sid, nlp.expansion_matrix_xu())
+ Pxu[self.nblocks, self.nblocks] = coo_matrix((self.nz, 0))
return Pxu
def expansion_matrix_dl(self):
Pdl = BlockMatrix(self.nblocks, self.nblocks)
for sid, nlp in enumerate(self._nlps):
- Pdl[sid, sid] = nlp.expansion_matrix_dl()
+ Pdl.set_block(sid, sid, nlp.expansion_matrix_dl())
return Pdl
def expansion_matrix_du(self):
Pdu = BlockMatrix(self.nblocks, self.nblocks)
for sid, nlp in enumerate(self._nlps):
- Pdu[sid, sid] = nlp.expansion_matrix_du()
+ Pdu.set_block(sid, sid, nlp.expansion_matrix_du())
return Pdu
def coupling_matrix(self):
@@ -927,7 +939,7 @@ def coupling_matrix(self):
col = self._zid_to_vid[sid]
row = np.arange(self.nz, dtype=np.int)
data = np.ones(self.nz)
- AB[sid, sid] = csr_matrix((data, (row, col)), shape=(self.nz, nlp.nx))
+ AB.set_block(sid, sid, csr_matrix((data, (row, col)), shape=(self.nz, nlp.nx)))
AB[self.nblocks, self.nblocks] = -identity(self.nz)
return AB
diff --git a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py
index 549c3c5e07f..b802309d2ba 100644
--- a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py
+++ b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_compositions.py
@@ -10,31 +10,22 @@
import pyutilib.th as unittest
import pyomo.environ as aml
import os
-
-from .. import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-
-import scipy.sparse as spa
-import numpy as np
-
-from pyomo.contrib.pynumero.extensions.asl import AmplInterface
+from pyomo.contrib.pynumero.asl import AmplInterface
+from pyomo.contrib.pynumero.interfaces.nlp import NLP
+from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
+from pyomo.contrib.pynumero.examples.structured.nlp_compositions import TwoStageStochasticNLP
+from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix
+from scipy_sparse import coo_matrix, identity
if not AmplInterface.available():
raise unittest.SkipTest(
"Pynumero needs the ASL extension to run NLP tests")
-from pyomo.contrib.pynumero.interfaces.nlp import NLP
-from pyomo.contrib.pynumero.interfaces.pyomo_nlp import PyomoNLP
-from pyomo.contrib.pynumero.interfaces.nlp_compositions import TwoStageStochasticNLP
-from pyomo.contrib.pynumero.sparse import (BlockVector,
- BlockMatrix,
- BlockSymMatrix,
- empty_matrix)
-
-from scipy.sparse import coo_matrix, identity
-
-
def create_basic_dense_qp(G, A, b, c, complicated_var_ids):
nx = G.shape[0]
@@ -384,8 +375,8 @@ def test_xl(self):
nz = len(self.complicated_vars_ids)
nx_i = (self.G.shape[0] + nz)
for i in range(self.n_scenarios):
- xl[i] = np.array([-np.inf]*nx_i)
- xl[i][0] = -100.0
+ xl.set_block(i, np.array([-np.inf]*nx_i))
+ xl.get_block(i)[0] = -100.0
xl[self.n_scenarios] = np.array([-np.inf] * nz)
self.assertIsInstance(self.nlp.xl(), BlockVector)
xl_flat = xl.flatten()
@@ -402,8 +393,8 @@ def test_xl(self):
self.assertIsInstance(lower_x, BlockVector)
self.assertEqual(lower_x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_x[i], xl))
- self.assertTrue(np.allclose(lower_x[n_scenarios], xl_z))
+ self.assertTrue(np.allclose(lower_x.get_block(i), xl))
+ self.assertTrue(np.allclose(lower_x.get_block(n_scenarios), xl_z))
xl = np.array([0, 0])
n_scenarios = len(self.scenarios2)
@@ -413,16 +404,16 @@ def test_xl(self):
self.assertIsInstance(lower_x, BlockVector)
self.assertEqual(lower_x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_x[i], xl))
- self.assertTrue(np.allclose(lower_x[n_scenarios], xl_z))
+ self.assertTrue(np.allclose(lower_x.get_block(i), xl))
+ self.assertTrue(np.allclose(lower_x.get_block(n_scenarios), xl_z))
def test_xu(self):
xu = BlockVector(self.n_scenarios + 1)
nz = len(self.complicated_vars_ids)
nx_i = (self.G.shape[0] + nz)
for i in range(self.n_scenarios):
- xu[i] = np.array([np.inf]*nx_i)
- xu[i][0] = 100.0
+ xu.set_block(i, np.array([np.inf]*nx_i))
+ xu.get_block(i)[0] = 100.0
xu[self.n_scenarios] = np.array([np.inf] * nz)
self.assertIsInstance(self.nlp.xu(), BlockVector)
xu_flat = xu.flatten()
@@ -439,8 +430,8 @@ def test_xu(self):
self.assertIsInstance(upper_x, BlockVector)
self.assertEqual(upper_x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_x[i], xu))
- self.assertTrue(np.allclose(upper_x[n_scenarios], xu_z))
+ self.assertTrue(np.allclose(upper_x.get_block(i), xu))
+ self.assertTrue(np.allclose(upper_x.get_block(n_scenarios), xu_z))
xu = np.array([100.0])
n_scenarios = len(self.scenarios2)
@@ -450,8 +441,8 @@ def test_xu(self):
self.assertIsInstance(upper_x, BlockVector)
self.assertEqual(upper_x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_x[i], xu))
- self.assertTrue(np.allclose(upper_x[n_scenarios], xu_z))
+ self.assertTrue(np.allclose(upper_x.get_block(i), xu))
+ self.assertTrue(np.allclose(upper_x.get_block(n_scenarios), xu_z))
def test_gl(self):
gl = [0.0, 0.0, -np.inf, -100., -500.]
@@ -461,8 +452,8 @@ def test_gl(self):
self.assertIsInstance(lower_g, BlockVector)
self.assertEqual(lower_g.nblocks, n_scenarios * 2)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_g[i], gl))
- self.assertTrue(np.allclose(lower_g[i+n_scenarios],
+ self.assertTrue(np.allclose(lower_g.get_block(i), gl))
+ self.assertTrue(np.allclose(lower_g.get_block(i+n_scenarios),
np.zeros(nz)))
gl = np.array([0.0, 0.0, -100., -500.])
@@ -473,8 +464,8 @@ def test_gl(self):
self.assertIsInstance(lower_g, BlockVector)
self.assertEqual(lower_g.nblocks, 2 * n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_g[i], gl))
- self.assertTrue(np.allclose(lower_g[i + n_scenarios],
+ self.assertTrue(np.allclose(lower_g.get_block(i), gl))
+ self.assertTrue(np.allclose(lower_g.get_block(i + n_scenarios),
gl_z))
def test_gu(self):
@@ -485,8 +476,8 @@ def test_gu(self):
self.assertIsInstance(upper_g, BlockVector)
self.assertEqual(upper_g.nblocks, n_scenarios * 2)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_g[i], gu))
- self.assertTrue(np.allclose(upper_g[i + n_scenarios],
+ self.assertTrue(np.allclose(upper_g.get_block(i), gu))
+ self.assertTrue(np.allclose(upper_g.get_block(i + n_scenarios),
np.zeros(nz)))
gu = np.array([0.0, 0.0, 100.])
@@ -497,8 +488,8 @@ def test_gu(self):
self.assertIsInstance(upper_g, BlockVector)
self.assertEqual(upper_g.nblocks, 2 * n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_g[i], gu))
- self.assertTrue(np.allclose(upper_g[i + n_scenarios],
+ self.assertTrue(np.allclose(upper_g.get_block(i), gu))
+ self.assertTrue(np.allclose(upper_g.get_block(i + n_scenarios),
gu_z))
def test_dl(self):
@@ -508,7 +499,7 @@ def test_dl(self):
self.assertIsInstance(lower_d, BlockVector)
self.assertEqual(lower_d.nblocks, n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_d[i], dl))
+ self.assertTrue(np.allclose(lower_d.get_block(i), dl))
dl = np.array([-100., -500.])
n_scenarios = len(self.scenarios2)
@@ -516,7 +507,7 @@ def test_dl(self):
self.assertIsInstance(lower_d, BlockVector)
self.assertEqual(lower_d.nblocks, n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(lower_d[i], dl))
+ self.assertTrue(np.allclose(lower_d.get_block(i), dl))
def test_du(self):
du = [100., np.inf, np.inf]
@@ -525,7 +516,7 @@ def test_du(self):
self.assertIsInstance(upper_d, BlockVector)
self.assertEqual(upper_d.nblocks, n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_d[i], du))
+ self.assertTrue(np.allclose(upper_d.get_block(i), du))
du = np.array([100.])
n_scenarios = len(self.scenarios2)
@@ -533,15 +524,15 @@ def test_du(self):
self.assertIsInstance(upper_d, BlockVector)
self.assertEqual(upper_d.nblocks, n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(upper_d[i], du))
+ self.assertTrue(np.allclose(upper_d.get_block(i), du))
def test_x_init(self):
x_init = BlockVector(self.n_scenarios + 1)
nz = len(self.complicated_vars_ids)
nx_i = (self.G.shape[0] + nz)
for i in range(self.n_scenarios):
- x_init[i] = np.zeros(nx_i)
- x_init[i][0] = 1.0
+ x_init.set_block(i, np.zeros(nx_i))
+ x_init.get_block(i)[0] = 1.0
x_init[self.n_scenarios] = np.zeros(nz)
self.assertIsInstance(self.nlp.x_init(), BlockVector)
x_init_flat = x_init.flatten()
@@ -557,8 +548,8 @@ def test_x_init(self):
self.assertIsInstance(x_init, BlockVector)
self.assertEqual(x_init.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(x_init[i], x_init_i))
- self.assertTrue(np.allclose(x_init[n_scenarios], np.zeros(nz)))
+ self.assertTrue(np.allclose(x_init.get_block(i), x_init_i))
+ self.assertTrue(np.allclose(x_init.get_block(n_scenarios), np.zeros(nz)))
def test_create_vector_x(self):
@@ -566,7 +557,7 @@ def test_create_vector_x(self):
nz = len(self.complicated_vars_ids)
nx_i = (self.G.shape[0] + nz)
for i in range(self.n_scenarios):
- x_[i] = np.zeros(nx_i)
+ x_.set_block(i, np.zeros(nx_i))
x_[self.n_scenarios] = np.zeros(nz)
self.assertEqual(x_.shape, self.nlp.create_vector_x().shape)
self.assertEqual(x_.nblocks,
@@ -581,7 +572,7 @@ def test_create_vector_x(self):
xs = self.nlp.create_vector_x(subset=s)
xs_ = BlockVector(self.n_scenarios + 1)
for i in range(self.n_scenarios):
- xs_[i] = np.zeros(1)
+ xs_.set_block(i, np.zeros(1))
xs_[self.n_scenarios] = np.zeros(0)
self.assertEqual(xs_.shape, xs.shape)
self.assertEqual(xs_.nblocks, xs.nblocks)
@@ -598,8 +589,8 @@ def test_create_vector_x(self):
self.assertIsInstance(x, BlockVector)
self.assertEqual(x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(x[i], xi))
- self.assertTrue(np.allclose(x[n_scenarios], np.zeros(nz)))
+ self.assertTrue(np.allclose(x.get_block(i), xi))
+ self.assertTrue(np.allclose(x.get_block(n_scenarios), np.zeros(nz)))
for s in ['l', 'u']:
if s == 'l':
@@ -611,8 +602,8 @@ def test_create_vector_x(self):
self.assertIsInstance(x, BlockVector)
self.assertEqual(x.nblocks, n_scenarios + 1)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(x[i], xi))
- self.assertTrue(np.allclose(x[n_scenarios], np.zeros(0)))
+ self.assertTrue(np.allclose(x.get_block(i), xi))
+ self.assertTrue(np.allclose(x.get_block(n_scenarios), np.zeros(0)))
def test_create_vector_y(self):
nz = len(self.complicated_vars_ids)
@@ -620,7 +611,7 @@ def test_create_vector_y(self):
y_ = BlockVector(2 * self.n_scenarios)
for i in range(self.n_scenarios):
- y_[i] = np.zeros(ng_i)
+ y_.set_block(i, np.zeros(ng_i))
y_[self.n_scenarios + i] = np.zeros(nz)
y = self.nlp.create_vector_y()
@@ -634,7 +625,7 @@ def test_create_vector_y(self):
# check for equalities
ys_ = BlockVector(2 * self.n_scenarios)
for i in range(self.n_scenarios):
- ys_[i] = np.zeros(ng_i)
+ ys_.set_block(i, np.zeros(ng_i))
ys_[self.n_scenarios + i] = np.zeros(nz)
ys = self.nlp.create_vector_y(subset='c')
self.assertEqual(ys_.shape, ys.shape)
@@ -647,7 +638,7 @@ def test_create_vector_y(self):
# check for inequalities
ys_ = BlockVector(self.n_scenarios)
for i in range(self.n_scenarios):
- ys_[i] = np.zeros(0)
+ ys_.set_block(i, np.zeros(0))
ys = self.nlp.create_vector_y(subset='d')
self.assertEqual(ys_.shape, ys.shape)
self.assertEqual(ys_.nblocks, ys.nblocks)
@@ -665,8 +656,8 @@ def test_create_vector_y(self):
self.assertIsInstance(y, BlockVector)
self.assertEqual(y.nblocks, 2 * n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(y[i], np.zeros(gi)))
- self.assertTrue(np.allclose(y[i + n_scenarios], np.zeros(nz)))
+ self.assertTrue(np.allclose(y.get_block(i), np.zeros(gi)))
+ self.assertTrue(np.allclose(y.get_block(i + n_scenarios), np.zeros(nz)))
for s in ['c', 'd']:
y = self.nlp2.create_vector_y(subset=s)
@@ -684,9 +675,9 @@ def test_create_vector_y(self):
gi = 1
self.assertEqual(y.nblocks, n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(y[i], np.zeros(gi)))
+ self.assertTrue(np.allclose(y.get_block(i), np.zeros(gi)))
if s == 'c':
- self.assertTrue(np.allclose(y[i + n_scenarios], np.zeros(nz)))
+ self.assertTrue(np.allclose(y.get_block(i + n_scenarios), np.zeros(nz)))
def test_nlps(self):
@@ -720,7 +711,7 @@ def test_objective(self):
x = self.nlp2.create_vector_x()
n_scenarios = len(self.scenarios2)
for i in range(n_scenarios):
- x[i][1] = 5
+ x.get_block(i)[1] = 5
self.assertEqual(25.0 * n_scenarios, self.nlp2.objective(x))
def test_grad_objective(self):
@@ -735,20 +726,20 @@ def test_grad_objective(self):
x.fill(1.0)
grad_obj = self.nlp.grad_objective(x)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(grad_obj[i], single_grad))
+ self.assertTrue(np.allclose(grad_obj.get_block(i), single_grad))
self.assertTrue(np.allclose(grad_obj[self.n_scenarios],
np.zeros(nz)))
grad_obj.fill(0.0)
self.nlp.grad_objective(x, out=grad_obj)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(grad_obj[i], single_grad))
+ self.assertTrue(np.allclose(grad_obj.get_block(i), single_grad))
self.assertTrue(np.allclose(grad_obj[self.n_scenarios],
np.zeros(nz)))
grad_obj = self.nlp.grad_objective(x.flatten())
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(grad_obj[i], single_grad))
+ self.assertTrue(np.allclose(grad_obj.get_block(i), single_grad))
self.assertTrue(np.allclose(grad_obj[self.n_scenarios],
np.zeros(nz)))
@@ -757,7 +748,7 @@ def test_grad_objective(self):
nz = len(self.complicated_vars_ids2)
n_scenarios = len(self.scenarios2)
for i in range(n_scenarios):
- x[i][1] = 1
+ x.get_block(i)[1] = 1
df = self.nlp2.grad_objective(x)
self.assertIsInstance(df, BlockVector)
@@ -765,8 +756,8 @@ def test_grad_objective(self):
dfi = np.zeros(3)
dfi[1] = 2
for i in range(n_scenarios):
- self.assertTrue(np.allclose(df[i], dfi))
- self.assertTrue(np.allclose(df[n_scenarios], np.zeros(nz)))
+ self.assertTrue(np.allclose(df.get_block(i), dfi))
+ self.assertTrue(np.allclose(df.get_block(n_scenarios), np.zeros(nz)))
def test_evaluate_g(self):
@@ -776,24 +767,24 @@ def test_evaluate_g(self):
gi = np.array([-59, -38, -40, 12, 0, 0])
g = self.nlp.evaluate_g(x)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
self.assertTrue(np.allclose(g[i+self.n_scenarios], np.zeros(nz)))
g.fill(0.0)
self.nlp.evaluate_g(x, out=g)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
self.assertTrue(np.allclose(g[i + self.n_scenarios], np.zeros(nz)))
g = self.nlp.evaluate_g(x.flatten())
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
self.assertTrue(np.allclose(g[i + self.n_scenarios], np.zeros(nz)))
g.fill(0.0)
self.nlp.evaluate_g(x.flatten(), out=g)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
self.assertTrue(np.allclose(g[i + self.n_scenarios], np.zeros(nz)))
# test nlp2
@@ -811,8 +802,8 @@ def test_evaluate_g(self):
self.assertEqual(g.size, n_scenarios * (ngi + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
- self.assertTrue(np.allclose(g[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
+ self.assertTrue(np.allclose(g.get_block(i + n_scenarios), x.get_block(i)[cvars]))
# test out
g.fill(0.0)
@@ -822,8 +813,8 @@ def test_evaluate_g(self):
self.assertEqual(g.size, n_scenarios * (ngi + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(g[i], gi))
- self.assertTrue(np.allclose(g[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(g.get_block(i), gi))
+ self.assertTrue(np.allclose(g.get_block(i + n_scenarios), x.get_block(i)[cvars]))
def test_evaluate_c(self):
@@ -833,24 +824,24 @@ def test_evaluate_c(self):
ci = np.array([-59, -38, -40, 12, 0, 0])
c = self.nlp.evaluate_c(x)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
self.assertTrue(np.allclose(c[i+self.n_scenarios], np.zeros(nz)))
c.fill(0.0)
self.nlp.evaluate_c(x, out=c)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
self.assertTrue(np.allclose(c[i + self.n_scenarios], np.zeros(nz)))
c = self.nlp.evaluate_c(x.flatten())
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
self.assertTrue(np.allclose(c[i + self.n_scenarios], np.zeros(nz)))
c.fill(0.0)
self.nlp.evaluate_c(x.flatten(), out=c)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
self.assertTrue(np.allclose(c[i + self.n_scenarios], np.zeros(nz)))
# test nlp2
@@ -868,8 +859,8 @@ def test_evaluate_c(self):
self.assertEqual(c.size, n_scenarios * (nci + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
- self.assertTrue(np.allclose(c[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
+ self.assertTrue(np.allclose(c.get_block(i + n_scenarios), x.get_block(i)[cvars]))
# test out
c.fill(0.0)
@@ -879,8 +870,8 @@ def test_evaluate_c(self):
self.assertEqual(c.size, n_scenarios * (nci + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
- self.assertTrue(np.allclose(c[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
+ self.assertTrue(np.allclose(c.get_block(i + n_scenarios), x.get_block(i)[cvars]))
# tests evaluated_g
g = self.nlp2.evaluate_g(x)
@@ -890,8 +881,8 @@ def test_evaluate_c(self):
self.assertEqual(c.size, n_scenarios * (nci + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
- self.assertTrue(np.allclose(c[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
+ self.assertTrue(np.allclose(c.get_block(i + n_scenarios), x.get_block(i)[cvars]))
# tests evaluated_g with out
c.fill(0.0)
@@ -901,8 +892,8 @@ def test_evaluate_c(self):
self.assertEqual(c.size, n_scenarios * (nci + nz))
cvars = [0, 2]
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(c[i], ci))
- self.assertTrue(np.allclose(c[i + n_scenarios], x[i][cvars]))
+ self.assertTrue(np.allclose(c.get_block(i), ci))
+ self.assertTrue(np.allclose(c.get_block(i + n_scenarios), x.get_block(i)[cvars]))
def test_evaluate_d(self):
@@ -918,7 +909,7 @@ def test_evaluate_d(self):
self.assertEqual(d.nblocks, n_scenarios)
self.assertEqual(d.size, ndi * n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(di, d[i]))
+ self.assertTrue(np.allclose(di, d.get_block(i)))
# test out
d.fill(0.0)
@@ -927,7 +918,7 @@ def test_evaluate_d(self):
self.assertEqual(d.nblocks, n_scenarios)
self.assertEqual(d.size, ndi * n_scenarios)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(di, d[i]))
+ self.assertTrue(np.allclose(di, d.get_block(i)))
# test evaluated_g
g = self.nlp2.evaluate_g(x)
@@ -936,7 +927,7 @@ def test_evaluate_d(self):
self.assertEqual(d.nblocks, n_scenarios)
self.assertEqual(d.size, n_scenarios * n_scenarios)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(d[i], di))
+ self.assertTrue(np.allclose(d.get_block(i), di))
# test evaluated_g
d.fill(0.0)
@@ -945,7 +936,7 @@ def test_evaluate_d(self):
self.assertEqual(d.nblocks, n_scenarios)
self.assertEqual(d.size, n_scenarios * n_scenarios)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(d[i], di))
+ self.assertTrue(np.allclose(d.get_block(i), di))
def test_jacobian_g(self):
@@ -953,15 +944,15 @@ def test_jacobian_g(self):
nxi = nz + self.G.shape[1]
ngi = nz + self.A.shape[0]
Ji = BlockMatrix(2, 2)
- Ji[0, 0] = coo_matrix(self.A)
+ Ji.set_block(0, 0, coo_matrix(self.A))
B1 = np.zeros((nz, self.A.shape[1]))
B2 = np.zeros((nz, nz))
for i, v in enumerate(self.complicated_vars_ids):
B1[i, v] = -1.0
B2[i, i] = 1.0
- Ji[1, 0] = coo_matrix(B1)
- Ji[1, 1] = coo_matrix(B2)
- dense_Ji = Ji.todense()
+ Ji.set_block(1, 0, coo_matrix(B1))
+ Ji.set_block(1, 1, coo_matrix(B2))
+ dense_Ji = Ji.toarray()
x = self.nlp.create_vector_x()
jac_g = self.nlp.jacobian_g(x)
@@ -972,45 +963,47 @@ def test_jacobian_g(self):
# check block jacobians
for i in range(self.n_scenarios):
- jac_gi = jac_g[i, i].todense()
+ jac_gi = jac_g.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_gi, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_g[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_g[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test out
# change g values
for i in range(self.n_scenarios):
- jac_g[i, i] *= 2.0
- jac_gi = jac_g[i, i].todense()
+ _jac_g_i_i = jac_g.get_block(i, i)
+ _jac_g_i_i *= 2.0
+ jac_g.set_block(i, i, _jac_g_i_i)
+ jac_gi = jac_g.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_gi, 2*dense_Ji))
self.nlp.jacobian_g(x, out=jac_g)
# check block jacobians
for i in range(self.n_scenarios):
- jac_gi = jac_g[i, i].todense()
+ jac_gi = jac_g.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_gi, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_g[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_g[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test flattened vector
jac_g = self.nlp.jacobian_g(x.flatten())
@@ -1021,20 +1014,20 @@ def test_jacobian_g(self):
# check block jacobians
for i in range(self.n_scenarios):
- jac_gi = jac_g[i, i].todense()
+ jac_gi = jac_g.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_gi, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_g[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_g[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test nlp2
instance = self.scenarios2['s0']
@@ -1048,32 +1041,32 @@ def test_jacobian_g(self):
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
- AB[n_scenarios, n_scenarios] = AB[n_scenarios, n_scenarios].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
+ AB.set_block(n_scenarios, n_scenarios, AB.get_block(n_scenarios, n_scenarios).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
# check Ai
- self.assertIsInstance(Jc[n_scenarios + i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].row,
- AB[i, i].row))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].col,
- AB[i, i].col))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].data,
- AB[i, i].data))
+ self.assertIsInstance(Jc.get_block(n_scenarios + i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).row,
+ AB.get_block(i, i).row))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).col,
+ AB.get_block(i, i).col))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).data,
+ AB.get_block(i, i).data))
# check Bi
- coo_identity = Jc[n_scenarios + i, n_scenarios].tocoo()
+ coo_identity = Jc.get_block(n_scenarios + i, n_scenarios).tocoo()
self.assertTrue(np.allclose(coo_identity.row,
- AB[n_scenarios, n_scenarios].row))
+ AB.get_block(n_scenarios, n_scenarios).row))
self.assertTrue(np.allclose(coo_identity.col,
- AB[n_scenarios, n_scenarios].col))
+ AB.get_block(n_scenarios, n_scenarios).col))
self.assertTrue(np.allclose(coo_identity.data,
- AB[n_scenarios, n_scenarios].data))
+ AB.get_block(n_scenarios, n_scenarios).data))
# test flattened
Jc = self.nlp2.jacobian_g(x.flatten())
@@ -1081,51 +1074,53 @@ def test_jacobian_g(self):
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
- AB[n_scenarios, n_scenarios] = AB[n_scenarios, n_scenarios].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
+ AB.set_block(n_scenarios, n_scenarios, AB.get_block(n_scenarios, n_scenarios).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
# check Ai
- self.assertIsInstance(Jc[n_scenarios + i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].row,
- AB[i, i].row))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].col,
- AB[i, i].col))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].data,
- AB[i, i].data))
+ self.assertIsInstance(Jc.get_block(n_scenarios + i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).row,
+ AB.get_block(i, i).row))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).col,
+ AB.get_block(i, i).col))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).data,
+ AB.get_block(i, i).data))
# check Bi
- coo_identity = Jc[n_scenarios + i, n_scenarios].tocoo()
+ coo_identity = Jc.get_block(n_scenarios + i, n_scenarios).tocoo()
self.assertTrue(np.allclose(coo_identity.row,
- AB[n_scenarios, n_scenarios].row))
+ AB.get_block(n_scenarios, n_scenarios).row))
self.assertTrue(np.allclose(coo_identity.col,
- AB[n_scenarios, n_scenarios].col))
+ AB.get_block(n_scenarios, n_scenarios).col))
self.assertTrue(np.allclose(coo_identity.data,
- AB[n_scenarios, n_scenarios].data))
+ AB.get_block(n_scenarios, n_scenarios).data))
# test out
for i in range(n_scenarios):
- Jc[i, i] *= 2.0
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data * 2.0))
+ _Jc_i_i = Jc.get_block(i, i)
+ _Jc_i_i *= 2.0
+ Jc.set_block(i, i, _Jc_i_i)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data * 2.0))
self.nlp2.jacobian_g(x, out=Jc)
self.assertIsInstance(Jc, BlockMatrix)
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
def test_jacobian_c(self):
@@ -1133,15 +1128,15 @@ def test_jacobian_c(self):
nxi = nz + self.G.shape[1]
ngi = nz + self.A.shape[0]
Ji = BlockMatrix(2, 2)
- Ji[0, 0] = coo_matrix(self.A)
+ Ji.set_block(0, 0, coo_matrix(self.A))
B1 = np.zeros((nz, self.A.shape[1]))
B2 = np.zeros((nz, nz))
for i, v in enumerate(self.complicated_vars_ids):
B1[i, v] = -1.0
B2[i, i] = 1.0
- Ji[1, 0] = coo_matrix(B1)
- Ji[1, 1] = coo_matrix(B2)
- dense_Ji = Ji.todense()
+ Ji.set_block(1, 0, coo_matrix(B1))
+ Ji.set_block(1, 1, coo_matrix(B2))
+ dense_Ji = Ji.toarray()
x = self.nlp.create_vector_x()
jac_c = self.nlp.jacobian_c(x)
@@ -1152,45 +1147,47 @@ def test_jacobian_c(self):
# check block jacobians
for i in range(self.n_scenarios):
- jac_ci = jac_c[i, i].todense()
+ jac_ci = jac_c.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_ci, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_c[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_c[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test out
# change g values
for i in range(self.n_scenarios):
- jac_c[i, i] *= 2.0
- jac_ci = jac_c[i, i].todense()
+ _jac_c_i_i = jac_c.get_block(i, i)
+ _jac_c_i_i *= 2.0
+ jac_c.set_block(i, i, _jac_c_i_i)
+ jac_ci = jac_c.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_ci, 2 * dense_Ji))
self.nlp.jacobian_c(x, out=jac_c)
# check block jacobians
for i in range(self.n_scenarios):
- jac_ci = jac_c[i, i].todense()
+ jac_ci = jac_c.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_ci, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_c[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_c[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test flattened vector
jac_g = self.nlp.jacobian_c(x.flatten())
@@ -1201,20 +1198,20 @@ def test_jacobian_c(self):
# check block jacobians
for i in range(self.n_scenarios):
- jac_ci = jac_c[i, i].todense()
+ jac_ci = jac_c.get_block(i, i).toarray()
self.assertTrue(np.allclose(jac_ci, dense_Ji))
# check coupling jacobians
Ai_ = BlockMatrix(1, 2)
- Ai_[0, 1] = identity(nz)
- Ai_[0, 0] = empty_matrix(nz, self.G.shape[1])
- Ai_ = Ai_.todense()
- Bi_ = -identity(nz).todense()
+ Ai_.set_block(0, 1, identity(nz))
+ Ai_.set_block(0, 0, coo_matrix((nz, self.G.shape[1])))
+ Ai_ = Ai_.toarray()
+ Bi_ = -identity(nz).toarray()
for i in range(self.n_scenarios):
Ai = jac_c[self.n_scenarios + i, i]
- self.assertTrue(np.allclose(Ai.todense(), Ai_))
+ self.assertTrue(np.allclose(Ai.toarray(), Ai_))
Bi = jac_c[self.n_scenarios + i, self.n_scenarios]
- self.assertTrue(np.allclose(Bi.todense(), Bi_))
+ self.assertTrue(np.allclose(Bi.toarray(), Bi_))
# test nlp2
instance = self.scenarios2['s0']
@@ -1228,32 +1225,32 @@ def test_jacobian_c(self):
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
- AB[n_scenarios, n_scenarios] = AB[n_scenarios, n_scenarios].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
+ AB.set_block(n_scenarios, n_scenarios, AB.get_block(n_scenarios, n_scenarios).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
# check Ai
- self.assertIsInstance(Jc[n_scenarios + i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].row,
- AB[i, i].row))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].col,
- AB[i, i].col))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].data,
- AB[i, i].data))
+ self.assertIsInstance(Jc.get_block(n_scenarios + i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).row,
+ AB.get_block(i, i).row))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).col,
+ AB.get_block(i, i).col))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).data,
+ AB.get_block(i, i).data))
# check Bi
- #self.assertIsInstance(Jc[n_scenarios + i, n_scenarios], coo_matrix)
- coo_identity = Jc[n_scenarios + i, n_scenarios].tocoo()
+ #self.assertIsInstance(Jc.get_block(n_scenarios + i, n_scenarios), coo_matrix)
+ coo_identity = Jc.get_block(n_scenarios + i, n_scenarios).tocoo()
self.assertTrue(np.allclose(coo_identity.row,
- AB[n_scenarios, n_scenarios].row))
+ AB.get_block(n_scenarios, n_scenarios).row))
self.assertTrue(np.allclose(coo_identity.col,
- AB[n_scenarios, n_scenarios].col))
+ AB.get_block(n_scenarios, n_scenarios).col))
self.assertTrue(np.allclose(coo_identity.data,
- AB[n_scenarios, n_scenarios].data))
+ AB.get_block(n_scenarios, n_scenarios).data))
# test flattened
Jc = self.nlp2.jacobian_c(x.flatten())
@@ -1261,51 +1258,53 @@ def test_jacobian_c(self):
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
- AB[n_scenarios, n_scenarios] = AB[n_scenarios, n_scenarios].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
+ AB.set_block(n_scenarios, n_scenarios, AB.get_block(n_scenarios, n_scenarios).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
# check Ai
- self.assertIsInstance(Jc[n_scenarios + i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].row,
- AB[i, i].row))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].col,
- AB[i, i].col))
- self.assertTrue(np.allclose(Jc[n_scenarios + i, i].data,
- AB[i, i].data))
+ self.assertIsInstance(Jc.get_block(n_scenarios + i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).row,
+ AB.get_block(i, i).row))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).col,
+ AB.get_block(i, i).col))
+ self.assertTrue(np.allclose(Jc.get_block(n_scenarios + i, i).data,
+ AB.get_block(i, i).data))
# check Bi
- #self.assertIsInstance(Jc[n_scenarios + i, n_scenarios], coo_matrix)
- coo_identity = Jc[n_scenarios + i, n_scenarios].tocoo()
+ #self.assertIsInstance(Jc.get_block(n_scenarios + i, n_scenarios), coo_matrix)
+ coo_identity = Jc.get_block(n_scenarios + i, n_scenarios).tocoo()
self.assertTrue(np.allclose(coo_identity.row,
- AB[n_scenarios, n_scenarios].row))
+ AB.get_block(n_scenarios, n_scenarios).row))
self.assertTrue(np.allclose(coo_identity.col,
- AB[n_scenarios, n_scenarios].col))
+ AB.get_block(n_scenarios, n_scenarios).col))
self.assertTrue(np.allclose(coo_identity.data,
- AB[n_scenarios, n_scenarios].data))
+ AB.get_block(n_scenarios, n_scenarios).data))
# test out
for i in range(n_scenarios):
- Jc[i, i] *= 2.0
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data * 2.0))
+ _Jc_i_i = Jc.get_block(i, i)
+ _Jc_i_i *= 2.0
+ Jc.set_block(i, i, _Jc_i_i)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data * 2.0))
self.nlp2.jacobian_c(x, out=Jc)
self.assertIsInstance(Jc, BlockMatrix)
self.assertEqual(Jc.bshape, (2 * n_scenarios, n_scenarios + 1))
AB = self.nlp2.coupling_matrix()
for i in range(n_scenarios):
- AB[i, i] = AB[i, i].tocoo()
+ AB.set_block(i, i, AB.get_block(i, i).tocoo())
for i in range(n_scenarios):
- self.assertIsInstance(Jc[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jc[i, i].row, Jci.row))
- self.assertTrue(np.allclose(Jc[i, i].col, Jci.col))
- self.assertTrue(np.allclose(Jc[i, i].data, Jci.data))
+ self.assertIsInstance(Jc.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jc.get_block(i, i).row, Jci.row))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).col, Jci.col))
+ self.assertTrue(np.allclose(Jc.get_block(i, i).data, Jci.data))
def test_jacobian_d(self):
@@ -1319,88 +1318,95 @@ def test_jacobian_d(self):
self.assertIsInstance(Jd, BlockMatrix)
self.assertEqual(Jd.bshape, (n_scenarios, n_scenarios))
for i in range(n_scenarios):
- self.assertIsInstance(Jd[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jd[i, i].row, Jdi.row))
- self.assertTrue(np.allclose(Jd[i, i].col, Jdi.col))
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data))
+ self.assertIsInstance(Jd.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).row, Jdi.row))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).col, Jdi.col))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data))
for i in range(n_scenarios):
- Jd[i, i] *= 2.0
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data*2.0))
+ _Jd_i_i = Jd.get_block(i, i)
+ _Jd_i_i *= 2.0
+ Jd.set_block(i, i, _Jd_i_i)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data*2.0))
self.nlp2.jacobian_d(x, out=Jd)
for i in range(n_scenarios):
- self.assertIsInstance(Jd[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jd[i, i].row, Jdi.row))
- self.assertTrue(np.allclose(Jd[i, i].col, Jdi.col))
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data))
+ self.assertIsInstance(Jd.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).row, Jdi.row))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).col, Jdi.col))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data))
Jd = self.nlp2.jacobian_d(x.flatten())
self.assertIsInstance(Jd, BlockMatrix)
self.assertEqual(Jd.bshape, (n_scenarios, n_scenarios))
for i in range(n_scenarios):
- self.assertIsInstance(Jd[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jd[i, i].row, Jdi.row))
- self.assertTrue(np.allclose(Jd[i, i].col, Jdi.col))
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data))
+ self.assertIsInstance(Jd.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).row, Jdi.row))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).col, Jdi.col))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data))
for i in range(n_scenarios):
- Jd[i, i] *= 2.0
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data * 2.0))
+ _Jd_i_i = Jd.get_block(i, i)
+ _Jd_i_i *= 2.0
+ Jd.set_block(i, i, _Jd_i_i)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data * 2.0))
self.nlp2.jacobian_d(x.flatten(), out=Jd)
for i in range(n_scenarios):
- self.assertIsInstance(Jd[i, i], coo_matrix)
- self.assertTrue(np.allclose(Jd[i, i].row, Jdi.row))
- self.assertTrue(np.allclose(Jd[i, i].col, Jdi.col))
- self.assertTrue(np.allclose(Jd[i, i].data, Jdi.data))
+ self.assertIsInstance(Jd.get_block(i, i), coo_matrix)
+ self.assertTrue(np.allclose(Jd.get_block(i, i).row, Jdi.row))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).col, Jdi.col))
+ self.assertTrue(np.allclose(Jd.get_block(i, i).data, Jdi.data))
def test_hessian(self):
nz = len(self.complicated_vars_ids)
- Hi = BlockSymMatrix(2)
- Hi[0, 0] = coo_matrix(self.G)
- Hi[1, 1] = empty_matrix(nz, nz) # this is because of the way the test problem was setup
+ Hi = BlockMatrix(2, 2)
+ Hi.set_block(0, 0, coo_matrix(self.G))
+ # this is because of the way the test problem was setup
+ Hi.set_block(1, 1, coo_matrix((nz, nz)))
- Hi = Hi.todense()
+ Hi = Hi.toarray()
x = self.nlp.create_vector_x()
y = self.nlp.create_vector_y()
H = self.nlp.hessian_lag(x, y)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(H[i, i].todense(), Hi))
- self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].todense(),
- empty_matrix(nz, nz).todense()))
+ self.assertTrue(np.allclose(H.get_block(i, i).toarray(), Hi))
+ self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].toarray(),
+ coo_matrix((nz, nz)).toarray()))
# test out
# change g values
for i in range(self.n_scenarios):
- H[i, i] *= 2.0
- Hj = H[i, i].todense()
+ _H_i_i = H.get_block(i, i)
+ _H_i_i *= 2.0
+ H.set_block(i, i, _H_i_i)
+ Hj = H.get_block(i, i).toarray()
self.assertTrue(np.allclose(Hj, 2.0 * Hi))
self.nlp.hessian_lag(x, y, out=H)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(H[i, i].todense(), Hi))
- self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].todense(),
- empty_matrix(nz, nz).todense()))
+ self.assertTrue(np.allclose(H.get_block(i, i).toarray(), Hi))
+ self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].toarray(),
+ coo_matrix((nz, nz)).toarray()))
H = self.nlp.hessian_lag(x.flatten(), y)
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(H[i, i].todense(), Hi))
- self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].todense(),
- empty_matrix(nz, nz).todense()))
+ self.assertTrue(np.allclose(H.get_block(i, i).toarray(), Hi))
+ self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].toarray(),
+ coo_matrix((nz, nz)).toarray()))
H = self.nlp.hessian_lag(x.flatten(), y.flatten())
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(H[i, i].todense(), Hi))
- self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].todense(),
- empty_matrix(nz, nz).todense()))
+ self.assertTrue(np.allclose(H.get_block(i, i).toarray(), Hi))
+ self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].toarray(),
+ coo_matrix((nz, nz)).toarray()))
H = self.nlp.hessian_lag(x, y.flatten())
for i in range(self.n_scenarios):
- self.assertTrue(np.allclose(H[i, i].todense(), Hi))
- self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].todense(),
- empty_matrix(nz, nz).todense()))
+ self.assertTrue(np.allclose(H.get_block(i, i).toarray(), Hi))
+ self.assertTrue(np.allclose(H[self.n_scenarios, self.n_scenarios].toarray(),
+ coo_matrix((nz, nz)).toarray()))
def test_expansion_matrix_xl(self):
@@ -1432,8 +1438,8 @@ def test_expansion_matrix_xl(self):
all_xl = Pxl * lower_x
self.assertIsInstance(all_xl, BlockVector)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(xxi, all_xl[i]))
- self.assertTrue(np.allclose(np.zeros(nz), all_xl[n_scenarios]))
+ self.assertTrue(np.allclose(xxi, all_xl.get_block(i)))
+ self.assertTrue(np.allclose(np.zeros(nz), all_xl.get_block(n_scenarios)))
def expansion_matrix_xu(self):
@@ -1456,8 +1462,8 @@ def expansion_matrix_xu(self):
all_xu = Pxu * upper_x
self.assertIsInstance(all_xu, BlockVector)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(xxi, all_xu[i]))
- self.assertTrue(np.allclose(np.zeros(nz), all_xu[n_scenarios]))
+ self.assertTrue(np.allclose(xxi, all_xu.get_block(i)))
+ self.assertTrue(np.allclose(np.zeros(nz), all_xu.get_block(n_scenarios)))
def test_expansion_matrix_dl(self):
@@ -1481,7 +1487,7 @@ def test_expansion_matrix_dl(self):
all_dl = Pdl * lower_d
self.assertIsInstance(all_dl, BlockVector)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(ddi, all_dl[i]))
+ self.assertTrue(np.allclose(ddi, all_dl.get_block(i)))
def test_expansion_matrix_du(self):
@@ -1505,7 +1511,7 @@ def test_expansion_matrix_du(self):
all_du = Pdu * upper_d
self.assertIsInstance(all_du, BlockVector)
for i in range(n_scenarios):
- self.assertTrue(np.allclose(ddi, all_du[i]))
+ self.assertTrue(np.allclose(ddi, all_du.get_block(i)))
def test_coupling_matrix(self):
@@ -1520,7 +1526,7 @@ def test_coupling_matrix(self):
x.fill(1.0)
zs = AB * x
for i in range(n_scenarios):
- self.assertEqual(zs[i].size, nz)
- self.assertEqual(zs[n_scenarios].size, nz)
+ self.assertEqual(zs.get_block(i).size, nz)
+ self.assertEqual(zs.get_block(n_scenarios).size, nz)
diff --git a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py
index bdd034aa009..80b0442620b 100644
--- a/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py
+++ b/pyomo/contrib/pynumero/examples/structured/tests/test_nlp_transformations.py
@@ -12,14 +12,13 @@
import pyomo.environ as aml
import os
-from .. import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse as spa, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-import scipy.sparse as spa
-import numpy as np
-
-from pyomo.contrib.pynumero.extensions.asl import AmplInterface
+from pyomo.contrib.pynumero.asl import AmplInterface
if not AmplInterface.available():
raise unittest.SkipTest(
diff --git a/pyomo/contrib/pynumero/extensions/lib/Darwin/README b/pyomo/contrib/pynumero/extensions/lib/Darwin/README
deleted file mode 100644
index 838ddd9b809..00000000000
--- a/pyomo/contrib/pynumero/extensions/lib/Darwin/README
+++ /dev/null
@@ -1 +0,0 @@
-Copy PyNumero libraries here.
\ No newline at end of file
diff --git a/pyomo/contrib/pynumero/extensions/lib/Linux/README b/pyomo/contrib/pynumero/extensions/lib/Linux/README
deleted file mode 100644
index 838ddd9b809..00000000000
--- a/pyomo/contrib/pynumero/extensions/lib/Linux/README
+++ /dev/null
@@ -1 +0,0 @@
-Copy PyNumero libraries here.
\ No newline at end of file
diff --git a/pyomo/contrib/pynumero/extensions/lib/Windows/README b/pyomo/contrib/pynumero/extensions/lib/Windows/README
deleted file mode 100644
index 838ddd9b809..00000000000
--- a/pyomo/contrib/pynumero/extensions/lib/Windows/README
+++ /dev/null
@@ -1 +0,0 @@
-Copy PyNumero libraries here.
\ No newline at end of file
diff --git a/pyomo/contrib/pynumero/extensions/utils.py b/pyomo/contrib/pynumero/extensions/utils.py
deleted file mode 100644
index 744fead53c5..00000000000
--- a/pyomo/contrib/pynumero/extensions/utils.py
+++ /dev/null
@@ -1,46 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-from ctypes.util import find_library
-import sys
-import os
-
-
-def find_pynumero_library(library_name):
-
- asl_path = find_library(library_name)
- if asl_path is not None:
- return asl_path
- else:
- # try looking into extensions directory now
- file_path = os.path.abspath(__file__)
- dir_path = os.path.dirname(file_path)
-
- if os.name in ['nt', 'dos']:
- libname = 'lib/Windows/lib{}.dll'.format(library_name)
- elif sys.platform in ['darwin']:
- libname = 'lib/Darwin/lib{}.dylib'.format(library_name)
- else:
- libname = 'lib/Linux/lib{}.so'.format(library_name)
-
- asl_lib_path = os.path.join(dir_path, libname)
-
- if os.path.exists(asl_lib_path):
- return asl_lib_path
- return None
-
-
-def found_pynumero_libraries():
-
- p1 = find_pynumero_library('pynumero_ASL')
- p2 = find_pynumero_library('pynumero_SPARSE')
-
- if p1 is not None and p2 is not None:
- return True
- return False
diff --git a/pyomo/contrib/pynumero/interfaces/__init__.py b/pyomo/contrib/pynumero/interfaces/__init__.py
index b63ee7ec4bd..b40ce7ab5c8 100644
--- a/pyomo/contrib/pynumero/interfaces/__init__.py
+++ b/pyomo/contrib/pynumero/interfaces/__init__.py
@@ -8,7 +8,7 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-#from .. import numpy_available, scipy_available
+#from ..dependencies import numpy_available, scipy_available
# TODO: What do we want to import from interfaces?
#if numpy_available and scipy_available:
diff --git a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py
index a39f691d94e..b862eb935c3 100644
--- a/pyomo/contrib/pynumero/interfaces/ampl_nlp.py
+++ b/pyomo/contrib/pynumero/interfaces/ampl_nlp.py
@@ -12,7 +12,7 @@
the Ampl Solver Library (ASL) implementation
"""
try:
- import pyomo.contrib.pynumero.extensions.asl as _asl
+ import pyomo.contrib.pynumero.asl as _asl
except ImportError as e:
print('{}'.format(e))
raise ImportError('Error importing asl.'
@@ -503,6 +503,7 @@ def _evaluate_jacobians_and_cache_if_necessary(self):
# this computation into one
if not self._jac_full_is_cached:
self._asl.eval_jac_g(self._primals, self._cached_jac_full.data)
+ self._jac_full_is_cached = True
# overloaded from NLP
def evaluate_jacobian(self, out=None):
diff --git a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py
index 691d9853b07..7d434031611 100644
--- a/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py
+++ b/pyomo/contrib/pynumero/interfaces/tests/test_nlp.py
@@ -10,14 +10,13 @@
import pyutilib.th as unittest
import os
-from pyomo.contrib.pynumero import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-import scipy.sparse as sp
-import numpy as np
-
-from pyomo.contrib.pynumero.extensions.asl import AmplInterface
+from pyomo.contrib.pynumero.asl import AmplInterface
if not AmplInterface.available():
raise unittest.SkipTest(
"Pynumero needs the ASL extension to run NLP tests")
@@ -345,7 +344,7 @@ def execute_extended_nlp_interface(self, anlp):
expected_hess = np.asarray(expected_hess, dtype=np.float64)
self.assertTrue(np.array_equal(dense_hess, expected_hess))
-@unittest.skipIf(os.name in ['nt', 'dos'], "Do not test on windows")
+
class TestAslNLP(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -363,7 +362,6 @@ def test_nlp_interface(self):
anlp = AslNLP(self.filename)
execute_extended_nlp_interface(self, anlp)
-@unittest.skipIf(os.name in ['nt', 'dos'], "Do not test on windows")
class TestAmplNLP(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -441,7 +439,6 @@ def test_idxs(self):
self.assertEqual(sum(ineq_constraint_idxs), 3)
-@unittest.skipIf(os.name in ['nt', 'dos'], "Do not test on windows")
class TestPyomoNLP(unittest.TestCase):
@classmethod
def setUpClass(cls):
@@ -550,7 +547,6 @@ def test_no_objective(self):
with self.assertRaises(NotImplementedError):
nlp = PyomoNLP(m)
-@unittest.skipIf(os.name in ['nt', 'dos'], "Do not test on windows")
class TestUtils(unittest.TestCase):
@classmethod
def setUpClass(cls):
diff --git a/pyomo/contrib/pynumero/interfaces/utils.py b/pyomo/contrib/pynumero/interfaces/utils.py
index 0df36aa3731..7ca7195c0bd 100644
--- a/pyomo/contrib/pynumero/interfaces/utils.py
+++ b/pyomo/contrib/pynumero/interfaces/utils.py
@@ -9,6 +9,10 @@
# ___________________________________________________________________________
import numpy as np
from scipy.sparse import coo_matrix
+from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix
+from pyomo.common.dependencies import attempt_import
+mpi_block_vector, mpi_block_vector_available = attempt_import('pyomo.contrib.pynumero.sparse.mpi_block_vector')
+
def build_bounds_mask(vector):
"""
@@ -18,18 +22,50 @@ def build_bounds_mask(vector):
"""
return build_compression_mask_for_finite_values(vector)
+
def build_compression_matrix(compression_mask):
"""
Return a sparse matrix CM of ones such that
compressed_vector = CM*full_vector based on the
compression mask
+
+ Parameters
+ ----------
+ compression_mask: np.ndarray or pyomo.contrib.pynumero.sparse.block_vector.BlockVector
+
+ Returns
+ -------
+ cm: coo_matrix or BlockMatrix
+ The compression matrix
"""
- cols = compression_mask.nonzero()[0]
- nnz = len(cols)
- rows = np.arange(nnz, dtype=np.int)
- data = np.ones(nnz)
- return coo_matrix((data, (rows, cols)), shape=(nnz, len(compression_mask)))
-
+ if isinstance(compression_mask, BlockVector):
+ n = compression_mask.nblocks
+ res = BlockMatrix(nbrows=n, nbcols=n)
+ for ndx, block in enumerate(compression_mask):
+ sub_matrix = build_compression_matrix(block)
+ res.set_block(ndx, ndx, sub_matrix)
+ return res
+ elif type(compression_mask) is np.ndarray:
+ cols = compression_mask.nonzero()[0]
+ nnz = len(cols)
+ rows = np.arange(nnz, dtype=np.int)
+ data = np.ones(nnz)
+ return coo_matrix((data, (rows, cols)), shape=(nnz, len(compression_mask)))
+ elif isinstance(compression_mask, mpi_block_vector.MPIBlockVector):
+ from pyomo.contrib.pynumero.sparse.mpi_block_matrix import MPIBlockMatrix
+ n = compression_mask.nblocks
+ rank_ownership = np.ones((n, n), dtype=np.int64) * -1
+ for i in range(n):
+ rank_ownership[i, i] = compression_mask.rank_ownership[i]
+ res = MPIBlockMatrix(nbrows=n, nbcols=n, rank_ownership=rank_ownership, mpi_comm=compression_mask.mpi_comm)
+ for ndx in compression_mask.owned_blocks:
+ block = compression_mask.get_block(ndx)
+ sub_matrix = build_compression_matrix(block)
+ res.set_block(ndx, ndx, sub_matrix)
+ res.broadcast_block_sizes()
+ return res
+
+
def build_compression_mask_for_finite_values(vector):
"""
Creates masks for converting from the full vector of
diff --git a/pyomo/contrib/pynumero/sparse/intrinsic.py b/pyomo/contrib/pynumero/intrinsic.py
similarity index 50%
rename from pyomo/contrib/pynumero/sparse/intrinsic.py
rename to pyomo/contrib/pynumero/intrinsic.py
index 2d4e223904b..062d48f7c71 100644
--- a/pyomo/contrib/pynumero/sparse/intrinsic.py
+++ b/pyomo/contrib/pynumero/intrinsic.py
@@ -7,13 +7,25 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from pyomo.contrib.pynumero.sparse.block_vector import BlockVector
-import numpy as np
-__all__ = ['allclose', 'concatenate', 'where', 'isin']
+from pyomo.common.dependencies import numpy as np, attempt_import
+block_vector = attempt_import('pyomo.contrib.pynumero.sparse.block_vector',
+ defer_check=True)[0]
+
+def norm(x, ord=None):
+
+ f = np.linalg.norm
+ if isinstance(x, np.ndarray):
+ return f(x, ord=ord)
+ elif isinstance(x, BlockVector):
+ flat_x = x.flatten()
+ return f(flat_x, ord=ord)
+ else:
+ raise NotImplementedError()
def allclose(x1, x2, rtol, atol):
+ # this needs to be implemented for parallel
x1_flat = x1.flatten()
x2_flat = x2.flatten()
return np.allclose(x1_flat, x2_flat, rtol=rtol, atol=atol)
@@ -33,83 +45,83 @@ def where(*args):
raise TypeError('where() takes at most 3 arguments ({} given)'.format(len(args)))
n_args = len(args)
- if isinstance(condition, BlockVector):
+ if isinstance(condition, block_vector.BlockVector):
if n_args == 1:
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
for i in range(condition.nblocks):
- _args = [condition[i]]
- res[i] = where(*_args)[0]
+ _args = [condition.get_block(i)]
+ res.set_block(i, where(*_args)[0])
return (res,)
else:
x = args[1]
y = args[2]
- if isinstance(x, BlockVector) and isinstance(y, BlockVector):
+ if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
for i in range(condition.nblocks):
- _args = [condition[i], x[i], y[i]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x.get_block(i), y.get_block(i)]
+ res.set_block(i, where(*_args))
return res
- elif isinstance(x, np.ndarray) and isinstance(y, BlockVector):
+ elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x[accum: accum + nelements], y[i]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x[accum: accum + nelements], y.get_block(i)]
+ res.set_block(i, where(*_args))
accum += nelements
return res
- elif isinstance(x, BlockVector) and isinstance(y, np.ndarray):
+ elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x[i], y[accum: accum + nelements]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x.get_block(i), y[accum: accum + nelements]]
+ res.set_block(i, where(*_args))
accum += nelements
return res
- elif np.isscalar(x) and isinstance(y, BlockVector):
+ elif np.isscalar(x) and isinstance(y, block_vector.BlockVector):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.nblocks == y.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
assert condition.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x, y[i]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x, y.get_block(i)]
+ res.set_block(i, where(*_args))
accum += nelements
return res
- elif isinstance(x, BlockVector) and np.isscalar(y):
+ elif isinstance(x, block_vector.BlockVector) and np.isscalar(y):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.nblocks == x.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x[i], y]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x.get_block(i), y]
+ res.set_block(i, where(*_args))
accum += nelements
return res
@@ -117,45 +129,45 @@ def where(*args):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand'
assert x.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x[accum: accum + nelements], y[accum: accum + nelements]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x[accum: accum + nelements], y[accum: accum + nelements]]
+ res.set_block(i, where(*_args))
accum += nelements
return res
elif isinstance(x, np.ndarray) and np.isscalar(y):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert x.size == condition.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x[accum: accum + nelements], y]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x[accum: accum + nelements], y]
+ res.set_block(i, where(*_args))
accum += nelements
return res
elif np.isscalar(x) and isinstance(y, np.ndarray):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert condition.size == y.size, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
accum = 0
for i in range(condition.nblocks):
nelements = condition._brow_lengths[i]
- _args = [condition[i], x, y[accum: accum + nelements]]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x, y[accum: accum + nelements]]
+ res.set_block(i, where(*_args))
accum += nelements
return res
elif np.isscalar(x) and np.isscalar(y):
assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- res = BlockVector(condition.nblocks)
+ res = block_vector.BlockVector(condition.nblocks)
for i in range(condition.nblocks):
- _args = [condition[i], x, y]
- res[i] = where(*_args)
+ _args = [condition.get_block(i), x, y]
+ res.set_block(i, where(*_args))
return res
else:
@@ -167,10 +179,10 @@ def where(*args):
x = args[1]
y = args[2]
- if isinstance(x, BlockVector):
+ if isinstance(x, block_vector.BlockVector):
# ToDo: add logger to give warning here
x = x.flatten()
- if isinstance(y, BlockVector):
+ if isinstance(y, block_vector.BlockVector):
# ToDo: add logger to give warning here
y = y.flatten()
_args = [condition, x, y]
@@ -179,27 +191,27 @@ def where(*args):
def isin(element, test_elements, assume_unique=False, invert=False):
- if isinstance(element, BlockVector) and isinstance(test_elements, BlockVector):
+ if isinstance(element, block_vector.BlockVector) and isinstance(test_elements, block_vector.BlockVector):
assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert not test_elements.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
assert element.nblocks == test_elements.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
- res = BlockVector(element.nblocks)
+ res = block_vector.BlockVector(element.nblocks)
for i in range(element.nblocks):
- res[i] = isin(element[i],
- test_elements[i],
- assume_unique=assume_unique,
- invert=invert)
+ res.set_block(i, isin(element.get_block(i),
+ test_elements.get_block(i),
+ assume_unique=assume_unique,
+ invert=invert))
return res
- elif isinstance(element, BlockVector) and isinstance(test_elements, np.ndarray):
+ elif isinstance(element, block_vector.BlockVector) and isinstance(test_elements, np.ndarray):
assert not element.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- res = BlockVector(element.nblocks)
+ res = block_vector.BlockVector(element.nblocks)
for i in range(element.nblocks):
- res[i] = isin(element[i],
- test_elements,
- assume_unique=assume_unique,
- invert=invert)
+ res.set_block(i, isin(element.get_block(i),
+ test_elements,
+ assume_unique=assume_unique,
+ invert=invert))
return res
elif isinstance(element, np.ndarray) and isinstance(test_elements, np.ndarray):
@@ -210,4 +222,96 @@ def isin(element, test_elements, assume_unique=False, invert=False):
invert=invert)
else:
- raise NotImplementedError()
\ No newline at end of file
+ raise NotImplementedError()
+
+
+def intersect1d(ar1, ar2, assume_unique=False, return_indices=False):
+
+ if return_indices:
+ raise NotImplementedError()
+
+ if isinstance(ar1, tuple) and len(ar1) == 1:
+ x = ar1[0]
+ elif isinstance(ar1, np.ndarray) or isinstance(ar1, block_vector.BlockVector):
+ x = ar1
+ else:
+ raise RuntimeError('ar1 type not recognized. Needs to be np.ndarray or BlockVector')
+
+ if isinstance(ar2, tuple) and len(ar2) == 1:
+ y = ar2[0]
+ elif isinstance(ar2, np.ndarray) or isinstance(ar1, block_vector.BlockVector):
+ y = ar2
+ else:
+ raise RuntimeError('ar2 type not recognized. Needs to be np.ndarray or BlockVector')
+
+ if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector):
+
+ assert x.nblocks == y.nblocks, "Number of blocks does not match"
+ assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
+ res = block_vector.BlockVector(x.nblocks)
+ for i in range(x.nblocks):
+ res.set_block(i, intersect1d(x.get_block(i), y.get_block(i), assume_unique=assume_unique))
+ return res
+ elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray):
+ assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
+ res = block_vector.BlockVector(x.nblocks)
+ for i in range(x.nblocks):
+ res.set_block(i, np.intersect1d(x.get_block(i), y, assume_unique=assume_unique))
+ return res
+ elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector):
+
+ assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
+ res = block_vector.BlockVector(y.nblocks)
+ for i in range(y.nblocks):
+ res.set_block(i, np.intersect1d(x, y.get_block(i), assume_unique=assume_unique))
+ return res
+ else:
+ return np.intersect1d(x, y, assume_unique=assume_unique)
+
+
+def setdiff1d(ar1, ar2, assume_unique=False):
+
+ if isinstance(ar1, tuple) and len(ar1) == 1:
+ x = ar1[0]
+ elif isinstance(ar1, np.ndarray) or isinstance(ar1, block_vector.BlockVector):
+ x = ar1
+ else:
+ raise RuntimeError('ar1 type not recognized. Needs to be np.ndarray or BlockVector')
+
+ if isinstance(ar2, tuple) and len(ar2) == 1:
+ y = ar2[0]
+ elif isinstance(ar2, np.ndarray) or isinstance(ar1, block_vector.BlockVector):
+ y = ar2
+ else:
+ raise RuntimeError('ar2 type not recognized. Needs to be np.ndarray or BlockVector')
+
+ if isinstance(x, block_vector.BlockVector) and isinstance(y, block_vector.BlockVector):
+
+ assert x.nblocks == y.nblocks, "Number of blocks does not match"
+ assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
+ res = block_vector.BlockVector(x.nblocks)
+ for i in range(x.nblocks):
+ res.set_block(i, setdiff1d(x.get_block(i), y.get_block(i), assume_unique=assume_unique))
+ return res
+ elif isinstance(x, block_vector.BlockVector) and isinstance(y, np.ndarray):
+ assert not x.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ res = block_vector.BlockVector(x.nblocks)
+ for i in range(x.nblocks):
+ res.set_block(i, np.setdiff1d(x.get_block(i), y, assume_unique=assume_unique))
+ return res
+ elif isinstance(x, np.ndarray) and isinstance(y, block_vector.BlockVector):
+
+ assert not y.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
+ res = block_vector.BlockVector(y.nblocks)
+ for i in range(y.nblocks):
+ res.set_block(i, np.setdiff1d(x, y.get_block(i), assume_unique=assume_unique))
+ return res
+ else:
+ return np.setdiff1d(x, y, assume_unique=assume_unique)
diff --git a/pyomo/contrib/pynumero/linalg/__init__.py b/pyomo/contrib/pynumero/linalg/__init__.py
index 8e828ba5ae3..e17241568bd 100644
--- a/pyomo/contrib/pynumero/linalg/__init__.py
+++ b/pyomo/contrib/pynumero/linalg/__init__.py
@@ -8,7 +8,4 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from .. import numpy_available, scipy_available
-
-if numpy_available and scipy_available:
- from .intrinsics import *
+from ..dependencies import numpy_available, scipy_available
diff --git a/pyomo/contrib/pynumero/linalg/ma27.py b/pyomo/contrib/pynumero/linalg/ma27.py
new file mode 100644
index 00000000000..abc60124c34
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/ma27.py
@@ -0,0 +1,174 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+from pyomo.common.fileutils import find_library
+from pyomo.contrib.pynumero.linalg.utils import (validate_index,
+ validate_value, _NotSet)
+import numpy.ctypeslib as npct
+import numpy as np
+import ctypes
+import os
+
+
+class MA27Interface(object):
+
+ libname = _NotSet
+
+ @classmethod
+ def available(cls):
+ if cls.libname is _NotSet:
+ cls.libname = find_library('pynumero_MA27')
+ if cls.libname is None:
+ return False
+ return os.path.exists(cls.libname)
+
+ def __init__(self,
+ iw_factor=None,
+ a_factor=None):
+
+ if not MA27Interface.available():
+ raise RuntimeError(
+ 'Could not find pynumero_MA27 library.')
+
+ self.iw_factor = iw_factor
+ self.a_factor = a_factor
+
+ self.lib = ctypes.cdll.LoadLibrary(self.libname)
+
+ array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
+ array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
+ array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS')
+
+ # Declare arg and res types of functions:
+
+ # Do I need to specify that this function takes no argument?
+ self.lib.new_MA27_struct.restype = ctypes.c_void_p
+
+ self.lib.free_MA27_struct.argtypes = [ctypes.c_void_p]
+
+ self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
+ # Do I need to specify that this function returns nothing?
+ self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_icntl.restype = ctypes.c_int
+
+ self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double]
+ self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_cntl.restype = ctypes.c_double
+
+ self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_info.restype = ctypes.c_int
+
+ self.lib.alloc_iw_a.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.alloc_iw_b.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.alloc_a.argtypes = [ctypes.c_void_p, ctypes.c_int]
+
+ self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int,
+ ctypes.c_int, array_1d_int, array_1d_int]
+ self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int,
+ ctypes.c_int, array_1d_int, array_1d_int,
+ array_1d_double]
+ self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_1d_double]
+
+ self.icntl_len = 30
+ self.cntl_len = 5
+ self.info_len = 20
+
+ self._ma27 = self.lib.new_MA27_struct()
+
+ def __del__(self):
+ self.lib.free_MA27_struct(self._ma27)
+
+
+ def set_icntl(self, i, val):
+ validate_index(i, self.icntl_len, 'ICNTL')
+ validate_value(i, int, 'ICNTL')
+ # NOTE: Use the FORTRAN indexing (same as documentation) to
+ # set and access info/cntl arrays from Python, whereas C
+ # functions use C indexing. Maybe this is too confusing.
+ self.lib.set_icntl(self._ma27, i-1, val)
+
+
+ def get_icntl(self, i):
+ validate_index(i, self.icntl_len, 'ICNTL')
+ return self.lib.get_icntl(self._ma27, i-1)
+
+
+ def set_cntl(self, i, val):
+ validate_index(i, self.cntl_len, 'CNTL')
+ validate_value(val, float, 'CNTL')
+ self.lib.set_cntl(self._ma27, i-1, val)
+
+
+ def get_cntl(self, i):
+ validate_index(i, self.cntl_len, 'CNTL')
+ return self.lib.get_cntl(self._ma27, i-1)
+
+
+ def get_info(self, i):
+ validate_index(i, self.info_len, 'INFO')
+ return self.lib.get_info(self._ma27, i-1)
+
+
+ def do_symbolic_factorization(self, dim, irn, icn):
+ irn = irn.astype(np.intc, casting='safe', copy=True)
+ icn = icn.astype(np.intc, casting='safe', copy=True)
+ ne = irn.size
+ self.ne_cached = ne
+ self.dim_cached = dim
+ assert ne == icn.size, 'Dimension mismatch in row and column arrays'
+
+ if self.iw_factor is not None:
+ min_size = 2*ne + 3*dim + 1
+ self.lib.alloc_iw_a(self._ma27,
+ int(self.iw_factor*min_size))
+
+ self.lib.do_symbolic_factorization(self._ma27,
+ dim, ne, irn, icn)
+ return self.get_info(1)
+
+
+ def do_numeric_factorization(self, irn, icn, dim, entries):
+ irn = irn.astype(np.intc, casting='safe', copy=True)
+ icn = icn.astype(np.intc, casting='safe', copy=True)
+ assert (self.ne_cached == icn.size) and self.ne_cached == irn.size,\
+ 'Dimension mismatch in row or column array'
+
+ ent = entries.astype(np.double, casting='safe', copy=True)
+
+ ne = ent.size
+ assert ne == self.ne_cached,\
+ ('Wrong number of entries in matrix. Please re-run symbolic'
+ 'factorization with correct nonzero coordinates.')
+ assert dim == self.dim_cached,\
+ ('Dimension mismatch between symbolic and numeric factorization.'
+ 'Please re-run symbolic factorization with the correct '
+ 'dimension.')
+ if self.a_factor is not None:
+ min_size = self.get_info(5)
+ self.lib.alloc_a(self._ma27,
+ int(self.a_factor*min_size))
+ if self.iw_factor is not None:
+ min_size = self.get_info(6)
+ self.lib.alloc_iw_b(self._ma27,
+ int(self.iw_factor*min_size))
+
+ self.lib.do_numeric_factorization(self._ma27, dim, ne,
+ irn, icn, ent)
+ return self.get_info(1)
+
+
+ def do_backsolve(self, rhs):
+ rhs = rhs.astype(np.double, casting='safe', copy=True)
+ rhs_dim = rhs.size
+ assert rhs_dim == self.dim_cached,\
+ 'Dimension mismatch in right hand side. Please correct.'
+
+ self.lib.do_backsolve(self._ma27, rhs_dim, rhs)
+
+ return rhs
diff --git a/pyomo/contrib/pynumero/linalg/ma57.py b/pyomo/contrib/pynumero/linalg/ma57.py
new file mode 100644
index 00000000000..26a13e092f6
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/ma57.py
@@ -0,0 +1,217 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+from pyomo.common.fileutils import find_library
+from pyomo.contrib.pynumero.linalg.utils import (validate_index,
+ validate_value, _NotSet)
+import numpy.ctypeslib as npct
+import numpy as np
+import ctypes
+import sys
+import os
+
+class MA57Interface(object):
+
+ libname = _NotSet
+
+ @classmethod
+ def available(cls):
+ if cls.libname is _NotSet:
+ cls.libname = find_library('pynumero_MA57')
+ if cls.libname is None:
+ return False
+ return os.path.exists(cls.libname)
+
+ def __init__(self,
+ work_factor=None,
+ fact_factor=None,
+ ifact_factor=None):
+
+ if not MA57Interface.available():
+ raise RuntimeError(
+ 'Could not find pynumero_MA57 library.')
+
+ self.work_factor = work_factor
+ self.fact_factor = fact_factor
+ self.ifact_factor = ifact_factor
+
+ self.lib = ctypes.cdll.LoadLibrary(self.libname)
+
+ array_1d_double = npct.ndpointer(dtype=np.double, ndim=1, flags='CONTIGUOUS')
+ array_2d_double = npct.ndpointer(dtype=np.double, ndim=2, flags='CONTIGUOUS')
+ array_1d_int = npct.ndpointer(dtype=np.intc, ndim=1, flags='CONTIGUOUS')
+
+ # Declare arg and res types of functions:
+
+ # Do I need to specify that this function takes no argument?
+ self.lib.new_MA57_struct.restype = ctypes.c_void_p
+ # return type is pointer to MA57_struct. Why do I use c_void_p here?
+
+ self.lib.free_MA57_struct.argtypes = [ctypes.c_void_p]
+
+ self.lib.set_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
+ # Do I need to specify that this function returns nothing?
+ self.lib.get_icntl.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_icntl.restype = ctypes.c_int
+
+ self.lib.set_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double]
+ self.lib.get_cntl.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_cntl.restype = ctypes.c_double
+
+ self.lib.get_info.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_info.restype = ctypes.c_int
+
+ self.lib.get_rinfo.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.get_rinfo.restype = ctypes.c_double
+
+ self.lib.alloc_keep.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.alloc_work.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.alloc_fact.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.alloc_ifact.argtypes = [ctypes.c_void_p, ctypes.c_int]
+
+ self.lib.set_nrhs.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.set_lrhs.argtypes = [ctypes.c_void_p, ctypes.c_int]
+ self.lib.set_job.argtypes = [ctypes.c_void_p, ctypes.c_int]
+
+ self.lib.do_symbolic_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int,
+ ctypes.c_int, array_1d_int, array_1d_int]
+ self.lib.do_numeric_factorization.argtypes = [ctypes.c_void_p, ctypes.c_int,
+ ctypes.c_int, array_1d_double]
+ self.lib.do_backsolve.argtypes = [ctypes.c_void_p, ctypes.c_int, array_2d_double]
+ self.lib.do_iterative_refinement.argtypes = [ctypes.c_void_p, ctypes.c_int,
+ ctypes.c_int, array_1d_double, array_1d_int, array_1d_int,
+ array_1d_double, array_1d_double, array_1d_double]
+ self.lib.do_reallocation.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_double,
+ ctypes.c_int]
+
+ self.icntl_len = 20
+ self.cntl_len = 5
+ self.info_len = 40
+ self.rinfo_len = 20
+
+ self._ma57 = self.lib.new_MA57_struct()
+
+
+ def __del__(self):
+ self.lib.free_MA57_struct(self._ma57)
+
+
+ def set_icntl(self, i, val):
+ validate_index(i, self.icntl_len, 'ICNTL')
+ validate_value(i, int, 'ICNTL')
+ # NOTE: Use the FORTRAN indexing (same as documentation) to
+ # set and access info/cntl arrays from Python, whereas C
+ # functions use C indexing. Maybe this is too confusing.
+ self.lib.set_icntl(self._ma57, i-1, val)
+
+
+ def get_icntl(self, i):
+ validate_index(i, self.icntl_len, 'ICNTL')
+ return self.lib.get_icntl(self._ma57, i-1)
+
+
+ def set_cntl(self, i, val):
+ validate_index(i, self.cntl_len, 'CNTL')
+ validate_value(val, float, 'CNTL')
+ self.lib.set_cntl(self._ma57, i-1, val)
+
+
+ def get_cntl(self, i):
+ validate_index(i, self.cntl_len, 'CNTL')
+ return self.lib.get_cntl(self._ma57, i-1)
+
+
+ def get_info(self, i):
+ validate_index(i, self.info_len, 'INFO')
+ return self.lib.get_info(self._ma57, i-1)
+
+
+ def get_rinfo(self, i):
+ validate_index(i, self.rinfo_len, 'RINFO')
+        return self.lib.get_rinfo(self._ma57, i-1)
+
+
+ def do_symbolic_factorization(self, dim, irn, jcn):
+ irn = irn.astype(np.intc, casting='safe', copy=True)
+ jcn = jcn.astype(np.intc, casting='safe', copy=True)
+ # TODO: maybe allow user the option to specify size of KEEP
+ ne = irn.size
+ self.ne_cached = ne
+ self.dim_cached = dim
+ assert ne == jcn.size, 'Dimension mismatch in row and column arrays'
+ self.lib.do_symbolic_factorization(self._ma57,
+ dim, ne, irn, jcn)
+ return self.get_info(1)
+
+
+ def do_numeric_factorization(self, dim, entries):
+ entries = entries.astype(np.float64, casting='safe', copy=True)
+ ne = entries.size
+ assert ne == self.ne_cached,\
+            ('Wrong number of entries in matrix. Please re-run symbolic '
+ 'factorization with correct nonzero coordinates.')
+ assert dim == self.dim_cached,\
+            ('Dimension mismatch between symbolic and numeric factorization. '
+ 'Please re-run symbolic factorization with the correct '
+ 'dimension.')
+ if self.fact_factor is not None:
+ min_size = self.get_info(9)
+ self.lib.alloc_fact(self._ma57,
+ int(self.fact_factor*min_size))
+ if self.ifact_factor is not None:
+ min_size = self.get_info(10)
+ self.lib.alloc_ifact(self._ma57,
+ int(self.ifact_factor*min_size))
+
+ self.lib.do_numeric_factorization(self._ma57,
+ dim, ne, entries)
+ return self.get_info(1)
+
+
+ def do_backsolve(self, rhs):
+ rhs = rhs.astype(np.double, casting='safe', copy=True)
+ shape = rhs.shape
+ if len(shape) == 1:
+ rhs_dim = rhs.size
+ nrhs = 1
+ rhs = np.array([rhs])
+ elif len(shape) == 2:
+ # FIXME
+ raise NotImplementedError(
+                'Functionality for solving a matrix of right hand '
+                'sides is buggy and needs fixing.')
+ rhs_dim = rhs.shape[0]
+ nrhs = rhs.shape[1]
+ else:
+ raise ValueError(
+ 'Right hand side must be a one or two-dimensional array')
+ # This does not necessarily need to be true; each RHS could have length
+ # larger than N (for some reason). In the C interface, however, I assume
+ # that LRHS == N
+ assert self.dim_cached == rhs_dim, 'Dimension mismatch in RHS'
+ # TODO: Option to specify a JOB other than 1. By my understanding,
+ # different JOBs allow partial factorizations to be performed.
+ # Currently not supported - unclear if it should be.
+
+ if nrhs > 1:
+ self.lib.set_nrhs(self._ma57, nrhs)
+
+ if self.work_factor is not None:
+ self.lib.alloc_work(self._ma57,
+ int(self.work_factor*nrhs*rhs_dim))
+
+ self.lib.do_backsolve(self._ma57,
+ rhs_dim, rhs)
+
+ if len(shape) == 1:
+ # If the user input rhs as a 1D array, return the solution
+ # as a 1D array.
+ rhs = rhs[0, :]
+
+ return rhs
diff --git a/pyomo/contrib/pynumero/linalg/mumps_interface.py b/pyomo/contrib/pynumero/linalg/mumps_interface.py
new file mode 100644
index 00000000000..15037695fbe
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/mumps_interface.py
@@ -0,0 +1,192 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+from scipy.sparse import isspmatrix_coo, coo_matrix
+import numpy as np
+
+try:
+ import mumps
+except ImportError as e:
+    raise ImportError('Error importing mumps. Install pymumps with: '
+ 'conda install -c conda-forge pymumps')
+
+from pyomo.contrib.pynumero.sparse import BlockVector
+
+
+class MumpsCentralizedAssembledLinearSolver(object):
+ """
+ A thin wrapper around pymumps which uses the centralized assembled matrix format.
+ In other words ICNTL(5) = 0 and ICNTL(18) = 0.
+
+ Solve matrix * x = rhs for x.
+
+ See the Mumps documentation for descriptions of the parameters. The section numbers
+ listed below refer to the Mumps documentation for version 5.2.1.
+
+ Parameters
+ ----------
+ sym: int, optional
+ See section 5.2.1 of the Mumps documentation
+ par: int, optional
+ See section 5.1.3
+ comm: mpi4py comm, optional
+ See section 5.1.3
+ cntl_options: dict, optional
+ See section 6.2
+ icntl_options: dict, optional
+ See section 6.1
+ """
+ def __init__(self, sym=0, par=1, comm=None, cntl_options=None, icntl_options=None):
+ self._nnz = None
+ self._dim = None
+ self._mumps = mumps.DMumpsContext(sym=sym, par=par, comm=comm)
+ self._mumps.set_silent()
+ self._icntl_options = dict()
+ self._cntl_options = dict()
+
+ if cntl_options is None:
+ cntl_options = dict()
+ if icntl_options is None:
+ icntl_options = dict()
+ for k, v in cntl_options.items():
+ self.set_cntl(k, v)
+ for k, v in icntl_options.items():
+ self.set_icntl(k, v)
+
+ def _init(self):
+ """
+ The purpose of this method is to address issue #12 from pymumps
+ """
+ self._mumps.run(job=-1)
+ self._mumps.set_silent()
+ for k, v in self._cntl_options.items():
+ self.set_cntl(k, v)
+ for k, v in self._icntl_options.items():
+ self.set_icntl(k, v)
+
+ def do_symbolic_factorization(self, matrix):
+ """
+ Perform Mumps analysis.
+
+ Parameters
+ ----------
+ matrix: scipy.sparse.spmatrix or pyomo.contrib.pynumero.sparse.BlockMatrix
+ This matrix must have the same nonzero structure as the matrix passed into
+ do_numeric_factorization. The matrix will be converted to coo format if it
+ is not already in coo format. If sym is 1 or 2, the matrix must be lower
+ or upper triangular.
+ """
+ self._init()
+ if type(matrix) == np.ndarray:
+ matrix = coo_matrix(matrix)
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ nrows, ncols = matrix.shape
+ if nrows != ncols:
+ raise ValueError('matrix is not square')
+ self._dim = nrows
+ self._nnz = matrix.nnz
+ self._mumps.set_shape(nrows)
+ self._mumps.set_centralized_assembled_rows_cols(matrix.row + 1, matrix.col + 1)
+ self._mumps.run(job=1)
+
+ def do_numeric_factorization(self, matrix):
+ """
+ Perform Mumps factorization. Note that do_symbolic_factorization should be called
+ before do_numeric_factorization.
+
+ Parameters
+ ----------
+ matrix: scipy.sparse.spmatrix or pyomo.contrib.pynumero.sparse.BlockMatrix
+ This matrix must have the same nonzero structure as the matrix passed into
+ do_symbolic_factorization. The matrix will be converted to coo format if it
+ is not already in coo format. If sym is 1 or 2, the matrix must be lower
+ or upper triangular.
+ """
+ if self._nnz is None:
+ raise RuntimeError('Call do_symbolic_factorization first.')
+ if type(matrix) == np.ndarray:
+ matrix = coo_matrix(matrix)
+ if not isspmatrix_coo(matrix):
+ matrix = matrix.tocoo()
+ nrows, ncols = matrix.shape
+ if nrows != ncols:
+ raise ValueError('matrix is not square')
+ if self._dim != nrows:
+ raise ValueError('The shape of the matrix changed between symbolic and numeric factorization')
+ if self._nnz != matrix.nnz:
+ raise ValueError('The number of nonzeros changed between symbolic and numeric factorization')
+ self._mumps.set_centralized_assembled_values(matrix.data)
+ self._mumps.run(job=2)
+
+ def do_back_solve(self, rhs):
+ """
+ Perform back solve with Mumps. Note that both do_symbolic_factorization and
+ do_numeric_factorization should be called before do_back_solve.
+
+ Parameters
+ ----------
+ rhs: numpy.ndarray or pyomo.contrib.pynumero.sparse.BlockVector
+ The right hand side in matrix * x = rhs.
+
+ Returns
+ -------
+ result: numpy.ndarray or pyomo.contrib.pynumero.sparse.BlockVector
+ The x in matrix * x = rhs. If rhs is a BlockVector, then, result
+ will be a BlockVector with the same block structure as rhs.
+ """
+ if isinstance(rhs, BlockVector):
+ _rhs = rhs.flatten()
+ result = _rhs
+ else:
+ result = rhs.copy()
+
+ self._mumps.set_rhs(result)
+ self._mumps.run(job=3)
+
+ if isinstance(rhs, BlockVector):
+ _result = rhs.copy_structure()
+ _result.copyfrom(result)
+ result = _result
+
+ return result
+
+ def __del__(self):
+ self._mumps.destroy()
+
+ def set_icntl(self, key, value):
+ self._icntl_options[key] = value
+ self._mumps.set_icntl(key, value)
+
+ def set_cntl(self, key, value):
+ self._cntl_options[key] = value
+ self._mumps.id.cntl[key - 1] = value
+
+ def solve(self, matrix, rhs):
+ self.do_symbolic_factorization(matrix)
+ self.do_numeric_factorization(matrix)
+ return self.do_back_solve(rhs)
+
+ def get_icntl(self, key):
+ return self._mumps.id.icntl[key - 1]
+
+ def get_cntl(self, key):
+ return self._mumps.id.cntl[key - 1]
+
+ def get_info(self, key):
+ return self._mumps.id.info[key - 1]
+
+ def get_infog(self, key):
+ return self._mumps.id.infog[key - 1]
+
+ def get_rinfo(self, key):
+ return self._mumps.id.rinfo[key - 1]
+
+ def get_rinfog(self, key):
+ return self._mumps.id.rinfog[key - 1]
diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma27.py b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py
new file mode 100644
index 00000000000..7f831b67dae
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/tests/test_ma27.py
@@ -0,0 +1,148 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import pyutilib.th as unittest
+from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available
+if not numpy_available:
+ raise unittest.SkipTest('pynumero MA27 tests require numpy')
+from pyomo.contrib.pynumero.linalg.ma27 import *
+
+
+@unittest.skipIf(not MA27Interface.available(), reason='MA27 not available')
+class TestMA27Interface(unittest.TestCase):
+
+ def test_get_cntl(self):
+ ma27 = MA27Interface()
+ self.assertEqual(ma27.get_icntl(1), 6)
+
+ self.assertAlmostEqual(ma27.get_cntl(1), 1e-1) # Numerical pivot threshold
+ self.assertAlmostEqual(ma27.get_cntl(3), 0.0) # Null pivot threshold
+
+ def test_set_icntl(self):
+ ma27 = MA27Interface()
+ ma27.set_icntl(5, 4) # Set output printing to max verbosity
+ ma27.set_icntl(8, 1) # Keep factors when we run out of space
+ # (so MA27ED can be used)
+ icntl5 = ma27.get_icntl(5)
+ icntl8 = ma27.get_icntl(8)
+ self.assertEqual(icntl5, 4)
+ self.assertEqual(icntl8, 1)
+
+ with self.assertRaisesRegex(TypeError, 'must be an integer'):
+ ma27.set_icntl(1.0, 0)
+ with self.assertRaisesRegex(IndexError, 'is out of range'):
+ ma27.set_icntl(100, 0)
+ with self.assertRaises(ctypes.ArgumentError):
+ ma27.set_icntl(1, 0.0)
+
+ def test_set_cntl(self):
+ ma27 = MA27Interface()
+ ma27.set_cntl(1, 1e-8)
+ ma27.set_cntl(3, 1e-12)
+ self.assertAlmostEqual(ma27.get_cntl(1), 1e-8)
+ self.assertAlmostEqual(ma27.get_cntl(3), 1e-12)
+
+ def test_do_symbolic_factorization(self):
+ ma27 = MA27Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ icn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ # These arrays, copied out of HSL docs, contain Fortran indices.
+ # Interfaces accept C indices as this is what I typically expect.
+ irn = irn - 1
+ icn = icn - 1
+
+ bad_icn = np.array([1,2,3,5,3,4], dtype=np.intc)
+ # ^No need to update these indices
+
+ ma27.do_symbolic_factorization(n, irn, icn)
+
+ self.assertEqual(ma27.get_info(1), 0)
+ self.assertEqual(ma27.get_info(5), 14) # Min required num. integer words
+ self.assertEqual(ma27.get_info(6), 20) # Min required num. real words
+
+ with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'):
+ ma27.do_symbolic_factorization(n, irn, bad_icn)
+
+ def test_do_numeric_factorization(self):
+ ma27 = MA27Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ icn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ irn = irn - 1
+ icn = icn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double)
+ ma27.do_symbolic_factorization(n, irn, icn)
+
+ status = ma27.do_numeric_factorization(irn, icn, n, ent)
+ self.assertEqual(status, 0)
+
+ expected_ent = [2.,3.,4.,6.,1.,5.,1.,]
+ for i in range(ne):
+ self.assertAlmostEqual(ent[i], expected_ent[i])
+
+ self.assertEqual(ma27.get_info(15), 2) # 2 negative eigenvalues
+ self.assertEqual(ma27.get_info(14), 1) # 1 2x2 pivot
+
+ # Check that we can successfully perform another numeric factorization
+ # with same symbolic factorization
+ ent2 = np.array([1.5, 5.4, 1.2, 6.1, 4.2, 3.3, 2.0], dtype=np.double)
+ status = ma27.do_numeric_factorization(irn, icn, n, ent2)
+ self.assertEqual(ma27.get_info(15), 2)
+ self.assertEqual(status, 0)
+
+ bad_ent = np.array([2.,3.,4.,6.,1.,5.], dtype=np.double)
+ with self.assertRaisesRegex(AssertionError, 'Wrong number of entries'):
+ ma27.do_numeric_factorization(irn, icn, n, bad_ent)
+ with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'):
+ ma27.do_numeric_factorization(irn, icn, n+1, ent)
+
+ # Check that we can successfully perform another symbolic and
+ # numeric factorization with the same ma27 struct
+ #
+ # n is still 5, ne has changed to 8.
+ irn = np.array([1,1,2,2,3,3,5,1], dtype=np.intc)
+ icn = np.array([1,2,3,5,3,4,5,5], dtype=np.intc)
+ irn = irn - 1
+ icn = icn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.,3.], dtype=np.double)
+ status = ma27.do_symbolic_factorization(n, irn, icn)
+ self.assertEqual(status, 0)
+ status = ma27.do_numeric_factorization(irn, icn, n, ent)
+ self.assertEqual(status, 0)
+ self.assertEqual(ma27.get_info(15), 3)
+
+ def test_do_backsolve(self):
+ ma27 = MA27Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ icn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ irn = irn - 1
+ icn = icn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double)
+ rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double)
+ status = ma27.do_symbolic_factorization(n, irn, icn)
+ status = ma27.do_numeric_factorization(irn, icn, n, ent)
+ sol = ma27.do_backsolve(rhs)
+
+ expected_sol = [1,2,3,4,5]
+ old_rhs = np.array([8.,45.,31.,15.,17.])
+ for i in range(n):
+ self.assertAlmostEqual(sol[i], expected_sol[i])
+ self.assertEqual(old_rhs[i], rhs[i])
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/contrib/pynumero/linalg/tests/test_ma57.py b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py
new file mode 100644
index 00000000000..61def1b91b4
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/tests/test_ma57.py
@@ -0,0 +1,160 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import ctypes
+import pyutilib.th as unittest
+from pyomo.contrib.pynumero.dependencies import numpy as np, numpy_available
+if not numpy_available:
+    raise unittest.SkipTest('pynumero MA57 tests require numpy')
+from pyomo.contrib.pynumero.linalg.ma57 import *
+
+
+@unittest.skipIf(not MA57Interface.available(), reason='MA57 not available')
+class TestMA57Interface(unittest.TestCase):
+
+ def test_get_cntl(self):
+ ma57 = MA57Interface()
+ self.assertEqual(ma57.get_icntl(1), 6)
+ self.assertEqual(ma57.get_icntl(7), 1)
+
+ self.assertAlmostEqual(ma57.get_cntl(1), 1e-2) # Numerical pivot threshold
+ self.assertAlmostEqual(ma57.get_cntl(2), 1e-20) # Null pivot threshold
+
+ def test_set_icntl(self):
+ ma57 = MA57Interface()
+ ma57.set_icntl(5, 4) # Set output printing to max verbosity
+ ma57.set_icntl(8, 1) # Keep factors when we run out of space
+ # (so MA57ED can be used)
+ icntl5 = ma57.get_icntl(5)
+ icntl8 = ma57.get_icntl(8)
+ self.assertEqual(icntl5, 4)
+ self.assertEqual(icntl8, 1)
+
+ with self.assertRaisesRegex(TypeError, 'must be an integer'):
+ ma57.set_icntl(1.0, 0)
+ with self.assertRaisesRegex(IndexError, 'is out of range'):
+ ma57.set_icntl(100, 0)
+ with self.assertRaises(ctypes.ArgumentError):
+ ma57.set_icntl(1, 0.0)
+
+ def test_set_cntl(self):
+ ma57 = MA57Interface()
+ ma57.set_cntl(1, 1e-8)
+ ma57.set_cntl(2, 1e-12)
+ self.assertAlmostEqual(ma57.get_cntl(1), 1e-8)
+ self.assertAlmostEqual(ma57.get_cntl(2), 1e-12)
+
+ def test_do_symbolic_factorization(self):
+ ma57 = MA57Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ # Copied these Fortran-style indices from HSL docs.
+ # Interface expects C-style indices, as is typical in Python.
+ irn = irn - 1
+ jcn = jcn - 1
+
+ bad_jcn = np.array([1,2,3,5,3,4], dtype=np.intc)
+
+ ma57.do_symbolic_factorization(n, irn, jcn)
+
+ self.assertEqual(ma57.get_info(1), 0)
+ self.assertEqual(ma57.get_info(4), 0)
+ self.assertEqual(ma57.get_info(9), 48) # Min required length of FACT
+ self.assertEqual(ma57.get_info(10), 53) # Min required length of IFACT
+ self.assertEqual(ma57.get_info(14), 0) # Should not yet be set
+
+ with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'):
+ ma57.do_symbolic_factorization(n, irn, bad_jcn)
+
+ def test_do_numeric_factorization(self):
+ ma57 = MA57Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ irn = irn - 1
+ jcn = jcn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double)
+ ma57.do_symbolic_factorization(n, irn, jcn)
+ ma57.fact_factor = 1.5
+ ma57.ifact_factor = 1.5
+ # ^ No way to check whether these are handled properly... Would have to
+ # access the struct to get LFACT, LIFACT
+
+ status = ma57.do_numeric_factorization(n, ent)
+ self.assertEqual(status, 0)
+
+ self.assertEqual(ma57.get_info(14), 12) # 12 entries in factors
+ self.assertEqual(ma57.get_info(24), 2) # 2 negative eigenvalues
+ self.assertEqual(ma57.get_info(22), 1) # 1 2x2 pivot
+ self.assertEqual(ma57.get_info(23), 0) # 0 delayed pivots
+
+ ent2 = np.array([1.,5.,1.,6.,4.,3.,2.], dtype=np.double)
+ ma57.do_numeric_factorization(n, ent2)
+ self.assertEqual(status, 0)
+
+ bad_ent = np.array([2.,3.,4.,6.,1.,5.], dtype=np.double)
+ with self.assertRaisesRegex(AssertionError, 'Wrong number of entries'):
+ ma57.do_numeric_factorization(n, bad_ent)
+ with self.assertRaisesRegex(AssertionError, 'Dimension mismatch'):
+ ma57.do_numeric_factorization(n+1, ent)
+
+ n = 5
+ ne = 8
+ irn = np.array([1,1,2,2,3,3,5,5], dtype=np.intc)
+ jcn = np.array([1,2,3,5,3,4,5,1], dtype=np.intc)
+ irn = irn - 1
+ jcn = jcn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.,-1.3], dtype=np.double)
+ status = ma57.do_symbolic_factorization(n, irn, jcn)
+ self.assertEqual(status, 0)
+ status = ma57.do_numeric_factorization(n, ent)
+ self.assertEqual(status, 0)
+ self.assertEqual(ma57.get_info(24), 2)
+ self.assertEqual(ma57.get_info(23), 0)
+
+
+ def test_do_backsolve(self):
+ ma57 = MA57Interface()
+
+ n = 5
+ ne = 7
+ irn = np.array([1,1,2,2,3,3,5], dtype=np.intc)
+ jcn = np.array([1,2,3,5,3,4,5], dtype=np.intc)
+ irn = irn - 1
+ jcn = jcn - 1
+ ent = np.array([2.,3.,4.,6.,1.,5.,1.], dtype=np.double)
+ rhs = np.array([8.,45.,31.,15.,17.], dtype=np.double)
+ status = ma57.do_symbolic_factorization(n, irn, jcn)
+ status = ma57.do_numeric_factorization(n, ent)
+ sol = ma57.do_backsolve(rhs)
+
+ expected_sol = [1,2,3,4,5]
+ old_rhs = np.array([8.,45.,31.,15.,17.])
+ for i in range(n):
+ self.assertAlmostEqual(sol[i], expected_sol[i])
+ self.assertEqual(old_rhs[i], rhs[i])
+
+ #rhs2 = np.array([[8., 17.],
+ # [45., 15.],
+ # [31., 31.],
+ # [15., 45.],
+ # [17., 8.]], dtype=np.double)
+ #sol = ma57.do_backsolve(rhs2)
+ # FIXME
+ # This gives unexpected (incorrect) results.
+ # Need to investigate further.
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py
new file mode 100644
index 00000000000..09d602aedea
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/tests/test_mumps_interface.py
@@ -0,0 +1,71 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import pyutilib.th as unittest
+try:
+ import numpy as np
+ from scipy.sparse import coo_matrix, tril
+except ImportError:
+ raise unittest.SkipTest("Pynumero needs scipy and numpy to run linear solver tests")
+
+try:
+ from pyomo.contrib.pynumero.linalg.mumps_interface import MumpsCentralizedAssembledLinearSolver
+except ImportError:
+ raise unittest.SkipTest("Pynumero needs pymumps to run linear solver tests")
+
+from pyomo.contrib.pynumero.sparse import BlockMatrix, BlockVector
+
+
+class TestMumpsLinearSolver(unittest.TestCase):
+ def test_mumps_linear_solver(self):
+ A = np.array([[ 1, 7, 3],
+ [ 7, 4, -5],
+ [ 3, -5, 6]], dtype=np.double)
+ A = coo_matrix(A)
+ A_lower = tril(A)
+ x1 = np.arange(3) + 1
+ b1 = A * x1
+ x2 = np.array(list(reversed(x1)))
+ b2 = A * x2
+
+ solver = MumpsCentralizedAssembledLinearSolver()
+ solver.do_symbolic_factorization(A)
+ solver.do_numeric_factorization(A)
+ x = solver.do_back_solve(b1)
+ self.assertTrue(np.allclose(x, x1))
+ x = solver.do_back_solve(b2)
+ self.assertTrue(np.allclose(x, x2))
+
+ solver = MumpsCentralizedAssembledLinearSolver(sym=2)
+ x = solver.solve(A_lower, b1)
+ self.assertTrue(np.allclose(x, x1))
+
+ block_A = BlockMatrix(2, 2)
+ block_A.set_row_size(0, 2)
+ block_A.set_row_size(1, 1)
+ block_A.set_col_size(0, 2)
+ block_A.set_col_size(1, 1)
+ block_A.copyfrom(A)
+
+ block_b1 = BlockVector(2)
+ block_b1.set_block(0, b1[0:2])
+ block_b1.set_block(1, b1[2:])
+
+ block_b2 = BlockVector(2)
+ block_b2.set_block(0, b2[0:2])
+ block_b2.set_block(1, b2[2:])
+
+ solver = MumpsCentralizedAssembledLinearSolver(icntl_options={10: -3}, cntl_options={2: 1e-16})
+ solver.do_symbolic_factorization(block_A)
+ solver.do_numeric_factorization(block_A)
+ x = solver.do_back_solve(block_b1)
+ self.assertTrue(np.allclose(x, x1))
+ x = solver.do_back_solve(block_b2)
+ self.assertTrue(np.allclose(x, x2))
+ self.assertEqual(solver.get_infog(15), 3)
diff --git a/pyomo/contrib/pynumero/linalg/utils.py b/pyomo/contrib/pynumero/linalg/utils.py
new file mode 100644
index 00000000000..2c39d990757
--- /dev/null
+++ b/pyomo/contrib/pynumero/linalg/utils.py
@@ -0,0 +1,32 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+def validate_index(i, array_len, array_name=''):
+ if not isinstance(i, int):
+ raise TypeError(
+ 'Index into %s array must be an integer. Got %s'
+ % (array_name, type(i)))
+ if i < 1 or i > array_len:
+ # NOTE: Use the FORTRAN indexing (same as documentation) to
+ # set and access info/cntl arrays from Python, whereas C
+ # functions use C indexing. Maybe this is too confusing.
+ raise IndexError(
+ 'Index %s is out of range for %s array of length %s'
+ % (i, array_name, array_len))
+
+def validate_value(val, dtype, array_name=''):
+ if not isinstance(val, dtype):
+ raise ValueError(
+ 'Members of %s array must have type %s. Got %s'
+ % (array_name, dtype, type(val)))
+
+class _NotSet:
+ pass
+
diff --git a/pyomo/contrib/pynumero/cmake/tests/CMakeLists.txt b/pyomo/contrib/pynumero/plugins.py
similarity index 61%
rename from pyomo/contrib/pynumero/cmake/tests/CMakeLists.txt
rename to pyomo/contrib/pynumero/plugins.py
index 2142724f678..9f7944b74e2 100644
--- a/pyomo/contrib/pynumero/cmake/tests/CMakeLists.txt
+++ b/pyomo/contrib/pynumero/plugins.py
@@ -8,12 +8,9 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-set(ASL_HEADERS "${PROJECT_SOURCE_DIR}/asl_interface/src" )
-ADD_EXECUTABLE(asl_test src/simple_test.cpp)
-TARGET_INCLUDE_DIRECTORIES( asl_test PUBLIC ${ASL_HEADERS} )
-#SET_PROPERTY(TARGET asl_test PROPERTY ENABLE_EXPORTS)
-TARGET_LINK_LIBRARIES( asl_test pynumero_ASL)
+from pyomo.common.extensions import ExtensionBuilderFactory
+from .build import PyNumeroBuilder
+
+def load():
+ ExtensionBuilderFactory.register('pynumero')(PyNumeroBuilder)
-INSTALL(TARGETS asl_test
- DESTINATION bin
- )
diff --git a/pyomo/contrib/pynumero/sparse/__init__.py b/pyomo/contrib/pynumero/sparse/__init__.py
index 55061454e95..dcf00d79128 100644
--- a/pyomo/contrib/pynumero/sparse/__init__.py
+++ b/pyomo/contrib/pynumero/sparse/__init__.py
@@ -8,9 +8,8 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from .. import numpy_available, scipy_available
+from ..dependencies import numpy_available, scipy_available
if numpy_available and scipy_available:
- from .coo import empty_matrix, diagonal_matrix
- from .block_vector import BlockVector
- from .block_matrix import BlockMatrix, BlockSymMatrix
+ from .block_vector import BlockVector, NotFullyDefinedBlockVectorError
+ from .block_matrix import BlockMatrix, NotFullyDefinedBlockMatrixError
diff --git a/pyomo/contrib/pynumero/sparse/base_block.py b/pyomo/contrib/pynumero/sparse/base_block.py
new file mode 100644
index 00000000000..36bd46cd01f
--- /dev/null
+++ b/pyomo/contrib/pynumero/sparse/base_block.py
@@ -0,0 +1,181 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import abc
+import six
+
+# These classes are for checking types consistently and raising errors
+
+
+class BaseBlockVector(object):
+ """Base class for block vectors"""
+
+ def __init__(self):
+ pass
+
+ # We do not expect classes derived from BaseBlockVector to support
+ # the methods below.
+ def argpartition(self, kth, axis=-1, kind='introselect', order=None):
+ msg = "argpartition not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def argsort(self, axis=-1, kind='quicksort', order=None):
+ msg = "argsort not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def byteswap(self, inplace=False):
+ msg = "byteswap not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def choose(self, choices, out=None, mode='raise'):
+ msg = "choose not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def diagonal(self, offset=0, axis1=0, axis2=1):
+ msg = "diagonal not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def getfield(self, dtype, offset=0):
+ msg = "getfield not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def item(self, *args):
+ msg = "item not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def itemset(self, *args):
+ msg = "itemset not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def newbyteorder(self, new_order='S'):
+ msg = "newbyteorder not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def put(self, indices, values, mode='raise'):
+ msg = "put not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def partition(self, kth, axis=-1, kind='introselect', order=None):
+ msg = "partition not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def repeat(self, repeats, axis=None):
+ msg = "repeat not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def reshape(self, shape, order='C'):
+ msg = "reshape not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def resize(self, new_shape, refcheck=True):
+ msg = "resize not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def searchsorted(self, v, side='left', sorter=None):
+ msg = "searchsorted not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def setfield(self, val, dtype, offset=0):
+ msg = "setfield not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def setflags(self, write=None, align=None, uic=None):
+ msg = "setflags not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def sort(self, axis=-1, kind='quicksort', order=None):
+ msg = "sort not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def squeeze(self, axis=None):
+ msg = "squeeze not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def swapaxes(self, axis1, axis2):
+ msg = "swapaxes not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ msg = "trace not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def argmax(self, axis=None, out=None):
+ msg = "argmax not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def argmin(self, axis=None, out=None):
+ msg = "argmin not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def take(self, indices, axis=None, out=None, mode='raise'):
+ msg = "take not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ # The following vectors are to be supported at some point
+ def dump(self, file):
+ msg = "dump not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def dumps(self):
+ msg = "dumps not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def tobytes(self, order='C'):
+ msg = "tobytes not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+
+class BaseBlockMatrix(object):
+ """Base class for block matrices"""
+
+ def __init__(self):
+ pass
+
+    # We do not expect classes derived from BaseBlockMatrix to support
+ # the methods below.
+ def tolil(self, copy=False):
+ msg = "tolil not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def todia(self, copy=False):
+ msg = "todia not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def tobsr(self, blocksize=None, copy=False):
+ msg = "tobsr not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def sum(self, axis=None, dtype=None, out=None):
+ msg = "sum not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def mean(self, axis=None, dtype=None, out=None):
+ msg = "mean not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def diagonal(self, k=0):
+ msg = "diagonal not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def nonzero(self):
+ msg = "nonzero not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+ def setdiag(self, values, k=0):
+ msg = "setdiag not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+    def transpose(self, *axes):
+ msg = "transpose not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
+
+    def tostring(self, order='C'):
+ msg = "tostring not implemented for {}".format(self.__class__.__name__)
+ raise NotImplementedError(msg)
diff --git a/pyomo/contrib/pynumero/sparse/block_matrix.py b/pyomo/contrib/pynumero/sparse/block_matrix.py
index ee2a7e82be4..6d76d19a3f3 100644
--- a/pyomo/contrib/pynumero/sparse/block_matrix.py
+++ b/pyomo/contrib/pynumero/sparse/block_matrix.py
@@ -23,29 +23,73 @@
from scipy.sparse.sputils import upcast, isscalarlike, get_index_dtype
from pyomo.contrib.pynumero.sparse.block_vector import BlockVector
-from scipy.sparse import coo_matrix
+from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
from scipy.sparse import isspmatrix
from pyomo.contrib.pynumero.sparse.utils import is_symmetric_sparse
+from .base_block import BaseBlockMatrix
+from scipy.sparse.base import spmatrix
+import operator
import numpy as np
+import six
+import abc
+import logging
+import warnings
-__all__ = ['BlockMatrix', 'BlockSymMatrix']
+__all__ = ['BlockMatrix', 'NotFullyDefinedBlockMatrixError']
-# ToDo: better exception handling
-class BlockMatrix(object):
+logger = logging.getLogger(__name__)
+
+
+class NotFullyDefinedBlockMatrixError(Exception):
+ pass
+
+
+def assert_block_structure(mat):
+ if mat.has_undefined_row_sizes():
+ msgr = 'Operation not allowed with None rows. ' \
+ 'Specify at least one block in every row'
+ raise NotFullyDefinedBlockMatrixError(msgr)
+ if mat.has_undefined_col_sizes():
+ msgc = 'Operation not allowed with None columns. ' \
+               'Specify at least one block in every column'
+ raise NotFullyDefinedBlockMatrixError(msgc)
+
+
+class BlockMatrix(BaseBlockMatrix):
"""
Structured Matrix interface
+ Attributes
+ ----------
+ _blocks: numpy.ndarray
+ 2D-array where submatrices are stored
+ _bshape: tuple
+ number of block-rows and block-columns
+ _block_mask: numpy.ndarray
+ 2D-array with booleans that indicates if block is not empty.
+ Empty blocks are represented with None
+ _brow_lengths: numpy.ndarray
+ 1D-array with sizes of block-rows
+ _bcol_lengths: numpy.ndarray
+ 1D-array with sizes of block-columns
+ _undefined_brows: set
+ set of block row indices with undefined dimensions
+ _undefined_bcols: set
+ set of block column indices with undefined dimensions
+
Parameters
-------------------
- nbrows: number of block-rows in the matrix
- nbcols: number of block-columns in the matrix
+ nbrows: int
+ number of block-rows in the matrix
+ nbcols: int
+ number of block-columns in the matrix
"""
+ format = 'block_matrix'
def __init__(self, nbrows, nbcols):
- super(BlockMatrix, self).__init__()
shape = (nbrows, nbcols)
blocks = []
@@ -54,19 +98,25 @@ def __init__(self, nbrows, nbcols):
self._blocks = np.asarray(blocks, dtype='object')
- self._symmetric = False
- self._name = None
-
self._bshape = shape
self._block_mask = np.zeros(shape, dtype=bool)
- self._brow_lengths = np.zeros(nbrows, dtype=np.int64)
- self._bcol_lengths = np.zeros(nbcols, dtype=np.int64)
+
+ # _brow_lengths and _bcol_lengths get converted to dtype=np.int64 as soon as
+ # all of the dimensions are defined. Until then, users do not have access
+ # to these. See __setitem__, has_undefined_row_sizes, has_undefined_col_sizes,
+ # row_block_sizes, col_block_sizes, and assert_block_structure
+ self._brow_lengths = np.empty(nbrows, dtype=np.float64)
+ self._bcol_lengths = np.empty(nbcols, dtype=np.float64)
+ self._brow_lengths.fill(np.nan)
+ self._bcol_lengths.fill(np.nan)
+ self._undefined_brows = set(range(nbrows))
+ self._undefined_bcols = set(range(nbcols))
@property
def bshape(self):
"""
- Returns the block-shape of the matrix
+ Returns tuple with the block-shape of the matrix
"""
return self._bshape
@@ -75,12 +125,15 @@ def shape(self):
"""
Returns tuple with total number of rows and columns
"""
- return np.sum(self._brow_lengths), np.sum(self._bcol_lengths)
+ assert_block_structure(self)
+ nrows = np.sum(self._brow_lengths)
+ ncols = np.sum(self._bcol_lengths)
+ return nrows, ncols
@property
def nnz(self):
"""
- Returns total number of nonzero values in the matrix
+ Returns total number of nonzero values in this matrix
"""
return sum(blk.nnz for blk in self._blocks[self._block_mask])
@@ -89,42 +142,124 @@ def dtype(self):
"""
Returns data type of the matrix.
"""
- # ToDo: decide if this is the right way of doing this
all_dtypes = [blk.dtype for blk in self._blocks[self._block_mask]]
- dtype = upcast(*all_dtypes) if all_dtypes else None
- return dtype
+ ref_dtype = all_dtypes[0]
+ if all(ref_dtype is i for i in all_dtypes):
+ return ref_dtype
+ else:
+ raise ValueError('Multiple dtypes found: {0}'.format(str(all_dtypes)))
+
+ @property
+ def T(self):
+ """
+ Transpose matrix
+ """
+ return self.transpose()
- def row_block_sizes(self):
+ def row_block_sizes(self, copy=True):
"""
- Returns row-block sizes
+ Returns array with row-block sizes
+
+ Parameters
+ ----------
+ copy: bool
+ If False, then the internal array which stores the row block sizes will be returned without being copied.
+ Setting copy to False is risky and should only be done with extreme care.
Returns
-------
- ndarray
+ numpy.ndarray
"""
- return np.copy(self._brow_lengths)
+ if self.has_undefined_row_sizes():
+ raise NotFullyDefinedBlockMatrixError('Some block row lengths are not defined: {0}'.format(str(self._brow_lengths)))
+ if copy:
+ return self._brow_lengths.copy()
+ else:
+ return self._brow_lengths
- def col_block_sizes(self):
+ def col_block_sizes(self, copy=True):
"""
- Returns col-block sizes
+ Returns array with col-block sizes
+
+ Parameters
+ ----------
+ copy: bool
+ If False, then the internal array which stores the column block sizes will be returned without being copied.
+ Setting copy to False is risky and should only be done with extreme care.
Returns
-------
- narray
+ numpy.ndarray
"""
- return np.copy(self._bcol_lengths)
+ if self.has_undefined_col_sizes():
+ raise NotFullyDefinedBlockMatrixError('Some block column lengths are not defined: {0}'.format(str(self._bcol_lengths)))
+ if copy:
+ return self._bcol_lengths.copy()
+ else:
+ return self._bcol_lengths
+
+ def get_row_size(self, row):
+ if row in self._undefined_brows:
+ raise NotFullyDefinedBlockMatrixError('The dimensions of the requested row are not defined.')
+ return int(self._brow_lengths[row])
+
+ def get_col_size(self, col):
+ if col in self._undefined_bcols:
+ raise NotFullyDefinedBlockMatrixError('The dimensions of the requested column are not defined.')
+ return int(self._bcol_lengths[col])
+
+ def set_row_size(self, row, size):
+ if row in self._undefined_brows:
+ self._undefined_brows.remove(row)
+ self._brow_lengths[row] = size
+ if len(self._undefined_brows) == 0:
+ self._brow_lengths = np.asarray(self._brow_lengths, dtype=np.int64)
+ else:
+ if self._brow_lengths[row] != size:
+ raise ValueError('Incompatible row dimensions for '
+ 'row {row}; got {got}; '
+ 'expected {exp}'.format(row=row,
+ got=size,
+ exp=self._brow_lengths[row]))
+
+ def set_col_size(self, col, size):
+ if col in self._undefined_bcols:
+ self._undefined_bcols.remove(col)
+ self._bcol_lengths[col] = size
+ if len(self._undefined_bcols) == 0:
+ self._bcol_lengths = np.asarray(self._bcol_lengths, dtype=np.int64)
+ else:
+ if self._bcol_lengths[col] != size:
+ raise ValueError('Incompatible column dimensions for '
+ 'column {col}; got {got}; '
+ 'expected {exp}'.format(col=col,
+ got=size,
+ exp=self._bcol_lengths[col]))
+
+ def is_row_size_defined(self, row):
+ return row not in self._undefined_brows
+
+ def is_col_size_defined(self, col):
+ return col not in self._undefined_bcols
def block_shapes(self):
"""
- Returns shapes of blocks in BlockMatrix
+ Returns list with shapes of blocks in this BlockMatrix
+
+ Notes
+ -----
+ For a BlockMatrix with 2 block-rows and 2 block-cols
+ this method returns [[Block_00.shape, Block_01.shape],[Block_10.shape, Block_11.shape]]
Returns
-------
list
+
"""
- bm, bn =self.bshape
+ assert_block_structure(self)
+ bm, bn = self.bshape
sizes = [list() for i in range(bm)]
for i in range(bm):
sizes[i] = list()
@@ -141,196 +276,271 @@ def dot(self, other):
def reset_brow(self, idx):
"""
- Resets all blocks in selected row to None
+ Resets all blocks in selected block-row to None
Parameters
----------
- idx: integer
- row index to be reseted
+ idx: int
+ block-row index to be reset
Returns
-------
None
"""
- assert 0 <= idx < self.bshape[0], "index must be less than {}".format(self.bshape[0])
- self._brow_lengths[idx] = 0
+ assert 0 <= idx < self.bshape[0], 'Index out of bounds'
self._block_mask[idx, :] = False
self._blocks[idx, :] = None
def reset_bcol(self, jdx):
"""
- Resets all blocks in selected column to None
+ Resets all blocks in selected block-column to None
Parameters
----------
- idx: integer
- column index to be reseted
+ jdx: int
+ block-column index to be reset
Returns
-------
None
"""
- assert 0 <= jdx < self.bshape[1], "index must be less than {}".format(self.bshape[1])
- self._bcol_lengths[jdx] = 0
+ assert 0 <= jdx < self.bshape[1], 'Index out of bounds'
self._block_mask[:, jdx] = False
self._blocks[:, jdx] = None
def coo_data(self):
"""
- Returns data values of matrix in coo format
+ Returns data array of matrix. The array corresponds to
+ the data pointer in COOrdinate matrix format.
Returns
-------
- ndarray with values of all entries in the matrix
+ numpy.ndarray with values of all entries in the matrix
"""
- self._check_mask()
+ assert_block_structure(self)
nonzeros = self.nnz
data = np.empty(nonzeros, dtype=self.dtype)
nnz = 0
+
+ # get row col indices of blocks that are not none
ii, jj = np.nonzero(self._block_mask)
for i, j in zip(ii, jj):
+ # transform block to coo
B = self._blocks[i, j].tocoo()
idx = slice(nnz, nnz + B.nnz)
+ # populate coo_data array
data[idx] = B.data
nnz += B.nnz
return data
- def tocoo(self):
+ def tocoo(self, copy=True):
"""
- Converts this matrix to coo_matrix format.
+ Converts this matrix to COOrdinate format.
+
+ Parameters
+ ----------
+ copy: bool, optional
+ This argument is in the signature solely for Scipy compatibility
+ reasons. It does not do anything. The data is always copied.
Returns
-------
- coo_matrix
+ scipy.sparse.coo_matrix
"""
- # ToDo: copy argument to match scipy?
- self._check_mask()
+ assert_block_structure(self)
dtype = self.dtype
+ # Determine offsets for rows
+ # e.g. row_offset[1] = block_00.shape[0]
+ # e.g. row_offset[2] = block_00.shape[0] + block_10.shape[0]
row_offsets = np.append(0, np.cumsum(self._brow_lengths))
+ # Determine offsets for columns
col_offsets = np.append(0, np.cumsum(self._bcol_lengths))
+ # stores shape of resulting "flattened" matrix
shape = (row_offsets[-1], col_offsets[-1])
+ # total number of nonzeros
nonzeros = self.nnz
+ # create pointers for COO matrix (row, col, data)
data = np.empty(nonzeros, dtype=dtype)
idx_dtype = get_index_dtype(maxval=max(shape))
row = -np.ones(nonzeros, dtype=idx_dtype)
col = -np.ones(nonzeros, dtype=idx_dtype)
+ # populate COO pointers
nnz = 0
ii, jj = np.nonzero(self._block_mask)
for i, j in zip(ii, jj):
- B = self[i, j].tocoo()
+
+ B = self.get_block(i, j).tocoo()
+ # get slice that contains all elements in current block
idx = slice(nnz, nnz + B.nnz)
+
+ # append B.nnz elements to COO pointers using the slice
data[idx] = B.data
- #row[idx] = (B.row + row_offsets[i]).astype(idx_dtype, copy=False)
- #col[idx] = (B.col + col_offsets[j]).astype(idx_dtype, copy=False)
row[idx] = B.row + row_offsets[i]
col[idx] = B.col + col_offsets[j]
nnz += B.nnz
return coo_matrix((data, (row, col)), shape=shape)
- def tocsr(self):
+ def tocsr(self, copy=True):
"""
- Converts this matrix to csr format.
+ Converts this matrix to Compressed Sparse Row format.
+
+ Parameters
+ ----------
+ copy: bool, optional
+ This argument is in the signature solely for Scipy compatibility
+ reasons. It does not do anything. The data is always copied.
Returns
-------
- CSRMatrix
+ scipy.sparse.csr_matrix
"""
+
return self.tocoo().tocsr()
- def tocsc(self):
+ def tocsc(self, copy=True):
"""
- Converts this matrix to csc format.
+ Converts this matrix to Compressed Sparse Column format.
+
+ Parameters
+ ----------
+ copy: bool, optional
+ This argument is in the signature solely for Scipy compatibility
+ reasons. It does not do anything. The data is always copied.
Returns
-------
- CSCMatrix
+ scipy.sparse.csc_matrix
"""
return self.tocoo().tocsc()
- def toarray(self):
+ def toarray(self, order=None, out=None):
"""
- Returns a dense ndarray representation of this matrix.
+ Returns a numpy.ndarray representation of this matrix.
+
+ Parameters
+ ----------
+ order : {'C', 'F'}, optional
+ Whether to store multi-dimensional data in C (row-major)
+ or Fortran (column-major) order in memory. The default
+ is 'None', indicating the NumPy default of C-ordered.
+ Cannot be specified in conjunction with the `out`
+ argument.
+
+ out : ndarray, 2-dimensional, optional
+ If specified, uses this array as the output buffer
+ instead of allocating a new array to return. The provided
+ array must have the same shape and dtype as the sparse
+ matrix on which you are calling the method. For most
+ sparse types, `out` is required to be memory contiguous
+ (either C or Fortran ordered).
Returns
-------
arr : ndarray, 2-dimensional
An array with the same shape and containing the same data
- represented by the block matrix.
+ represented by the BlockMatrix.
"""
- return self.tocoo().toarray()
+ return self.tocoo().toarray(order=order, out=out)
- def todense(self):
+ def _mul_sparse_matrix(self, other):
"""
- Returns a dense matrix representation of this matrix.
+ Perform self * other where other is a block matrix
+
+ Parameters
+ ----------
+ other: BlockMatrix
Returns
-------
- arr : ndarray, 2-dimensional
- An array with the same shape and containing the same data
- represented by the block matrix.
-
+ BlockMatrix
"""
- return np.asmatrix(self.toarray())
-
- def _mul_sparse_matrix(self, other):
- assert other.shape == self.shape, "Dimension mismatch"
-
- if not isinstance(other, BlockMatrix):
- return self.tocsr()._mul_sparse_matrix(other)
+ if isinstance(other, BlockMatrix):
+ assert other.bshape[0] == self.bshape[1], "Dimension mismatch"
+ result = BlockMatrix(self.bshape[0], other.bshape[1])
+
+ # get dimenions from the other matrix
+ other_col_sizes = other.col_block_sizes(copy=False)
+
+ # compute result
+ for i in range(self.bshape[0]):
+ for j in range(other.bshape[1]):
+ accum = coo_matrix((self._brow_lengths[i],
+                                        other_col_sizes[j]))
+ for k in range(self.bshape[1]):
+ if self._block_mask[i, k] and not other.is_empty_block(k, j):
+ prod = self._blocks[i,k] * other.get_block(k, j)
+ accum = accum + prod
+ result.set_block(i, j, accum)
+ return result
+ elif isspmatrix(other):
+ raise NotImplementedError('BlockMatrix multiply with spmatrix not supported. Multiply a BlockMatrix '
+ 'with another BlockMatrix of compatible dimensions.')
else:
- raise NotImplementedError("Not supported yet")
+ raise NotImplementedError('Operation not supported by BlockMatrix')
- def transpose(self, axes=None, copy=False):
+ def transpose(self, axes=None, copy=True):
"""
- Reverses the dimensions of the block matrix.
+ Creates a transpose copy of the BlockMatrix.
Parameters
----------
axes: None, optional
This argument is in the signature solely for NumPy compatibility reasons. Do not pass in
anything except for the default value.
- copy: bool, optional
- Indicates whether or not attributes of self should be copied whenever possible.
+ copy: bool
+ This argument is in the signature solely for scipy compatibility reasons. Do not pass in
+ anything except for the default value.
Returns
-------
BlockMatrix with dimensions reversed
+
+ """
+ """
+ It is difficult to support transpose without copying. A "TransposeView" object might be a better approach.
"""
if axes is not None:
raise ValueError(("Sparse matrices do not support "
"an 'axes' parameter because swapping "
"dimensions is the only logical permutation."))
+ if not copy:
+ raise ValueError('BlockMatrix only supports transpose with copy=True')
- m = self.bshape[0]
- n = self.bshape[1]
+ m, n = self.bshape
mat = BlockMatrix(n, m)
+ for row in range(m):
+ if self.is_row_size_defined(row):
+ mat.set_col_size(row, self.get_row_size(row))
+ for col in range(n):
+ if self.is_col_size_defined(col):
+ mat.set_row_size(col, self.get_col_size(col))
for i in range(m):
for j in range(n):
if not self.is_empty_block(i, j):
- mat[j, i] = self[i, j].transpose()
+ mat.set_block(j, i, self.get_block(i, j).transpose(copy=True))
return mat
def is_empty_block(self, idx, jdx):
"""
- Indicates if a block is empty
+ Indicates if a block is None
Parameters
----------
@@ -341,385 +551,771 @@ def is_empty_block(self, idx, jdx):
Returns
-------
- boolean
+ bool
"""
return not self._block_mask[idx, jdx]
- def _check_mask(self):
+ def has_undefined_row_sizes(self):
+ """
+ Indicates if the matrix has block-rows with undefined dimensions
- bm, bn = self.bshape
+ Returns
+ -------
+ bool
- empty_rows = []
- for idx in range(bm):
- row_bool = np.logical_not(self._block_mask[idx, :])
- if np.all(row_bool):
- empty_rows.append(idx)
- empty_cols = []
- for jdx in range(bn):
- col_bool = np.logical_not(self._block_mask[:, jdx])
- if np.all(col_bool):
- empty_cols.append(jdx)
+ """
+ return len(self._undefined_brows) != 0
- if len(empty_rows) > 0:
- msg = 'Operation not allowed with None rows. Specify at least one block in rows:\n'
- msg += '{} of BlockMatrix'.format(empty_rows)
- raise RuntimeError(msg)
+ def has_undefined_col_sizes(self):
+ """
+ Indicates if the matrix has block-columns with undefined dimensions
- if len(empty_cols)>0:
- msg = 'Operation not allowed with None columns. Specify at least one block in columns:\n'
- msg += '{} of BlockMatrix'.format(empty_cols)
- raise RuntimeError(msg)
+ Returns
+ -------
+ bool
+
+ """
+ return len(self._undefined_bcols) != 0
- def has_empty_rows(self):
+ def copyfrom(self, other, deep=True):
"""
- Indicates if the matrix has block-rows that are empty
+ Copies entries of other matrix into this matrix. This method provides
+ an easy way to populate a BlockMatrix from scipy.sparse matrices. It also
+ intended to facilitate copying values from other BlockMatrix to this BlockMatrix
+
+ Parameters
+ ----------
+ other: BlockMatrix or scipy.spmatrix
+ deep: bool
+ If deep is True and other is a BlockMatrix, then the blocks in other are copied. If deep is False
+ and other is a BlockMatrix, then the blocks in other are not copied.
Returns
-------
- boolean
+ None
"""
- bm, bn = self.bshape
+ assert_block_structure(self)
+ if isinstance(other, BlockMatrix):
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
- empty_rows = []
- for idx in range(bm):
- row_bool = np.logical_not(self._block_mask[idx, :])
- if np.all(row_bool):
- empty_rows.append(idx)
+ m, n = self.bshape
+ if deep:
+ for i in range(m):
+ for j in range(n):
+ if not other.is_empty_block(i, j):
+ self.set_block(i, j, other.get_block(i, j).copy())
+ else:
+ self.set_block(i, j, None)
+ else:
+ for i in range(m):
+ for j in range(n):
+ self.set_block(i, j, other.get_block(i, j))
+
+ elif isspmatrix(other) or isinstance(other, np.ndarray):
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+ if isinstance(other, np.ndarray):
+ # cast numpy.array to coo_matrix for ease of manipulation
+ m = csr_matrix(other)
+ else:
+ m = other.tocsr()
+
+ # determine offsets for each block
+ row_offsets = np.append(0, np.cumsum(self._brow_lengths))
+ col_offsets = np.append(0, np.cumsum(self._bcol_lengths))
+
+ # maps 'flat' matrix to the block structure of this matrix
+ # csr row slicing is fast
+ # csc column slicing is fast
+ # therefore, we do the row slice once for each row, then we convert to csc for the column slicing
+ for i in range(self.bshape[0]):
+ mm = m[row_offsets[i]:row_offsets[i+1], :].tocsc()
+ for j in range(self.bshape[1]):
+ mmm = mm[:, col_offsets[j]:col_offsets[j+1]]
+
+ if self.is_empty_block(i, j) and mmm.nnz == 0:
+ self.set_block(i, j, None)
+ else:
+ self.set_block(i, j, mmm)
- return len(empty_rows) > 0
+ else:
+ raise NotImplementedError("Format not supported. BlockMatrix can only copy data from another BlockMatrix, "
+ "a numpy array, or a scipy sparse matrix.")
- def has_empty_cols(self):
+ def copyto(self, other, deep=True):
"""
- Indicates if the matrix has block-columns that are empty
+ Copies entries of this BlockMatrix into other. This method provides
+ an easy way to copy values of this matrix into another format.
+
+ Parameters
+ ----------
+ other: BlockMatrix or scipy.spmatrix
+ deep: bool
+ If deep is True and other is a BlockMatrix, then the blocks in this BlockMatrix are copied. If deep is
+ False and other is a BlockMatrix, then the blocks in this BlockMatrix are not copied.
Returns
-------
- boolean
+ None
"""
- bm, bn = self.bshape
+ if isinstance(other, BlockMatrix):
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
- empty_cols = []
- for jdx in range(bn):
- col_bool = np.logical_not(self._block_mask[:, jdx])
- if np.all(col_bool):
- empty_cols.append(jdx)
+ if deep:
+ m, n = self.bshape
+ for i in range(m):
+ for j in range(n):
+ if self.is_empty_block(i, j):
+ other.set_block(i, j, None)
+ else:
+ other.set_block(i, j, self.get_block(i, j).copy())
+ else:
+ m, n = self.bshape
+ for i in range(m):
+ for j in range(n):
+ other.set_block(i, j, self.get_block(i, j))
+ elif isspmatrix(other) or isinstance(other, np.ndarray):
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+
+ # create temporary matrix to copy
+ tmp_matrix = self.tocoo()
+ if isinstance(other, coo_matrix):
+ np.copyto(other.data, tmp_matrix.data)
+ np.copyto(other.row, tmp_matrix.row)
+ np.copyto(other.col, tmp_matrix.col)
+ elif isinstance(other, csr_matrix):
+ tmp_matrix2 = tmp_matrix.tocsr()
+ np.copyto(other.data, tmp_matrix2.data)
+ np.copyto(other.indices, tmp_matrix2.indices)
+ np.copyto(other.indptr, tmp_matrix2.indptr)
+ elif isinstance(other, csc_matrix):
+ tmp_matrix2 = tmp_matrix.tocsc()
+ np.copyto(other.data, tmp_matrix2.data)
+ np.copyto(other.indices, tmp_matrix2.indices)
+ np.copyto(other.indptr, tmp_matrix2.indptr)
+ elif isinstance(other, np.ndarray):
+ np.copyto(other, tmp_matrix.toarray())
+ else:
+ raise NotImplementedError("Format not supported. BlockMatrix can only copy data to another BlockMatrix, "
+ "a numpy array, or a scipy sparse coo, csr, or csc matrix.")
+ else:
+ raise NotImplementedError("Format not supported. BlockMatrix can only copy data to another BlockMatrix, "
+ "a numpy array, or a scipy sparse coo, csr, or csc matrix.")
- return len(empty_cols) > 0
+ def copy(self, deep=True):
+ """
+ Makes a copy of this BlockMatrix
+
+ Parameters
+ ----------
+ deep: bool
+ If deep is True, then the blocks in this BlockMatrix are copied
+
+ Returns
+ -------
+ BlockMatrix
+
+ """
+ result = BlockMatrix(self.bshape[0], self.bshape[1])
+ ii, jj = np.nonzero(self._block_mask)
+ if deep:
+ for i, j in zip(ii, jj):
+ result.set_block(i, j, self._blocks[i, j].copy())
+ else:
+ for i, j in zip(ii, jj):
+ result.set_block(i, j, self._blocks[i, j])
+ return result
+
+ def copy_structure(self):
+ """
+        Makes a copy of the structure of this BlockMatrix. This provides a
+        lightweight copy of each block in this BlockMatrix. The blocks in the
+ resulting matrix have the same shape as in the original matrices but not
+ the same number of nonzeros.
+
+ Returns
+ -------
+ BlockMatrix
+
+ """
+ m, n = self.bshape
+ result = BlockMatrix(m, n)
+ for row in range(m):
+ if self.is_row_size_defined(row):
+ result.set_row_size(row, self.get_row_size(row))
+ for col in range(n):
+ if self.is_col_size_defined(col):
+ result.set_col_size(col, self.get_col_size(col))
+ ii, jj = np.nonzero(self._block_mask)
+ for i, j in zip(ii, jj):
+ if isinstance(self._blocks[i, j], BlockMatrix):
+ result.set_block(i, j, self._blocks[i, j].copy_structure())
+ else:
+ nrows, ncols = self._blocks[i, j].shape
+ result.set_block(i, j, coo_matrix((nrows, ncols)))
+ return result
def __repr__(self):
- return '{}{}'.format(self.__class__.__name__, self.shape)
+ return '{}{}'.format(self.__class__.__name__, self.bshape)
- def __str__(self):
+ def _print(self, indent):
msg = ''
for idx in range(self.bshape[0]):
for jdx in range(self.bshape[1]):
- repn = self._blocks[idx, jdx].__repr__() if self._block_mask[idx, jdx] else None
- msg += '({}, {}): {}\n'.format(idx, jdx, repn)
+ if self.is_empty_block(idx, jdx):
+ msg += indent + str((idx, jdx)) + ': ' + str(None) + '\n'
+ else:
+ block = self.get_block(idx, jdx)
+ if isinstance(block, BlockMatrix):
+ msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n'
+ msg += block._print(indent=indent+' ')
+ else:
+ msg += indent + str((idx, jdx)) + ': ' + block.__class__.__name__ + str(block.shape) + '\n'
return msg
- def __getitem__(self, item):
- if isinstance(item, slice) or isinstance(item, tuple):
- idx, jdx = item
- assert idx >= 0 and jdx >= 0, 'indices must be positive'
- return self._blocks[item]
- else:
- raise RuntimeError('Wrong index')
+ def __str__(self):
+ return self._print(indent='')
- def __setitem__(self, key, value):
+ def get_block(self, row, col):
+ assert row >= 0 and col >= 0, 'indices must be positive'
+ assert row < self.bshape[0] and \
+ col < self.bshape[1], 'Indices out of range'
+ return self._blocks[row, col]
- if isinstance(key, slice):
- raise NotImplementedError('slices not supported for BlockMatrix')
+ def set_block(self, row, col, value):
+ assert row >= 0 and col >= 0, 'Indices must be positive'
- if not isinstance(key, tuple):
- raise RuntimeError('Wrong index')
+ assert row < self.bshape[0] and col < self.bshape[1], 'Indices out of range'
- idx, jdx = key
- assert idx >= 0 and jdx >= 0, 'indices must be positive'
- assert idx < self.bshape[0] and jdx < self.bshape[1], 'indices out of range'
if value is None:
- self._blocks[idx, jdx] = None
- self._block_mask[idx, jdx] = False
- all_none_rows = True
- for blk in self._blocks[:, jdx]:
- if blk is not None:
- all_none_rows = False
- break
-
- all_none_cols = True
- for blk in self._blocks[idx, :]:
- if blk is not None:
- all_none_cols = False
- break
-
- if all_none_cols:
- self._brow_lengths[idx] = 0
- if all_none_rows:
- self._bcol_lengths[jdx] = 0
+ self._blocks[row, col] = None
+ self._block_mask[row, col] = False
else:
- msg = 'blocks need to be sparse matrices'
- assert isinstance(value, BlockMatrix) or isspmatrix(value), msg
- if self._brow_lengths[idx] == 0 and self._bcol_lengths[jdx] == 0:
- self._blocks[idx, jdx] = value
- self._brow_lengths[idx] = value.shape[0]
- self._bcol_lengths[jdx] = value.shape[1]
- self._block_mask[idx, jdx] = True
- elif self._brow_lengths[idx] != 0 and self._bcol_lengths[jdx] == 0:
- if self._brow_lengths[idx] != value.shape[0]:
- msg = ('Incompatible row dimensions for block ({i},{j}) '
- 'Got {got}, '
- 'expected {exp}.'.format(i=idx, j=jdx,
- exp=self._brow_lengths[idx],
- got=value.shape[0]))
- raise RuntimeError(msg)
- self._blocks[idx, jdx] = value
- self._block_mask[idx, jdx] = True
- self._bcol_lengths[jdx] = value.shape[1]
- elif self._brow_lengths[idx] == 0 and self._bcol_lengths[jdx] != 0:
- if self._bcol_lengths[jdx] != value.shape[1]:
- msg = ('Incompatible col dimensions for block ({i},{j}) '
- 'Got {got}, '
- 'expected {exp}.'.format(i=idx, j=jdx,
- exp=self._bcol_lengths[jdx],
- got=value.shape[1]))
- raise RuntimeError(msg)
- self._blocks[idx, jdx] = value
- self._block_mask[idx, jdx] = True
- self._brow_lengths[idx] = value.shape[0]
+ if isinstance(value, BaseBlockMatrix):
+ assert_block_structure(value)
+ elif isinstance(value, np.ndarray):
+ if value.ndim != 2:
+ msg = 'blocks need to be sparse matrices or BlockMatrices'
+ raise ValueError(msg)
+ msg = 'blocks need to be sparse matrices or BlockMatrices; a numpy array was given; copying the numpy array to a coo_matrix'
+ logger.warning(msg)
+ warnings.warn(msg)
+ value = coo_matrix(value)
else:
- if self._brow_lengths[idx] != value.shape[0]:
- msg = ('Incompatible row dimensions for block ({i},{j}) '
- 'Got {got}, '
- 'expected {exp}.'.format(i=idx, j=jdx,
- exp=self._brow_lengths[idx],
- got=value.shape[0]))
- raise RuntimeError(msg)
- if self._bcol_lengths[jdx] != value.shape[1]:
- msg = ('Incompatible col dimensions for block ({i},{j}) '
- 'Got {got}, '
- 'expected {exp}.'.format(i=idx, j=jdx,
- exp=self._bcol_lengths[jdx],
- got=value.shape[1]))
- raise RuntimeError(msg)
- self._blocks[idx, jdx] = value
- self._block_mask[idx, jdx] = True
+ assert isspmatrix(value), 'blocks need to be sparse matrices or BlockMatrices'
+
+ nrows, ncols = value.shape
+ self.set_row_size(row, nrows)
+ self.set_col_size(col, ncols)
+ self._blocks[row, col] = value
+ self._block_mask[row, col] = True
+
+ def __getitem__(self, item):
+ raise NotImplementedError('BlockMatrix does not support __getitem__. '
+ 'Use get_block or set_block to access sub-blocks.')
+
+ def __setitem__(self, item, val):
+ raise NotImplementedError('BlockMatrix does not support __setitem__. '
+ 'Use get_block or set_block to access sub-blocks.')
def __add__(self, other):
- self._check_mask()
+ assert_block_structure(self)
result = BlockMatrix(self.bshape[0], self.bshape[1])
- m, n = self.bshape
- assert other.shape == self.shape, \
- 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+
if isinstance(other, BlockMatrix):
assert other.bshape == self.bshape, \
'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
- other._check_mask()
- for i in range(m):
- for j in range(n):
- if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
- result[i, j] = self._blocks[i, j] + other[i, j]
- elif not self.is_empty_block(i, j) and other.is_empty_block(i, j):
- result[i, j] = self._blocks[i, j]
- elif self.is_empty_block(i, j) and not other.is_empty_block(i, j):
- result[i, j] = other[i, j]
- else:
- result[i, j] = None
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+ assert_block_structure(other)
+
+ iterator = set(zip(*np.nonzero(self._block_mask)))
+ iterator.update(zip(*np.nonzero(other._block_mask)))
+ for i, j in iterator:
+ if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
+ result.set_block(i, j, self._blocks[i, j] + other.get_block(i, j))
+ elif not self.is_empty_block(i, j):
+ result.set_block(i, j, self._blocks[i, j].copy())
+ elif not other.is_empty_block(i, j):
+ result.set_block(i, j, other.get_block(i, j).copy())
return result
elif isspmatrix(other):
- raise NotImplementedError('Sparse Matrix with BlockMatrix addition not supported')
- elif np.isscalar(other):
- raise NotImplementedError('Scalar with BlockMatrix addition not supported')
+ # Note: this is not efficient but is just for flexibility.
+ mat = self.copy_structure()
+ mat.copyfrom(other)
+ return self.__add__(mat)
else:
- raise NotImplementedError('input not recognized for addition')
+ if other.__class__.__name__ == 'MPIBlockMatrix':
+ raise RuntimeError('Operation not supported by BlockMatrix')
+
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __radd__(self, other):
return self.__add__(other)
def __sub__(self, other):
- self._check_mask()
+ assert_block_structure(self)
result = BlockMatrix(self.bshape[0], self.bshape[1])
- m, n = self.bshape
- assert other.shape == self.shape, \
- 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+
if isinstance(other, BlockMatrix):
assert other.bshape == self.bshape, \
'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
- other._check_mask()
- for i in range(m):
- for j in range(n):
- if self._block_mask[i, j] and other._block_mask[i, j]:
- result[i, j] = self._blocks[i, j] - other[i, j]
- elif self._block_mask[i, j] and not other._block_mask[i, j]:
- result[i, j] = self._blocks[i, j]
- elif not self._block_mask[i, j] and other._block_mask[i, j]:
- result[i, j] = -other[i, j]
- else:
- result[i, j] = None
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+ assert_block_structure(other)
+ iterator = set(zip(*np.nonzero(self._block_mask)))
+ iterator.update(zip(*np.nonzero(other._block_mask)))
+ for i, j in iterator:
+ if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
+ result.set_block(i, j, self._blocks[i, j] - other.get_block(i, j))
+ elif not self.is_empty_block(i, j):
+ result.set_block(i, j, self._blocks[i, j].copy())
+ elif not other.is_empty_block(i, j):
+ result.set_block(i, j, -other.get_block(i, j))
return result
elif isspmatrix(other):
- raise NotImplementedError('Sparse Matrix with BlockMatrix subtraction not supported')
- elif np.isscalar(other):
- raise NotImplementedError('Scalar with BlockMatrix subtraction not supported')
+ # Note: this is not efficient but is just for flexibility.
+ mat = self.copy_structure()
+ mat.copyfrom(other)
+ return self.__sub__(mat)
else:
- raise NotImplementedError('input not recognized for subtraction')
+ if other.__class__.__name__ == 'MPIBlockMatrix':
+ raise RuntimeError('Operation not supported by BlockMatrix')
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __rsub__(self, other):
- self._check_mask()
+ assert_block_structure(self)
result = BlockMatrix(self.bshape[0], self.bshape[1])
- m, n = self.bshape
- assert other.shape == self.shape, \
- 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
- if isinstance(other, BlockMatrix):
- assert other.bshape == self.bshape, \
- 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
- other._check_mask()
- for i in range(m):
- for j in range(n):
- if self._block_mask[i, j] and other._block_mask[i, j]:
- result[i, j] = other[i, j] - self._blocks[i, j]
- elif self._block_mask[i, j] and not other._block_mask[i, j]:
- result[i, j] = -self._blocks[i, j]
- elif not self._block_mask[i, j] and other._block_mask[i, j]:
- result[i, j] = other[i, j]
- else:
- result[i, j] = None
- return result
- elif isspmatrix(other):
- raise NotImplementedError('Sparse Matrix with BlockMatrix subtraction not supported')
- elif np.isscalar(other):
- raise NotImplementedError('Scalar with BlockMatrix subtraction not supported')
+ if isspmatrix(other):
+ # Note: this is not efficient but is just for flexibility.
+ mat = self.copy_structure()
+ mat.copyfrom(other)
+ return mat - self
else:
- raise NotImplementedError('input not recognized for subtraction')
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __mul__(self, other):
- self._check_mask()
+ """
+ When doing A*B with numpy arrays, element-by-element multiplication is done. However, when doing
+ A*B with scipy sparse matrices, a matrix-matrix dot product is performed. We are following the
+ scipy sparse matrix API.
+ """
+
bm, bn = self.bshape
if np.isscalar(other):
result = BlockMatrix(bm, bn)
ii, jj = np.nonzero(self._block_mask)
for i, j in zip(ii, jj):
- scaled = self._blocks[i, j] * other
- result[i, j] = scaled
+ result.set_block(i, j, self._blocks[i, j] * other)
return result
elif isinstance(other, BlockVector):
assert bn == other.bshape[0], 'Dimension mismatch'
assert self.shape[1] == other.shape[0], 'Dimension mismatch'
- other._check_mask()
+ assert not other.has_none, 'Block vector must not have none entries'
+ assert_block_structure(self)
+
nblocks = self.bshape[0]
result = BlockVector(nblocks)
for i in range(bm):
- result[i] = np.zeros(self._brow_lengths[i])
+ result.set_block(i, np.zeros(self._brow_lengths[i]))
for j in range(bn):
- x = other[j] # this flattens block vectors that are within block vectors
if not self.is_empty_block(i, j):
+ x = other.get_block(j)
A = self._blocks[i, j]
- result[i] += A * x
+ blk = result.get_block(i)
+ _tmp = A*x
+ _tmp += blk
+ result.set_block(i, _tmp)
return result
elif isinstance(other, np.ndarray):
- assert self.shape[1] == other.shape[0], 'Dimension mismatch {}!={}'.format(self.shape[1],
- other.shape[0])
+ if other.ndim != 1:
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
+ assert self.shape[1] == other.shape[0], \
+ 'Dimension mismatch {}!={}'.format(self.shape[1],
+ other.shape[0])
+ assert_block_structure(self)
+
nblocks = self.bshape[0]
result = BlockVector(nblocks)
for i in range(bm):
- result[i] = np.zeros(self._brow_lengths[i])
+ result.set_block(i, np.zeros(self._brow_lengths[i]))
counter = 0
for j in range(bn):
if not self.is_empty_block(i, j):
A = self._blocks[i, j]
x = other[counter: counter + A.shape[1]]
- result[i] += A * x
+ blk = result.get_block(i)
+ blk += A * x
counter += A.shape[0]
return result
elif isinstance(other, BlockMatrix) or isspmatrix(other):
+ assert_block_structure(self)
return self._mul_sparse_matrix(other)
else:
raise NotImplementedError('input not recognized for multiplication')
+ def __truediv__(self, other):
+ bm, bn = self.bshape
+ if np.isscalar(other):
+ result = BlockMatrix(bm, bn)
+ ii, jj = np.nonzero(self._block_mask)
+ for i, j in zip(ii, jj):
+ result.set_block(i, j, self._blocks[i, j] / other)
+ return result
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
+ def __rtruediv__(self, other):
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
def __rmul__(self, other):
- self._check_mask()
+ """
+ When doing A*B with numpy arrays, element-by-element multiplication is done. However, when doing
+ A*B with scipy sparse matrices, a matrix-matrix dot product is performed. We are following the
+ scipy sparse matrix API.
+ """
bm, bn = self.bshape
if np.isscalar(other):
result = BlockMatrix(bm, bn)
ii, jj = np.nonzero(self._block_mask)
for i, j in zip(ii, jj):
- scaled = self._blocks[i, j] * other
- result[i, j] = scaled
+ result.set_block(i, j, self._blocks[i, j] * other)
return result
+ elif isspmatrix(other):
+ raise NotImplementedError('sparse matrix times block matrix is not supported.')
else:
- raise NotImplementedError('Not implemented yet')
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
+ def __pow__(self, other):
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
+ def __abs__(self):
+ res = BlockMatrix(*self.bshape)
+ ii, jj = np.nonzero(self._block_mask)
+ for i, j in zip(ii, jj):
+ res.set_block(i, j, abs(self._blocks[i, j]))
+ return res
def __iadd__(self, other):
- raise NotImplementedError('implicit add not supported for BlockMatrix')
+
+ if isinstance(other, BlockMatrix):
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+
+ iterator = set(zip(*np.nonzero(self._block_mask)))
+ iterator.update(zip(*np.nonzero(other._block_mask)))
+ for i, j in iterator:
+ if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
+ self._blocks[i, j] += other.get_block(i, j)
+ elif not other.is_empty_block(i, j):
+ self.set_block(i, j, other.get_block(i, j).copy())
+ return self
+ elif isspmatrix(other):
+ # Note: this is not efficient but is just for flexibility.
+ mat = self.copy_structure()
+ mat.copyfrom(other)
+ return self.__iadd__(mat)
+ else:
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __isub__(self, other):
- raise NotImplementedError('implicit sub not supported for BlockMatrix')
+
+ if isinstance(other, BlockMatrix):
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+ assert other.shape == self.shape, \
+ 'dimensions mismatch {} != {}'.format(self.shape, other.shape)
+
+ iterator = set(zip(*np.nonzero(self._block_mask)))
+ iterator.update(zip(*np.nonzero(other._block_mask)))
+ for i, j in iterator:
+ if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
+ self._blocks[i, j] -= other.get_block(i, j)
+ elif not other.is_empty_block(i, j):
+ self.set_block(i, j, -other.get_block(i, j)) # the copy happens in __neg__ of other.get_block(i, j)
+ return self
+ elif isspmatrix(other):
+ # Note: this is not efficient but is just for flexibility.
+ mat = self.copy_structure()
+ mat.copyfrom(other)
+ return self.__isub__(mat)
+ else:
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __imul__(self, other):
- self._check_mask()
if np.isscalar(other):
ii, jj = np.nonzero(self._block_mask)
for i, j in zip(ii, jj):
- self._blocks[i, j] = self._blocks[i, j] * other
+ self._blocks[i, j] *= other
return self
- raise NotImplementedError('only scalar support for implicit multiplication')
+ raise NotImplementedError('Operation not supported by BlockMatrix')
def __itruediv__(self, other):
- raise NotImplementedError('implicit divide not supported yet')
+ if np.isscalar(other):
+ ii, jj = np.nonzero(self._block_mask)
+ for i, j in zip(ii, jj):
+ self._blocks[i, j] /= other
+ return self
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+ def __div__(self, other):
+ return self.__truediv__(other)
-class BlockSymMatrix(BlockMatrix):
+ def __rdiv__(self, other):
+ return self.__rtruediv__(other)
- def __init__(self, nrowcols):
+ def __idiv__(self, other):
+ return self.__itruediv__(other)
- super(BlockSymMatrix, self).__init__(nrowcols, nrowcols)
- self._symmetric = True
+ def __ifloordiv__(self, other):
+ raise NotImplementedError('Operation not supported by BlockMatrix')
- def __repr__(self):
- return '{}{}'.format(self.__class__.__name__, self.shape)
+ def __neg__(self):
+ res = BlockMatrix(*self.bshape)
+ ii, jj = np.nonzero(self._block_mask)
+ for i, j in zip(ii, jj):
+ res.set_block(i, j, -self._blocks[i, j])
+ return res
- def __str__(self):
- msg = ''
- for idx in range(self.bshape[0]):
- for jdx in range(self.bshape[1]):
- if idx >= jdx:
- repn = self._blocks[idx, jdx].__repr__() if self._block_mask[idx, jdx] else None
- msg += '({}, {}): {}\n'.format(idx, jdx, repn)
- return msg
+ def _comparison_helper(self, operation, other):
+ result = BlockMatrix(self.bshape[0], self.bshape[1])
- def __getitem__(self, item):
+ if isinstance(other, BlockMatrix) and other.bshape == self.bshape:
+ m, n = self.bshape
+ for i in range(m):
+ for j in range(n):
+ if not self.is_empty_block(i, j) and not other.is_empty_block(i, j):
+ result.set_block(i, j, operation(self._blocks[i, j], other.get_block(i, j)))
+ else:
+ nrows = self._brow_lengths[i]
+ ncols = self._bcol_lengths[j]
+ mat = coo_matrix((nrows, ncols))
+ if not self.is_empty_block(i, j):
+ result.set_block(i, j, operation(self._blocks[i, j], mat))
+ elif not other.is_empty_block(i, j):
+ result.set_block(i, j, operation(mat, other.get_block(i, j)))
+ else:
+ result.set_block(i, j, operation(mat, mat))
+ return result
+ elif isinstance(other, BlockMatrix) or isspmatrix(other):
+ if isinstance(other, BlockMatrix):
+ raise NotImplementedError('Operation supported with same block structure only')
+ else:
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+ elif np.isscalar(other):
+ m, n = self.bshape
+ for i in range(m):
+ for j in range(n):
+ if not self.is_empty_block(i, j):
+ result.set_block(i, j, operation(self._blocks[i, j], other))
+ else:
+ nrows = self._brow_lengths[i]
+ ncols = self._bcol_lengths[j]
+ matc = coo_matrix((nrows, ncols))
+ result.set_block(i, j, operation(matc, other))
+ return result
+ else:
+ if other.__class__.__name__ == 'MPIBlockMatrix':
+ raise RuntimeError('Operation not supported by BlockMatrix')
+ raise NotImplementedError('Operation not supported by BlockMatrix')
- if isinstance(item, slice):
- raise NotImplementedError
+ def __eq__(self, other):
+ return self._comparison_helper(operation=operator.eq, other=other)
- if isinstance(item, tuple):
- idx, jdx = item
- assert idx >= 0 and jdx >= 0, 'indices must be positive'
- return self._blocks[item]
- else:
- raise RuntimeError('Wrong index: need a tuple')
+ def __ne__(self, other):
+ return self._comparison_helper(operation=operator.ne, other=other)
+
+ def __le__(self, other):
+ return self._comparison_helper(operation=operator.le, other=other)
+
+ def __lt__(self, other):
+ return self._comparison_helper(operation=operator.lt, other=other)
+
+ def __ge__(self, other):
+ return self._comparison_helper(operation=operator.ge, other=other)
+
+ def __gt__(self, other):
+ return self._comparison_helper(operation=operator.gt, other=other)
+
+ def __len__(self):
+ raise NotImplementedError('Operation not supported by BlockMatrix')
+
+ def __matmul__(self, other):
+ return self.__mul__(other)
+
+ def __rmatmul__(self, other):
+ return self.__rmul__(other)
- def __setitem__(self, key, value):
+ def pprint(self):
+ """Prints BlockMatrix in pretty format"""
+ print(str(self))
- if isinstance(key, slice):
- raise NotImplementedError
+ def get_block_column_index(self, index):
+ """
+ Returns block-column idx from matrix column index.
+
+ Parameters
+ ----------
+ index: int
+ Column index
+
+ Returns
+ -------
+ int
+
+ """
+ msgc = 'Operation not allowed with None columns. ' \
+ 'Specify at least one block in every column'
+ assert not self.has_undefined_col_sizes(), msgc
- if not isinstance(key, tuple):
- raise RuntimeError('Wrong index: need a tuple')
+ bm, bn = self.bshape
+ # get cummulative sum of block sizes
+ cum = self._bcol_lengths.cumsum()
+ assert index >= 0, 'index out of bounds'
+ assert index < cum[bn-1], 'index out of bounds'
+
+ # exits if only has one column
+ if bn <= 1:
+ return 0
- idx, jdx = key
+ ge = cum >= index
+ # find first entry that is greater or equal
+ block_index = np.argmax(ge)
- assert idx >= 0 and jdx >= 0, 'indices must be positive'
- assert idx >= jdx, 'symmetric block matrices only set lower triangular entries idx >= jdx'
- if idx == jdx:
- assert is_symmetric_sparse(value), 'Matrix is not symmetric'
- super(BlockSymMatrix, self).__setitem__(key, value)
- super(BlockSymMatrix, self).__setitem__((jdx, idx), value.transpose())
+ if cum[block_index] == index:
+ return block_index + 1
+ return block_index
+ def get_block_row_index(self, index):
+ """
+ Returns block-row idx from matrix row index.
+ Parameters
+ ----------
+ index: int
+ Row index
+ Returns
+ -------
+ int
+
+ """
+ msgr = 'Operation not allowed with None rows. ' \
+ 'Specify at least one block in every row'
+ assert not self.has_undefined_row_sizes(), msgr
+
+ bm, bn = self.bshape
+ # get cummulative sum of block sizes
+ cum = self._brow_lengths.cumsum()
+ assert index >=0, 'index out of bounds'
+ assert index < cum[bm-1], 'index out of bounds'
+
+ # exits if only has one column
+ if bm <= 1:
+ return 0
+
+ ge = cum >= index
+ # find first entry that is greater or equal
+ block_index = np.argmax(ge)
+
+ if cum[block_index] == index:
+ return block_index + 1
+ return block_index
+
+ def getcol(self, j):
+ """
+ Returns vector of column j
+
+ Parameters
+ ----------
+ j: int
+ Column index
+
+ Returns
+ -------
+ pyomo.contrib.pynumero.sparse BlockVector
+
+ """
+ # Note: this method is slightly different than the sparse_matrix
+ # from scipy. It returns an array always instead of returning
+ # an sparse matrix with a single column
+
+ # get block column index
+ bcol = self.get_block_column_index(j)
+ bm, bn = self.bshape
+
+ # compute offset columns
+ offset = 0
+ if bcol > 0:
+ cum_sum = self._bcol_lengths.cumsum()
+ offset = cum_sum[bcol-1]
+
+ # build block vector
+ result = BlockVector(bm)
+ for i in range(bm):
+ mat = self.get_block(i, bcol)
+ if self.is_empty_block(i, bcol):
+ v = np.zeros(self._brow_lengths[i])
+ elif isinstance(mat, BaseBlockMatrix):
+ # this will return a block vector
+ v = mat.getcol(j-offset)
+ else:
+ # if it is sparse matrix transform array to vector
+ v = mat.getcol(j-offset).toarray().flatten()
+ result.set_block(i, v)
+ return result
+
+ def getrow(self, i):
+ """
+ Returns vector of column i
+
+ Parameters
+ ----------
+ i: int
+ Row index
+
+ Returns
+ -------
+ pyomo.contrib.pynumero.sparse BlockVector
+
+ """
+ # Note: this method is slightly different than the sparse_matrix
+ # from scipy. It returns an array always instead of returning
+ # an sparse matrix with a single row
+
+ # get block column index
+ brow = self.get_block_row_index(i)
+ bm, bn = self.bshape
+
+ # compute offset columns
+ offset = 0
+ if brow > 0:
+ cum_sum = self._brow_lengths.cumsum()
+ offset = cum_sum[brow-1]
+
+ # build block vector
+ result = BlockVector(bn)
+ for j in range(bn):
+ mat = self.get_block(brow, j)
+ if self.is_empty_block(brow, j):
+ v = np.zeros(self._bcol_lengths[j])
+ elif isinstance(mat, BaseBlockMatrix):
+ # this will return a block vector
+ v = mat.getcol(i-offset)
+ else:
+ # if it is sparse matrix transform array to vector
+ v = mat.getcol(i-offset).toarray().flatten()
+ result.set_block(j, v)
+ return result
diff --git a/pyomo/contrib/pynumero/sparse/block_vector.py b/pyomo/contrib/pynumero/sparse/block_vector.py
index d8ef02a7e61..410c51f97aa 100644
--- a/pyomo/contrib/pynumero/sparse/block_vector.py
+++ b/pyomo/contrib/pynumero/sparse/block_vector.py
@@ -20,73 +20,90 @@
.. rubric:: Contents
"""
-import numpy as np
-import copy as cp
-__all__ = ['BlockVector']
+import operator
+from ..dependencies import numpy as np
+from .base_block import BaseBlockVector
-class BlockVector(np.ndarray):
+__all__ = ['BlockVector', 'NotFullyDefinedBlockVectorError']
+
+
+class NotFullyDefinedBlockVectorError(Exception):
+ pass
+
+
+def assert_block_structure(vec):
+ if vec.has_none:
+ msg = 'Operation not allowed with None blocks.'
+ raise NotFullyDefinedBlockVectorError(msg)
+
+
+class BlockVector(np.ndarray, BaseBlockVector):
"""
- Structured Vector interface
+ Structured vector interface. This interface can be used to
+ performe operations on vectors composed by vectors. For example,
+
+ bv = BlockVector(3)
+ bv.set_block(0, v0)
+ bv.set_block(1, v1)
+ bv.set_block(2, v2)
+
+ where vi are numpy.ndarrays or BlockVectors.
+
+ Attributes
+ ----------
+ _nblocks: int
+ number of blocks
+ _brow_lengths: numpy.ndarray
+ 1D-Array of size nblocks that specifies the length of each entry
+ in the block vector
+ _undefined_brows: set
+ A set of block indices for which the blocks are still None (i.e., the dimensions
+ have not yet ben set). Operations with BlockVectors require all entries to be
+ different than None.
Parameters
- -------------------
- vectors: int or list of 1d-arrays
- number of blocks contained in the block vector
- if a list is passed the block vector is initialized from
- the list of 1d-arrays
+ ----------
+ nblocks: int
+ The number of blocks in the BlockVector
"""
- def __new__(cls, vectors):
-
- if isinstance(vectors, int):
- blocks = [None for i in range(vectors)]
- block_mask = np.zeros(vectors, dtype=bool)
- brow_lengths = np.zeros(vectors, dtype=np.int64)
- arr = np.asarray(blocks, dtype='object')
- obj = arr.view(cls)
- obj._brow_lengths = np.array(brow_lengths, dtype=np.int64)
- obj._block_mask = block_mask
- obj._nblocks = len(brow_lengths)
- obj._has_none = True
- return obj
- elif isinstance(vectors, list):
- nblocks = len(vectors)
- blocks = [None for i in range(nblocks)]
- block_mask = np.zeros(nblocks, dtype=bool)
- brow_lengths = np.zeros(nblocks, dtype=np.int64)
- arr = np.asarray(blocks, dtype='object')
- obj = arr.view(cls)
- obj._brow_lengths = np.array(brow_lengths, dtype=np.int64)
- obj._block_mask = block_mask
- obj._nblocks = len(brow_lengths)
- obj._has_none = True
- for idx, blk in enumerate(vectors):
- obj[idx] = blk
- return obj
- else:
- raise RuntimeError('Vectors must be a list of an integer')
+ def __new__(cls, nblocks):
+ blocks = [None for i in range(nblocks)]
+ arr = np.asarray(blocks, dtype='object')
+ obj = arr.view(cls)
+ obj._nblocks = nblocks
- def __array_finalize__(self, obj):
+ obj._brow_lengths = np.empty(nblocks, dtype=np.float64)
+ obj._brow_lengths.fill(np.nan)
+ obj._undefined_brows = set(range(nblocks))
+
+ return obj
+
+ def __init__(self, nblocks):
+ pass
+ def __array_finalize__(self, obj):
+ """This method is required to subclass from numpy array"""
if obj is None:
return
self._brow_lengths = getattr(obj, '_brow_lengths', None)
self._nblocks = getattr(obj, '_nblocks', 0)
- self._found_none = getattr(obj, '_has_none', True)
+ self._undefined_brows = getattr(obj, '_undefined_brows', None)
def __array_prepare__(self, out_arr, context=None):
+ """This method is required to subclass from numpy array"""
return super(BlockVector, self).__array_prepare__(self, out_arr, context)
def __array_wrap__(self, out_arr, context=None):
+ """This method is required to subclass from numpy array"""
return super(BlockVector, self).__array_wrap__(self, out_arr, context)
def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
-
- # Note: this for now just flatten the inputs and call super
-
+ """Runs ufuncs speciallizations to BlockVector"""
+ # functions that take one vector
unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil,
np.floor, np.tan, np.arctan, np.arcsin,
np.arccos, np.sinh, np.cosh, np.abs,
@@ -96,8 +113,9 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
np.logical_not, np.expm1, np.exp2, np.sign,
np.rint, np.square, np.positive, np.negative,
np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal,
- ]
+ np.signbit]
+ # functions that take two vectors
binary_funcs = [np.add, np.multiply, np.divide, np.subtract,
np.greater, np.greater_equal, np.less, np.less_equal,
np.not_equal, np.maximum, np.minimum, np.fmax,
@@ -105,26 +123,11 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
np.logical_or, np.logical_xor, np.logaddexp,
np.logaddexp2, np.remainder, np.heaviside,
np.hypot]
- # args = []
- # for i, input_ in enumerate(inputs):
- # if isinstance(input_, BlockVector):
- # args.append(input_.flatten())
- # else:
- # args.append(input_)
- args = [input_ for i, input_ in enumerate(inputs)]
+ args = [input_ for i, input_ in enumerate(inputs)]
outputs = kwargs.pop('out', None)
- out_no = []
- if outputs:
- out_args = []
- for j, output in enumerate(outputs):
- if isinstance(output, BlockVector):
- raise NotImplementedError(str(ufunc))
- else:
- out_args.append(output)
- kwargs['out'] = tuple(out_args)
- else:
- outputs = (None,) * ufunc.nout
+ if outputs is not None:
+ raise NotImplementedError(str(ufunc) + ' cannot be used with BlockVector if the out keyword argument is given.')
if ufunc in unary_funcs:
results = self._unary_operation(ufunc, method, *args, **kwargs)
@@ -134,93 +137,89 @@ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
return results
else:
raise NotImplementedError(str(ufunc) + "not supported for BlockVector")
- #results = super(BlockVector, self).__array_ufunc__(ufunc, method,
- # *args, **kwargs)
- # if results is NotImplemented:
- # return NotImplemented
- #
- # if method == 'at':
- # raise NotImplementedError()
- #
- # if ufunc.nout == 1:
- # results = (results,)
- #
- # return results
def _unary_operation(self, ufunc, method, *args, **kwargs):
+ """Run recursion to perform unary_funcs on BlockVector"""
# ToDo: deal with out
x = args[0]
if isinstance(x, BlockVector):
v = BlockVector(x.nblocks)
for i in range(x.nblocks):
- _args = [x[i]] + [args[j] for j in range(1, len(args))]
- v[i] = self._unary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x.get_block(i)] + [args[j] for j in range(1, len(args))]
+ v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs))
return v
- elif isinstance(x, np.ndarray):
+ elif type(x) == np.ndarray:
return super(BlockVector, self).__array_ufunc__(ufunc, method,
*args, **kwargs)
else:
raise NotImplementedError()
def _binary_operation(self, ufunc, method, *args, **kwargs):
+ """Run recursion to perform binary_funcs on BlockVector"""
# ToDo: deal with out
x1 = args[0]
x2 = args[1]
if isinstance(x1, BlockVector) and isinstance(x2, BlockVector):
- assert not x1.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert not x2.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert x1.nblocks == x2.nblocks, 'Operation on BlockVectors need the same number of blocks on each operand'
- assert x1.size == x2.size, 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
+ assert_block_structure(x1)
+ assert_block_structure(x2)
+ assert x1.nblocks == x2.nblocks, \
+ 'Operation on BlockVectors need the same number of blocks on each operand'
+ assert x1.size == x2.size, \
+ 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
res = BlockVector(x1.nblocks)
for i in range(x1.nblocks):
- _args = [x1[i]] + [x2[i]] + [args[j] for j in range(2, len(args))]
- res[i] = self._binary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x1.get_block(i)] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
return res
- elif isinstance(x1, np.ndarray) and isinstance(x2, BlockVector):
- assert not x2.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert x1.size == x2.size, 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
+ elif type(x1)==np.ndarray and isinstance(x2, BlockVector):
+ assert_block_structure(x2)
+ assert x1.size == x2.size, \
+ 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
res = BlockVector(x2.nblocks)
accum = 0
for i in range(x2.nblocks):
nelements = x2._brow_lengths[i]
- _args = [x1[accum: accum + nelements]] + [x2[i]] + [args[j] for j in range(2, len(args))]
- res[i] = self._binary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x1[accum: accum + nelements]] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
accum += nelements
return res
- elif isinstance(x2, np.ndarray) and isinstance(x1, BlockVector):
- assert not x1.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert x1.size == x2.size, 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
+ elif type(x2)==np.ndarray and isinstance(x1, BlockVector):
+ assert_block_structure(x1)
+ assert x1.size == x2.size, \
+ 'Dimension missmatch {}!={}'.format(x1.size, x2.size)
res = BlockVector(x1.nblocks)
accum = 0
for i in range(x1.nblocks):
nelements = x1._brow_lengths[i]
- _args = [x1[i]] + [x2[accum: accum + nelements]] + [args[j] for j in range(2, len(args))]
- res[i] = self._binary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x1.get_block(i)] + [x2[accum: accum + nelements]] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
accum += nelements
return res
elif np.isscalar(x1) and isinstance(x2, BlockVector):
- assert not x2.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(x2)
res = BlockVector(x2.nblocks)
for i in range(x2.nblocks):
- _args = [x1] + [x2[i]] + [args[j] for j in range(2, len(args))]
- res[i] = self._binary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
return res
elif np.isscalar(x2) and isinstance(x1, BlockVector):
- assert not x1.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(x1)
res = BlockVector(x1.nblocks)
for i in range(x1.nblocks):
- _args = [x1[i]] + [x2] + [args[j] for j in range(2, len(args))]
- res[i] = self._binary_operation(ufunc, method, *_args, **kwargs)
+ _args = [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
return res
- elif (isinstance(x1, np.ndarray) or np.isscalar(x1)) and (isinstance(x2, np.ndarray) or np.isscalar(x2)):
+ elif (type(x1)==np.ndarray or np.isscalar(x1)) and (type(x2)==np.ndarray or np.isscalar(x2)):
return super(BlockVector, self).__array_ufunc__(ufunc, method,
*args, **kwargs)
else:
+ if x1.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
+ if x2.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
-
-
@property
def nblocks(self):
"""
@@ -231,56 +230,71 @@ def nblocks(self):
@property
def bshape(self):
"""
- Returns the number of blocks.
+ Returns the number of blocks in this BlockVector in a tuple.
"""
return self.nblocks,
@property
def shape(self):
"""
- Returns total number of elements in the block vector
+ Returns total number of elements in this BlockVector
"""
+ assert_block_structure(self)
return np.sum(self._brow_lengths),
- @shape.setter
- def shape(self, new_shape):
- raise NotImplementedError("BlockVector does not support reshaping")
-
@property
def size(self):
"""
- Returns total number of elements in the block vector
+ Returns total number of elements in this BlockVector
"""
+ assert_block_structure(self)
return np.sum(self._brow_lengths)
- @size.setter
- def size(self, new_size):
- raise NotImplementedError("BlockVector does not support resizing")
-
@property
def ndim(self):
"""
- Returns dimension of the block vector
+ Returns dimension of this BlockVector
"""
return 1
@property
def has_none(self):
- if not self._has_none:
- return False
- if not np.all(self._block_mask):
- return True
-
- block_arr = np.array([blk.has_none for blk in self if isinstance(blk, BlockVector)], dtype=bool)
- it_has = np.any(block_arr)
- self._has_none = it_has
- return it_has
+ """
+ Indicates whether this BlockVector has any None entries.
+ """
+ # this flag is updated in __setattr__
+ return len(self._undefined_brows) != 0
- def block_sizes(self):
+ def block_sizes(self, copy=True):
"""
- Returns array with sizes of individual blocks
+ Returns 1D-Array with sizes of individual blocks in this BlockVector
"""
- return np.copy(self._brow_lengths)
+ assert_block_structure(self)
+ if copy:
+ return self._brow_lengths.copy()
+ return self._brow_lengths
+
+ def get_block_size(self, ndx):
+ if ndx in self._undefined_brows:
+ raise NotFullyDefinedBlockVectorError('The dimensions of the requested block are not defined.')
+ return int(self._brow_lengths[ndx])
+
+ def _set_block_size(self, ndx, size):
+ if ndx in self._undefined_brows:
+ self._undefined_brows.remove(ndx)
+ self._brow_lengths[ndx] = size
+ if len(self._undefined_brows) == 0:
+ self._brow_lengths = np.asarray(self._brow_lengths, dtype=np.int64)
+ else:
+ if self._brow_lengths[ndx] != size:
+ raise ValueError('Incompatible dimensions for '
+ 'block {ndx}; got {got}; '
+ 'expected {exp}'.format(ndx=ndx,
+ got=size,
+ exp=self._brow_lengths[ndx]))
+
+ def is_block_defined(self, ndx):
+ return ndx not in self._undefined_brows
def dot(self, other, out=None):
"""
@@ -295,256 +309,224 @@ def dot(self, other, out=None):
float
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert out is None, 'Operation not supported with out keyword'
+ assert_block_structure(self)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
- return sum(self[i].dot(other[i]) for i in range(self.nblocks))
- elif isinstance(other, np.ndarray):
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
+ return sum(self.get_block(i).dot(other.get_block(i)) for i in range(self.nblocks))
+ elif type(other)==np.ndarray:
bv = self.flatten()
return bv.dot(other)
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def sum(self, axis=None, dtype=None, out=None, keepdims=False):
"""
- Returns the sum of all entries in the block vector
+ Returns the sum of all entries in this BlockVector
"""
- return sum(self[i].sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
- for i in range(self.nblocks) if self._block_mask[i])
+ assert_block_structure(self)
+ results = np.array([self.get_block(i).sum() for i in range(self.nblocks)])
+ return results.sum(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def all(self, axis=None, out=None, keepdims=False):
"""
Returns True if all elements evaluate to True.
"""
- d = tuple(v for v in self if v is not None)
- arr = np.concatenate(d)
- return arr.all(axis=axis, out=out, keepdims=keepdims)
+ assert_block_structure(self)
+ results = np.array([self.get_block(i).all() for i in range(self.nblocks)],
+ dtype=np.bool)
+ return results.all(axis=axis, out=out, keepdims=keepdims)
def any(self, axis=None, out=None, keepdims=False):
"""
- Returns True if all elements evaluate to True.
+ Returns True if any element evaluates to True.
"""
- d = tuple(v for v in self if v is not None)
- arr = np.concatenate(d)
- return arr.any(axis=axis, out=out, keepdims=keepdims)
+ assert_block_structure(self)
+ results = np.array([self.get_block(i).any() for i in range(self.nblocks)],
+ dtype=np.bool)
+ return results.any(axis=axis, out=out, keepdims=keepdims)
def max(self, axis=None, out=None, keepdims=False):
"""
- Returns the largest value stored in the vector
+ Returns the largest value stored in this BlockVector
"""
- return max([self[i].max(axis=axis, out=None, keepdims=keepdims)
- for i in range(self.nblocks) if self._block_mask[i]])
-
- def argpartition(self, kth, axis=-1, kind='introselect', order=None):
- raise NotImplementedError("argpartition not implemented for BlockVector")
-
- def argsort(self, axis=-1, kind='quicksort', order=None):
- raise NotImplementedError("argsort not implemented for BlockVector")
+ assert_block_structure(self)
+ results = list()
+ for block in self:
+ if block.size > 0:
+ results.append(block.max())
+ return max(results)
def astype(self, dtype, order='K', casting='unsafe', subok=True, copy=True):
-
+ """Copy of the array, cast to a specified type"""
if copy:
bv = BlockVector(self.nblocks)
for bid, vv in enumerate(self):
- if self._block_mask[bid]:
- bv[bid] = vv.astype(dtype, order=order, casting=casting, subok=subok, copy=copy)
- else:
- bv[bid] = None
+ if bid not in self._undefined_brows:
+ bv.set_block(bid, vv.astype(dtype,
+ order=order,
+ casting=casting,
+ subok=subok,
+ copy=copy))
return bv
raise NotImplementedError("astype not implemented for copy=False")
- def byteswap(self, inplace=False):
- raise NotImplementedError("byteswap not implemented for BlockVector")
+ def clip(self, min=None, max=None, out=None):
+ """
+ Return BlockVector whose values are limited to [min, max].
+ One of max or min must be given.
- def choose(self, choices, out=None, mode='raise'):
- raise NotImplementedError("choose not implemented for BlockVector")
+ Parameters
+ ----------
+ min: scalar_like, optional
+ Minimum value. If None, clipping is not performed on lower interval edge.
+ max: scalar_like, optional
+ Maximum value. If None, clipping is not performed on upper interval edge.
- def clip(self, min=None, max=None, out=None):
+ Returns
+ -------
+ BlockVector
- if out is not None:
- raise NotImplementedError()
+ """
+ assert_block_structure(self)
+ assert out is None, 'Out keyword not supported'
bv = BlockVector(self.nblocks)
- for bid, vv in enumerate(self):
- if self._block_mask[bid]:
- bv[bid] = vv.clip(min=min, max=max, out=None)
- else:
- bv[bid] = None
+ for bid in range(self.nblocks):
+ bv.set_block(bid, self.get_block(bid).clip(min=min, max=max, out=None))
return bv
def compress(self, condition, axis=None, out=None):
- if out is not None:
- raise NotImplementedError('compress not supported with out')
+ """
+ Return selected slices of each subblock.
+
+ Parameters
+ ----------
+ condition: Array or BlockVector that selects which entries to return.
+ Entries where condition evaluates to True are selected.
+
+ Returns
+ -------
+ BlockVector
+
+ """
+ assert_block_structure(self)
+ assert out is None, 'Out keyword not supported'
result = BlockVector(self.nblocks)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
if isinstance(condition, BlockVector):
- assert not condition.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == condition.shape, 'Dimension mismatch {} != {}'.format(self.shape, condition.shape)
- assert self.nblocks == condition.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- condition.nblocks)
- for idx, blk in enumerate(self):
- result[idx] = blk.compress(condition[idx])
+ assert_block_structure(condition)
+ assert self.shape == condition.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, condition.shape)
+ assert self.nblocks == condition.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ condition.nblocks)
+ for idx in range(self.nblocks):
+ result.set_block(idx, self.get_block(idx).compress(condition.get_block(idx)))
return result
- elif isinstance(condition, np.ndarray):
- assert self.shape == condition.shape, 'Dimension mismatch {} != {}'.format(self.shape,
- condition.shape)
+ elif type(condition)==np.ndarray:
+ assert self.shape == condition.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape,
+ condition.shape)
accum = 0
- for idx, blk in enumerate(self):
+ for idx in range(self.nblocks):
nelements = self._brow_lengths[idx]
- result[idx] = blk.compress(condition[accum: accum + nelements])
+ result.set_block(idx, self.get_block(idx).compress(condition[accum: accum + nelements]))
accum += nelements
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def conj(self):
"""
Complex-conjugate all elements.
"""
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
- for idx, blk in enumerate(self):
- if self._block_mask[idx]:
- result[idx] = blk.conj()
- else:
- result[idx] = None
+ for idx in range(self.nblocks):
+ result.set_block(idx, self.get_block(idx).conj())
return result
def conjugate(self):
"""
Complex-conjugate all elements.
"""
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
- for idx, blk in enumerate(self):
- if self._block_mask[idx]:
- result[idx] = blk.conjugate()
- else:
- result[idx] = None
+ for idx in range(self.nblocks):
+ result.set_block(idx, self.get_block(idx).conjugate())
return result
- def diagonal(self, offset=0, axis1=0, axis2=1):
- raise ValueError('diag requires an array of at least two dimensions')
-
- def dump(self, file):
- raise NotImplementedError('TODO')
-
- def dumps(self):
- raise NotImplementedError('TODO')
-
- def getfield(self, dtype, offset=0):
- raise NotImplementedError('getfield not implemented for BlockVector')
-
- def item(self, *args):
- raise NotImplementedError('item not implemented for BlockVector')
-
- def itemset(self, *args):
- raise NotImplementedError('itemset not implemented for BlockVector')
-
- def newbyteorder(self, new_order='S'):
- raise NotImplementedError('newbyteorder not implemented for BlockVector')
-
def nonzero(self):
"""
Return the indices of the elements that are non-zero.
"""
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
- for idx, blk in enumerate(self):
- if self._block_mask[idx]:
- result[idx] = blk.nonzero()[0]
- else:
- result[idx] = None
+ for idx in range(self.nblocks):
+ result.set_block(idx, self.get_block(idx).nonzero()[0])
return (result,)
def ptp(self, axis=None, out=None, keepdims=False):
"""
Peak to peak (maximum - minimum) value along a given axis.
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- return self.flatten().ptp(axis=axis, out=out)
-
- def put(self, indices, values, mode='raise'):
- raise NotImplementedError('TODO')
-
- def partition(self, kth, axis=-1, kind='introselect', order=None):
- raise NotImplementedError('partition not implemented for BlockVector')
-
- def repeat(self, repeats, axis=None):
- raise NotImplementedError('repeat not implemented for BlockVector')
-
- def reshape(self, shape, order='C'):
- raise NotImplementedError('reshape not implemented for BlockVector')
-
- def resize(self, new_shape, refcheck=True):
- raise NotImplementedError('resize not implemented for BlockVector')
+ assert_block_structure(self)
+ assert out is None, 'Out keyword not supported'
+ return self.max()-self.min()
def round(self, decimals=0, out=None):
"""
- Return a with each element rounded to the given number of decimals
+ Return BlockVector with each element rounded to the given number of decimals
"""
- if out is not None:
- raise NotImplementedError('round not implemented with out input')
+ assert_block_structure(self)
+ assert out is None, 'Out keyword not supported'
result = BlockVector(self.nblocks)
- for idx, blk in enumerate(self):
- if self._block_mask[idx]:
- result[idx] = blk.round(decimals=0, out=None)
- else:
- result[idx] = None
+ for idx in range(self.nblocks):
+ result.set_block(idx, self.get_block(idx).round(decimals=decimals))
return result
- def searchsorted(self, v, side='left', sorter=None):
- raise NotImplementedError('searchsorted not implemented for BlockVector')
-
- def setfield(self, val, dtype, offset=0):
- raise NotImplementedError('setfield not implemented for BlockVector')
-
- def setflags(self, write=None, align=None, uic=None):
- raise NotImplementedError('setflags not implemented for BlockVector')
-
- def sort(self, axis=-1, kind='quicksort', order=None):
- raise NotImplementedError('sort not implemented for BlockVector')
-
- def squeeze(self, axis=None):
- raise NotImplementedError('squeeze not implemented for BlockVector')
-
def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
- Returns the standard deviation of the array elements along given axis.
+ Returns the standard deviation of the BlockVector elements.
"""
return self.flatten().std(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
- def swapaxes(self, axis1, axis2):
- raise NotImplementedError('swapaxes not implemented for BlockVector')
-
- def take(self, indices, axis=None, out=None, mode='raise'):
+ def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
"""
- Return an array formed from the elements of a at the given indices.
+ Returns the variance of the BlockVector elements.
"""
- return self.flatten().take(indices, axis=axis, out=out, mode=mode)
-
- def tobytes(self, order='C'):
- raise NotImplementedError('tobytes not implemented for BlockVector')
+ return self.flatten().var(axis=axis, dtype=dtype, out=out, ddof=ddof, keepdims=keepdims)
def tofile(self, fid, sep="", format="%s"):
"""
- Write array to a file as text or binary (default).
+ Writes flat version of BlockVector to a file as text or binary (default).
"""
self.flatten().tofile(fid, sep=sep, format=format)
- def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
- raise NotImplementedError('trace not implemented for BlockVector')
-
def min(self, axis=None, out=None, keepdims=False):
"""
Returns the smallest value stored in the vector
"""
- return min([self[i].min(axis=axis, out=None, keepdims=keepdims)
- for i in range(self.nblocks) if self._block_mask[i]])
+ assert_block_structure(self)
+ results = list()
+ for block in self:
+ if block.size > 0:
+ results.append(block.min())
+ return min(results)
def mean(self, axis=None, dtype=None, out=None, keepdims=False):
"""
- Returns the average of all entries in the vector
+ Returns the average of all entries in this BlockVector
"""
n = self.size
if n == 0:
@@ -553,15 +535,15 @@ def mean(self, axis=None, dtype=None, out=None, keepdims=False):
def prod(self, axis=None, dtype=None, out=None, keepdims=False):
"""
- Returns the product of all entries in the vector
+ Returns the product of all entries in this BlockVector
"""
- arr = [self[i].prod(axis=axis, dtype=dtype, out=None, keepdims=keepdims)
- for i in range(self.nblocks) if self._block_mask[i]]
- return np.prod(arr)
+ assert_block_structure(self)
+ results = np.array([self.get_block(i).prod() for i in range(self.nblocks)])
+ return results.prod(axis=axis, dtype=dtype, out=out, keepdims=keepdims)
def fill(self, value):
"""
- Fills the array with a scalar value.
+ Fills the BlockVector with a scalar value.
Parameters
----------
@@ -573,13 +555,13 @@ def fill(self, value):
None
"""
+ assert_block_structure(self)
for i in range(self.nblocks):
- if self._block_mask[i]:
- self[i].fill(value)
+ self.get_block(i).fill(value)
def tolist(self):
"""
- Return the vector as a list.
+ Return the BlockVector flattened as a list.
Returns
-------
@@ -600,10 +582,11 @@ def flatten(self, order='C'):
Returns
-------
- ndarray
+ numpy.ndarray
"""
- all_blocks = tuple(v.flatten(order=order) for v in self)
+ assert_block_structure(self)
+ all_blocks = tuple(self.get_block(i).flatten(order=order) for i in range(self.nblocks))
return np.concatenate(all_blocks)
def ravel(self, order='C'):
@@ -618,31 +601,31 @@ def ravel(self, order='C'):
Returns
-------
- ndarray
+ numpy.ndarray
"""
- all_blocks = tuple(v.ravel(order=order) for v in self)
+ assert_block_structure(self)
+ all_blocks = tuple(self.get_block(i).ravel(order=order) for i in range(self.nblocks))
return np.concatenate(all_blocks)
def argmax(self, axis=None, out=None):
"""
- Returns the index of the largest element.
+ Returns the index of the largest element.
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
return self.flatten().argmax(axis=axis, out=out)
def argmin(self, axis=None, out=None):
"""
Returns the index of the smallest element.
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
return self.flatten().argmin(axis=axis, out=out)
def cumprod(self, axis=None, dtype=None, out=None):
"""
Returns the cumulative product of the elements along the given axis.
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
flat = self.flatten().cumprod(axis=axis, dtype=dtype, out=out)
v = self.clone()
v.copyfrom(flat)
@@ -652,7 +635,6 @@ def cumsum(self, axis=None, dtype=None, out=None):
"""
Returns the cumulative sum of the elements along the given axis.
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
flat = self.flatten().cumsum(axis=axis, dtype=dtype, out=out)
v = self.clone()
v.copyfrom(flat)
@@ -660,109 +642,150 @@ def cumsum(self, axis=None, dtype=None, out=None):
def clone(self, value=None, copy=True):
"""
- Returns a copy of the block vector
+ Returns a copy of this BlockVector
Parameters
----------
value: scalar (optional)
all entries of the cloned vector are set to this value
- copy: bool (optinal)
- if set to true makes a deepcopy of each block in this vector. default False
+ copy: bool (optional)
+ if True makes a deepcopy of each block in this vector. default True
Returns
-------
BlockVector
+
"""
result = BlockVector(self.nblocks)
- for idx, blk in enumerate(self):
- if copy:
- result[idx] = cp.deepcopy(blk)
- else:
- result[idx] = blk
- result._block_mask[idx] = self._block_mask[idx]
- result._brow_lengths[idx] = self._brow_lengths[idx]
+ for idx in range(self.nblocks):
+ if idx not in self._undefined_brows:
+ if copy:
+ result.set_block(idx, self.get_block(idx).copy())
+ else:
+ result.set_block(idx, self.get_block(idx))
if value is not None:
result.fill(value)
return result
def copyfrom(self, other):
"""
- Copies entries of other vector into this vector
+ Copy entries of other vector into this vector
Parameters
----------
- other: BlockVector or ndarray
+ other: BlockVector or numpy.ndarray
+ vector to be copied to this BlockVector
Returns
-------
None
+
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
+
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
- for idx, blk in enumerate(other):
- if isinstance(blk, BlockVector) or isinstance(self[idx], BlockVector):
- self[idx].copyfrom(blk)
+ assert_block_structure(other)
+
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
+ for idx in range(other.nblocks):
+ if isinstance(self.get_block(idx), BlockVector):
+ self.get_block(idx).copyfrom(other.get_block(idx))
+ elif isinstance(self.get_block(idx), np.ndarray):
+ if isinstance(other.get_block(idx), BlockVector):
+ self.set_block(idx, other.get_block(idx).copy())
+ elif isinstance(other.get_block(idx), np.ndarray):
+ np.copyto(self.get_block(idx), other.get_block(idx))
+ else:
+ raise RuntimeError('Input not recognized')
+ elif self.get_block(idx) is None:
+ if isinstance(other.get_block(idx), np.ndarray):
+ # this includes BlockVectors too
+ self.set_block(idx, other.get_block(idx).copy())
+ else:
+ raise RuntimeError('Input not recognized')
else:
- np.copyto(self[idx], blk)
+ raise RuntimeError('Input not recognized')
elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
offset = 0
- for idx, blk in enumerate(self):
- subarray = other[offset: offset + self[idx].size]
- if isinstance(self[idx], BlockVector):
- self[idx].copyfrom(subarray)
+ for idx in range(self.nblocks):
+ subarray = other[offset: offset + self.get_block(idx).size]
+ if isinstance(self.get_block(idx), BlockVector):
+ self.get_block(idx).copyfrom(subarray)
else:
- np.copyto(self[idx], subarray)
- offset += self[idx].size
+ np.copyto(self.get_block(idx), subarray)
+ offset += self.get_block(idx).size
else:
- raise NotImplementedError()
+ raise NotImplementedError('Operation not supported by BlockVector')
def copyto(self, other):
"""
- Copies entries of this vector into other
+ Copy entries of this BlockVector into other
Parameters
----------
- other: BlockVector or ndarray
+ other: BlockVector or numpy.ndarray
Returns
-------
None
+
"""
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+
if isinstance(other, BlockVector):
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
- for idx, blk in enumerate(self):
- if other[idx] is not None:
- msgi = 'Dimension mismatch in subblock {} {} != {}'
- assert other[idx].shape == blk.shape, msgi.format(idx,
- blk.shape,
- other[idx].shape)
- if isinstance(blk, BlockVector):
- other[idx] = blk.clone(copy=True)
+ msgj = 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
+ assert self.nblocks == other.nblocks, msgj
+ for idx in range(self.nblocks):
+ if isinstance(other.get_block(idx), BlockVector):
+ other.get_block(idx).copyfrom(self.get_block(idx))
+ elif isinstance(other.get_block(idx), np.ndarray):
+ if self.get_block(idx) is not None:
+ np.copyto(other.get_block(idx), self.get_block(idx).flatten())
+ else:
+ other.set_block(idx, None)
+ elif other.get_block(idx) is None:
+ if self.get_block(idx) is not None:
+ other.set_block(idx, self.get_block(idx).copy())
+ else:
+ other.set_block(idx, None)
else:
- other[idx] = cp.deepcopy(blk)
+ raise RuntimeError('Should never get here')
elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
np.copyto(other, self.flatten())
-
else:
raise NotImplementedError()
def copy(self, order='C'):
+ """
+ Returns a copy of the BlockVector
+ """
bv = BlockVector(self.nblocks)
- for bid, vv in enumerate(self):
- if self._block_mask[bid]:
- bv[bid] = vv.copy(order=order)
- else:
- bv[bid] = None
+ for bid in range(self.nblocks):
+ if bid not in self._undefined_brows:
+ bv.set_block(bid, self.get_block(bid).copy(order=order))
+ return bv
+
+ def copy_structure(self):
+ """
+ Returns a copy of the BlockVector structure filled with zeros
+ """
+ bv = BlockVector(self.nblocks)
+ for bid in range(self.nblocks):
+ if self.get_block(bid) is not None:
+ if isinstance(self.get_block(bid), BlockVector):
+ bv.set_block(bid, self.get_block(bid).copy_structure())
+ elif type(self.get_block(bid)) == np.ndarray:
+ bv.set_block(bid, np.zeros(self.get_block(bid).size, dtype=self.get_block(bid).dtype))
+ else:
+ raise NotImplementedError('Should never get here')
return bv
def set_blocks(self, blocks):
@@ -772,533 +795,549 @@ def set_blocks(self, blocks):
Parameters
----------
blocks: list
- list of vectors
+ list of numpy.ndarrays and/or BlockVectors
Returns
-------
None
+
"""
- assert isinstance(blocks, list), 'blocks should be passed in ordered list'
- msg = 'More blocks passed than allocated {} != {}'.format(len(blocks), self.nblocks)
- assert len(blocks) == self.nblocks, msg
+ assert isinstance(blocks, list), \
+ 'blocks should be passed in ordered list'
+ assert len(blocks) == self.nblocks, \
+ 'More blocks passed than allocated {} != {}'.format(len(blocks),
+ self.nblocks)
for idx, blk in enumerate(blocks):
- self[idx] = blk
+ self.set_block(idx, blk)
- def _check_mask(self):
- msg = 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- msg += '\n{}'.format(self.__str__())
- if not np.all(self._block_mask):
- raise RuntimeError(msg)
- for idx, blk in enumerate(self):
- if isinstance(blk, BlockVector):
- blk._check_mask()
+ def __iter__(self):
+ for ndx in range(self._nblocks):
+ yield self.get_block(ndx)
def __add__(self, other):
+ # add this BlockVector with other vector
+ # supports addition with scalar, numpy.ndarray and BlockVectors
+ # returns BlockVector
result = BlockVector(self.nblocks)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = blk + other[idx]
+ result.set_block(idx, blk + other.get_block(idx))
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = blk + other[accum: accum + nelements]
+ result.set_block(idx, blk + other[accum: accum + nelements])
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = blk + other
+ result.set_block(idx, blk + other)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __radd__(self, other): # other + self
return self.__add__(other)
def __sub__(self, other):
+ # subtract this BlockVector with other vector
+ # supports subtraction with scalar, numpy.ndarray and BlockVectors
+ # returns BlockVector
result = BlockVector(self.nblocks)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = blk - other[idx]
+ result.set_block(idx, blk - other.get_block(idx))
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = blk - other[accum: accum + nelements]
+ result.set_block(idx, blk - other[accum: accum + nelements])
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = blk - other
+ result.set_block(idx, blk - other)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __rsub__(self, other): # other - self
+
result = BlockVector(self.nblocks)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = other[idx] - blk
+ result.set_block(idx, other.get_block(idx) - blk)
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = other[accum: accum + nelements] - blk
+ result.set_block(idx, other[accum: accum + nelements] - blk)
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = other - blk
+ result.set_block(idx, other - blk)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __mul__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise multiply this BlockVector with other vector
+ # supports multiplication with scalar, numpy.ndarray and BlockVectors
+ # returns BlockVector
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = blk .__mul__(other[idx])
+ result.set_block(idx, blk .__mul__(other.get_block(idx)))
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = blk.__mul__(other[accum: accum + nelements])
+ result.set_block(idx, blk.__mul__(other[accum: accum + nelements]))
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = blk.__mul__(other)
+ result.set_block(idx, blk.__mul__(other))
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __rmul__(self, other): # other + self
return self.__mul__(other)
def __truediv__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise divide this BlockVector with other vector
+ # supports division with scalar, numpy.ndarray and BlockVectors
+ # returns BlockVector
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = blk.__truediv__(other[idx])
+ result.set_block(idx, blk / other.get_block(idx))
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = blk.__truediv__(other[accum: accum + nelements])
+ result.set_block(idx, blk / other[accum: accum + nelements])
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = blk.__truediv__(other)
+ result.set_block(idx, blk / other)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __rtruediv__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = other[idx].__rtruediv__(blk)
+ result.set_block(idx, other.get_block(idx) / blk)
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = other[accum: accum + nelements].__rtruediv__(blk)
+ result.set_block(idx, other[accum: accum + nelements] / blk)
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = other.__rtruediv__(blk)
+ result.set_block(idx, other / blk)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __floordiv__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = blk.__floordiv__(other[idx])
+ result.set_block(idx, blk // other.get_block(idx))
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = blk.__floordiv__(other[accum: accum + nelements])
+ result.set_block(idx, blk // other[accum: accum + nelements])
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = blk.__floordiv__(other)
+ result.set_block(idx, blk // other)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __rfloordiv__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
result = BlockVector(self.nblocks)
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- result[idx] = other[idx].__rfloordiv__(blk)
+ result.set_block(idx, other.get_block(idx) // blk)
return result
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- result[idx] = other[accum: accum + nelements].__rfloordiv__(blk)
+ result.set_block(idx, other[accum: accum + nelements] // blk)
accum += nelements
return result
elif np.isscalar(other):
for idx, blk in enumerate(self):
- result[idx] = other.__rfloordiv__(blk)
+ result.set_block(idx, other // blk)
return result
else:
+ if other.__class__.__name__ == 'MPIBlockVector':
+ raise RuntimeError('Operation not supported by BlockVector')
raise NotImplementedError()
def __iadd__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise inplace addition to this BlockVector with other vector
+ # supports addition with scalar, numpy.ndarray and BlockVectors
+ assert_block_structure(self)
if np.isscalar(other):
for idx, blk in enumerate(self):
- self[idx] = self[idx] + other # maybe it suffice with doing self[idx] = self[idf] + other
+ blk += other
return self
elif isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- self[idx] = self[idx] + other[idx]
+ blk += other.get_block(idx)
return self
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- self[idx] = blk + other[accum: accum + nelements]
+ blk += other[accum: accum + nelements]
accum += nelements
return self
else:
raise NotImplementedError()
def __isub__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise inplace subtraction to this BlockVector with other vector
+ # supports subtraction with scalar, numpy.ndarray and BlockVectors
+ assert_block_structure(self)
if np.isscalar(other):
for idx, blk in enumerate(self):
- self[idx] = self[idx] - other
+ blk -= other
return self
elif isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- self[idx] = self[idx] - other[idx]
+ blk -= other.get_block(idx)
return self
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- self[idx] = blk - other[accum: accum + nelements]
+ blk -= other[accum: accum + nelements]
accum += nelements
return self
else:
raise NotImplementedError()
def __imul__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise inplace multiplication to this BlockVector with other vector
+ # supports multiplication with scalar, numpy.ndarray and BlockVectors
+ assert_block_structure(self)
if np.isscalar(other):
for idx, blk in enumerate(self):
- self[idx] = self[idx] * other
+ blk *= other
return self
elif isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- self[idx] = self[idx] * other[idx]
+ blk *= other.get_block(idx)
return self
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- self[idx] = blk * other[accum: accum + nelements]
+ blk *= other[accum: accum + nelements]
accum += nelements
return self
else:
raise NotImplementedError()
def __itruediv__(self, other):
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ # elementwise inplace division to this BlockVector with other vector
+ # supports division with scalar, numpy.ndarray and BlockVectors
+ assert_block_structure(self)
if np.isscalar(other):
for idx, blk in enumerate(self):
- self[idx] = self[idx] / other
+ blk /= other
return self
elif isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- assert self.nblocks == other.nblocks, 'Number of blocks mismatch {} != {}'.format(self.nblocks,
- other.nblocks)
+ assert_block_structure(other)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch {} != {}'.format(self.nblocks,
+ other.nblocks)
for idx, blk in enumerate(self):
- self[idx] = self[idx] / other[idx]
+ blk /= other.get_block(idx)
return self
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ elif type(other)==np.ndarray:
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
for idx, blk in enumerate(self):
nelements = self._brow_lengths[idx]
- self[idx] = blk / other[accum: accum + nelements]
+ blk /= other[accum: accum + nelements]
accum += nelements
return self
else:
raise NotImplementedError()
- def __str__(self):
+ def __div__(self, other):
+ return self.__truediv__(other)
+
+ def __rdiv__(self, other):
+ return self.__rtruediv__(other)
+
+ def __idiv__(self, other):
+ return self.__itruediv__(other)
+
+ def _print(self, indent):
msg = ''
- for idx in range(self.bshape[0]):
- if isinstance(self[idx], BlockVector):
- repn = self[idx].__repr__()
- elif isinstance(self[idx], np.ndarray):
- repn = "array({})".format(self[idx].size)
- elif self[idx] is None:
- repn = None
+ for ndx, block in enumerate(self):
+ if isinstance(block, BlockVector):
+ msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.bshape) + '\n'
+ msg += block._print(indent=indent+' ')
else:
- raise NotImplementedError("Should not get here")
- msg += '{}: {}\n'.format(idx, repn)
+ msg += indent + str(ndx) + ': ' + block.__class__.__name__ + str(block.shape) + '\n'
return msg
+ def __str__(self):
+ return self._print(indent='')
+
def __repr__(self):
- return '{}{}'.format(self.__class__.__name__, self.shape)
+ return '{}{}'.format(self.__class__.__name__, self.bshape)
- def __getitem__(self, item):
+ def get_block(self, key):
+ return super(BlockVector, self).__getitem__(key)
- if np.isscalar(item):
- return super(BlockVector, self).__getitem__(item)
+ def set_block(self, key, value):
+ assert -self.nblocks < key < self.nblocks, 'out of range'
+ assert isinstance(value, np.ndarray) or \
+ isinstance(value, BaseBlockVector), \
+ 'Blocks need to be numpy arrays or BlockVectors'
+ assert value.ndim == 1, 'Blocks need to be 1D'
- # deal with slices
- arr = self.flatten()
- return arr[item]
+ if isinstance(value, BaseBlockVector):
+ assert_block_structure(value)
+ self._set_block_size(key, value.size)
+ super(BlockVector, self).__setitem__(key, value)
- def __setitem__(self, key, value):
+ def _has_equal_structure(self, other):
+ """
+ Parameters
+ ----------
+ other: BlockVector
- if isinstance(key, slice):
- raise NotImplementedError()
+ Returns
+ -------
+ equal_structure: bool
+ True if self and other have the same block structure (recursive). False otherwise.
+ """
+ if not isinstance(other, BlockVector):
+ return False
+ if self.nblocks != other.nblocks:
+ return False
+ for ndx, block1 in enumerate(self):
+ block2 = other.get_block(ndx)
+ if isinstance(block1, BlockVector):
+ if not isinstance(block2, BlockVector):
+ return False
+ if not block1._has_equal_structure(block2):
+ return False
+ elif isinstance(block2, BlockVector):
+ return False
+ return True
- assert -self.nblocks < key < self.nblocks, 'out of range'
- if value is None:
- super(BlockVector, self).__setitem__(key, None)
- self._block_mask[key] = False
- self._brow_lengths[key] = 0
- self._has_none = True
- else:
- msg = 'Blocks need to be numpy arrays or BlockVectors'
- assert isinstance(value, np.ndarray) or isinstance(value, BlockVector), msg
- assert value.ndim == 1, 'Blocks need to be 1D'
- super(BlockVector, self).__setitem__(key, value)
- self._block_mask[key] = True
- self._brow_lengths[key] = value.size
+ def __getitem__(self, item):
+ if not self._has_equal_structure(item):
+ raise ValueError('BlockVector.__getitem__ only accepts slices in the form of BlockVectors of the same structure')
+ res = BlockVector(self.nblocks)
+ for ndx, block in self:
+ res.set_block(ndx, block[item.get_block(ndx)])
- def __le__(self, other):
+ def __setitem__(self, key, value):
+ if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))):
+ raise ValueError(
+ 'BlockVector.__setitem__ only accepts slices in the form of BlockVectors of the same structure')
+ if np.isscalar(value):
+ for ndx, block in enumerate(self):
+ block[key.get_block(ndx)] = value
+ else:
+ for ndx, block in enumerate(self):
+ block[key.get_block(ndx)] = value.get_block(ndx)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ def _comparison_helper(self, other, operation):
+ assert_block_structure(self)
+ result = self.copy_structure()
if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__le__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
+ assert_block_structure(other)
+ for ndx in range(self.nblocks):
+ result.set_block(ndx, operation(self.get_block(ndx), other.get_block(ndx)))
+ return result
elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
+ assert self.shape == other.shape, \
+ 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__le__(other[accum: accum + nelements])
- accum += nelements
+ for ndx in range(self.nblocks):
+ result.set_block(ndx, operation(self.get_block(ndx), other[accum : accum + self.get_block_size(ndx)]))
+ accum += self.get_block_size(ndx)
return result
elif np.isscalar(other):
- flags = [vv.__le__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
+ for ndx in range(self.nblocks):
+ result.set_block(ndx, operation(self.get_block(ndx), other))
+ return result
else:
- raise NotImplementedError()
+ raise NotImplementedError('Operation not supported by BlockVector')
- def __lt__(self, other):
+ def __le__(self, other):
+ return self._comparison_helper(other, operator.le)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__lt__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
- accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__lt__(other[accum: accum + nelements])
- accum += nelements
- return result
- elif np.isscalar(other):
- flags = [vv.__lt__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- else:
- raise NotImplementedError()
+ def __lt__(self, other):
+ return self._comparison_helper(other, operator.lt)
def __ge__(self, other):
-
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__ge__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
- accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__ge__(other[accum: accum + nelements])
- accum += nelements
- return result
- elif np.isscalar(other):
- flags = [vv.__ge__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- else:
- raise NotImplementedError()
+ return self._comparison_helper(other, operator.ge)
def __gt__(self, other):
-
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__gt__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
- accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__gt__(other[accum: accum + nelements])
- accum += nelements
- return result
- elif np.isscalar(other):
- flags = [vv.__gt__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- else:
- raise NotImplementedError()
+ return self._comparison_helper(other, operator.gt)
def __eq__(self, other):
-
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__eq__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
- accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__eq__(other[accum: accum + nelements])
- accum += nelements
- return result
- elif np.isscalar(other):
- flags = [vv.__eq__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- else:
- raise NotImplementedError()
+ return self._comparison_helper(other, operator.eq)
def __ne__(self, other):
+ return self._comparison_helper(other, operator.ne)
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- if isinstance(other, BlockVector):
- assert not other.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
- flags = [vv.__ne__(other[bid]) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- elif isinstance(other, np.ndarray):
- assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
- result = BlockVector(self.nblocks)
- accum = 0
- for idx, blk in enumerate(self):
- nelements = self._brow_lengths[idx]
- result[idx] = blk.__ne__(other[accum: accum + nelements])
- accum += nelements
- return result
- elif np.isscalar(other):
- flags = [vv.__ne__(other) for bid, vv in enumerate(self)]
- bv = BlockVector(flags)
- return bv
- else:
- raise NotImplementedError()
+ def __neg__(self):
+ # elementwise negate this BlockVector
+ assert_block_structure(self)
+ bv = BlockVector(self.nblocks)
+ for bid in range(self.nblocks):
+ bv.set_block(bid, self.get_block(bid).__neg__())
+ return bv
def __contains__(self, item):
other = item
- assert not self.has_none, 'Operation not allowed with None blocks. Specify all blocks in BlockVector'
+ assert_block_structure(self)
if np.isscalar(other):
contains = False
for idx, blk in enumerate(self):
@@ -1307,3 +1346,130 @@ def __contains__(self, item):
return contains
else:
raise NotImplementedError()
+
+ def __len__(self):
+ return self.nblocks
+
+ def pprint(self):
+ """Prints BlockVector in pretty format"""
+ msg = self.__repr__()
+ msg += '\n'
+ msg += self.__str__()
+ print(msg)
+
+ def toMPIBlockVector(self, rank_ownership, mpi_comm):
+ """
+ Creates a parallel MPIBlockVector from this BlockVector
+
+ Parameters
+ ----------
+ rank_ownership: array_like
+ Array_like of size nblocks. Each entry defines ownership of each block.
+ There are two types of ownership. Blocks that are owned by all processors,
+ and blocks owned by a single processor. If a block is owned by all
+ processors then its ownership is -1. Otherwise, if a block is owned by
+ a single processor, then its ownership is equal to the rank of the
+ processor.
+ mpi_comm: MPI communicator
+ An MPI communicator. Typically MPI.COMM_WORLD
+
+ """
+ from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector
+
+ assert_block_structure(self)
+ assert len(rank_ownership) == self.nblocks, \
+ 'rank_ownership must be of size {}'.format(self.nblocks)
+
+ mpi_bv = MPIBlockVector(self.nblocks,
+ rank_ownership,
+ mpi_comm)
+
+ # populate blocks in the right spaces
+ for bid in mpi_bv.owned_blocks:
+ mpi_bv.set_block(bid, self.get_block(bid))
+ mpi_bv.broadcast_block_sizes()
+
+ return mpi_bv
+
+ # the following methods are not supported by blockvector
+
+ def argpartition(self, kth, axis=-1, kind='introselect', order=None):
+ BaseBlockVector.argpartition(self, kth, axis=axis, kind=kind, order=order)
+
+ def argsort(self, axis=-1, kind='quicksort', order=None):
+ BaseBlockVector.argsort(self, axis=axis, kind=kind, order=order)
+
+ def byteswap(self, inplace=False):
+ BaseBlockVector.byteswap(self, inplace=inplace)
+
+ def choose(self, choices, out=None, mode='raise'):
+ BaseBlockVector.choose(self, choices, out=out, mode=mode)
+
+ def diagonal(self, offset=0, axis1=0, axis2=1):
+ BaseBlockVector.diagonal(self, offset=offset, axis1=axis1, axis2=axis2)
+
+ def dump(self, file):
+ BaseBlockVector.dump(self, file)
+
+ def dumps(self):
+ BaseBlockVector.dumps(self)
+
+ def getfield(self, dtype, offset=0):
+ BaseBlockVector.getfield(self, dtype, offset=offset)
+
+ def item(self, *args):
+ BaseBlockVector.item(self, *args)
+
+ def itemset(self, *args):
+ BaseBlockVector.itemset(self, *args)
+
+ def newbyteorder(self, new_order='S'):
+ BaseBlockVector.newbyteorder(self, new_order=new_order)
+
+ def put(self, indices, values, mode='raise'):
+ BaseBlockVector.put(self, indices, values, mode=mode)
+
+ def partition(self, kth, axis=-1, kind='introselect', order=None):
+ BaseBlockVector.partition(self, kth, axis=axis, kind=kind, order=order)
+
+ def repeat(self, repeats, axis=None):
+ BaseBlockVector.repeat(self, repeats, axis=axis)
+
+ def reshape(self, shape, order='C'):
+ BaseBlockVector.reshape(self, shape, order=order)
+
+ def resize(self, new_shape, refcheck=True):
+ BaseBlockVector.resize(self, new_shape, refcheck=refcheck)
+
+ def searchsorted(self, v, side='left', sorter=None):
+ BaseBlockVector.searchsorted(self, v, side=side, sorter=sorter)
+
+ def setfield(self, val, dtype, offset=0):
+ BaseBlockVector.setfield(self, val, dtype, offset=offset)
+
+ def setflags(self, write=None, align=None, uic=None):
+ BaseBlockVector.setflags(self, write=write, align=align, uic=uic)
+
+ def sort(self, axis=-1, kind='quicksort', order=None):
+ BaseBlockVector.sort(self, axis=axis, kind=kind, order=order)
+
+ def squeeze(self, axis=None):
+ BaseBlockVector.squeeze(self, axis=axis)
+
+ def swapaxes(self, axis1, axis2):
+ BaseBlockVector.swapaxes(self, axis1, axis2)
+
+ def tobytes(self, order='C'):
+ BaseBlockVector.tobytes(self, order=order)
+
+ def take(self, indices, axis=None, out=None, mode='raise'):
+ BaseBlockVector.take(self, indices, axis=axis, out=out, mode=mode)
+
+ def trace(self, offset=0, axis1=0, axis2=1, dtype=None, out=None):
+ raise NotImplementedError('trace not implemented for BlockVector')
+
+ def transpose(*axes):
+ BaseBlockVector.transpose(*axes)
+
+ def tostring(order='C'):
+ BaseBlockVector.tostring(order=order)
diff --git a/pyomo/contrib/pynumero/sparse/coo.py b/pyomo/contrib/pynumero/sparse/coo.py
deleted file mode 100644
index b095fbc60ff..00000000000
--- a/pyomo/contrib/pynumero/sparse/coo.py
+++ /dev/null
@@ -1,80 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-
-from scipy.sparse import coo_matrix as scipy_coo_matrix
-
-import numpy as np
-
-
-__all__ = ['empty_matrix',
- 'diagonal_matrix']
-
-
-# this mimics an empty matrix
-class empty_matrix(scipy_coo_matrix):
-
- def __init__(self, nrows, ncols):
-
- """
-
- Parameters
- ----------
- nrows : int
- Number of rows of sparse matrix
- ncol : int
- Number of columns of sparse matrix
- """
-
- data = np.zeros(0)
- irows = np.zeros(0)
- jcols = np.zeros(0)
- arg1 = (data, (irows, jcols))
- super(empty_matrix, self).__init__(arg1, shape=(nrows, ncols), dtype=np.double, copy=False)
-
-
-class diagonal_matrix(scipy_coo_matrix):
-
- def __init__(self, values, eliminate_zeros=False):
- """
-
- Parameters
- ----------
- values : array-like
- vector with diagonal values
- """
- data = np.array(values, dtype=np.double)
- nrowcols = len(data)
- if eliminate_zeros:
- irows = np.nonzero(data)[0]
- jcols = irows
- data = data[irows]
- else:
- irows = np.arange(0, nrowcols)
- jcols = np.arange(0, nrowcols)
- arg1 = (data, (irows, jcols))
- super(diagonal_matrix, self).__init__(arg1, shape=(nrowcols, nrowcols), dtype=np.double, copy=False)
-
- def __repr__(self):
- return 'diagonal_matrix{}'.format(self.shape)
-
- def inv(self):
-
- """
- Returns inverse of diagonal matrix
-
- Returns
- -------
- diagonal_matrix
- """
- data = 1.0 / self.data
- return diagonal_matrix(data)
-
-
-
diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py
new file mode 100644
index 00000000000..954c0ba0411
--- /dev/null
+++ b/pyomo/contrib/pynumero/sparse/mpi_block_matrix.py
@@ -0,0 +1,1246 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+"""
+The pyomo.contrib.pynumero.sparse.block_matrix module includes methods that extend
+linear algebra operations in scipy for case of structured problems
+where linear algebra operations present an inherent block structure.
+This interface consider matrices of the form:
+
+m = [[m11, m12],[m21, m22], ..]
+
+where m_{i,j} are sparse matrices
+
+.. rubric:: Contents
+
+"""
+
+from .mpi_block_vector import MPIBlockVector
+from .mpi_block_vector import assert_block_structure as mpi_block_vector_assert_block_structure
+from .block_vector import BlockVector
+from .block_vector import assert_block_structure as block_vector_assert_block_structure
+from .block_matrix import BlockMatrix, NotFullyDefinedBlockMatrixError
+from .block_matrix import assert_block_structure as block_matrix_assert_block_structure
+from .base_block import BaseBlockMatrix
+from warnings import warn
+from mpi4py import MPI
+import numpy as np
+from scipy.sparse import coo_matrix
+import operator
+
+__all__ = ['MPIBlockMatrix']
+
+# Array classifiers
+SINGLE_OWNER = 1
+MULTIPLE_OWNER = 2
+ALL_OWN_IT = 0
+
+
+# ALL_OWNED = -1
+
+
+def assert_block_structure(mat):
+ if mat.has_undefined_row_sizes() or mat.has_undefined_col_sizes():
+ msg = 'Call MPIBlockMatrix.broadcast_block_sizes() first. '
+ raise NotFullyDefinedBlockMatrixError(msg)
+
+
+class MPIBlockMatrix(BaseBlockMatrix):
+ """
+ Parallel Structured Matrix interface
+
+ Attributes
+ ----------
+ _rank_owner: numpy.ndarray
+ 2D-array with processor ownership of each block. A block can be owned by a
+ single processor or by all processors. Blocks owned by all processors have
+ ownership -1. Blocks owned by a single processor have ownership rank, where
+ rank=MPI.COMM_WORLD.Get_rank()
+ _mpiw: MPI communicator
+ A communicator from the MPI space. Typically MPI.COMM_WORLD
+ _block_matrix: BlockMatrix
+ Internal BlockMatrix. Blocks that belong to this processor are stored
+ in _block_matrix.
+ _owned_mask: numpy.ndarray bool
+ 2D-array that indicates if a block belongs to this processor. While
+ _rank_owner tells which processor(s) owns each block, _owned_mask tells
+ if a block is owned by this processor. Blocks that are owned by everyone
+ (i.e. ownership = -1) are True in _owned_mask
+ _unique_owned_mask: numpy.ndarray bool
+ 2D-array that indicates if a block belongs to this processor. While
+ _rank_owner tells which processor(s) owns each block, _unique_owned_mask tells
+ if a block is owned by this processor. Blocks that are owned by everyone
+ (i.e. ownership = -1) are False in _unique_owned_mask
+
+ Parameters
+ -------------------
+ nbrows : int
+ number of block-rows in the matrix
+ nbcols : int
+ number of block-columns in the matrix
+ rank_ownership: array_like
+ integer 2D array that specifies the rank of process
+ owner of each block in the matrix. For blocks that are
+ owned by all processes the rank is -1. Blocks that are
+ None should be owned by all processes.
+ mpi_comm : MPI communicator
+ """
+
+ def __init__(self,
+ nbrows,
+ nbcols,
+ rank_ownership,
+ mpi_comm):
+
+ shape = (nbrows, nbcols)
+ self._block_matrix = BlockMatrix(nbrows, nbcols)
+ self._mpiw = mpi_comm
+ self._rank_owner = np.zeros(shape, dtype=np.int64)
+ self._owned_mask = np.zeros(shape, dtype=bool)
+ self._unique_owned_mask = np.zeros(shape, dtype=bool)
+
+ rank = self._mpiw.Get_rank()
+
+ if isinstance(rank_ownership, list):
+ rank_ownership = np.asarray(rank_ownership, dtype=np.int64)
+ if not isinstance(rank_ownership, np.ndarray):
+ raise RuntimeError('rank_ownership must be a list of lists or a numpy array')
+ assert rank_ownership.ndim == 2, 'rank_ownership must be of size 2'
+
+ for i in range(nbrows):
+ for j in range(nbcols):
+ owner = rank_ownership[i, j]
+ assert owner < self._mpiw.Get_size(), \
+ 'rank owner out of range'
+ self._rank_owner[i, j] = owner
+ if rank == owner or owner < 0:
+ self._owned_mask[i, j] = True
+ if owner == rank:
+ self._unique_owned_mask[i, j] = True
+
+ # Note: this requires communication but is disabled when assertions
+ # are turned off
+ assert self._assert_correct_owners(), \
+ 'rank_owner must be the same in all processors'
+
+ # make some of the pointers immutable
+ self._rank_owner.flags.writeable = False
+ self._owned_mask.flags.writeable = False
+ self._unique_owned_mask.flags.writeable = False
+
+ @property
+ def bshape(self):
+ """
+ Returns tuple with the block-shape of the matrix
+ """
+ return self._block_matrix.bshape
+
+ @property
+ def shape(self):
+ """
+ Returns tuple with total number of rows and columns
+ """
+ return self._block_matrix.shape
+
+ @property
+ def nnz(self):
+ """
+ Returns total number of nonzero values in this matrix
+ """
+ local_nnz = 0
+ rank = self._mpiw.Get_rank()
+ block_indices = self._unique_owned_mask if rank != 0 else self._owned_mask
+
+ # this is an easy and efficient way to loop through owned blocks
+ ii, jj = np.nonzero(block_indices)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ local_nnz += self._block_matrix.get_block(i, j).nnz
+
+ return self._mpiw.allreduce(local_nnz, op=MPI.SUM)
+
+ @property
+ def owned_blocks(self):
+ """
+ Returns list with indices of blocks owned by this processor.
+ """
+ bm, bn = self.bshape
+ owned_blocks = []
+ for i in range(bm):
+ for j in range(bn):
+ if self._owned_mask[i, j]:
+ owned_blocks.append((i,j))
+ return owned_blocks
+
+ @property
+ def shared_blocks(self):
+ """
+ Returns list of 2-tuples with indices of blocks shared by all processors
+ """
+ bm, bn = self.bshape
+ owned_blocks = []
+ for i in range(bm):
+ for j in range(bn):
+ if self._owned_mask[i, j] and self._rank_owner[i, j]<0:
+ owned_blocks.append((i,j))
+ return owned_blocks
+
+ @property
+ def rank_ownership(self):
+ """
+ Returns 2D array that specifies the process rank that owns each block. If
+ a block is owned by all processors the ownership=-1.
+ """
+ return self._rank_owner
+
+ @property
+ def ownership_mask(self):
+ """
+ Returns boolean 2D-Array that indicates which blocks are owned by
+ this processor
+ """
+ return self._owned_mask
+
+ @property
+ def mpi_comm(self):
+ """Returns MPI communicator"""
+ return self._mpiw
+
+ def get_row_size(self, row):
+ return self._block_matrix.get_row_size(row)
+
+ def get_col_size(self, col):
+ return self._block_matrix.get_col_size(col)
+
+ def set_row_size(self, row, size):
+ self._block_matrix.set_row_size(row, size)
+
+ def set_col_size(self, col, size):
+ self._block_matrix.set_col_size(col, size)
+
+ def is_row_size_defined(self, row):
+ return self._block_matrix.is_row_size_defined(row)
+
+ def is_col_size_defined(self, col):
+ return self._block_matrix.is_col_size_defined(col)
+
+ @property
+ def T(self):
+ """
+ Transpose matrix
+ """
+ return self.transpose()
+
+ def dot(self, other):
+ """
+ Ordinary dot product
+ """
+ return self * other
+
+ def transpose(self, axes=None, copy=True):
+ """
+ Reverses the dimensions of the block matrix.
+
+ Parameters
+ ----------
+ axes: None, optional
+ This argument is in the signature solely for NumPy compatibility reasons. Do not pass in
+ anything except for the default value.
+ copy: bool
+ This argument is in the signature solely for scipy compatibility reasons. Do not pass in
+ anything except for the default value.
+
+ Returns
+ -------
+ MPIBlockMatrix with dimensions reversed
+ """
+ if axes is not None:
+ raise ValueError(("Sparse matrices do not support "
+ "an 'axes' parameter because swapping "
+ "dimensions is the only logical permutation."))
+ if not copy:
+ raise ValueError('MPIBlockMatrix only supports transpose with copy=True')
+
+ m = self.bshape[0]
+ n = self.bshape[1]
+ assert_block_structure(self)
+ result = MPIBlockMatrix(n, m, self._rank_owner.T, self._mpiw)
+ result._block_matrix = self._block_matrix.transpose()
+ return result
+
+ def tocoo(self):
+ """
+ Converts this matrix to coo_matrix format.
+
+ Returns
+ -------
+ coo_matrix
+
+ """
+ raise RuntimeError('Operation not supported by MPIBlockMatrix')
+
+ def tocsr(self):
+ """
+ Converts this matrix to csr format.
+
+ Returns
+ -------
+ csr_matrix
+
+ """
+ raise RuntimeError('Operation not supported by MPIBlockMatrix')
+
+ def tocsc(self):
+ """
+ Converts this matrix to csc format.
+
+ Returns
+ -------
+ csc_matrix
+
+ """
+ raise RuntimeError('Operation not supported by MPIBlockMatrix')
+
+ def tolil(self, copy=False):
+ BaseBlockMatrix.tolil(self, copy=copy)
+
+ def todia(self, copy=False):
+ BaseBlockMatrix.todia(self, copy=copy)
+
+ def tobsr(self, blocksize=None, copy=False):
+ BaseBlockMatrix.tobsr(self, blocksize=blocksize, copy=copy)
+
+ def coo_data(self):
+ raise RuntimeError('Operation not supported by MPIBlockMatrix')
+
+ def toarray(self):
+ """
+ Returns a dense ndarray representation of this matrix.
+
+ Returns
+ -------
+ arr : ndarray, 2-dimensional
+ An array with the same shape and containing the same data
+ represented by the block matrix.
+
+ """
+ raise RuntimeError('Operation not supported by MPIBlockMatrix')
+
+ def to_local_array(self):
+ """
+ This method is only for testing/debugging
+
+ Returns
+ -------
+ result: np.ndarray
+ """
+ local_result = self._block_matrix.copy_structure()
+ rank = self._mpiw.Get_rank()
+ block_indices = self._unique_owned_mask if rank != 0 else self._owned_mask
+
+ ii, jj = np.nonzero(block_indices)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ local_result.set_block(i, j, self.get_block(i, j))
+ local_result = local_result.toarray()
+ global_result = np.zeros(shape=local_result.shape, dtype=local_result.dtype)
+ self._mpiw.Allreduce(local_result, global_result)
+ return global_result
+
+ def is_empty_block(self, idx, jdx):
+ """
+ Indicates if a block is empty
+
+ Parameters
+ ----------
+ idx: int
+ block-row index
+ jdx: int
+ block-column index
+
+ Returns
+ -------
+ boolean
+
+ """
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ # Note: this requires communication
+ def broadcast_block_sizes(self):
+ """
+ Send sizes of all blocks to all processors. After this method is called
+ this MPIBlockMatrix knows the dimensions of all rows and columns. This method
+ must be called before running any operations with the MPIBlockMatrix.
+ """
+ rank = self._mpiw.Get_rank()
+ num_processors = self._mpiw.Get_size()
+
+ local_row_data = np.zeros(self.bshape[0], dtype=np.int64)
+ local_col_data = np.zeros(self.bshape[1], dtype=np.int64)
+ local_row_data.fill(-1)
+ local_col_data.fill(-1)
+ for row_ndx in range(self.bshape[0]):
+ if self._block_matrix.is_row_size_defined(row_ndx):
+ local_row_data[row_ndx] = self._block_matrix.get_row_size(row_ndx)
+ for col_ndx in range(self.bshape[1]):
+ if self._block_matrix.is_col_size_defined(col_ndx):
+ local_col_data[col_ndx] = self._block_matrix.get_col_size(col_ndx)
+
+ send_data = np.concatenate([local_row_data, local_col_data])
+
+ receive_data = np.empty(num_processors * (self.bshape[0] + self.bshape[1]),
+ dtype=np.int64)
+ self._mpiw.Allgather(send_data, receive_data)
+
+ proc_dims = np.split(receive_data, num_processors)
+ m, n = self.bshape
+
+ brow_lengths = np.zeros(m, dtype=np.int64)
+ bcol_lengths = np.zeros(n, dtype=np.int64)
+
+ # check the rows
+ for i in range(m):
+ rows_length = set()
+ for k in range(num_processors):
+ row_sizes, col_sizes = np.split(proc_dims[k],
+ [self.bshape[0]])
+ rows_length.add(row_sizes[i])
+ if len(rows_length) > 2:
+ msg = 'Row {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ elif len(rows_length) == 2:
+ if -1 not in rows_length:
+ msg = 'Row {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ rows_length.remove(-1)
+ elif -1 in rows_length:
+ msg = 'The dimensions of block row {} were not defined in any process'.format(i)
+ raise NotFullyDefinedBlockMatrixError(msg)
+
+ # here rows_length must only have one element
+ brow_lengths[i] = rows_length.pop()
+
+ # check columns
+ for i in range(n):
+ cols_length = set()
+ for k in range(num_processors):
+ rows_sizes, col_sizes = np.split(proc_dims[k],
+ [self.bshape[0]])
+ cols_length.add(col_sizes[i])
+ if len(cols_length) > 2:
+ msg = 'Column {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ elif len(cols_length) == 2:
+ if -1 not in cols_length:
+ msg = 'Column {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ cols_length.remove(-1)
+ elif -1 in cols_length:
+ msg = 'The dimensions of block column {} were not defined in any process'.format(i)
+ raise NotFullyDefinedBlockMatrixError(msg)
+
+ # here cols_length must only have one element
+ bcol_lengths[i] = cols_length.pop()
+
+ for row_ndx, row_size in enumerate(brow_lengths):
+ self.set_row_size(row_ndx, row_size)
+ for col_ndx, col_size in enumerate(bcol_lengths):
+ self.set_col_size(col_ndx, col_size)
+
+ def row_block_sizes(self, copy=True):
+ """
+ Returns array with row-block sizes
+
+ Parameters
+ ----------
+ copy: bool
+ If False, then the internal array which stores the row block sizes will be returned without being copied.
+ Setting copy to False is risky and should only be done with extreme care.
+
+ Returns
+ -------
+ numpy.ndarray
+
+ """
+ assert_block_structure(self)
+ return self._block_matrix.row_block_sizes(copy=copy)
+
+ def col_block_sizes(self, copy=True):
+ """
+ Returns array with col-block sizes
+
+ Parameters
+ ----------
+ copy: bool
+ If False, then the internal array which stores the column block sizes will be returned without being copied.
+ Setting copy to False is risky and should only be done with extreme care.
+
+ Returns
+ -------
+ numpy.ndarray
+ """
+ assert_block_structure(self)
+ return self._block_matrix.col_block_sizes(copy=copy)
+
+ def block_shapes(self):
+ """
+ Returns list with shapes of blocks in this BlockMatrix
+
+ Notes
+ -----
+ For an MPIBlockMatrix with 2 block-rows and 2 block-cols
+ this method returns [[Block_00.shape, Block_01.shape],[Block_10.shape, Block_11.shape]]
+
+ Returns
+ -------
+ list
+
+ """
+ assert_block_structure(self)
+ return self._block_matrix.block_shapes()
+
+ def has_undefined_row_sizes(self):
+ """
+ Indicates if the matrix has block-rows with undefined dimensions
+
+ Returns
+ -------
+ bool
+
+ """
+ return self._block_matrix.has_undefined_row_sizes()
+
+ def has_undefined_col_sizes(self):
+ """
+ Indicates if the matrix has block-columns with undefined dimensions
+
+ Returns
+ -------
+ bool
+
+ """
+ return self._block_matrix.has_undefined_col_sizes()
+
+ def reset_bcol(self, jdx):
+ """
+ Resets all blocks in selected column to None (0 nonzero entries)
+
+ Parameters
+ ----------
+ jdx: integer
+ column index to be reset
+
+ Returns
+ -------
+ None
+
+ """
+ self._block_matrix.reset_bcol(jdx)
+
+ def reset_brow(self, idx):
+ """
+ Resets all blocks in selected row to None (0 nonzero entries)
+
+ Parameters
+ ----------
+ idx: integer
+ row index to be reset
+
+ Returns
+ -------
+ None
+
+ """
+ self._block_matrix.reset_brow(idx)
+
+ def copy(self):
+ """
+ Makes a copy of this MPIBlockMatrix
+
+ Returns
+ -------
+ MPIBlockMatrix
+
+ """
+ m, n = self.bshape
+ result = MPIBlockMatrix(m, n, self._rank_owner, self._mpiw)
+ result._block_matrix = self._block_matrix.copy()
+ return result
+
+ def copy_structure(self):
+ """
+ Makes a copy of the structure of this MPIBlockMatrix. This provides a
+ lightweight copy of each block in this MPIBlockMatrix. The blocks in the
+ resulting matrix have the same shape as in the original matrices but not
+ the same number of nonzeros.
+
+ Returns
+ -------
+ MPIBlockMatrix
+
+ """
+ m, n = self.bshape
+ result = MPIBlockMatrix(m, n, self._rank_owner, self._mpiw)
+ result._block_matrix = self._block_matrix.copy_structure()
+ return result
+
+ # ToDo: need support for copy from and copy to
+
+ # Note: this requires communication
+ def _assert_correct_owners(self, root=0):
+
+ rank = self._mpiw.Get_rank()
+ num_processors = self._mpiw.Get_size()
+
+ if num_processors == 1:
+ return True
+
+ local_owners = self._rank_owner.flatten()
+ flat_size = self.bshape[0] * self.bshape[1]
+ receive_data = None
+ if rank == root:
+ receive_data = np.empty(flat_size * num_processors, dtype=np.int64)
+ self._mpiw.Gather(local_owners, receive_data, root=root)
+
+ if rank == root:
+ owners_in_processor = np.split(receive_data, num_processors)
+ root_rank_owners = owners_in_processor[root]
+ for i in range(flat_size):
+ for k in range(num_processors):
+ if k != root:
+ if owners_in_processor[k][i] != root_rank_owners[i]:
+ return False
+ return True
+
+ def __repr__(self):
+ return '{}{}'.format(self.__class__.__name__, self.bshape)
+
+ def __str__(self):
+ msg = '{}{}\n'.format(self.__class__.__name__, self.bshape)
+ for idx in range(self.bshape[0]):
+ for jdx in range(self.bshape[1]):
+ rank = self._rank_owner[idx, jdx] if self._rank_owner[idx, jdx] >= 0 else 'A'
+ msg += '({}, {}): Owned by processor{}\n'.format(idx, jdx, rank)
+ return msg
+
+ def pprint(self, root=0):
+ """Prints MPIBlockMatrix in pretty format"""
+ assert_block_structure(self)
+ msg = self.__repr__() + '\n'
+ num_processors = self._mpiw.Get_size()
+ # figure out which ones are none
+ local_mask = self._block_matrix._block_mask.flatten()
+ receive_data = np.empty(num_processors * local_mask.size,
+ dtype=np.bool)
+
+ self._mpiw.Allgather(local_mask, receive_data)
+ all_masks = np.split(receive_data, num_processors)
+ m, n = self.bshape
+ matrix_maks = [mask.reshape(m, n) for mask in all_masks]
+
+ global_mask = np.zeros((m, n), dtype=np.bool)
+ for k in range(num_processors):
+ for idx in range(m):
+ for jdx in range(n):
+ global_mask[idx, jdx] += matrix_maks[k][idx, jdx]
+
+ for idx in range(m):
+ for jdx in range(n):
+ rank = self._rank_owner[idx, jdx] if self._rank_owner[idx, jdx] >= 0 else 'A'
+ row_size = self.get_row_size(idx)
+ col_size = self.get_col_size(jdx)
+ is_none = '' if global_mask[idx, jdx] else '*'
+ repn = 'Owned by {} Shape({},{}){}'.format(rank,
+ row_size,
+ col_size,
+ is_none)
+ msg += '({}, {}): {}\n'.format(idx, jdx, repn)
+ if self._mpiw.Get_rank() == root:
+ print(msg)
+
+ def get_block(self, row, col):
+ block = self._block_matrix.get_block(row, col)
+ owner = self._rank_owner[row, col]
+ rank = self._mpiw.Get_rank()
+ assert owner == rank or \
+ owner < 0, \
+ 'Block {} not owned by processor {}'.format((row, col), rank)
+
+ return block
+
+ def set_block(self, row, col, value):
+ assert row >= 0 and \
+ col >= 0, 'Indices must be positive'
+
+ assert row < self.bshape[0] and \
+ col < self.bshape[1], 'Indices out of range'
+
+ owner = self._rank_owner[row, col]
+ rank = self._mpiw.Get_rank()
+ assert owner == rank or \
+ owner < 0, \
+ 'Block {} not owned by processor {}'.format((row, col), rank)
+
+ self._block_matrix.set_block(row, col, value)
+
+ def __getitem__(self, item):
+ raise NotImplementedError('MPIBlockMatrix does not support __getitem__.')
+
+ def __setitem__(self, item, val):
+ raise NotImplementedError('MPIBlockMatrix does not support __setitem__.')
+
+ def __add__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+ result = self.copy_structure()
+
+ rank = self._mpiw.Get_rank()
+
+ if isinstance(other, MPIBlockMatrix):
+ assert_block_structure(other)
+
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockMatrices must be distributed in same processors'
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ mat1 = self.get_block(i, j)
+ mat2 = other.get_block(i, j)
+ if mat1 is not None and mat2 is not None:
+ result.set_block(i, j, mat1 + mat2)
+ elif mat1 is not None and mat2 is None:
+ result.set_block(i, j, mat1.copy())
+ elif mat1 is None and mat2 is not None:
+ result.set_block(i, j, mat2.copy())
+ else:
+ result.set_block(i, j, None)
+ return result
+
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __radd__(self, other): # other + self
+ return self.__add__(other)
+
+ def __sub__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+ result = self.copy_structure()
+ rank = self._mpiw.Get_rank()
+
+ if isinstance(other, MPIBlockMatrix):
+ assert_block_structure(other)
+
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockMatrices must be distributed in same processors'
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ mat1 = self.get_block(i, j)
+ mat2 = other.get_block(i, j)
+ if mat1 is not None and mat2 is not None:
+ result.set_block(i, j, mat1 - mat2)
+ elif mat1 is not None and mat2 is None:
+ result.set_block(i, j, mat1.copy())
+ elif mat1 is None and mat2 is not None:
+ result.set_block(i, j, -mat2)
+ else:
+ result.set_block(i, j, None)
+ return result
+
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __rsub__(self, other):
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def _block_vector_multiply(self, other):
+ """
+ Parameters
+ ----------
+ other: BlockVector
+
+ Returns
+ -------
+ result: BlockVector
+ """
+ block_vector_assert_block_structure(other)
+ assert self.bshape[1] == other.nblocks, 'Dimension mismatch'
+ local_result = BlockVector(self.bshape[0])
+ for row_ndx in range(self.bshape[0]):
+ local_result.set_block(row_ndx, np.zeros(self.get_row_size(row_ndx)))
+ rank = self._mpiw.Get_rank()
+ if rank == 0:
+ block_indices = self._owned_mask
+ else:
+ block_indices = self._unique_owned_mask
+ for row_ndx, col_ndx in zip(*np.nonzero(block_indices)):
+ if self.get_block(row_ndx, col_ndx) is not None:
+ res_blk = local_result.get_block(row_ndx)
+ _tmp = self.get_block(row_ndx, col_ndx) * other.get_block(col_ndx)
+ res_blk = _tmp + res_blk
+ local_result.set_block(row_ndx, res_blk)
+ flat_local = local_result.flatten()
+ flat_global = np.zeros(flat_local.size)
+ self._mpiw.Allreduce(flat_local, flat_global)
+ global_result = local_result.copy_structure()
+ global_result.copyfrom(flat_global)
+ return global_result
+
+ def __mul__(self, other):
+ """
+ When doing A*B with numpy arrays, element-by-element multiplication is done. However, when doing
+ A*B with scipy sparse matrices, a matrix-matrix dot product is performed. We are following the
+ scipy sparse matrix API.
+ """
+
+ assert_block_structure(self)
+
+ if isinstance(other, MPIBlockVector):
+ global_other = other.make_local_copy()
+ result = self._block_vector_multiply(global_other)
+ return result
+ elif isinstance(other, BlockVector):
+ return self._block_vector_multiply(other)
+ elif isinstance(other, np.ndarray):
+ block_other = BlockVector(nblocks=self.bshape[1])
+ for ndx in range(self.bshape[1]):
+ block_other[ndx] = np.zeros(self.get_col_size(ndx), dtype=other.dtype)
+ block_other.copyfrom(other)
+ return self._block_vector_multiply(block_other).flatten()
+ elif np.isscalar(other):
+ result = self.copy_structure()
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, self.get_block(i, j) * other)
+ return result
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __rmul__(self, other):
+ """
+ When doing A*B with numpy arrays, element-by-element multiplication is done. However, when doing
+ A*B with scipy sparse matrices, a matrix-matrix dot product is performed. We are following the
+ scipy sparse matrix API.
+ """
+
+ assert_block_structure(self)
+ m, n = self.bshape
+ result = self.copy_structure()
+
+ if np.isscalar(other):
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, self.get_block(i, j) * other)
+ return result
+
+ if isinstance(other, MPIBlockVector):
+ raise NotImplementedError('Vector-Matrix multiply not supported yet')
+ if isinstance(other, BlockVector):
+ raise NotImplementedError('Vector-Matrix multiply not supported yet')
+
+ if isinstance(other, MPIBlockMatrix):
+ raise NotImplementedError('Matrix-Matrix multiply not supported yet')
+ if isinstance(other, BlockMatrix):
+ raise NotImplementedError('Matrix-Matrix multiply not supported yet')
+
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __pow__(self, other):
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __truediv__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+ result = self.copy_structure()
+
+ if np.isscalar(other):
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, self.get_block(i, j) / other)
+ return result
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __floordiv__(self, other):
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __iadd__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+
+ if isinstance(other, MPIBlockMatrix):
+ assert_block_structure(other)
+
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockMatrices must be distributed in same processors'
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ mat1 = self.get_block(i, j)
+ mat2 = other.get_block(i, j)
+ if mat1 is not None and mat2 is not None:
+ mat1 += mat2
+ self.set_block(i, j, mat1)
+ elif mat1 is None and mat2 is not None:
+ self.set_block(i, j, mat2.copy())
+ return self
+
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __isub__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+
+ if isinstance(other, MPIBlockMatrix):
+ assert_block_structure(other)
+
+ assert other.bshape == self.bshape, \
+ 'dimensions mismatch {} != {}'.format(self.bshape, other.bshape)
+
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockMatrices must be distributed in same processors'
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ mat1 = self.get_block(i, j)
+ mat2 = other.get_block(i, j)
+ if mat1 is not None and mat2 is not None:
+ blk = self.get_block(i, j)
+ blk -= mat2
+ self.set_block(i, j, blk)
+ elif mat1 is None and mat2 is not None:
+ self.set_block(i, j, -mat2)
+ return self
+
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __imul__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+
+ if np.isscalar(other):
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ blk = self.get_block(i, j)
+ blk *= other
+ self.set_block(i, j, blk)
+ return self
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __itruediv__(self, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+
+ if np.isscalar(other):
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ blk = self.get_block(i, j)
+ blk /= other
+ self.set_block(i, j, blk)
+ return self
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __div__(self, other):
+ return self.__truediv__(other)
+
+ def __rdiv__(self, other):
+ return self.__rtruediv__(other)
+
+ def __idiv__(self, other):
+ return self.__itruediv__(other)
+
+ def __neg__(self):
+ assert_block_structure(self)
+ result = self.copy_structure()
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, -self.get_block(i, j))
+ return result
+
+ def __abs__(self):
+ assert_block_structure(self)
+ result = self.copy_structure()
+
+ ii, jj = np.nonzero(self._owned_mask)
+ for i, j in zip(ii, jj):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, abs(self.get_block(i, j)))
+ return result
+
+ def _comparison_helper(self, operation, other):
+ assert_block_structure(self)
+ m, n = self.bshape
+ result = self.copy_structure()
+
+ if isinstance(other, MPIBlockMatrix):
+ assert_block_structure(other)
+ assert other.bshape == self.bshape, 'dimension mismatch {} != {}'.format(self.bshape, other.bshape)
+ assert np.array_equal(self.rank_ownership, other.rank_ownership), 'MPIBlockMatrices must be distributed in ' \
+ 'the same processors'
+
+ for i, j in zip(*np.nonzero(self.ownership_mask)):
+ mat1 = self.get_block(i, j)
+ mat2 = other.get_block(i, j)
+
+ if mat1 is not None and mat2 is not None:
+ result.set_block(i, j, operation(mat1, mat2))
+ else:
+ nrows = self.get_row_size(i)
+ ncols = self.get_col_size(j)
+ mat = coo_matrix((nrows, ncols))
+ if mat1 is not None:
+ result.set_block(i, j, operation(mat1, mat))
+ elif mat2 is not None:
+ result.set_block(i, j, operation(mat, mat2))
+ else:
+ result.set_block(i, j, operation(mat, mat))
+ return result
+ elif np.isscalar(other):
+ for i, j in zip(*np.nonzero(self.ownership_mask)):
+ if not self._block_matrix.is_empty_block(i, j):
+ result.set_block(i, j, operation(self.get_block(i, j), other))
+ else:
+ nrows = self.get_row_size(i)
+ ncols = self.get_col_size(j)
+ mat = coo_matrix((nrows, ncols))
+ result.set_block(i, j, operation(mat, other))
+ return result
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockMatrix')
+
+ def __eq__(self, other):
+ return self._comparison_helper(operation=operator.eq, other=other)
+
+ def __ne__(self, other):
+ return self._comparison_helper(operation=operator.ne, other=other)
+
+ def __le__(self, other):
+ return self._comparison_helper(operation=operator.le, other=other)
+
+ def __lt__(self, other):
+ return self._comparison_helper(operation=operator.lt, other=other)
+
+ def __ge__(self, other):
+ return self._comparison_helper(operation=operator.ge, other=other)
+
+ def __gt__(self, other):
+ return self._comparison_helper(operation=operator.gt, other=other)
+
+ def get_block_column_index(self, index):
+ """
+ Returns block-column idx from matrix column index.
+
+ Parameters
+ ----------
+ index: int
+ Column index
+
+ Returns
+ -------
+ int
+
+ """
+ assert_block_structure(self)
+
+ bm, bn = self.bshape
+        # get cumulative sum of block sizes
+ cum = self.col_block_sizes(copy=False).cumsum()
+ assert index >= 0, 'index out of bounds'
+ assert index < cum[bn-1], 'index out of bounds'
+
+        # exit early if there is only one block-column
+ if bn <= 1:
+ return 0
+
+ ge = cum >= index
+ # find first entry that is greater or equal
+ block_index = np.argmax(ge)
+
+ if cum[block_index] == index:
+ return block_index + 1
+ return block_index
+
+ def get_block_row_index(self, index):
+ """
+ Returns block-row idx from matrix row index.
+
+ Parameters
+ ----------
+ index: int
+ Row index
+
+ Returns
+ -------
+ int
+
+ """
+ assert_block_structure(self)
+
+ bm, bn = self.bshape
+        # get cumulative sum of block sizes
+ cum = self.row_block_sizes(copy=False).cumsum()
+ assert index >= 0, 'index out of bounds'
+ assert index < cum[bm-1], 'index out of bounds'
+
+        # exit early if there is only one block-row
+ if bm <= 1:
+ return 0
+
+ ge = cum >= index
+ # find first entry that is greater or equal
+ block_index = np.argmax(ge)
+
+ if cum[block_index] == index:
+ return block_index + 1
+ return block_index
+
+ def getcol(self, j):
+ """
+ Returns MPIBlockVector of column j
+
+ Parameters
+ ----------
+ j: int
+ Column index
+
+ Returns
+ -------
+ pyomo.contrib.pynumero.sparse MPIBlockVector
+
+ """
+ # get size of the blocks to input in the vector
+ # this implicitly checks that sizes have been broadcasted beforehand
+ block_sizes = self.row_block_sizes()
+ # get block column index
+ bcol = self.get_block_column_index(j)
+ # get rank ownership
+ col_ownership = []
+ bm, bn = self.bshape
+ for i in range(bm):
+ col_ownership.append(self._rank_owner[i, bcol])
+ # create vector
+ bv = MPIBlockVector(bm,
+ col_ownership,
+ self._mpiw)
+
+ # compute offset columns
+ offset = 0
+ if bcol > 0:
+ cum_sum = self.col_block_sizes(copy=False).cumsum()
+ offset = cum_sum[bcol-1]
+
+ # populate vector
+ rank = self._mpiw.Get_rank()
+ for row_bid, owner in enumerate(col_ownership):
+ if rank == owner or owner < 0:
+ sub_matrix = self._block_matrix.get_block(row_bid, bcol)
+ if self._block_matrix.is_empty_block(row_bid, bcol):
+ v = np.zeros(self.get_row_size(row_bid))
+ elif isinstance(sub_matrix, BaseBlockMatrix):
+ v = sub_matrix.getcol(j-offset)
+ else:
+ # if it is sparse matrix transform array to vector
+ v = sub_matrix.getcol(j-offset).toarray().flatten()
+ bv.set_block(row_bid, v)
+ return bv
+
+ def getrow(self, i):
+ """
+        Returns MPIBlockVector of row i
+
+ Parameters
+ ----------
+ i: int
+ Row index
+
+ Returns
+ -------
+ pyomo.contrib.pynumero.sparse MPIBlockVector
+
+ """
+ # get size of the blocks to input in the vector
+ # this implicitly checks that sizes have been broadcasted beforehand
+ block_sizes = self.col_block_sizes()
+ # get block column index
+ brow = self.get_block_row_index(i)
+ # get rank ownership
+ row_ownership = []
+ bm, bn = self.bshape
+ for j in range(bn):
+ row_ownership.append(self._rank_owner[brow, j])
+ # create vector
+ bv = MPIBlockVector(bn,
+ row_ownership,
+ self._mpiw)
+ # compute offset columns
+ offset = 0
+ if brow > 0:
+ cum_sum = self.row_block_sizes(copy=False).cumsum()
+ offset = cum_sum[brow-1]
+ # populate vector
+ rank = self._mpiw.Get_rank()
+ for col_bid, owner in enumerate(row_ownership):
+            if rank == owner or owner < 0:
+ sub_matrix = self._block_matrix.get_block(brow, col_bid)
+ if self._block_matrix.is_empty_block(brow, col_bid):
+ v = np.zeros(self.get_col_size(col_bid))
+ elif isinstance(sub_matrix, BaseBlockMatrix):
+ v = sub_matrix.getrow(i-offset)
+ else:
+ # if it is sparse matrix transform array to vector
+ v = sub_matrix.getrow(i-offset).toarray().flatten()
+ bv.set_block(col_bid, v)
+ return bv
+
+ @staticmethod
+ def fromBlockMatrix(block_matrix, rank_ownership, mpi_comm):
+ """
+ Creates a parallel MPIBlockMatrix from blockmatrix
+
+ Parameters
+ ----------
+ block_matrix: BlockMatrix
+ The block matrix to use to create the MPIBlockMatrix
+ rank_ownership: array_like
+        2D-array with processor ownership of each block. A block can be owned by a
+        single processor or by all processors. Blocks owned by all processors have
+        ownership -1. Blocks owned by a single processor have ownership rank, where
+        rank=MPI.COMM_WORLD.Get_rank()
+ mpi_comm: MPI communicator
+        An MPI communicator. Typically MPI.COMM_WORLD
+ """
+ block_matrix_assert_block_structure(block_matrix)
+
+ # create mpi matrix
+ bm, bn = block_matrix.bshape
+ mat = MPIBlockMatrix(bm,
+ bn,
+ rank_ownership,
+ mpi_comm)
+
+ # populate matrix
+ for i, j in mat.owned_blocks:
+ mat.set_block(i, j, block_matrix.get_block(i, j))
+
+ mat.broadcast_block_sizes()
+ return mat
diff --git a/pyomo/contrib/pynumero/sparse/mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py
new file mode 100644
index 00000000000..532055263ae
--- /dev/null
+++ b/pyomo/contrib/pynumero/sparse/mpi_block_vector.py
@@ -0,0 +1,1320 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+
+from pyomo.contrib.pynumero.sparse import BlockVector
+from .base_block import BaseBlockVector
+from .block_vector import NotFullyDefinedBlockVectorError
+from .block_vector import assert_block_structure as block_vector_assert_block_structure
+from mpi4py import MPI
+import numpy as np
+import copy as cp
+import operator
+
+__all__ = ['MPIBlockVector']
+
+
+def assert_block_structure(vec):
+ if vec.has_none:
+ msg = 'Call MPIBlockVector.broadcast_block_sizes() first.'
+ raise NotFullyDefinedBlockVectorError(msg)
+
+
+class MPIBlockVector(np.ndarray, BaseBlockVector):
+ """
+ Parallel structured vector interface. This interface can be used to
+ perform parallel operations on vectors composed by vectors. The main
+ idea is to allocate vectors in different processors and make the corresponding
+ parallel calls when necessary.
+
+ Attributes
+ ----------
+ _rank_owner: numpy.ndarray
+        1D-array with processor ownership of each block. A block can be owned by a
+        single processor or by all processors. Blocks owned by all processors have
+        ownership -1. Blocks owned by a single processor have ownership rank, where
+        rank=MPI.COMM_WORLD.Get_rank()
+ _mpiw: MPI communicator
+ A communicator from the MPI space. Typically MPI.COMM_WORLD
+ _block_vector: BlockVector
+ Internal BlockVector. Blocks that belong to this processor are stored
+        in _block_vector. Blocks that do not belong to this processor are empty
+        and stored as numpy.zeros(0)
+ _owned_mask: numpy.ndarray bool
+ 1D-array that indicates if a block belongs to this processor. While
+ _rank_owner tells which processor(s) owns each block, _owned_mask tells
+ if a block is owned by this processor. Blocks that are owned by everyone
+ (i.e. ownership = -1) are True in _owned_mask
+ _owned_blocks: numpy.ndarray
+ 1D-array with block indices owned by this processor. This includes blocks
+ with ownership -1.
+ _unique_owned_blocks: numpy.ndarray
+ 1D-array with block indices owned only by this processor. This does not
+ include blocks with ownership -1.
+ _brow_lengths: numpy.ndarray
+ 1D-Array of size nblocks that specifies the length of each entry
+        in the MPIBlockVector. This is the same across all processors.
+ _undefined_brows: set
+ A set of block indices for which the blocks are still None (i.e., the dimensions
+        have not yet been set). Operations with BlockVectors require all entries to be
+        different from None.
+
+ Notes
+ ------
+ This is the parallel implementation of pyomo.contrib.pynumero.sparse.BlockVector
+
+ Parameters
+ -------------------
+ nblocks: int
+ number of blocks contained in the block vector
+ rank_owner: array_like
+ Array_like of size nblocks. Each entry defines ownership of each block.
+ There are two types of ownership. Block that are owned by all processor,
+ and blocks owned by a single processor. If a block is owned by all
+ processors then its ownership is -1. Otherwise, if a block is owned by
+ a single processor, then its ownership is equal to the rank of the
+ processor.
+    mpi_comm: MPI communicator
+        An MPI communicator. Typically MPI.COMM_WORLD
+ """
+
+ def __new__(cls, nblocks, rank_owner, mpi_comm):
+
+ assert isinstance(nblocks, int)
+ assert len(rank_owner) == nblocks
+
+ blocks = [None for i in range(nblocks)]
+ arr = np.asarray(blocks, dtype='object')
+ obj = arr.view(cls)
+
+ obj._rank_owner = np.array([i for i in rank_owner])
+ obj._mpiw = mpi_comm
+ obj._block_vector = BlockVector(nblocks)
+
+ rank = obj._mpiw.Get_rank()
+ comm_size = obj._mpiw.Get_size()
+ assert np.all(obj._rank_owner < comm_size)
+
+ # Determine which blocks are owned by this processor
+ obj._owned_blocks = list()
+ obj._unique_owned_blocks = list()
+ obj._owned_mask = np.zeros(nblocks, dtype=bool)
+ for i, owner in enumerate(obj._rank_owner):
+ if owner == rank or owner < 0:
+ obj._owned_blocks.append(i)
+ obj._owned_mask[i] = True
+ if owner == rank:
+ obj._unique_owned_blocks.append(i)
+
+ # containers that facilitate looping
+ obj._owned_blocks = np.array(obj._owned_blocks)
+ obj._unique_owned_blocks = np.array(obj._unique_owned_blocks)
+ obj._brow_lengths = np.empty(nblocks, dtype=np.float64)
+ obj._brow_lengths.fill(np.nan)
+ obj._undefined_brows = set(range(nblocks))
+
+ # make some pointers unmutable. These arrays don't change after
+ # MPIBlockVector has been created
+ obj._rank_owner.flags.writeable = False
+ obj._owned_blocks.flags.writeable = False
+ obj._owned_mask.flags.writeable = False
+ obj._unique_owned_blocks.flags.writeable = False
+
+ return obj
+
+ def __init__(self, nblocks, rank_owner, mpi_comm):
+ # Note: this requires communication but is disabled when assertions
+ # are turned off
+ assert self._assert_correct_owners(), \
+ 'rank_owner must be the same in all processors'
+
+ def __array_prepare__(self, out_arr, context=None):
+        return super(MPIBlockVector, self).__array_prepare__(out_arr, context)
+
+ def __array_wrap__(self, out_arr, context=None):
+        return super(MPIBlockVector, self).__array_wrap__(out_arr, context)
+
+ def __array_ufunc__(self, ufunc, method, *inputs, **kwargs):
+ """Runs ufuncs speciallizations to MPIBlockVector"""
+ # functions that take one vector
+ unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil,
+ np.floor, np.tan, np.arctan, np.arcsin,
+ np.arccos, np.sinh, np.cosh, np.abs,
+ np.tanh, np.arccosh, np.arcsinh, np.arctanh,
+ np.fabs, np.sqrt, np.log, np.log2, np.absolute,
+ np.isfinite, np.isinf, np.isnan, np.log1p,
+ np.logical_not, np.expm1, np.exp2, np.sign,
+ np.rint, np.square, np.positive, np.negative,
+ np.rad2deg, np.deg2rad, np.conjugate, np.reciprocal,
+ np.signbit]
+ # functions that take two vectors
+ binary_funcs = [np.add, np.multiply, np.divide, np.subtract,
+ np.greater, np.greater_equal, np.less, np.less_equal,
+ np.not_equal, np.maximum, np.minimum, np.fmax,
+ np.fmin, np.equal, np.logical_and,
+ np.logical_or, np.logical_xor, np.logaddexp,
+ np.logaddexp2, np.remainder, np.heaviside,
+ np.hypot]
+
+ outputs = kwargs.pop('out', None)
+ if outputs is not None:
+ raise NotImplementedError(str(ufunc) + ' cannot be used with MPIBlockVector if the out keyword argument is given.')
+
+ if ufunc in unary_funcs:
+ results = self._unary_operation(ufunc, method, *inputs, **kwargs)
+ return results
+ elif ufunc in binary_funcs:
+ results = self._binary_operation(ufunc, method, *inputs, **kwargs)
+ return results
+ else:
+ raise NotImplementedError(str(ufunc) + "not supported for MPIBlockVector")
+
+ def _unary_operation(self, ufunc, method, *args, **kwargs):
+ """Run recursion to perform unary_funcs on MPIBlockVector"""
+ # ToDo: deal with out
+ x = args[0]
+
+ if isinstance(x, MPIBlockVector):
+ rank = self._mpiw.Get_rank()
+ v = x.copy_structure()
+ for i in self._owned_blocks:
+ _args = [x.get_block(i)] + [args[j] for j in range(1, len(args))]
+ v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs))
+ return v
+ elif isinstance(x, BlockVector):
+ v = BlockVector(x.nblocks)
+ for i in range(x.nblocks):
+ _args = [x.get_block(i)] + [args[j] for j in range(1, len(args))]
+ v.set_block(i, self._unary_operation(ufunc, method, *_args, **kwargs))
+ return v
+ elif type(x) == np.ndarray:
+ return super(MPIBlockVector, self).__array_ufunc__(ufunc, method,
+ *args, **kwargs)
+ else:
+ raise NotImplementedError()
+
+ def _binary_operation(self, ufunc, method, *args, **kwargs):
+ """Run recursion to perform binary_funcs on MPIBlockVector"""
+ # ToDo: deal with out
+ x1 = args[0]
+ x2 = args[1]
+ if isinstance(x1, MPIBlockVector) and isinstance(x2, MPIBlockVector):
+
+ msg = 'BlockVectors must be distributed in same processors'
+ assert np.array_equal(x1._rank_owner, x2._rank_owner), msg
+ assert x1._mpiw == x2._mpiw, 'Need to have same communicator'
+
+ res = x1.copy_structure()
+ for i in x1._owned_blocks:
+ _args = [x1.get_block(i)] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
+ return res
+ elif isinstance(x1, BlockVector) and isinstance(x2, MPIBlockVector):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(x1, MPIBlockVector) and isinstance(x2, BlockVector):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(x1, MPIBlockVector) and np.isscalar(x2):
+ res = x1.copy_structure()
+ for i in x1._owned_blocks:
+ _args = [x1.get_block(i)] + [x2] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
+ return res
+ elif isinstance(x2, MPIBlockVector) and np.isscalar(x1):
+ res = x2.copy_structure()
+ for i in x2._owned_blocks:
+ _args = [x1] + [x2.get_block(i)] + [args[j] for j in range(2, len(args))]
+ res.set_block(i, self._binary_operation(ufunc, method, *_args, **kwargs))
+ return res
+ elif isinstance(x1, MPIBlockVector) and type(x2)==np.ndarray:
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(x2, MPIBlockVector) and type(x1)==np.ndarray:
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(x1, np.ndarray) and isinstance(x2, np.ndarray):
+ # this will take care of blockvector and ndarrays
+ return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs)
+ elif (type(x1)==BlockVector or np.isscalar(x1)) and (type(x2)==BlockVector or np.isscalar(x2)):
+ return self._block_vector.__array_ufunc__(ufunc, method, *args, **kwargs)
+ elif (type(x1)==np.ndarray or np.isscalar(x1)) and (type(x2)==np.ndarray or np.isscalar(x2)):
+ return super(MPIBlockVector, self).__array_ufunc__(ufunc, method,
+ *args, **kwargs)
+ else:
+ raise NotImplementedError()
+
+ @property
+ def nblocks(self):
+ """
+ Returns the number of blocks.
+ """
+ return self._block_vector.nblocks
+
+ @property
+ def bshape(self):
+ """
+ Returns the number of blocks in this MPIBlockVector in a tuple.
+ """
+ return self.nblocks,
+
+ @property
+ def shape(self):
+ """
+ Returns total number of elements in the MPIBlockVector
+ """
+ assert_block_structure(self)
+ return np.sum(self._brow_lengths),
+
+ @property
+ def size(self):
+ """
+ Returns total number of elements in this MPIBlockVector
+ """
+ assert_block_structure(self)
+ return np.sum(self._brow_lengths)
+
+ @property
+ def ndim(self):
+ """
+ Returns dimension of this MPIBlockVector
+ """
+ return 1
+
+ @property
+ def has_none(self):
+ """
+ Returns True if block vector has none entry
+ """
+ return len(self._undefined_brows) != 0
+
+ @property
+ def owned_blocks(self):
+ """
+        Returns list with indices of blocks owned by this processor.
+ """
+ return self._owned_blocks
+
+ @property
+ def shared_blocks(self):
+ """
+        Returns list with indices of blocks shared by all processors
+ """
+ return np.array([i for i in range(self.nblocks) if self._rank_owner[i] < 0])
+
+ @property
+ def rank_ownership(self):
+ """
+ Returns 1D-Array with processor ranks that own each block. The ownership
+ of blocks that are owned by all processors is -1.
+ """
+ return self._rank_owner
+
+ @property
+ def ownership_mask(self):
+ """
+ Returns boolean 1D-Array that indicates which blocks are owned by
+ this processor
+ """
+ return self._owned_mask
+
+ @property
+ def mpi_comm(self):
+ """Returns MPI communicator"""
+ return self._mpiw
+
+ def block_sizes(self, copy=True):
+ """
+ Returns 1D-Array with sizes of individual blocks in this MPIBlockVector
+ """
+ assert_block_structure(self)
+ if copy:
+ return self._brow_lengths.copy()
+ return self._brow_lengths
+
+ def get_block_size(self, ndx):
+ if ndx in self._undefined_brows:
+ raise NotFullyDefinedBlockVectorError('The dimensions of the requested block are not defined.')
+ return self._brow_lengths[ndx]
+
+ def _set_block_size(self, ndx, size):
+ if ndx in self._undefined_brows:
+ self._undefined_brows.remove(ndx)
+ self._brow_lengths[ndx] = size
+ if len(self._undefined_brows) == 0:
+ self._brow_lengths = np.asarray(self._brow_lengths, dtype=np.int64)
+ else:
+ if self._brow_lengths[ndx] != size:
+ raise ValueError('Incompatible dimensions for block {ndx}; '
+ 'got {got}; expected {exp}'.format(ndx=ndx,
+ got=size,
+ exp=self._brow_lengths[ndx]))
+
+ # Note: this operation requires communication
+ def broadcast_block_sizes(self):
+ """
+ Send sizes of all blocks to all processors. After this method is called
+        this MPIBlockVector knows its dimensions across all blocks. This method
+ must be called before running any operations with the MPIBlockVector.
+ """
+ rank = self._mpiw.Get_rank()
+ num_processors = self._mpiw.Get_size()
+
+ local_length_data = np.empty(self.nblocks, dtype=np.int64)
+ local_length_data.fill(-1)
+ for ndx in self.owned_blocks:
+ if ndx in self._undefined_brows:
+ raise NotFullyDefinedBlockVectorError('Block {ndx} is owned by rank {rank}, '
+ 'but the dimensions for block {ndx} '
+ 'have not yet been specified in rank {rank}. '
+ 'Please specify all owned blocks.'.format(ndx=ndx,
+ rank=rank))
+ local_length_data[ndx] = self.get_block_size(ndx)
+ receive_data = np.empty(num_processors * self.nblocks, dtype=np.int64)
+ self._mpiw.Allgather(local_length_data, receive_data)
+
+ proc_dims = np.split(receive_data, num_processors)
+
+ for i in range(self.nblocks):
+ block_length = set()
+ for k in range(num_processors):
+ processor_sizes = proc_dims[k]
+ block_length.add(processor_sizes[i])
+ if len(block_length) > 2:
+                msg = 'Block {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ elif len(block_length) == 2:
+ if -1 not in block_length:
+                    msg = 'Block {} has more than one dimension across processors'.format(i)
+ raise RuntimeError(msg)
+ block_length.remove(-1)
+ elif -1 in block_length:
+ msg = 'The dimension of block {} was not specified in any process'.format(i)
+
+ # here block_length must only have one element
+ self._set_block_size(i, block_length.pop())
+
+ # Note: this requires communication but is only run in __new__
+ def _assert_correct_owners(self, root=0):
+
+ rank = self._mpiw.Get_rank()
+ num_processors = self._mpiw.Get_size()
+
+ if num_processors == 1:
+ return True
+
+ local_owners = self._rank_owner.copy()
+ receive_data = None
+ if rank == root:
+ receive_data = np.empty(self.nblocks * num_processors,
+ dtype=np.int64)
+
+ self._mpiw.Gather(local_owners, receive_data, root=root)
+
+ if rank == root:
+ owners_in_processor = np.split(receive_data, num_processors)
+ root_rank_owners = owners_in_processor[root]
+ for i in range(self.nblocks):
+ for k in range(num_processors):
+ if k != root:
+ if owners_in_processor[k][i] != root_rank_owners[i]:
+ return False
+        return True
+
+ def all(self, axis=None, out=None, keepdims=False):
+ """
+ Returns True if all elements evaluate to True.
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ local = 1
+ for i in self._owned_blocks:
+ local *= self._block_vector.get_block(i).all()
+
+ return bool(self._mpiw.allreduce(local, op=MPI.PROD))
+
+ def any(self, axis=None, out=None, keepdims=False):
+ """
+        Returns True if any element evaluates to True.
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ local = 0
+ for i in self._owned_blocks:
+ local += self._block_vector.get_block(i).any()
+
+ return bool(self._mpiw.allreduce(local, op=MPI.SUM))
+
+ def min(self, axis=None, out=None, keepdims=False):
+ """
+ Returns the smallest value stored in the vector
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ local_min = np.inf
+ for i in self._owned_blocks:
+ lmin = self._block_vector.get_block(i).min()
+ if lmin <= local_min:
+ local_min = lmin
+ return self._mpiw.allreduce(local_min, op=MPI.MIN)
+
+ def max(self, axis=None, out=None, keepdims=False):
+ """
+ Returns the largest value stored in this MPIBlockVector
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ local_max = -np.inf
+ for i in self._owned_blocks:
+ lmax = self._block_vector.get_block(i).max()
+ if lmax >= local_max:
+ local_max = lmax
+ return self._mpiw.allreduce(local_max, op=MPI.MAX)
+
+ def sum(self, axis=None, dtype=None, out=None, keepdims=False):
+ """
+ Returns the sum of all entries in this MPIBlockVector
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ rank = self._mpiw.Get_rank()
+ indices = self._unique_owned_blocks if rank != 0 else self._owned_blocks
+
+ local_sum = 0.0
+ for i in indices:
+ local_sum += self._block_vector.get_block(i).sum(axis=axis, dtype=dtype)
+
+ return self._mpiw.allreduce(local_sum, op=MPI.SUM)
+
+ def prod(self, axis=None, dtype=None, out=None, keepdims=False):
+ """
+ Returns the product of all entries in this MPIBlockVector
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ rank = self._mpiw.Get_rank()
+ indices = self._unique_owned_blocks if rank != 0 else self._owned_blocks
+
+ local_prod = 1.0
+ for i in indices:
+ local_prod *= self._block_vector.get_block(i).prod(axis=axis, dtype=dtype)
+ return self._mpiw.allreduce(local_prod, op=MPI.PROD)
+
+ def mean(self, axis=None, dtype=None, out=None, keepdims=False):
+ """
+ Returns the average of all entries in this MPIBlockVector
+ """
+ return self.sum(out=out)/self.size
+
+ def conj(self):
+ """
+ Complex-conjugate all elements.
+ """
+ assert_block_structure(self)
+ result = self.copy_structure()
+ for i in self._owned_blocks:
+ result.set_block(i, self.get_block(i).conj())
+ return result
+
+ def conjugate(self):
+ """
+ Complex-conjugate all elements.
+ """
+ return self.conj()
+
+ def nonzero(self):
+ """
+ Returns the indices of the elements that are non-zero.
+ """
+ result = MPIBlockVector(nblocks=self.nblocks, rank_owner=self.rank_ownership, mpi_comm=self.mpi_comm)
+ assert_block_structure(self)
+ for i in self._owned_blocks:
+ result.set_block(i, self._block_vector.get_block(i).nonzero()[0])
+ result.broadcast_block_sizes()
+ return (result,)
+
+ def round(self, decimals=0, out=None):
+ """
+ Return MPIBlockVector with each element rounded to the given number of decimals
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ result = self.copy_structure()
+ for i in self._owned_blocks:
+ result.set_block(i, self._block_vector.get_block(i).round(decimals=decimals))
+ return result
+
+ def clip(self, min=None, max=None, out=None):
+ """
+ Return MPIBlockVector whose values are limited to [min, max].
+ One of max or min must be given.
+
+ Parameters
+ ----------
+ min: scalar_like, optional
+ Minimum value. If None, clipping is not performed on lower interval edge.
+ max: scalar_like, optional
+ Maximum value. If None, clipping is not performed on upper interval edge.
+
+ Returns
+ -------
+ MPIBlockVector
+
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ result = self.copy_structure()
+ for i in self._owned_blocks:
+ result.set_block(i, self._block_vector.get_block(i).clip(min=min, max=max))
+ return result
+
+ def compress(self, condition, axis=None, out=None):
+ """
+ Return selected slices of each subblock.
+
+ Parameters
+ ----------
+        condition: MPIBlockVector
+            Boolean vector selecting entries to keep (where condition is True)
+
+ Returns
+ -------
+ MPIBlockVector
+
+ """
+ assert out is None, 'Out keyword not supported'
+ assert_block_structure(self)
+ result = MPIBlockVector(nblocks=self.nblocks, rank_owner=self.rank_ownership, mpi_comm=self.mpi_comm)
+ if isinstance(condition, MPIBlockVector):
+ # Note: do not need to check same size? this is checked implicitly
+ msg = 'BlockVectors must be distributed in same processors'
+ assert np.array_equal(self._rank_owner, condition._rank_owner), msg
+ assert self._mpiw == condition._mpiw, 'Need to have same communicator'
+ for i in self._owned_blocks:
+ result.set_block(i, self.get_block(i).compress(condition.get_block(i)))
+ result.broadcast_block_sizes()
+ return result
+ if isinstance(condition, BlockVector):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(condition, np.ndarray):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def copyfrom(self, other):
+ """
+ Copy entries of other into this MPIBlockVector
+
+ Parameters
+ ----------
+ other: MPIBlockVector or BlockVector
+
+ Returns
+ -------
+ None
+ """
+ if isinstance(other, MPIBlockVector):
+ assert_block_structure(other)
+ msg = 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks)
+ assert self.nblocks == other.nblocks, msg
+ msg = 'BlockVectors must be distributed in same processors'
+ assert np.array_equal(self._rank_owner, other.rank_ownership), msg
+ assert self._mpiw == other._mpiw, 'Need to have same communicator'
+
+ for i in self._owned_blocks:
+ self.set_block(i, other.get_block(i).copy())
+
+ elif isinstance(other, BlockVector):
+ block_vector_assert_block_structure(other)
+ msg = 'Number of blocks mismatch {} != {}'.format(self.nblocks, other.nblocks)
+ assert self.nblocks == other.nblocks, msg
+ for i in self._owned_blocks:
+ self.set_block(i, other.get_block(i).copy())
+ elif isinstance(other, np.ndarray):
+ assert_block_structure(self)
+ assert self.shape == other.shape, 'Dimension mismatch {} != {}'.format(self.shape, other.shape)
+ offset = 0
+ for idx in range(self.nblocks):
+ if self._owned_mask[idx]:
+ subarray = other[offset: offset + self.get_block_size(idx)]
+ if isinstance(self.get_block(idx), BlockVector):
+ self.get_block(idx).copyfrom(subarray)
+ else:
+ np.copyto(self.get_block(idx), subarray)
+ offset += self.get_block_size(idx)
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def copyto(self, other):
+ """
+ Copy entries of this MPIBlockVector into other
+
+ Parameters
+ ----------
+ other: MPIBlockVector or BlockVector
+
+ Returns
+ -------
+ None
+
+ """
+ if isinstance(other, MPIBlockVector):
+ other.copyfrom(self)
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def set_blocks(self, blocks):
+ """
+ Assigns vectors in blocks
+
+ Parameters
+ ----------
+ blocks: list
+ list of vectors
+
+ Returns
+ -------
+ None
+ """
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def clone(self, value=None, copy=True):
+ """
+ Returns a copy of this MPIBlockVector
+
+ Parameters
+ ----------
+ value: scalar, optional
+ all entries of the cloned vector are set to this value
+ copy: bool, optional
+ if set to true makes a deepcopy of each block in this vector. default True
+
+ Returns
+ -------
+ MPIBlockVector
+ """
+ result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm)
+ result._block_vector = self._block_vector.clone(copy=copy)
+ result._brow_lengths = self._brow_lengths.copy()
+ result._undefined_brows = set(self._undefined_brows)
+ if value is not None:
+ result.fill(value)
+ return result
+
+ def copy(self, order='C'):
+ """
+ Returns a copy of the MPIBlockVector
+ """
+ result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm)
+ result._block_vector = self._block_vector.copy(order=order)
+ result._brow_lengths = self._brow_lengths.copy()
+ result._undefined_brows = set(self._undefined_brows)
+ return result
+
+ def copy_structure(self):
+ """
+ Returns a copy of the MPIBlockVector structure filled with zeros
+ """
+ result = MPIBlockVector(self.nblocks, self.rank_ownership, self.mpi_comm)
+ result._block_vector = self._block_vector.copy_structure()
+ result._brow_lengths = self._brow_lengths.copy()
+ result._undefined_brows = set(self._undefined_brows)
+ return result
+
+ def fill(self, value):
+ """
+ Fills the MPIBlockVector with a scalar value.
+
+ Parameters
+ ----------
+ value : scalar
+ All elements in the vector will be assigned this value
+
+ Returns
+ -------
+ None
+
+ """
+ assert_block_structure(self)
+ for idx in self.owned_blocks:
+ self.get_block(idx).fill(value)
+
+ def dot(self, other, out=None):
+ """
+ Returns dot product
+
+ Parameters
+ ----------
+ other : MPIBlockVector
+
+ Returns
+ -------
+ float
+
+ """
+ assert_block_structure(self)
+ assert out is None
+ if isinstance(other, MPIBlockVector):
+ assert_block_structure(other)
+ msg = 'BlockVectors must be distributed in same processors'
+ assert np.array_equal(self.rank_ownership, other.rank_ownership), msg
+ assert self.mpi_comm == other.mpi_comm, 'Need to have same communicator'
+
+ rank = self._mpiw.Get_rank()
+ indices = self._unique_owned_blocks if rank != 0 else self._owned_blocks
+ local_dot_prod = 0.0
+ for i in indices:
+ local_dot_prod += self._block_vector.get_block(i).dot(other.get_block(i))
+
+ return self._mpiw.allreduce(local_dot_prod, op=MPI.SUM)
+ elif isinstance(other, BlockVector):
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks)
+ return self.dot(other.toMPIBlockVector(self.rank_ownership, self.mpi_comm))
+ elif isinstance(other, np.ndarray):
+ assert self.shape == other.shape, 'Dimension mismatch: {} != {}'.format(self.shape, other.shape)
+ other_bv = self.copy_structure()
+ other_bv.copyfrom(other)
+ return self.dot(other_bv)
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ @staticmethod
+ def _serialize_structure(block_vector):
+ """
+ Parameters
+ ----------
+ block_vector: BlockVector
+
+ Returns
+ -------
+ list
+ """
+ serialized_structure = list()
+ for ndx in range(block_vector.nblocks):
+ blk = block_vector.get_block(ndx)
+ if isinstance(blk, BlockVector):
+ serialized_structure.append(-1)
+ serialized_structure.append(blk.nblocks)
+ serialized_structure.extend(MPIBlockVector._serialize_structure(blk))
+ elif isinstance(blk, MPIBlockVector):
+ raise NotImplementedError('Operation not supported for MPIBlockVectors containing other MPIBlockVectors')
+ elif isinstance(blk, np.ndarray):
+ serialized_structure.append(-2)
+ serialized_structure.append(blk.size)
+ else:
+ raise NotImplementedError('Unrecognized input.')
+ return serialized_structure
+
+ @staticmethod
+ def _create_from_serialized_structure(serialized_structure, structure_ndx, result):
+ """
+ Parameters
+ ----------
+ serialized_structure: np.ndarray
+ structure_ndx: int
+ result: BlockVector
+
+ Returns
+ -------
+ structure_ndx: int
+ """
+ for ndx in range(result.nblocks):
+ if serialized_structure[structure_ndx] == -1:
+ structure_ndx += 1
+ block = BlockVector(serialized_structure[structure_ndx])
+ structure_ndx += 1
+ structure_ndx = MPIBlockVector._create_from_serialized_structure(serialized_structure,
+ structure_ndx,
+ block)
+ result.set_block(ndx, block)
+ elif serialized_structure[structure_ndx] == -2:
+ structure_ndx += 1
+ result.set_block(ndx, np.zeros(serialized_structure[structure_ndx]))
+ structure_ndx += 1
+ else:
+ raise ValueError('Unrecognized structure')
+ return structure_ndx
+
+ def make_local_structure_copy(self):
+ """
+ Creates a BlockVector with the same structure as the MPIBlockVector
+
+ Returns
+ -------
+ BlockVector
+ """
+ """
+ We do this by serializing the structure, then gathering it.
+ To serialize the structure, we use an array. The first number indicates if the first block is a numpy array
+ or a BlockVector. We use -1 to indicate a BlockVector and -2 to indicate a numpy array. If the block is a
+ BlockVector, then the next number is a positive integer specifying the number of blocks in the block vector.
+ If the block is a numpy array, then the next number is a positive integer specifying the size of the array.
+ After the number of blocks in a BlockVector is specified, we follow the same procedure to specify the
+ structure of that BlockVector.
+ """
+ assert_block_structure(self)
+ serialized_structure_by_block = dict()
+ length_per_block = np.zeros(self.nblocks, dtype=np.int64)
+ rank = self._mpiw.Get_rank()
+ if rank == 0:
+ block_indices = self._owned_blocks
+ else:
+ block_indices = self._unique_owned_blocks
+ for ndx in block_indices:
+ blk = self.get_block(ndx)
+ blk_structure = list()
+ if isinstance(blk, BlockVector):
+ blk_structure.append(-1)
+ blk_structure.append(blk.nblocks)
+ blk_structure.extend(self._serialize_structure(blk))
+ elif isinstance(blk, MPIBlockVector):
+ raise NotImplementedError('Operation not supported for MPIBlockVectors containing other MPIBlockVectors')
+ elif isinstance(blk, np.ndarray):
+ blk_structure.append(-2)
+ blk_structure.append(blk.size)
+ else:
+ raise NotImplementedError('Unrecognized input.')
+ length_per_block[ndx] = len(blk_structure)
+ serialized_structure_by_block[ndx] = np.asarray(blk_structure, dtype=np.int64)
+
+ global_length_per_block = np.zeros(self.nblocks, dtype=np.int64)
+ self._mpiw.Allreduce(length_per_block, global_length_per_block)
+ local_serialized_structure = np.zeros(global_length_per_block.sum(), dtype=np.int64)
+
+ offset = 0
+ block_indices_set = set(block_indices)
+ for ndx in range(self.nblocks):
+ if ndx in block_indices_set:
+ local_serialized_structure[offset: offset+global_length_per_block[ndx]] = serialized_structure_by_block[ndx]
+ offset += global_length_per_block[ndx]
+ global_serialized_structure = np.zeros(global_length_per_block.sum(), dtype=np.int64)
+ self._mpiw.Allreduce(local_serialized_structure, global_serialized_structure)
+
+ result = BlockVector(self.nblocks)
+ structure_ndx = 0
+ self._create_from_serialized_structure(global_serialized_structure, structure_ndx, result)
+
+ return result
+
+ def make_local_copy(self):
+ """
+ Copies the MPIBlockVector into a BlockVector
+
+ Returns
+ -------
+ BlockVector
+ """
+ assert_block_structure(self)
+ result = self.make_local_structure_copy()
+
+ local_data = np.zeros(self.size)
+ global_data = np.zeros(self.size)
+
+ offset = 0
+ rank = self._mpiw.Get_rank()
+ if rank == 0:
+ block_indices = set(self._owned_blocks)
+ else:
+ block_indices = set(self._unique_owned_blocks)
+ for ndx in range(self.nblocks):
+ if ndx in block_indices:
+ blk = self.get_block(ndx)
+ if isinstance(blk, BlockVector):
+ local_data[offset: offset + self.get_block_size(ndx)] = blk.flatten()
+ elif isinstance(blk, np.ndarray):
+ local_data[offset: offset + self.get_block_size(ndx)] = blk
+ else:
+ raise ValueError('Unrecognized block type')
+ offset += self.get_block_size(ndx)
+
+ self._mpiw.Allreduce(local_data, global_data)
+ result.copyfrom(global_data)
+
+ return result
+
+ def _binary_operation_helper(self, other, operation):
+ assert_block_structure(self)
+ result = self.copy_structure()
+ if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector):
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks)
+ if isinstance(other, MPIBlockVector):
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockVectors must be distributed in same processors'
+ assert self._mpiw == other._mpiw, 'Need to have same communicator'
+ for i in self._owned_blocks:
+ result.set_block(i, operation(self.get_block(i), other.get_block(i)))
+ return result
+ elif isinstance(other, np.ndarray):
+ _tmp = self.copy_structure()
+ _tmp.copyfrom(other)
+ return self._binary_operation_helper(_tmp, operation)
+ elif np.isscalar(other):
+ for i in self._owned_blocks:
+ result.set_block(i, operation(self.get_block(i), other))
+ return result
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def _reverse_binary_operation_helper(self, other, operation):
+ assert_block_structure(self)
+ result = self.copy_structure()
+ if isinstance(other, BlockVector):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(other, np.ndarray):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif np.isscalar(other):
+ for i in self._owned_blocks:
+ result.set_block(i, operation(other, self.get_block(i)))
+ return result
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def _inplace_binary_operation_helper(self, other, operation):
+ assert_block_structure(self)
+ if isinstance(other, MPIBlockVector) or isinstance(other, BlockVector):
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks)
+ if isinstance(other, MPIBlockVector):
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockVectors must be distributed in same processors'
+ assert self._mpiw == other._mpiw, 'Need to have same communicator'
+ assert_block_structure(other)
+ else:
+ block_vector_assert_block_structure(other)
+
+ for i in self._owned_blocks:
+ blk = self.get_block(i)
+ operation(blk, other.get_block(i))
+ self.set_block(i, blk)
+ return self
+ elif isinstance(other, np.ndarray):
+ _tmp = self.copy_structure()
+ _tmp.copyfrom(other)
+ return self._inplace_binary_operation_helper(_tmp, operation)
+ elif np.isscalar(other):
+ for i in self._owned_blocks:
+ blk = self.get_block(i)
+ operation(blk, other)
+ self.set_block(i, blk)
+ return self
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def __add__(self, other):
+ return self._binary_operation_helper(other, operator.add)
+
+ def __radd__(self, other):
+ return self.__add__(other)
+
+ def __sub__(self, other):
+ return self._binary_operation_helper(other, operator.sub)
+
+ def __rsub__(self, other):
+ return self._reverse_binary_operation_helper(other, operator.sub)
+
+ def __mul__(self, other):
+ return self._binary_operation_helper(other, operator.mul)
+
+ def __rmul__(self, other):
+ return self.__mul__(other)
+
+ def __truediv__(self, other):
+ return self._binary_operation_helper(other, operator.truediv)
+
+ def __rtruediv__(self, other):
+ return self._reverse_binary_operation_helper(other, operator.truediv)
+
+ def __floordiv__(self, other):
+ return self._binary_operation_helper(other, operator.floordiv)
+
+ def __rfloordiv__(self, other):
+ return self._reverse_binary_operation_helper(other, operator.floordiv)
+
+ def __neg__(self):
+ assert_block_structure(self)
+ result = self.copy_structure()
+ for ndx in self._owned_blocks:
+ result.set_block(ndx, -self.get_block(ndx))
+ return result
+
+ def __iadd__(self, other):
+ return self._inplace_binary_operation_helper(other, operator.iadd)
+
+ def __isub__(self, other):
+ return self._inplace_binary_operation_helper(other, operator.isub)
+
+ def __imul__(self, other):
+ return self._inplace_binary_operation_helper(other, operator.imul)
+
+ def __itruediv__(self, other):
+ return self._inplace_binary_operation_helper(other, operator.itruediv)
+
+ def __div__(self, other):
+ return self.__truediv__(other)
+
+ def __rdiv__(self, other):
+ return self.__rtruediv__(other)
+
+ def __idiv__(self, other):
+ return self.__itruediv__(other)
+
+ def _comparison_helper(self, other, operation):
+ assert_block_structure(self)
+ result = self.copy_structure()
+ if isinstance(other, MPIBlockVector):
+ assert_block_structure(other)
+ assert self.nblocks == other.nblocks, \
+ 'Number of blocks mismatch: {} != {}'.format(self.nblocks, other.nblocks)
+ assert np.array_equal(self._rank_owner, other._rank_owner), \
+ 'MPIBlockVectors must be distributed in same processors'
+ assert self._mpiw == other._mpiw, 'Need to have same communicator'
+
+ for i in self._owned_blocks:
+ result.set_block(i, operation(self.get_block(i), other.get_block(i)))
+ return result
+ elif isinstance(other, BlockVector):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif isinstance(other, np.ndarray):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+ elif np.isscalar(other):
+ for i in self._owned_blocks:
+ result.set_block(i, operation(self.get_block(i), other))
+ return result
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def __le__(self, other):
+ return self._comparison_helper(other, operator.le)
+
+ def __lt__(self, other):
+ return self._comparison_helper(other, operator.lt)
+
+ def __ge__(self, other):
+ return self._comparison_helper(other, operator.ge)
+
+ def __gt__(self, other):
+ return self._comparison_helper(other, operator.gt)
+
+ def __eq__(self, other):
+ return self._comparison_helper(other, operator.eq)
+
+ def __ne__(self, other):
+ return self._comparison_helper(other, operator.ne)
+
+ def __contains__(self, item):
+ other = item
+ assert_block_structure(self)
+ if np.isscalar(other):
+ contains = False
+ for i in self._owned_blocks:
+ if other in self.get_block(i):
+ contains = True
+ return bool(self._mpiw.allreduce(contains, op=MPI.SUM))
+ else:
+ raise NotImplementedError('Operation not supported by MPIBlockVector')
+
+ def get_block(self, key):
+ owner = self._rank_owner[key]
+ rank = self._mpiw.Get_rank()
+ assert owner == rank or owner < 0, 'Block {} not owned by processor {}'.format(key, rank)
+ return self._block_vector.get_block(key)
+
+ def set_block(self, key, value):
+ owner = self._rank_owner[key]
+ rank = self._mpiw.Get_rank()
+ assert owner == rank or owner < 0, \
+ 'Block {} not owned by processor {}'.format(key, rank)
+
+ self._block_vector.set_block(key, value)
+ self._set_block_size(key, value.size)
+
+ def _has_equal_structure(self, other):
+ if not (isinstance(other, MPIBlockVector) or isinstance(other, BlockVector)):
+ return False
+ if self.nblocks != other.nblocks:
+ return False
+ if isinstance(other, MPIBlockVector):
+ if (self.owned_blocks != other.owned_blocks).any():
+ return False
+ for ndx in self.owned_blocks:
+ block1 = self.get_block(ndx)
+ block2 = other.get_block(ndx)
+ if isinstance(block1, BlockVector):
+ if not isinstance(block2, BlockVector):
+ return False
+ if not block1._has_equal_structure(block2):
+ return False
+ elif isinstance(block2, BlockVector):
+ return False
+ return True
+
+ def __getitem__(self, item):
+ if not self._has_equal_structure(item):
+ raise ValueError('MPIBlockVector.__getitem__ only accepts slices in the form of MPIBlockVectors of the same structure')
+ res = self.copy_structure()
+ for ndx in self.owned_blocks:
+ block = self.get_block(ndx)
+ res.set_block(ndx, block[item.get_block(ndx)])
+ return res
+
+ def __setitem__(self, key, value):
+ if not (self._has_equal_structure(key) and (self._has_equal_structure(value) or np.isscalar(value))):
+ raise ValueError(
+ 'MPIBlockVector.__setitem__ only accepts slices in the form of MPIBlockVectors of the same structure')
+ if np.isscalar(value):
+ for ndx in self.owned_blocks:
+ block = self.get_block(ndx)
+ block[key.get_block(ndx)] = value
+ else:
+ for ndx in self.owned_blocks:
+ block = self.get_block(ndx)
+ block[key.get_block(ndx)] = value.get_block(ndx)
+
+ def __str__(self):
+ msg = '{}{}:\n'.format(self.__class__.__name__, self.bshape)
+ for idx in range(self.nblocks):
+ msg += '{}: Owned by processor {}\n'.format(idx, self._rank_owner[idx])
+
+ return msg
+
+ def __repr__(self):
+ return '{}{}'.format(self.__class__.__name__, self.bshape)
+
+ def pprint(self, root=0):
+ """Prints BlockVector in pretty format"""
+ assert_block_structure(self)
+ msg = self.__repr__() + '\n'
+ num_processors = self._mpiw.Get_size()
+ local_mask = self._owned_mask.flatten()
+ receive_data = np.empty(num_processors * self.nblocks,
+ dtype=bool)
+ self._mpiw.Allgather(local_mask, receive_data)
+ processor_to_mask = np.split(receive_data, num_processors)
+
+ global_mask = np.zeros(self.nblocks, dtype=bool)
+
+ for bid in range(self.nblocks):
+ owner = self._rank_owner[bid]
+ if owner >= 0:
+ global_mask[bid] = processor_to_mask[owner][bid]
+ else:
+ # checks only the mask of one of them since all must have the same
+ global_mask[bid] = processor_to_mask[0][bid]
+
+ disp_owner = self._rank_owner[bid] if self._rank_owner[bid] >= 0 else 'All'
+ is_none = '' if global_mask[bid] else 'None'
+ repn = 'Owned by {} Shape({},){}'.format(disp_owner,
+ self._brow_lengths[bid],
+ is_none)
+ msg += '{}: {}\n'.format(bid, repn)
+ if self._mpiw.Get_rank() == root:
+ print(msg)
+
+ def __len__(self):
+ return self.nblocks
+
+ def __iter__(self):
+ raise NotImplementedError('Not supported by MPIBlockVector')
+
+ def std(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def var(self, axis=None, dtype=None, out=None, ddof=0, keepdims=False):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def cumprod(self, axis=None, dtype=None, out=None):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def cumsum(self, axis=None, dtype=None, out=None):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def tolist(self):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def flatten(self, order='C'):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def ravel(self, order='C'):
+ raise RuntimeError('Operation not supported by MPIBlockVector')
+
+ def argpartition(self, kth, axis=-1, kind='introselect', order=None):
+ BaseBlockVector.argpartition(self, kth, axis=axis, kind=kind, order=order)
+
+ def argsort(self, axis=-1, kind='quicksort', order=None):
+ BaseBlockVector.argsort(self, axis=axis, kind=kind, order=order)
+
+ def byteswap(self, inplace=False):
+ BaseBlockVector.byteswap(self, inplace=inplace)
+
+ def choose(self, choices, out=None, mode='raise'):
+ BaseBlockVector.choose(self, choices, out=out, mode=mode)
+
+ def diagonal(self, offset=0, axis1=0, axis2=1):
+ BaseBlockVector.diagonal(self, offset=offset, axis1=axis1, axis2=axis2)
+
+ def dump(self, file):
+ BaseBlockVector.dump(self, file)
+
+ def dumps(self):
+ BaseBlockVector.dumps(self)
+
+ def getfield(self, dtype, offset=0):
+ BaseBlockVector.getfield(self, dtype, offset=offset)
+
+ def item(self, *args):
+ BaseBlockVector.item(self, *args)
+
+ def itemset(self, *args):
+ BaseBlockVector.itemset(self, *args)
+
+ def newbyteorder(self, new_order='S'):
+ BaseBlockVector.newbyteorder(self, new_order=new_order)
+
+ def put(self, indices, values, mode='raise'):
+ BaseBlockVector.put(self, indices, values, mode=mode)
+
+ def partition(self, kth, axis=-1, kind='introselect', order=None):
+ BaseBlockVector.partition(self, kth, axis=axis, kind=kind, order=order)
+
+ def repeat(self, repeats, axis=None):
+ BaseBlockVector.repeat(self, repeats, axis=axis)
+
+ def reshape(self, shape, order='C'):
+ BaseBlockVector.reshape(self, shape, order=order)
+
+ def resize(self, new_shape, refcheck=True):
+ BaseBlockVector.resize(self, new_shape, refcheck=refcheck)
+
+ def searchsorted(self, v, side='left', sorter=None):
+ BaseBlockVector.searchsorted(self, v, side=side, sorter=sorter)
+
+ def setfield(self, val, dtype, offset=0):
+ BaseBlockVector.setfield(self, val, dtype, offset=offset)
+
+ def setflags(self, write=None, align=None, uic=None):
+ BaseBlockVector.setflags(self, write=write, align=align, uic=uic)
+
+ def sort(self, axis=-1, kind='quicksort', order=None):
+ BaseBlockVector.sort(self, axis=axis, kind=kind, order=order)
+
+ def squeeze(self, axis=None):
+ BaseBlockVector.squeeze(self, axis=axis)
+
+ def swapaxes(self, axis1, axis2):
+ BaseBlockVector.swapaxes(self, axis1, axis2)
+
+ def tobytes(self, order='C'):
+ BaseBlockVector.tobytes(self, order=order)
+
+ def argmax(self, axis=None, out=None):
+ BaseBlockVector.argmax(self, axis=axis, out=out)
+
+ def argmin(self, axis=None, out=None):
+ BaseBlockVector.argmin(self, axis=axis, out=out)
+
+ def take(self, indices, axis=None, out=None, mode='raise'):
+ BaseBlockVector.take(self, indices, axis=axis, out=out, mode=mode)
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py
index 5c9340c748b..580e172475a 100644
--- a/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py
+++ b/pyomo/contrib/pynumero/sparse/tests/test_block_matrix.py
@@ -9,37 +9,44 @@
# ___________________________________________________________________________
import pyutilib.th as unittest
-from .. import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_sparse as sp, scipy_available
+)
if not (numpy_available and scipy_available):
- raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
+ raise unittest.SkipTest(
+ "Pynumero needs scipy and numpy to run BlockMatrix tests")
from scipy.sparse import coo_matrix, bmat
-import numpy as np
from pyomo.contrib.pynumero.sparse import (BlockMatrix,
- BlockSymMatrix,
- BlockVector)
+ BlockVector,
+ NotFullyDefinedBlockMatrixError)
+import warnings
class TestBlockMatrix(unittest.TestCase):
def setUp(self):
row = np.array([0, 3, 1, 2, 3, 0])
col = np.array([0, 0, 1, 2, 3, 3])
- data = np.array([2, 1, 3, 4, 5, 1])
+ data = np.array([2., 1, 3, 4, 5, 1])
m = coo_matrix((data, (row, col)), shape=(4, 4))
self.block_m = m
bm = BlockMatrix(2, 2)
bm.name = 'basic_matrix'
- bm[0, 0] = m
- bm[1, 1] = m
- bm[0, 1] = m
+ bm.set_block(0, 0, m.copy())
+ bm.set_block(1, 1, m.copy())
+ bm.set_block(0, 1, m.copy())
self.basic_m = bm
+ self.dense = np.zeros((8, 8))
+ self.dense[0:4, 0:4] = m.toarray()
+ self.dense[0:4, 4:8] = m.toarray()
+ self.dense[4:8, 4:8] = m.toarray()
self.composed_m = BlockMatrix(2, 2)
- self.composed_m[0, 0] = self.block_m
- self.composed_m[1, 1] = self.basic_m
+ self.composed_m.set_block(0, 0, self.block_m.copy())
+ self.composed_m.set_block(1, 1, self.basic_m.copy())
def test_name(self):
self.assertEqual(self.basic_m.name, 'basic_matrix')
@@ -54,7 +61,6 @@ def test_shape(self):
self.assertEqual(self.basic_m.shape, shape)
def test_tocoo(self):
-
block = self.block_m
m = self.basic_m
scipy_mat = bmat([[block, block], [None, block]], format='coo')
@@ -132,8 +138,8 @@ def test_multiply(self):
m = self.basic_m
scipy_mat = bmat([[block, block], [None, block]], format='coo')
x = BlockVector(2)
- x[0] = np.ones(block.shape[1], dtype=np.float64)
- x[1] = np.ones(block.shape[1], dtype=np.float64)
+ x.set_block(0, np.ones(block.shape[1], dtype=np.float64))
+ x.set_block(1, np.ones(block.shape[1], dtype=np.float64))
res_scipy = scipy_mat.dot(x.flatten())
res_dinopy = m * x
@@ -142,39 +148,46 @@ def test_multiply(self):
self.assertListEqual(res_dinopy.tolist(), res_scipy.tolist())
self.assertListEqual(res_dinopy_flat.tolist(), res_scipy.tolist())
- dense_mat = dinopy_mat.todense()
+ dense_mat = dinopy_mat.toarray()
self.basic_m *= 5.0
- self.assertTrue(np.allclose(dense_mat, self.basic_m.todense()))
+ self.assertTrue(np.allclose(dense_mat, self.basic_m.toarray()))
+
+ def test_mul_sparse_matrix(self):
+ m = self.basic_m
+
+ flat_prod = m.tocoo() * m.tocoo()
+ prod = m * m
- flat_mat = self.basic_m.tocoo()
- result = flat_mat * flat_mat
- dense_result = result.toarray()
- mat = self.basic_m * self.basic_m.tocoo()
- dense_mat = mat.toarray()
- self.assertTrue(np.allclose(dense_mat, dense_result))
+ self.assertIsInstance(prod, BlockMatrix)
+ self.assertTrue(np.allclose(flat_prod.toarray(), prod.toarray()))
- # not supported block matrix times block matrix for now
- #with self.assertRaises(Exception) as context:
- # mat = self.basic_m * self.basic_m.tocoo()
+ m2 = m.copy_structure()
+ ones = np.ones(m.shape)
+ m2.copyfrom(ones)
+ flat_prod = m.tocoo() * m2.tocoo()
+ prod = m * m2
+
+ self.assertIsInstance(prod, BlockMatrix)
+ self.assertTrue(np.allclose(flat_prod.toarray(), prod.toarray()))
def test_getitem(self):
m = BlockMatrix(3, 3)
for i in range(3):
for j in range(3):
- self.assertIsNone(m[i, j])
+ self.assertIsNone(m.get_block(i, j))
- m[0, 1] = self.block_m
- self.assertEqual(m[0, 1].shape, self.block_m.shape)
+ m.set_block(0, 1, self.block_m)
+ self.assertEqual(m.get_block(0, 1).shape, self.block_m.shape)
def test_setitem(self):
m = BlockMatrix(2, 2)
- m[0, 1] = self.block_m
+ m.set_block(0, 1, self.block_m)
self.assertFalse(m.is_empty_block(0, 1))
- self.assertEqual(m.row_block_sizes()[0], self.block_m.shape[0])
- self.assertEqual(m.col_block_sizes()[1], self.block_m.shape[1])
- self.assertEqual(m[0, 1].shape, self.block_m.shape)
+ self.assertEqual(m._brow_lengths[0], self.block_m.shape[0])
+ self.assertEqual(m._bcol_lengths[1], self.block_m.shape[1])
+ self.assertEqual(m.get_block(0, 1).shape, self.block_m.shape)
def test_coo_data(self):
m = self.basic_m.tocoo()
@@ -195,12 +208,12 @@ def test_block_shapes(self):
self.assertEqual(shapes[i][j], self.block_m.shape)
def test_dot(self):
- A_dense = self.basic_m.todense()
+ A_dense = self.basic_m.toarray()
A_block = self.basic_m
x = np.ones(A_dense.shape[1])
block_x = BlockVector(2)
- block_x[0] = np.ones(self.block_m.shape[1])
- block_x[1] = np.ones(self.block_m.shape[1])
+ block_x.set_block(0, np.ones(self.block_m.shape[1]))
+ block_x.set_block(1, np.ones(self.block_m.shape[1]))
flat_res = A_block.dot(x).flatten()
block_res = A_block.dot(block_x)
self.assertTrue(np.allclose(A_dense.dot(x), flat_res))
@@ -210,12 +223,12 @@ def test_dot(self):
def test_reset_brow(self):
self.basic_m.reset_brow(0)
for j in range(self.basic_m.bshape[1]):
- self.assertIsNone(self.basic_m[0, j])
+ self.assertIsNone(self.basic_m.get_block(0, j))
def test_reset_bcol(self):
self.basic_m.reset_bcol(0)
for j in range(self.basic_m.bshape[0]):
- self.assertIsNone(self.basic_m[j, 0])
+ self.assertIsNone(self.basic_m.get_block(j, 0))
def test_to_scipy(self):
@@ -233,153 +246,682 @@ def test_to_scipy(self):
self.assertListEqual(dcol.tolist(), scol.tolist())
self.assertListEqual(ddata.tolist(), sdata.tolist())
- def test_has_empty_rows(self):
- self.assertFalse(self.basic_m.has_empty_rows())
+ def test_has_undefined_row_sizes(self):
+ self.assertFalse(self.basic_m.has_undefined_row_sizes())
- def test_has_empty_cols(self):
- self.assertFalse(self.basic_m.has_empty_cols())
+ def test_has_undefined_col_sizes(self):
+ self.assertFalse(self.basic_m.has_undefined_col_sizes())
def test_transpose(self):
- A_dense = self.basic_m.todense()
+ A_dense = self.basic_m.toarray()
A_block = self.basic_m
A_dense_t = A_dense.transpose()
A_block_t = A_block.transpose()
- self.assertTrue(np.allclose(A_dense_t, A_block_t.todense()))
+ self.assertTrue(np.allclose(A_dense_t, A_block_t.toarray()))
- A_dense = self.composed_m.todense()
+ A_dense = self.composed_m.toarray()
A_block = self.composed_m
A_dense_t = A_dense.transpose()
A_block_t = A_block.transpose()
- self.assertTrue(np.allclose(A_dense_t, A_block_t.todense()))
+ self.assertTrue(np.allclose(A_dense_t, A_block_t.toarray()))
def test_repr(self):
self.assertEqual(len(self.basic_m.__repr__()), 17)
- #def test_str(self):
- # self.assertEqual(len(self.basic_m.__str__()), 328)
-
def test_set_item(self):
- self.basic_m[1, 0] = None
- self.assertIsNone(self.basic_m[1, 0])
- self.basic_m[1, 1] = None
- self.assertIsNone(self.basic_m[1, 1])
- self.assertEqual(self.basic_m._brow_lengths[1], 0)
- self.basic_m[1, 1] = self.block_m
- self.assertEqual(self.basic_m._brow_lengths[1], self.block_m.shape[1])
+ self.basic_m.set_block(1, 0, None)
+ self.assertIsNone(self.basic_m.get_block(1, 0))
+ self.basic_m.set_block(1, 1, None)
+ self.assertIsNone(self.basic_m.get_block(1, 1))
+ self.assertEqual(self.basic_m._brow_lengths[1], self.block_m.shape[0])
+ self.basic_m.set_block(1, 1, self.block_m)
+ self.assertEqual(self.basic_m._brow_lengths[1], self.block_m.shape[0])
def test_add(self):
- A_dense = self.basic_m.todense()
+ A_dense = self.basic_m.toarray()
A_block = self.basic_m
aa = A_dense + A_dense
mm = A_block + A_block
- self.assertTrue(np.allclose(aa, mm.todense()))
+ self.assertTrue(np.allclose(aa, mm.toarray()))
mm = A_block.__radd__(A_block)
- self.assertTrue(np.allclose(aa, mm.todense()))
+ self.assertTrue(np.allclose(aa, mm.toarray()))
+
+ r = A_block + A_block.tocoo()
+ dense_res = A_block.toarray() + A_block.toarray()
+ self.assertIsInstance(r, BlockMatrix)
+ self.assertTrue(np.allclose(r.toarray(), dense_res))
+
+ r = A_block.tocoo() + A_block
+ dense_res = A_block.toarray() + A_block.toarray()
+ #self.assertIsInstance(r, BlockMatrix)
+ self.assertTrue(np.allclose(r.toarray(), dense_res))
+
+ r = A_block + 2 * A_block.tocoo()
+ dense_res = A_block.toarray() + 2 * A_block.toarray()
+ self.assertIsInstance(r, BlockMatrix)
+ self.assertTrue(np.allclose(r.toarray(), dense_res))
+
+ r = 2 * A_block.tocoo() + A_block
+ dense_res = 2 * A_block.toarray() + A_block.toarray()
+ #self.assertIsInstance(r, BlockMatrix)
+ self.assertTrue(np.allclose(r.toarray(), dense_res))
+
+ r = A_block.T + A_block.tocoo()
+ dense_res = A_block.toarray().T + A_block.toarray()
+ self.assertIsInstance(r, BlockMatrix)
+ self.assertTrue(np.allclose(r.toarray(), dense_res))
+
+ with self.assertRaises(Exception) as context:
+ mm = A_block.__radd__(A_block.toarray())
+
+ with self.assertRaises(Exception) as context:
+ mm = A_block + A_block.toarray()
+
+ with self.assertRaises(Exception) as context:
+ mm = A_block + 1.0
+
+ def test_add_copy(self):
+ """
+ The purpose of this test is to ensure that copying happens correctly when block matrices are added.
+ For example, when adding
+
+ [A B + [D 0
+ 0 C] E F]
+
+ we want to make sure that E and B both get copied in the result rather than just placed in the result.
+ """
+ bm = self.basic_m.copy()
+ bmT = bm.transpose()
+ res = bm + bmT
+ self.assertIsNot(res.get_block(1, 0), bmT.get_block(1, 0))
+ self.assertIsNot(res.get_block(0, 1), bm.get_block(0, 1))
+ self.assertTrue(np.allclose(res.toarray(), self.dense + self.dense.transpose()))
def test_sub(self):
- A_dense = self.basic_m.todense()
+ A_dense = self.basic_m.toarray()
A_block = self.basic_m
+ A_block2 = 2 * self.basic_m
aa = A_dense - A_dense
mm = A_block - A_block
- self.assertTrue(np.allclose(aa, mm.todense()))
- mm = A_block.__rsub__(A_block)
- self.assertTrue(np.allclose(aa, mm.todense()))
+ self.assertTrue(np.allclose(aa, mm.toarray()))
+ mm = A_block2 - A_block.tocoo()
+ self.assertTrue(np.allclose(A_block.toarray(), mm.toarray()))
-class TestSymBlockMatrix(unittest.TestCase):
+ mm = A_block2.tocoo() - A_block
+ self.assertTrue(np.allclose(A_block.toarray(), mm.toarray()))
- def setUp(self):
+ mm = A_block2.T - A_block.tocoo()
+ dense_r = A_block2.toarray().T - A_block.toarray()
+ self.assertTrue(np.allclose(dense_r, mm.toarray()))
- row = np.array([0, 1, 4, 1, 2, 7, 2, 3, 5, 3, 4, 5, 4, 7, 5, 6, 6, 7])
- col = np.array([0, 0, 0, 1, 1, 1, 2, 2, 2, 3, 3, 3, 4, 4, 5, 5, 6, 7])
- data = np.array([27, 5, 12, 56, 66, 34, 94, 31, 41, 7, 98, 72, 24, 33, 78, 47, 98, 41])
+ with self.assertRaises(Exception) as context:
+ mm = A_block - A_block.toarray()
- off_diagonal_mask = row != col
- new_row = np.concatenate([row, col[off_diagonal_mask]])
- new_col = np.concatenate([col, row[off_diagonal_mask]])
- new_data = np.concatenate([data, data[off_diagonal_mask]])
- m = coo_matrix((new_data, (new_row, new_col)), shape=(8, 8))
+ with self.assertRaises(Exception) as context:
+ mm = A_block - 1.0
- self.block00 = m
+ with self.assertRaises(Exception) as context:
+ mm = 1.0 - A_block
- row = np.array([0, 3, 1, 0])
- col = np.array([0, 3, 1, 2])
- data = np.array([4, 5, 7, 9])
- m = coo_matrix((data, (row, col)), shape=(4, 8))
+ def test_sub_copy(self):
+ """
+ The purpose of this test is to ensure that copying happens correctly when block matrices are subtracted.
+ For example, when subtracting
- self.block10 = m
+ [A B - [D 0
+ 0 C] E F]
- row = np.array([0, 1, 2, 3])
- col = np.array([0, 1, 2, 3])
- data = np.array([1, 1, 1, 1])
- m = coo_matrix((data, (row, col)), shape=(4, 4))
+ we want to make sure that E and B both get copied in the result rather than just placed in the result.
+ """
+ bm = self.basic_m.copy()
+ bmT = 2 * bm.transpose()
+ res = bm - bmT
+ self.assertIsNot(res.get_block(1, 0), bmT.get_block(1, 0))
+ self.assertIsNot(res.get_block(0, 1), bm.get_block(0, 1))
+ self.assertTrue(np.allclose(res.toarray(), self.dense - 2 * self.dense.transpose()))
- self.block11 = m
+ def test_neg(self):
- bm = BlockSymMatrix(2)
- bm.name = 'basic_matrix'
- bm[0, 0] = self.block00
- bm[1, 0] = self.block10
- bm[1, 1] = self.block11
- self.basic_m = bm
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m
- def test_tocoo(self):
- m = self.basic_m.tocoo()
- a = m.toarray()
- self.assertTrue(np.allclose(a, a.T, atol=1e-3))
+ aa = -A_dense
+ mm = -A_block
+
+ self.assertTrue(np.allclose(aa, mm.toarray()))
+
+ def test_copyfrom(self):
+ bm0 = self.basic_m.copy()
+ bm = bm0.copy_structure()
+ self.assertFalse(np.allclose(bm.toarray(), self.dense))
+ bm.copyfrom(bm0.tocoo())
+ self.assertTrue(np.allclose(bm.toarray(), self.dense))
+
+ flat = np.ones((8, 8))
+ bm.copyfrom(flat)
+ self.assertTrue(np.allclose(flat, bm.toarray()))
+
+ bm.copyfrom(bm0)
+ self.assertTrue(np.allclose(bm.toarray(), self.dense))
+
+ bm.get_block(0, 0).data.fill(1.0)
+ self.assertAlmostEqual(bm0.toarray()[0, 0], 2) # this tests that a deep copy was done
+ self.assertAlmostEqual(bm.toarray()[0, 0], 1)
+
+ bm.copyfrom(bm0, deep=False)
+ bm.get_block(0, 0).data.fill(1.0)
+ self.assertAlmostEqual(bm0.toarray()[0, 0], 1) # this tests that a shallow copy was done
+ self.assertAlmostEqual(bm.toarray()[0, 0], 1)
+
+ def test_copyto(self):
+ bm0 = self.basic_m.copy()
+ coo = bm0.tocoo()
+ coo.data.fill(1.0)
+ csr = coo.tocsr()
+ csc = coo.tocsc()
+ self.assertFalse(np.allclose(coo.toarray(), self.dense))
+ self.assertFalse(np.allclose(csr.toarray(), self.dense))
+ self.assertFalse(np.allclose(csc.toarray(), self.dense))
+ bm0.copyto(coo)
+ bm0.copyto(csr)
+ bm0.copyto(csc)
+ self.assertTrue(np.allclose(coo.toarray(), self.dense))
+ self.assertTrue(np.allclose(csr.toarray(), self.dense))
+ self.assertTrue(np.allclose(csc.toarray(), self.dense))
+
+ flat = np.ones((8, 8))
+ bm0.copyto(flat)
+ self.assertTrue(np.allclose(flat, self.dense))
+
+ bm = bm0.copy_structure()
+ bm0.copyto(bm)
+ self.assertTrue(np.allclose(bm.toarray(), self.dense))
+
+ bm.get_block(0, 0).data.fill(1.0)
+ self.assertAlmostEqual(bm0.toarray()[0, 0], 2) # this tests that a deep copy was done
+ self.assertAlmostEqual(bm.toarray()[0, 0], 1)
+
+ bm0.copyto(bm, deep=False)
+ bm.get_block(0, 0).data.fill(1.0)
+ self.assertAlmostEqual(bm0.toarray()[0, 0], 1) # this tests that a shallow copy was done
+ self.assertAlmostEqual(bm.toarray()[0, 0], 1)
+
+ def test_copy(self):
+ clone = self.basic_m.copy()
+ self.assertTrue(np.allclose(clone.toarray(), self.dense))
+ clone.get_block(0, 0).data.fill(1)
+ self.assertAlmostEqual(clone.toarray()[0, 0], 1)
+ self.assertAlmostEqual(self.basic_m.toarray()[0, 0], 2)
+
+ bm = self.basic_m.copy()
+ clone = bm.copy(deep=False)
+ self.assertTrue(np.allclose(clone.toarray(), self.dense))
+ clone.get_block(0, 0).data.fill(1)
+ self.assertAlmostEqual(clone.toarray()[0, 0], 1)
+ self.assertAlmostEqual(bm.toarray()[0, 0], 1)
+
+ def test_iadd(self):
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m.copy()
+ A_dense += A_dense
+ A_block += A_block
+
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m.copy()
+ A_dense += A_dense
+ A_block += A_block.tocoo()
+
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m.copy()
+ A_block += 2 * A_block.tocoo()
+
+ self.assertTrue(np.allclose(A_block.toarray(), 3 * A_dense))
+
+ with self.assertRaises(Exception) as context:
+ A_block += 1.0
+
+ def test_isub(self):
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m
+ A_dense -= A_dense
+ A_block -= A_block
- def test_coo_data(self):
- m = self.basic_m.tocoo()
- data = self.basic_m.coo_data()
- self.assertListEqual(m.data.tolist(), data.tolist())
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
- def test_multiply(self):
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m
+ A_dense -= A_dense
+ A_block -= A_block.tocoo()
- # test scalar multiplication
- m = self.basic_m * 5.0
- dense_m = m.todense()
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
- b00 = self.block00.tocoo()
- b11 = self.block11.tocoo()
- b10 = self.block10
- scipy_m = bmat([[b00, b10.transpose()], [b10, b11]], format='coo')
- dense_scipy_m = scipy_m.todense() * 5.0
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m.copy()
+ A_block -= 2 * A_block.tocoo()
- self.assertTrue(np.allclose(dense_scipy_m, dense_m, atol=1e-3))
+ self.assertTrue(np.allclose(A_block.toarray(), -A_dense))
- m = 5.0 * self.basic_m
- dense_m = m.todense()
+ with self.assertRaises(Exception) as context:
+ A_block -= 1.0
+
+ def test_imul(self):
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m
+ print(A_dense)
+ print(A_block.toarray())
+ A_dense *= 3
+ print(A_dense)
+ print(A_block.toarray())
+ A_block *= 3.
+ print(A_dense)
+ print(A_block.toarray())
+
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
+
+ with self.assertRaises(Exception) as context:
+ A_block *= A_block
+
+ with self.assertRaises(Exception) as context:
+ A_block *= A_block.tocoo()
+
+ with self.assertRaises(Exception) as context:
+ A_block *= A_block.toarray()
+
+ def test_itruediv(self):
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m.copy()
+ A_dense /= 3
+ A_block /= 3.
+
+ self.assertTrue(np.allclose(A_block.toarray(), A_dense))
+
+ with self.assertRaises(Exception) as context:
+ A_block /= A_block
+
+ with self.assertRaises(Exception) as context:
+ A_block /= A_block.tocoo()
+
+ with self.assertRaises(Exception) as context:
+ A_block /= A_block.toarray()
+
+ def test_truediv(self):
+
+ A_dense = self.basic_m.toarray()
+ A_block = self.basic_m
+ B_block = A_block / 3.
+ self.assertTrue(np.allclose(B_block.toarray(), A_dense/3.))
+
+ with self.assertRaises(Exception) as context:
+ b = A_block / A_block
+
+ with self.assertRaises(Exception) as context:
+ b = A_block / A_block.tocoo()
+
+ with self.assertRaises(Exception) as context:
+ b = A_block / A_block.toarray()
+
+ with self.assertRaises(Exception) as context:
+ B_block = 3./ A_block
+
+ def test_eq(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A_flat = self.basic_m.tocoo()
+ A_block = self.basic_m
+
+ A_bool_flat = A_flat == 2.0
+ A_bool_block = A_block == 2.0
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat == A_flat
+ A_bool_block = A_block == A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+
+ A_bool_flat = 2.0 != A_flat
+ A_bool_block = 2.0 != A_block
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ def test_ne(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A_flat = self.basic_m.tocoo()
+ A_block = self.basic_m
- self.assertTrue(np.allclose(dense_scipy_m, dense_m, atol=1e-3))
+ A_bool_flat = A_flat != 2.0
+ A_bool_block = A_block != 2.0
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = 2.0 != A_flat
+ A_bool_block = 2.0 != A_block
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat != A_flat
+ A_bool_block = A_block != A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ def test_le(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A_flat = self.basic_m.tocoo()
+ A_block = self.basic_m
+
+ A_bool_flat = A_flat <= 2.0
+ A_bool_block = A_block <= 2.0
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ # A_bool_flat = 2.0 <= A_flat
+ # A_bool_block = 2.0 <= A_block
+ # self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ # A_bool_block.toarray()))
+
+ A_bool_flat = A_flat <= A_flat
+ A_bool_block = A_block <= A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat <= 2 * A_flat
+ A_bool_block = A_block <= 2 * A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = 2.0 >= A_flat
+ A_bool_block = 2.0 >= A_block
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ def test_lt(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A_flat = self.basic_m.tocoo()
+ A_block = self.basic_m
+
+ A_bool_flat = A_flat < 2.0
+ A_bool_block = A_block < 2.0
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ # A_bool_flat = 2.0 <= A_flat
+ # A_bool_block = 2.0 <= A_block
+ # self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ # A_bool_block.toarray()))
+
+ A_bool_flat = A_flat < A_flat
+ A_bool_block = A_block < A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat < 2 * A_flat
+ A_bool_block = A_block < 2 * A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = 2.0 > A_flat
+ A_bool_block = 2.0 > A_block
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ def test_ge(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A_flat = self.basic_m.tocoo()
+ A_block = self.basic_m
+
+ A_bool_flat = A_flat >= 2.0
+ A_bool_block = A_block >= 2.0
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = 2.0 <= A_flat
+ A_bool_block = 2.0 <= A_block
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat >= A_flat
+ A_bool_block = A_block >= A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ A_bool_flat = A_flat >= 0.5 * A_flat
+ A_bool_block = A_block >= 0.5 * A_block
+
+ self.assertTrue(np.allclose(A_bool_flat.toarray(),
+ A_bool_block.toarray()))
+
+ def test_gt(self):
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ A = self.basic_m.copy()
+ B = 2 * A.transpose()
+
+ res = A > B
+ expected = A.toarray() > B.toarray()
+ self.assertTrue(np.allclose(res.toarray(), expected))
+
+ def test_abs(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = -1.0 * np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+
+ self.block_m = m
+
+ bm = BlockMatrix(2, 2)
+ bm.set_block(0, 0, m)
+ bm.set_block(1, 1, m)
+ bm.set_block(0, 1, m)
+
+ abs_flat = abs(bm.tocoo())
+ abs_mat = abs(bm)
+
+ self.assertIsInstance(abs_mat, BlockMatrix)
+ self.assertTrue(np.allclose(abs_flat.toarray(),
+ abs_mat.toarray()))
+
+ def test_getcol(self):
- # test matrix vector product
m = self.basic_m
- x = BlockVector(m.bshape[1])
- for i in range(m.bshape[1]):
- x[i] = np.ones(m.col_block_sizes()[i], dtype=np.float64)
- dinopy_res = m * x
- scipy_res = scipy_m * x.flatten()
- self.assertListEqual(dinopy_res.tolist(), scipy_res.tolist())
- dinopy_res = m * x.flatten()
- scipy_res = scipy_m * x.flatten()
+ flat_mat = m.tocoo()
+ flat_col = flat_mat.getcol(2)
+ block_col = m.getcol(2)
+ self.assertTrue(np.allclose(flat_col.toarray().flatten(),
+ block_col.flatten()))
- self.assertListEqual(dinopy_res.tolist(), scipy_res.tolist())
+ flat_col = flat_mat.getcol(4)
+ block_col = m.getcol(4)
+ self.assertTrue(np.allclose(flat_col.toarray().flatten(),
+ block_col.flatten()))
- self.basic_m *= 5.0
- self.assertTrue(np.allclose(self.basic_m.todense(), dense_m, atol=1e-3))
- # ToDo: Add test for transpose
+ flat_col = flat_mat.getcol(6)
+ block_col = m.getcol(6)
+ self.assertTrue(np.allclose(flat_col.toarray().flatten(),
+ block_col.flatten()))
+
+ def test_getrow(self):
+ m = self.basic_m
+ flat_mat = m.tocoo()
+ flat_row = flat_mat.getrow(2)
+ block_row = m.getrow(2)
+ self.assertTrue(np.allclose(flat_row.toarray().flatten(),
+ block_row.flatten()))
+ flat_row = flat_mat.getrow(7)
+ block_row = m.getrow(7)
+ self.assertTrue(np.allclose(flat_row.toarray().flatten(),
+ block_row.flatten()))
+ def test_nonzero(self):
+ m = self.basic_m
+ flat_mat = m.tocoo()
+ flat_row, flat_col = flat_mat.nonzero()
+ with self.assertRaises(Exception) as context:
+ block_row, block_col = m.nonzero()
+
+ def test_get_block_column_index(self):
+
+ m = BlockMatrix(2,4)
+ m.set_block(0, 0, coo_matrix((3, 2)))
+ m.set_block(0, 1, coo_matrix((3, 4)))
+ m.set_block(0, 2, coo_matrix((3, 3)))
+ m.set_block(0, 3, coo_matrix((3, 6)))
+ m.set_block(1, 3, coo_matrix((5, 6)))
+
+ bcol = m.get_block_column_index(8)
+ self.assertEqual(bcol, 2)
+ bcol = m.get_block_column_index(5)
+ self.assertEqual(bcol, 1)
+ bcol = m.get_block_column_index(14)
+ self.assertEqual(bcol, 3)
+
+ def test_get_block_row_index(self):
+
+ m = BlockMatrix(2,4)
+ m.set_block(0, 0, coo_matrix((3, 2)))
+ m.set_block(0, 1, coo_matrix((3, 4)))
+ m.set_block(0, 2, coo_matrix((3, 3)))
+ m.set_block(0, 3, coo_matrix((3, 6)))
+ m.set_block(1, 3, coo_matrix((5, 6)))
+
+ brow = m.get_block_row_index(0)
+ self.assertEqual(brow, 0)
+ brow = m.get_block_row_index(6)
+ self.assertEqual(brow, 1)
+
+ def test_matrix_multiply(self):
+ """
+ Test
+
+ [A B C * [G J = [A*G + B*H + C*I A*J + B*K + C*L
+ D E F] H K D*G + E*H + F*I D*J + E*K + F*L]
+ I L]
+ """
+ np.random.seed(0)
+ A = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ B = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ C = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ D = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ E = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ F = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ G = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ H = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ I = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ J = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ K = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+ L = sp.csr_matrix(np.random.normal(0, 10, (2, 2)))
+
+ bm1 = BlockMatrix(2, 3)
+ bm2 = BlockMatrix(3, 2)
+
+ bm1.set_block(0, 0, A)
+ bm1.set_block(0, 1, B)
+ bm1.set_block(0, 2, C)
+ bm1.set_block(1, 0, D)
+ bm1.set_block(1, 1, E)
+ bm1.set_block(1, 2, F)
+
+ bm2.set_block(0, 0, G)
+ bm2.set_block(1, 0, H)
+ bm2.set_block(2, 0, I)
+ bm2.set_block(0, 1, J)
+ bm2.set_block(1, 1, K)
+ bm2.set_block(2, 1, L)
+
+ got = (bm1 * bm2).toarray()
+ exp00 = (A * G + B * H + C * I).toarray()
+ exp01 = (A * J + B * K + C * L).toarray()
+ exp10 = (D * G + E * H + F * I).toarray()
+ exp11 = (D * J + E * K + F * L).toarray()
+ exp = np.zeros((4, 4))
+ exp[0:2, 0:2] = exp00
+ exp[0:2, 2:4] = exp01
+ exp[2:4, 0:2] = exp10
+ exp[2:4, 2:4] = exp11
+
+ self.assertTrue(np.allclose(got, exp))
+
+ def test_dimensions(self):
+ bm = BlockMatrix(2, 2)
+ self.assertTrue(bm.has_undefined_row_sizes())
+ self.assertTrue(bm.has_undefined_col_sizes())
+ with self.assertRaises(NotFullyDefinedBlockMatrixError):
+ shape = bm.shape
+ with self.assertRaises(NotFullyDefinedBlockMatrixError):
+ bm.set_block(0, 0, BlockMatrix(2, 2))
+ with self.assertRaises(NotFullyDefinedBlockMatrixError):
+ row_sizes = bm.row_block_sizes()
+ with self.assertRaises(NotFullyDefinedBlockMatrixError):
+ col_sizes = bm.col_block_sizes()
+ bm2 = BlockMatrix(2, 2)
+ bm2.set_block(0, 0, coo_matrix((2, 2)))
+ bm2.set_block(1, 1, coo_matrix((2, 2)))
+ bm3 = bm2.copy()
+ bm.set_block(0, 0, bm2)
+ bm.set_block(1, 1, bm3)
+ self.assertFalse(bm.has_undefined_row_sizes())
+ self.assertFalse(bm.has_undefined_col_sizes())
+ self.assertEqual(bm.shape, (8, 8))
+ bm.set_block(0, 0, None)
+ self.assertFalse(bm.has_undefined_row_sizes())
+ self.assertFalse(bm.has_undefined_col_sizes())
+ self.assertEqual(bm.shape, (8, 8))
+ self.assertTrue(np.all(bm.row_block_sizes() == np.ones(2)*4))
+ self.assertTrue(np.all(bm.col_block_sizes() == np.ones(2)*4))
+ self.assertTrue(np.all(bm.row_block_sizes(copy=False) == np.ones(2)*4))
+ self.assertTrue(np.all(bm.col_block_sizes(copy=False) == np.ones(2)*4))
+
+ def test_transpose_with_empty_rows(self):
+ m = BlockMatrix(2, 2)
+ m.set_row_size(0, 2)
+ m.set_row_size(1, 2)
+ m.set_col_size(0, 2)
+ m.set_col_size(1, 2)
+ mt = m.transpose()
+ self.assertEqual(mt.get_row_size(0), 2)
+ self.assertEqual(mt.get_row_size(1), 2)
+ self.assertEqual(mt.get_col_size(0), 2)
+ self.assertEqual(mt.get_col_size(1), 2)
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py
index 7744cc26357..d6aebd6a049 100644
--- a/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py
+++ b/pyomo/contrib/pynumero/sparse/tests/test_block_vector.py
@@ -7,34 +7,37 @@
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+from __future__ import division
import sys
import pyutilib.th as unittest
-import pyomo.contrib.pynumero as pn
-if not (pn.sparse.numpy_available and pn.sparse.scipy_available):
- raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
-
-import numpy as np
-from pyomo.contrib.pynumero.sparse.block_vector import BlockVector
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_available
+)
+if not (numpy_available and scipy_available):
+ raise unittest.SkipTest(
+ "Pynumero needs scipy and numpy to run BlockVector tests")
+from pyomo.contrib.pynumero.sparse.block_vector import (
+ BlockVector, NotFullyDefinedBlockVectorError
+)
class TestBlockVector(unittest.TestCase):
def test_constructor(self):
- v = BlockVector(4)
- self.assertEqual(v.nblocks, 4)
- self.assertEqual(v.bshape, (4,))
- self.assertEqual(v.size, 0)
+ v = BlockVector(2)
+ self.assertEqual(v.nblocks, 2)
+ self.assertEqual(v.bshape, (2,))
+ with self.assertRaises(NotFullyDefinedBlockVectorError):
+ v_size = v.size
- v[0] = np.ones(2)
- v[1] = np.ones(4)
+ v.set_block(0, np.ones(2))
+ v.set_block(1, np.ones(4))
self.assertEqual(v.size, 6)
self.assertEqual(v.shape, (6,))
- v[0] = None
- self.assertEqual(v.size, 4)
- self.assertEqual(v.shape, (4,))
- self.assertEqual(v.ndim, 1)
+ with self.assertRaises(AssertionError):
+ v.set_block(0, None)
with self.assertRaises(Exception) as context:
BlockVector('hola')
@@ -44,7 +47,7 @@ def setUp(self):
self.ones = BlockVector(3)
self.list_sizes_ones = [2, 4, 3]
for idx, s in enumerate(self.list_sizes_ones):
- self.ones[idx] = np.ones(s)
+ self.ones.set_block(idx, np.ones(s))
def test_block_sizes(self):
self.assertListEqual(self.ones.block_sizes().tolist(), self.list_sizes_ones)
@@ -63,13 +66,14 @@ def test_mean(self):
v = self.ones
self.assertEqual(v.mean(), flat_v.mean())
v = BlockVector(2)
- self.assertEqual(v.mean(), 0.0)
+ with self.assertRaises(NotFullyDefinedBlockVectorError):
+ v_mean = v.mean()
def test_sum(self):
self.assertEqual(self.ones.sum(), self.ones.size)
v = BlockVector(2)
- v[0] = np.arange(5)
- v[1] = np.arange(9)
+ v.set_block(0, np.arange(5))
+ v.set_block(1, np.arange(9))
self.assertEqual(v.sum(), 46)
def test_all(self):
@@ -77,15 +81,15 @@ def test_all(self):
v = BlockVector(2)
a = np.ones(5)
b = np.ones(3)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertTrue(v.all())
v = BlockVector(2)
a = np.zeros(5)
b = np.zeros(3)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertFalse(v.all())
def test_any(self):
@@ -93,15 +97,15 @@ def test_any(self):
v = BlockVector(2)
a = np.zeros(5)
b = np.ones(3)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertTrue(v.any())
v = BlockVector(2)
a = np.zeros(5)
b = np.zeros(3)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertFalse(v.any())
def test_argpartition(self):
@@ -140,49 +144,44 @@ def test_clip(self):
b = np.ones(3)*5.0
c = np.ones(3)*10.0
- v[0] = a
- v[1] = b
- v[2] = c
+ v.set_block(0, a)
+ v.set_block(1, b)
+ v.set_block(2, c)
- v2[0] = np.ones(5) * 4.0
- v2[1] = np.ones(3) * 5.0
- v2[2] = np.ones(3) * 9.0
+ v2.set_block(0, np.ones(5) * 4.0)
+ v2.set_block(1, np.ones(3) * 5.0)
+ v2.set_block(2, np.ones(3) * 9.0)
vv = v.clip(4.0, 9.0)
self.assertEqual(vv.nblocks, v.nblocks)
for bid, blk in enumerate(vv):
- self.assertTrue(np.allclose(blk, v2[bid]))
-
- with self.assertRaises(NotImplementedError) as ctx:
- vv = v.clip(4.0, 9.0, out=v2)
+ self.assertTrue(np.allclose(blk, v2.get_block(bid)))
def test_compress(self):
v = self.ones
- with self.assertRaises(NotImplementedError) as ctx:
- vv = v.compress(1, out=1)
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
c = v.compress(v < 1)
v2 = BlockVector(2)
b = np.zeros(9)
- v2[0] = np.ones(0)
- v2[1] = b
+ v2.set_block(0, np.ones(0))
+ v2.set_block(1, b)
self.assertEqual(c.nblocks, v.nblocks)
for bid, blk in enumerate(c):
- self.assertTrue(np.allclose(blk, v2[bid]))
+ self.assertTrue(np.allclose(blk, v2.get_block(bid)))
flags = v < 1
c = v.compress(flags.flatten())
self.assertEqual(c.nblocks, v.nblocks)
for bid, blk in enumerate(c):
- self.assertTrue(np.allclose(blk, v2[bid]))
+ self.assertTrue(np.allclose(blk, v2.get_block(bid)))
with self.assertRaises(Exception) as context:
v.compress(1.0)
@@ -192,25 +191,25 @@ def test_nonzero(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
n = v.nonzero()
v2 = BlockVector(2)
- v2[0] = np.arange(5)
- v2[1] = np.zeros(0)
+ v2.set_block(0, np.arange(5))
+ v2.set_block(1, np.zeros(0))
self.assertEqual(n[0].nblocks, v.nblocks)
for bid, blk in enumerate(n[0]):
- self.assertTrue(np.allclose(blk, v2[bid]))
+ self.assertTrue(np.allclose(blk, v2.get_block(bid)))
def test_ptp(self):
v = BlockVector(2)
a = np.arange(5)
b = np.arange(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
vv = np.arange(9)
self.assertEqual(vv.ptp(), v.ptp())
@@ -220,25 +219,25 @@ def test_round(self):
v = BlockVector(2)
a = np.ones(5)*1.1
b = np.ones(9)*1.1
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
vv = v.round()
self.assertEqual(vv.nblocks, v.nblocks)
a = np.ones(5)
b = np.ones(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
for bid, blk in enumerate(vv):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_std(self):
v = BlockVector(2)
a = np.arange(5)
b = np.arange(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
vv = np.concatenate([a, b])
self.assertEqual(vv.std(), v.std())
@@ -249,7 +248,7 @@ def test_conj(self):
self.assertEqual(vv.nblocks, v.nblocks)
self.assertEqual(vv.shape, v.shape)
for bid, blk in enumerate(vv):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_conjugate(self):
v = self.ones
@@ -257,11 +256,11 @@ def test_conjugate(self):
self.assertEqual(vv.nblocks, v.nblocks)
self.assertEqual(vv.shape, v.shape)
for bid, blk in enumerate(vv):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_diagonal(self):
v = self.ones
- with self.assertRaises(ValueError) as ctx:
+ with self.assertRaises(NotImplementedError) as ctx:
vv = v.diagonal()
def test_getfield(self):
@@ -350,8 +349,8 @@ def test_prod(self):
a = np.arange(5)
b = np.arange(9)
c = np.concatenate([a, b])
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertEqual(v.prod(), c.prod())
def test_max(self):
@@ -360,8 +359,8 @@ def test_max(self):
a = np.arange(5)
b = np.arange(9)
c = np.concatenate([a, b])
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertEqual(v.max(), c.max())
def test_min(self):
@@ -370,8 +369,8 @@ def test_min(self):
a = np.arange(5)
b = np.arange(9)
c = np.concatenate([a, b])
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertEqual(v.min(), c.min())
def test_tolist(self):
@@ -379,8 +378,8 @@ def test_tolist(self):
a = np.arange(5)
b = np.arange(9)
c = np.concatenate([a, b])
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertListEqual(v.tolist(), c.tolist())
def test_flatten(self):
@@ -388,16 +387,16 @@ def test_flatten(self):
a = np.arange(5)
b = np.arange(9)
c = np.concatenate([a, b])
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertListEqual(v.flatten().tolist(), c.tolist())
def test_fill(self):
v = BlockVector(2)
a = np.arange(5)
b = np.arange(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
v.fill(1.0)
c = np.ones(v.size)
self.assertListEqual(v.tolist(), c.tolist())
@@ -413,24 +412,36 @@ def test_size(self):
size = sum(self.list_sizes_ones)
self.assertEqual(self.ones.size, size)
+ def test_length(self):
+ size = sum(self.list_sizes_ones)
+ self.assertEqual(len(self.ones), self.ones.nblocks)
+
def test_argmax(self):
- v = BlockVector(2)
- v[0] = np.arange(5)
- v[1] = np.arange(10, 15)
- self.assertEqual(v.argmax(), v.size-1)
+ v = BlockVector(3)
+ a = np.array([3, 2, 1])
+ v.set_block(0, a.copy())
+ v.set_block(1, a.copy())
+ v.set_block(2, a.copy())
+ v.get_block(1)[1] = 5
+ argmax = v.argmax()
+ self.assertEqual(argmax, 4)
def test_argmin(self):
- v = BlockVector(2)
- v[0] = np.arange(5)
- v[1] = np.arange(10, 15)
- self.assertEqual(v.argmin(), 0)
+ v = BlockVector(3)
+ a = np.array([3, 2, 1])
+ v.set_block(0, a.copy())
+ v.set_block(1, a.copy())
+ v.set_block(2, a.copy())
+ v.get_block(1)[1] = -5
+ argmin = v.argmin()
+ self.assertEqual(argmin, 4)
def test_cumprod(self):
v = BlockVector(3)
- v[0] = np.arange(1, 5)
- v[1] = np.arange(5, 10)
- v[2] = np.arange(10, 15)
+ v.set_block(0, np.arange(1, 5))
+ v.set_block(1, np.arange(5, 10))
+ v.set_block(2, np.arange(10, 15))
c = np.arange(1, 15)
res = v.cumprod()
self.assertIsInstance(res, BlockVector)
@@ -439,9 +450,9 @@ def test_cumprod(self):
def test_cumsum(self):
v = BlockVector(3)
- v[0] = np.arange(1, 5)
- v[1] = np.arange(5, 10)
- v[2] = np.arange(10, 15)
+ v.set_block(0, np.arange(1, 5))
+ v.set_block(1, np.arange(5, 10))
+ v.set_block(2, np.arange(10, 15))
c = np.arange(1, 15)
res = v.cumsum()
self.assertIsInstance(res, BlockVector)
@@ -455,7 +466,7 @@ def test_clone(self):
x = v.clone(4)
self.assertListEqual(x.tolist(), [4]*v.size)
y = x.clone(copy=False)
- y[2][-1] = 6
+ y.get_block(2)[-1] = 6
d = np.ones(y.size)*4
d[-1] = 6
self.assertListEqual(y.tolist(), d.tolist())
@@ -531,41 +542,51 @@ def test_rmul(self):
result = v.flatten() * v1
self.assertTrue(np.allclose(result.flatten(), v.flatten() * v1.flatten()))
- # @unittest.skipIf(sys.version_info < (3, 0), 'not supported in this veresion')
- # def test_truediv(self):
- # v = self.ones
- # v1 = v.clone(5, copy=True)
- # result = v / v1
- # self.assertListEqual(result.tolist(), [1/5] * v.size)
- # result = v / v1.flatten()
- # self.assertTrue(np.allclose(result.flatten(), v.flatten() / v1.flatten()))
- #
- # @unittest.skipIf(sys.version_info < (3, 0), 'not supported in this veresion')
- # def test_rtruediv(self):
- # v = self.ones
- # v1 = v.clone(5, copy=True)
- # result = v1.__rtruediv__(v)
- # self.assertListEqual(result.tolist(), [1 / 5] * v.size)
- # result = v.flatten() / v1
- # self.assertTrue(np.allclose(result.flatten(), v.flatten() / v1.flatten()))
- #
- # def test_floordiv(self):
- # v = self.ones
- # v.fill(2)
- # v1 = v.clone(5, copy=True)
- # result = v1 // v
- # self.assertListEqual(result.tolist(), [5 // 2] * v.size)
- # result = v // v1.flatten()
- # self.assertTrue(np.allclose(result.flatten(), v.flatten() // v1.flatten()))
- #
- # def test_rfloordiv(self):
- # v = self.ones
- # v.fill(2)
- # v1 = v.clone(5, copy=True)
- # result = v.__rfloordiv__(v1)
- # self.assertListEqual(result.tolist(), [5 // 2] * v.size)
- # result = v.flatten() // v1
- # self.assertTrue(np.allclose(result.flatten(), v.flatten() // v1.flatten()))
+ def test_truediv(self):
+ v = self.ones
+ v1 = v.clone(5.0, copy=True)
+ result = v / v1
+ self.assertListEqual(result.tolist(), [1.0/5.0] * v.size)
+ result = v / v1.flatten()
+ self.assertTrue(np.allclose(result.flatten(), v.flatten() / v1.flatten()))
+ result = 5.0 / v1
+ self.assertTrue(np.allclose(result.flatten(), v.flatten()))
+ result = v1 / 5.0
+ self.assertTrue(np.allclose(result.flatten(), v.flatten()))
+
+ def test_rtruediv(self):
+ v = self.ones
+ v1 = v.clone(5.0, copy=True)
+ result = v1.__rtruediv__(v)
+ self.assertListEqual(result.tolist(), [1.0 / 5.0] * v.size)
+ result = v.flatten() / v1
+ self.assertTrue(np.allclose(result.flatten(), v.flatten() / v1.flatten()))
+ result = 5.0 / v1
+ self.assertTrue(np.allclose(result.flatten(), v.flatten()))
+ result = v1 / 5.0
+ self.assertTrue(np.allclose(result.flatten(), v.flatten()))
+
+ def test_floordiv(self):
+ v = self.ones
+ v.fill(2.0)
+ v1 = v.clone(5.0, copy=True)
+ result = v1 // v
+ self.assertListEqual(result.tolist(), [5.0 // 2.0] * v.size)
+ result = v // v1.flatten()
+ self.assertTrue(np.allclose(result.flatten(), v.flatten() // v1.flatten()))
+
+ def test_rfloordiv(self):
+ v = self.ones
+ v.fill(2.0)
+ v1 = v.clone(5.0, copy=True)
+ result = v.__rfloordiv__(v1)
+ self.assertListEqual(result.tolist(), [5.0 // 2.0] * v.size)
+ result = v.flatten() // v1
+ self.assertTrue(np.allclose(result.flatten(), v.flatten() // v1.flatten()))
+ result = 2.0 // v1
+ self.assertTrue(np.allclose(result.flatten(), np.zeros(v1.size)))
+ result = v1 // 2.0
+ self.assertTrue(np.allclose(result.flatten(), np.ones(v1.size)*2.0))
def test_iadd(self):
v = self.ones
@@ -581,29 +602,35 @@ def test_iadd(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ a_copy = a.copy()
+ b_copy = b.copy()
+
+ v.set_block(0, a)
+ v.set_block(1, b)
v += 1.0
- self.assertTrue(np.allclose(v[0], a + 1))
- self.assertTrue(np.allclose(v[1], b + 1))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy + 1))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy + 1))
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ a_copy = a.copy()
+ b_copy = b.copy()
+
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
- v2[0] = np.ones(5)
- v2[1] = np.ones(9)
+ v2.set_block(0, np.ones(5))
+ v2.set_block(1, np.ones(9))
v += v2
- self.assertTrue(np.allclose(v[0], a + 1))
- self.assertTrue(np.allclose(v[1], b + 1))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy + 1))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy + 1))
- self.assertTrue(np.allclose(v2[0], np.ones(5)))
- self.assertTrue(np.allclose(v2[1], np.ones(9)))
+ self.assertTrue(np.allclose(v2.get_block(0), np.ones(5)))
+ self.assertTrue(np.allclose(v2.get_block(1), np.ones(9)))
with self.assertRaises(Exception) as context:
v += 'hola'
@@ -622,29 +649,33 @@ def test_isub(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
v -= 5.0
- self.assertTrue(np.allclose(v[0], a - 5.0))
- self.assertTrue(np.allclose(v[1], b - 5.0))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy - 5.0))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy - 5.0))
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
- v2[0] = np.ones(5)
- v2[1] = np.ones(9)
+ v2.set_block(0, np.ones(5))
+ v2.set_block(1, np.ones(9))
v -= v2
- self.assertTrue(np.allclose(v[0], a - 1))
- self.assertTrue(np.allclose(v[1], b - 1))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy - 1))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy - 1))
- self.assertTrue(np.allclose(v2[0], np.ones(5)))
- self.assertTrue(np.allclose(v2[1], np.ones(9)))
+ self.assertTrue(np.allclose(v2.get_block(0), np.ones(5)))
+ self.assertTrue(np.allclose(v2.get_block(1), np.ones(9)))
with self.assertRaises(Exception) as context:
v -= 'hola'
@@ -662,30 +693,79 @@ def test_imul(self):
v = BlockVector(2)
a = np.ones(5)
- b = np.arange(9)
- v[0] = a
- v[1] = b
+ b = np.arange(9, dtype=np.float64)
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
v *= 2.0
- self.assertTrue(np.allclose(v[0], a * 2.0))
- self.assertTrue(np.allclose(v[1], b * 2.0))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy * 2.0))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy * 2.0))
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
- v2[0] = np.ones(5) * 2
- v2[1] = np.ones(9) * 2
+ v2.set_block(0, np.ones(5) * 2)
+ v2.set_block(1, np.ones(9) * 2)
v *= v2
- self.assertTrue(np.allclose(v[0], a * 2))
- self.assertTrue(np.allclose(v[1], b * 2))
+ self.assertTrue(np.allclose(v.get_block(0), a_copy * 2))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy * 2))
+
+ self.assertTrue(np.allclose(v2.get_block(0), np.ones(5) * 2))
+ self.assertTrue(np.allclose(v2.get_block(1), np.ones(9) * 2))
+
+ with self.assertRaises(Exception) as context:
+ v *= 'hola'
- self.assertTrue(np.allclose(v2[0], np.ones(5) * 2))
- self.assertTrue(np.allclose(v2[1], np.ones(9) * 2))
+ def test_itruediv(self):
+ v = self.ones
+ v /= 3
+ self.assertTrue(np.allclose(v.flatten(), np.ones(v.size)/3))
+ v.fill(1.0)
+ v /= v
+ self.assertTrue(np.allclose(v.flatten(), np.ones(v.size)))
+ v.fill(1.0)
+ v /= np.ones(v.size) * 2
+ self.assertTrue(np.allclose(v.flatten(), np.ones(v.size) / 2))
+
+ v = BlockVector(2)
+ a = np.ones(5)
+ b = np.arange(9, dtype=np.float64)
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
+ v /= 2.0
+
+ self.assertTrue(np.allclose(v.get_block(0), a_copy / 2.0))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy / 2.0))
+
+ v = BlockVector(2)
+ a = np.ones(5)
+ b = np.zeros(9)
+ a_copy = a.copy()
+ b_copy = b.copy()
+ v.set_block(0, a)
+ v.set_block(1, b)
+
+ v2 = BlockVector(2)
+ v2.set_block(0, np.ones(5) * 2)
+ v2.set_block(1, np.ones(9) * 2)
+
+ v /= v2
+ self.assertTrue(np.allclose(v.get_block(0), a_copy / 2))
+ self.assertTrue(np.allclose(v.get_block(1), b_copy / 2))
+
+ self.assertTrue(np.allclose(v2.get_block(0), np.ones(5) * 2))
+ self.assertTrue(np.allclose(v2.get_block(1), np.ones(9) * 2))
with self.assertRaises(Exception) as context:
v *= 'hola'
@@ -693,36 +773,38 @@ def test_imul(self):
def test_getitem(self):
v = self.ones
for i, s in enumerate(self.list_sizes_ones):
- self.assertEqual(v[i].size, s)
- self.assertEqual(v[i].shape, (s,))
- self.assertListEqual(v[i].tolist(), np.ones(s).tolist())
+ self.assertEqual(v.get_block(i).size, s)
+ self.assertEqual(v.get_block(i).shape, (s,))
+ self.assertListEqual(v.get_block(i).tolist(), np.ones(s).tolist())
def test_setitem(self):
v = self.ones
for i, s in enumerate(self.list_sizes_ones):
- v[i] = np.ones(s) * i
+ v.set_block(i, np.ones(s) * i)
for i, s in enumerate(self.list_sizes_ones):
- self.assertEqual(v[i].size, s)
- self.assertEqual(v[i].shape, (s,))
+ self.assertEqual(v.get_block(i).size, s)
+ self.assertEqual(v.get_block(i).shape, (s,))
res = np.ones(s) * i
- self.assertListEqual(v[i].tolist(), res.tolist())
+ self.assertListEqual(v.get_block(i).tolist(), res.tolist())
def test_set_blocks(self):
v = self.ones
blocks = [np.ones(s)*i for i, s in enumerate(self.list_sizes_ones)]
v.set_blocks(blocks)
for i, s in enumerate(self.list_sizes_ones):
- self.assertEqual(v[i].size, s)
- self.assertEqual(v[i].shape, (s,))
+ self.assertEqual(v.get_block(i).size, s)
+ self.assertEqual(v.get_block(i).shape, (s,))
res = np.ones(s) * i
- self.assertListEqual(v[i].tolist(), res.tolist())
+ self.assertListEqual(v.get_block(i).tolist(), res.tolist())
def test_has_none(self):
v = self.ones
self.assertFalse(v.has_none)
- v[0] = None
+ v = BlockVector(3)
+ v.set_block(0, np.ones(2))
+ v.set_block(2, np.ones(3))
self.assertTrue(v.has_none)
- v[0] = np.ones(2)
+ v.set_block(1, np.ones(2))
self.assertFalse(v.has_none)
def test_copyfrom(self):
@@ -733,15 +815,15 @@ def test_copyfrom(self):
v2 = BlockVector(len(self.list_sizes_ones))
for i, s in enumerate(self.list_sizes_ones):
- v2[i] = np.ones(s)*i
+ v2.set_block(i, np.ones(s)*i)
v.copyfrom(v2)
for idx, blk in enumerate(v2):
- self.assertListEqual(blk.tolist(), v2[idx].tolist())
+ self.assertListEqual(blk.tolist(), v2.get_block(idx).tolist())
v3 = BlockVector(2)
v4 = v.clone(2)
- v3[0] = v4
- v3[1] = np.zeros(3)
+ v3.set_block(0, v4)
+ v3.set_block(1, np.zeros(3))
self.assertListEqual(v3.tolist(), v4.tolist() + [0]*3)
def test_copyto(self):
@@ -761,174 +843,174 @@ def test_gt(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v > 0
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
flags = v > np.zeros(v.size)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
vv = v.copy()
vv.fill(0.0)
flags = v > vv
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_ge(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v >= 0
- v[1] = b + 1
+ v.set_block(1, b + 1)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
- v[1] = b - 1
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
+ v.set_block(1, b - 1)
flags = v >= np.zeros(v.size)
- v[1] = b
+ v.set_block(1, b)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
- v[1] = b - 1
+ v.set_block(1, b - 1)
vv = v.copy()
vv.fill(0.0)
flags = v >= vv
- v[1] = b
+ v.set_block(1, b)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_lt(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v < 1
- v[0] = a-1
- v[1] = b+1
+ v.set_block(0, a-1)
+ v.set_block(1, b+1)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
- v[0] = a + 1
- v[1] = b - 1
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
+ v.set_block(0, a + 1)
+ v.set_block(1, b - 1)
flags = v < np.ones(v.size)
- v[0] = a - 1
- v[1] = b + 1
+ v.set_block(0, a - 1)
+ v.set_block(1, b + 1)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
- v[0] = a + 1
- v[1] = b - 1
+ v.set_block(0, a + 1)
+ v.set_block(1, b - 1)
vv = v.copy()
vv.fill(1.0)
flags = v < vv
- v[0] = a - 1
- v[1] = b + 1
+ v.set_block(0, a - 1)
+ v.set_block(1, b + 1)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_le(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v <= 1
- v[1] = b + 1
+ v.set_block(1, b + 1)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
flags = v <= v
vv = v.copy()
vv.fill(1.0)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, vv[bid]))
+ self.assertTrue(np.allclose(blk, vv.get_block(bid)))
flags = v <= v.flatten()
vv = v.copy()
vv.fill(1.0)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, vv[bid]))
+ self.assertTrue(np.allclose(blk, vv.get_block(bid)))
def test_eq(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v == 1
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
flags = v == np.ones(v.size)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
vv = v.copy()
vv.fill(1.0)
flags = v == vv
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_ne(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
flags = v != 0
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
flags = v != np.zeros(v.size)
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
vv = v.copy()
vv.fill(0.0)
flags = v != vv
self.assertEqual(v.nblocks, flags.nblocks)
for bid, blk in enumerate(flags):
- self.assertTrue(np.allclose(blk, v[bid]))
+ self.assertTrue(np.allclose(blk, v.get_block(bid)))
def test_contains(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
self.assertTrue(0 in v)
self.assertFalse(3 in v)
@@ -938,18 +1020,28 @@ def test_copy(self):
v = BlockVector(2)
a = np.ones(5)
b = np.zeros(9)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = v.copy()
self.assertTrue(np.allclose(v.flatten(), v2.flatten()))
+ def test_copy_structure(self):
+ v = BlockVector(2)
+ a = np.ones(5)
+ b = np.zeros(9)
+ v.set_block(0, a)
+ v.set_block(1, b)
+ v2 = v.copy_structure()
+ self.assertEqual(v.get_block(0).size, v2.get_block(0).size)
+ self.assertEqual(v.get_block(1).size, v2.get_block(1).size)
+
def test_unary_ufuncs(self):
v = BlockVector(2)
a = np.ones(3) * 0.5
b = np.ones(2) * 0.8
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
@@ -965,13 +1057,13 @@ def test_unary_ufuncs(self):
np.conjugate, np.reciprocal]
for fun in unary_funcs:
- v2[0] = fun(v[0])
- v2[1] = fun(v[1])
+ v2.set_block(0, fun(v.get_block(0)))
+ v2.set_block(1, fun(v.get_block(1)))
res = fun(v)
self.assertIsInstance(res, BlockVector)
self.assertEqual(res.nblocks, 2)
for i in range(2):
- self.assertTrue(np.allclose(res[i], v2[i]))
+ self.assertTrue(np.allclose(res.get_block(i), v2.get_block(i)))
other_funcs = [np.cumsum, np.cumprod, np.cumproduct]
@@ -989,14 +1081,14 @@ def test_reduce_ufuncs(self):
v = BlockVector(2)
a = np.ones(3) * 0.5
b = np.ones(2) * 0.8
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
reduce_funcs = [np.sum, np.max, np.min, np.prod, np.mean]
for fun in reduce_funcs:
self.assertAlmostEqual(fun(v), fun(v.flatten()))
- other_funcs = [np.all, np.any, np.std, np.ptp, np.argmax, np.argmin]
+ other_funcs = [np.all, np.any, np.std, np.ptp]
for fun in other_funcs:
self.assertAlmostEqual(fun(v), fun(v.flatten()))
@@ -1005,14 +1097,14 @@ def test_binary_ufuncs(self):
v = BlockVector(2)
a = np.ones(3) * 0.5
b = np.ones(2) * 0.8
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
a2 = np.ones(3) * 3.0
b2 = np.ones(2) * 2.8
- v2[0] = a2
- v2[1] = b2
+ v2.set_block(0, a2)
+ v2.set_block(1, b2)
binary_ufuncs = [np.add, np.multiply, np.divide, np.subtract,
np.greater, np.greater_equal, np.less,
@@ -1044,14 +1136,14 @@ def test_binary_ufuncs(self):
v = BlockVector(2)
a = np.ones(3, dtype=bool)
b = np.ones(2, dtype=bool)
- v[0] = a
- v[1] = b
+ v.set_block(0, a)
+ v.set_block(1, b)
v2 = BlockVector(2)
a2 = np.zeros(3, dtype=bool)
b2 = np.zeros(2, dtype=bool)
- v2[0] = a2
- v2[1] = b2
+ v2.set_block(0, a2)
+ v2.set_block(1, b2)
binary_ufuncs = [np.logical_and, np.logical_or, np.logical_xor]
for fun in binary_ufuncs:
@@ -1059,5 +1151,20 @@ def test_binary_ufuncs(self):
res = fun(v, v2)
self.assertTrue(np.allclose(flat_res, res.flatten()))
+ def test_min_with_empty_blocks(self):
+ b = BlockVector(3)
+ b.set_block(0, np.zeros(3))
+ b.set_block(1, np.zeros(0))
+ b.set_block(2, np.zeros(3))
+ self.assertEqual(b.min(), 0)
+
+ def test_max_with_empty_blocks(self):
+ b = BlockVector(3)
+ b.set_block(0, np.zeros(3))
+ b.set_block(1, np.zeros(0))
+ b.set_block(2, np.zeros(3))
+ self.assertEqual(b.max(), 0)
+
+
if __name__ == '__main__':
unittest.main()
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_coomatrix.py b/pyomo/contrib/pynumero/sparse/tests/test_coomatrix.py
deleted file mode 100644
index 69280e9c938..00000000000
--- a/pyomo/contrib/pynumero/sparse/tests/test_coomatrix.py
+++ /dev/null
@@ -1,36 +0,0 @@
-# ___________________________________________________________________________
-#
-# Pyomo: Python Optimization Modeling Objects
-# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
-# rights in this software.
-# This software is distributed under the 3-clause BSD License.
-# ___________________________________________________________________________
-import sys
-import os
-import pyutilib.th as unittest
-
-try:
- from scipy.sparse import csr_matrix, csc_matrix, coo_matrix, identity
- import numpy as np
-except ImportError:
- raise unittest.SkipTest(
- "Pynumero needs scipy and numpy to run COO matrix tests")
-
-from pyomo.contrib.pynumero.sparse.coo import (diagonal_matrix,
- empty_matrix)
-
-@unittest.skipIf(os.name in ['nt', 'dos'], "Do not test on windows")
-class TestEmptyMatrix(unittest.TestCase):
-
- def test_constructor(self):
-
- m = empty_matrix(3, 3)
- self.assertEqual(m.shape, (3, 3))
- self.assertEqual(m.nnz, 0)
-
-
-
-
-
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py b/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py
index 2613f4b2e53..b7c876b4a96 100644
--- a/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py
+++ b/pyomo/contrib/pynumero/sparse/tests/test_intrinsics.py
@@ -10,11 +10,13 @@
import sys
import pyutilib.th as unittest
-from .. import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_available
+)
if not (numpy_available and scipy_available):
- raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
+ raise unittest.SkipTest(
+ "Pynumero needs scipy and numpy to run Sparse intrinsict tests")
-import numpy as np
from pyomo.contrib.pynumero.sparse import BlockVector
import pyomo.contrib.pynumero as pn
@@ -26,8 +28,10 @@ def setUp(self):
self.v2 = np.array([4.4, 5.5, 6.6, 7.7])
self.v3 = np.array([1.1, 2.2, 3.3])*2
self.v4 = np.array([4.4, 5.5, 6.6, 7.7])*2
- self.bv = BlockVector([self.v1, self.v2])
- self.bv2 = BlockVector([self.v3, self.v4])
+ self.bv = BlockVector(2)
+ self.bv2 = BlockVector(2)
+ self.bv.set_blocks([self.v1, self.v2])
+ self.bv2.set_blocks([self.v3, self.v4])
def test_where(self):
@@ -35,7 +39,7 @@ def test_where(self):
condition = bv >= 4.5
res = pn.where(condition)[0]
for bid, blk in enumerate(res):
- self.assertTrue(np.allclose(blk, pn.where(bv[bid] >= 4.5)))
+ self.assertTrue(np.allclose(blk, pn.where(bv.get_block(bid) >= 4.5)))
flat_condition = condition.flatten()
res = pn.where(condition, 2.0, 1.0)
@@ -54,7 +58,8 @@ def test_where(self):
res_flat = pn.where(flat_condition, np.ones(bv.size) * 2.0, np.ones(bv.size))
self.assertTrue(np.allclose(res.flatten(), res_flat))
- bones = BlockVector([np.ones(3), np.ones(4)])
+ bones = BlockVector(2)
+ bones.set_blocks([np.ones(3), np.ones(4)])
res = pn.where(condition, bones * 2.0, 1.0)
res_flat = pn.where(flat_condition, np.ones(bv.size) * 2.0, 1.0)
@@ -78,34 +83,69 @@ def test_isin(self):
test_bv = BlockVector(2)
a = np.array([1.1, 3.3])
b = np.array([5.5, 7.7])
- test_bv[0] = a
- test_bv[1] = b
+ test_bv.set_block(0, a)
+ test_bv.set_block(1, b)
res = pn.isin(bv, test_bv)
for bid, blk in enumerate(bv):
- self.assertEqual(blk.size, res[bid].size)
- res_flat = np.isin(blk, test_bv[bid])
- self.assertTrue(np.allclose(res[bid], res_flat))
+ self.assertEqual(blk.size, res.get_block(bid).size)
+ res_flat = np.isin(blk, test_bv.get_block(bid))
+ self.assertTrue(np.allclose(res.get_block(bid), res_flat))
c = np.concatenate([a, b])
res = pn.isin(bv, c)
for bid, blk in enumerate(bv):
- self.assertEqual(blk.size, res[bid].size)
+ self.assertEqual(blk.size, res.get_block(bid).size)
res_flat = np.isin(blk, c)
- self.assertTrue(np.allclose(res[bid], res_flat))
+ self.assertTrue(np.allclose(res.get_block(bid), res_flat))
res = pn.isin(bv, test_bv, invert=True)
for bid, blk in enumerate(bv):
- self.assertEqual(blk.size, res[bid].size)
- res_flat = np.isin(blk, test_bv[bid], invert=True)
- self.assertTrue(np.allclose(res[bid], res_flat))
+ self.assertEqual(blk.size, res.get_block(bid).size)
+ res_flat = np.isin(blk, test_bv.get_block(bid), invert=True)
+ self.assertTrue(np.allclose(res.get_block(bid), res_flat))
c = np.concatenate([a, b])
res = pn.isin(bv, c, invert=True)
for bid, blk in enumerate(bv):
- self.assertEqual(blk.size, res[bid].size)
+ self.assertEqual(blk.size, res.get_block(bid).size)
res_flat = np.isin(blk, c, invert=True)
- self.assertTrue(np.allclose(res[bid], res_flat))
+ self.assertTrue(np.allclose(res.get_block(bid), res_flat))
# ToDo: try np.copy on a blockvector
+ def test_intersect1d(self):
+
+ vv1 = np.array([1.1, 3.3])
+ vv2 = np.array([4.4, 7.7])
+ bvv = BlockVector(2)
+ bvv.set_blocks([vv1, vv2])
+ res = pn.intersect1d(self.bv, bvv)
+ self.assertIsInstance(res, BlockVector)
+ self.assertTrue(np.allclose(res.get_block(0), vv1))
+ self.assertTrue(np.allclose(res.get_block(1), vv2))
+ vv3 = np.array([1.1, 7.7])
+ res = pn.intersect1d(self.bv, vv3)
+ self.assertIsInstance(res, BlockVector)
+ self.assertTrue(np.allclose(res.get_block(0), np.array([1.1])))
+ self.assertTrue(np.allclose(res.get_block(1), np.array([7.7])))
+ res = pn.intersect1d(vv3, self.bv)
+ self.assertIsInstance(res, BlockVector)
+ self.assertTrue(np.allclose(res.get_block(0), np.array([1.1])))
+ self.assertTrue(np.allclose(res.get_block(1), np.array([7.7])))
+
+ def test_setdiff1d(self):
+
+ vv1 = np.array([1.1, 3.3])
+ vv2 = np.array([4.4, 7.7])
+ bvv = BlockVector(2)
+ bvv.set_blocks([vv1, vv2])
+ res = pn.setdiff1d(self.bv, bvv)
+ self.assertIsInstance(res, BlockVector)
+ self.assertTrue(np.allclose(res.get_block(0), np.array([2.2])))
+ self.assertTrue(np.allclose(res.get_block(1), np.array([5.5, 6.6])))
+ vv3 = np.array([1.1, 7.7])
+ res = pn.setdiff1d(self.bv, vv3)
+ self.assertIsInstance(res, BlockVector)
+ self.assertTrue(np.allclose(res.get_block(0), np.array([2.2, 3.3])))
+ self.assertTrue(np.allclose(res.get_block(1), np.array([4.4, 5.5, 6.6])))
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py
new file mode 100644
index 00000000000..2beea888532
--- /dev/null
+++ b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_matrix.py
@@ -0,0 +1,1155 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import warnings
+import pyutilib.th as unittest
+
+from pyomo.contrib.pynumero.dependencies import (
+ numpy_available, scipy_available, numpy as np
+)
+
+SKIPTESTS=[]
+if numpy_available and scipy_available:
+ from scipy.sparse import coo_matrix, bmat
+else:
+ SKIPTESTS.append(
+ "Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests"
+ )
+
+try:
+ from mpi4py import MPI
+ comm = MPI.COMM_WORLD
+ if comm.Get_size() < 3:
+ SKIPTESTS.append(
+ "Pynumero needs at least 3 processes to run BlockMatrix MPI tests"
+ )
+except ImportError:
+ SKIPTESTS.append("Pynumero needs mpi4py to run BlockMatrix MPI tests")
+
+if not SKIPTESTS:
+ from pyomo.contrib.pynumero.sparse import BlockVector, BlockMatrix
+ from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector
+ from pyomo.contrib.pynumero.sparse.mpi_block_matrix import (
+ MPIBlockMatrix, NotFullyDefinedBlockMatrixError
+ )
+
+
+@unittest.category("mpi")
+class TestMPIBlockMatrix(unittest.TestCase):
+
+ # Because the setUpClass is called before decorators around the
+ # class itself, we need to put the skipIf on the class setup and not
+ # the class.
+
+ @classmethod
+ @unittest.skipIf(SKIPTESTS, SKIPTESTS)
+ def setUpClass(cls):
+ # test problem 1
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+
+ rank = comm.Get_rank()
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+
+ # create serial matrix image
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+ cls.square_serial_mat = serial_bm
+
+ bm.broadcast_block_sizes()
+ cls.square_mpi_mat = bm
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+
+ cls.square_mpi_mat_no_broadcast = bm
+
+ # create matrix with shared blocks
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.set_block(0, 1, m)
+
+ bm.broadcast_block_sizes()
+ cls.square_mpi_mat2 = bm
+
+ # create serial matrix image
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+ serial_bm.set_block(0, 1, m)
+ cls.square_serial_mat2 = serial_bm
+
+ row = np.array([0, 1, 2, 3])
+ col = np.array([0, 1, 0, 1])
+ data = np.array([1., 1., 1., 1.])
+ m2 = coo_matrix((data, (row, col)), shape=(4, 2))
+
+ rank_ownership = [[0, -1, 0], [-1, 1, -1]]
+ bm = MPIBlockMatrix(2, 3, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ bm.set_block(0, 2, m2)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+ cls.rectangular_mpi_mat = bm
+
+ bm = BlockMatrix(2, 3)
+ bm.set_block(0, 0, m)
+ bm.set_block(0, 2, m2)
+ bm.set_block(1, 1, m)
+ cls.rectangular_serial_mat = bm
+
+ def test_bshape(self):
+ self.assertEqual(self.square_mpi_mat.bshape, (2, 2))
+ self.assertEqual(self.rectangular_mpi_mat.bshape, (2, 3))
+
+ def test_shape(self):
+ self.assertEqual(self.square_mpi_mat.shape, (8, 8))
+ self.assertEqual(self.rectangular_mpi_mat.shape, (8, 10))
+ with self.assertRaises(NotFullyDefinedBlockMatrixError):
+ self.assertEqual(self.square_mpi_mat_no_broadcast.shape, (8, 8))
+
+ def test_tocoo(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.tocoo()
+
+ def test_tocsr(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.tocsr()
+
+ def test_tocsc(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.tocsc()
+
+ def test_todia(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.todia()
+
+ def test_tobsr(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.tobsr()
+
+ def test_toarray(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.toarray()
+
+ def test_coo_data(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.coo_data()
+
+ def test_getitem(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ if rank == 0:
+ self.assertTrue((m == self.square_mpi_mat.get_block(0, 0)).toarray().all())
+ if rank == 1:
+ self.assertTrue((m == self.square_mpi_mat.get_block(1, 1)).toarray().all())
+
+ self.assertTrue((m == self.square_mpi_mat2.get_block(0, 1)).toarray().all())
+
+ def test_setitem(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+
+ bm.set_block(0, 1, m)
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ self.assertTrue((m == bm.get_block(0, 1)).toarray().all())
+
+ def test_nnz(self):
+ self.assertEqual(self.square_mpi_mat.nnz, 12)
+ self.assertEqual(self.square_mpi_mat2.nnz, 18)
+ self.assertEqual(self.rectangular_mpi_mat.nnz, 16)
+
+ def test_block_shapes(self):
+
+ m, n = self.square_mpi_mat.bshape
+ mpi_shapes = self.square_mpi_mat.block_shapes()
+ serial_shapes = self.square_serial_mat.block_shapes()
+ for i in range(m):
+ for j in range(n):
+ self.assertEqual(serial_shapes[i][j], mpi_shapes[i][j])
+
+ def test_reset_brow(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ self.assertTrue(np.allclose(serial_bm.row_block_sizes(),
+ bm.row_block_sizes()))
+ bm.reset_brow(0)
+ serial_bm.reset_brow(0)
+ self.assertTrue(np.allclose(serial_bm.row_block_sizes(),
+ bm.row_block_sizes()))
+
+ bm.reset_brow(1)
+ serial_bm.reset_brow(1)
+ self.assertTrue(np.allclose(serial_bm.row_block_sizes(),
+ bm.row_block_sizes()))
+
+ def test_reset_bcol(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ self.assertTrue(np.allclose(serial_bm.row_block_sizes(),
+ bm.row_block_sizes()))
+ bm.reset_bcol(0)
+ serial_bm.reset_bcol(0)
+ self.assertTrue(np.allclose(serial_bm.col_block_sizes(),
+ bm.col_block_sizes()))
+
+ bm.reset_bcol(1)
+ serial_bm.reset_bcol(1)
+ self.assertTrue(np.allclose(serial_bm.col_block_sizes(),
+ bm.col_block_sizes()))
+
+ def test_has_empty_rows(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.has_empty_rows()
+
+ def test_has_empty_cols(self):
+ with self.assertRaises(Exception) as context:
+ self.square_mpi_mat.has_empty_cols()
+
+ def test_transpose(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.rectangular_mpi_mat
+
+ res = mat1.transpose()
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat1.bshape[1], res.bshape[0])
+ self.assertEqual(mat1.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat1.get_block(j, i).toarray()))
+
+ res = mat2.transpose()
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat2.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat2.bshape[1], res.bshape[0])
+ self.assertEqual(mat2.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat2.get_block(j, i).toarray()))
+
+ res = mat1.transpose(copy=True)
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat1.bshape[1], res.bshape[0])
+ self.assertEqual(mat1.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat1.get_block(j, i).toarray()))
+
+ res = mat2.transpose(copy=True)
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat2.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat2.bshape[1], res.bshape[0])
+ self.assertEqual(mat2.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat2.get_block(j, i).toarray()))
+
+ res = mat1.T
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat1.bshape[1], res.bshape[0])
+ self.assertEqual(mat1.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat1.get_block(j, i).toarray()))
+
+ res = mat2.T
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat2.rank_ownership, res.rank_ownership.T))
+ self.assertEqual(mat2.bshape[1], res.bshape[0])
+ self.assertEqual(mat2.bshape[0], res.bshape[1])
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray().T,
+ mat2.get_block(j, i).toarray()))
+
+ def test_add(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ res = mat1 + mat1
+ serial_res = serial_mat1 + serial_mat1
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ res = mat1 + mat2
+ serial_res = serial_mat1 + serial_mat2
+ self.assertIsInstance(res, MPIBlockMatrix)
+ rows, columns = np.nonzero(res.ownership_mask)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 + serial_mat2
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat2 + mat1
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 + serial_mat2.tocoo()
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat2.tocoo() + mat1
+
+ def test_sub(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ res = mat1 - mat1
+ serial_res = serial_mat1 - serial_mat1
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ res = mat1 - mat2
+ serial_res = serial_mat1 - serial_mat2
+ self.assertIsInstance(res, MPIBlockMatrix)
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 - serial_mat2
+ with self.assertRaises(Exception) as context:
+ res = serial_mat2 - mat1
+ with self.assertRaises(Exception) as context:
+ res = mat1 - serial_mat2.tocoo()
+ with self.assertRaises(Exception) as context:
+ res = serial_mat2.tocoo() - mat1
+
+ def test_mul(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ rank = comm.Get_rank()
+
+ bv1 = MPIBlockVector(2, [0, 1], comm)
+
+ if rank == 0:
+ bv1.set_block(0, np.arange(4, dtype=np.float64))
+ if rank == 1:
+ bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ bv1.broadcast_block_sizes()
+
+ serial_bv1 = BlockVector(2)
+ serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
+ serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+
+ res = mat1 * bv1
+ serial_res = serial_mat1 * serial_bv1
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ res = mat2 * bv1
+ serial_res = serial_mat2 * serial_bv1
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ bv1 = MPIBlockVector(2, [0, -1], comm)
+
+ if rank == 0:
+ bv1.set_block(0, np.arange(4, dtype=np.float64))
+ bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ bv1.broadcast_block_sizes()
+
+ res = mat1 * bv1
+ serial_res = serial_mat1 * serial_bv1
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ res = mat2 * bv1
+ serial_res = serial_mat2 * serial_bv1
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ # rectangular matrix
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ bv1 = MPIBlockVector(3, [0, 1, 2], comm)
+
+ if rank == 0:
+ bv1.set_block(0, np.arange(4, dtype=np.float64))
+ if rank == 1:
+ bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ if rank == 2:
+ bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)
+
+ bv1.broadcast_block_sizes()
+
+ serial_bv1 = BlockVector(3)
+ serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
+ serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ serial_bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)
+
+ # with warnings.catch_warnings():
+ # warnings.simplefilter("ignore")
+ res = mat1 * bv1
+ serial_res = serial_mat1 * serial_bv1
+
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(serial_res.nblocks, 2)
+ self.assertEqual(res.nblocks, 2)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ bv1 = MPIBlockVector(3, [0, 1, 0], comm)
+
+ if rank == 0:
+ bv1.set_block(0, np.arange(4, dtype=np.float64))
+ bv1.set_block(2, np.arange(2, dtype=np.float64) + 8)
+ if rank == 1:
+ bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ bv1.broadcast_block_sizes()
+
+ res = mat1 * bv1
+ serial_res = serial_mat1 * serial_bv1
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ res = mat1 * 3.0
+ serial_res = serial_mat1 * 3.0
+ self.assertIsInstance(res, MPIBlockMatrix)
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ res = 3.0 * mat1
+ serial_res = serial_mat1 * 3.0
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ def test_div(self):
+
+ mat1 = self.square_mpi_mat
+ serial_mat1 = self.square_serial_mat
+
+ res = mat1 / 3.0
+ serial_res = serial_mat1 / 3.0
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ def test_dot(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ rank = comm.Get_rank()
+
+ bv1 = MPIBlockVector(2, [0, 1], comm)
+
+ if rank == 0:
+ bv1.set_block(0, np.arange(4, dtype=np.float64))
+ if rank == 1:
+ bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+ bv1.broadcast_block_sizes()
+
+ serial_bv1 = BlockVector(2)
+ serial_bv1.set_block(0, np.arange(4, dtype=np.float64))
+ serial_bv1.set_block(1, np.arange(4, dtype=np.float64) + 4)
+
+ res = mat1.dot(bv1)
+ serial_res = serial_mat1.dot(serial_bv1)
+ self.assertIsInstance(res, BlockVector)
+ self.assertEqual(res.nblocks, serial_res.nblocks)
+ for bid in range(serial_res.nblocks):
+ self.assertTrue(np.allclose(res.get_block(bid),
+ serial_res.get_block(bid)))
+
+ def test_iadd(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m.copy())
+ if rank == 1:
+ bm.set_block(1, 1, m.copy())
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m.copy())
+ serial_bm.set_block(1, 1, m.copy())
+
+ bm += bm
+ serial_bm += serial_bm
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if bm.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(bm.get_block(i, j).toarray(),
+ serial_bm.get_block(i, j).toarray()))
+
+ with self.assertRaises(Exception) as context:
+ bm += serial_bm
+
+ serial_bm2 = BlockMatrix(2, 2)
+ serial_bm2.set_block(0, 0, m.copy())
+ serial_bm2.set_block(0, 1, m.copy())
+ serial_bm2.set_block(1, 1, m.copy())
+
+ with self.assertRaises(Exception) as context:
+ bm += serial_bm2
+
+ def test_isub(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m.copy())
+ if rank == 1:
+ bm.set_block(1, 1, m.copy())
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m.copy())
+ serial_bm.set_block(1, 1, m.copy())
+
+ bm -= bm
+ serial_bm -= serial_bm
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if bm.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(bm.get_block(i, j).toarray(),
+ serial_bm.get_block(i, j).toarray()))
+
+ with self.assertRaises(Exception) as context:
+ bm -= serial_bm
+
+ def test_imul(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ bm *= 2.0
+ serial_bm *= 2.0
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if bm.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(bm.get_block(i, j).toarray(),
+ serial_bm.get_block(i, j).toarray()))
+
+ def test_idiv(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ bm /= 2.0
+ serial_bm /= 2.0
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if bm.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(bm.get_block(i, j).toarray(),
+ serial_bm.get_block(i, j).toarray()))
+
+ def test_neg(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ res = -bm
+ serial_res = -serial_bm
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+
+ def test_abs(self):
+
+ row = np.array([0, 3, 1, 2, 3, 0])
+ col = np.array([0, 0, 1, 2, 3, 3])
+ data = np.array([2., 1, 3, 4, 5, 1])
+ m = coo_matrix((data, (row, col)), shape=(4, 4))
+ rank = comm.Get_rank()
+
+ # create mpi matrix
+ rank_ownership = [[0, -1], [-1, 1]]
+ bm = MPIBlockMatrix(2, 2, rank_ownership, comm)
+ if rank == 0:
+ bm.set_block(0, 0, m)
+ if rank == 1:
+ bm.set_block(1, 1, m)
+ bm.broadcast_block_sizes()
+
+ serial_bm = BlockMatrix(2, 2)
+ serial_bm.set_block(0, 0, m)
+ serial_bm.set_block(1, 1, m)
+
+ res = abs(bm)
+ serial_res = abs(serial_bm)
+
+ rows, columns = np.nonzero(bm.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+
+ def test_eq(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 == mat2
+ serial_res = serial_mat1 == serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 == serial_mat2
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 == mat1
+ serial_res = serial_mat1 == serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 == serial_mat1
+
+ def test_ne(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 != mat2
+ serial_res = serial_mat1 != serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 != serial_mat2
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 != mat1
+ serial_res = serial_mat1 != serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 != serial_mat1
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat1 != mat1
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 != 2
+ serial_res = serial_mat1 != 2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ def test_le(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 <= mat2
+ serial_res = serial_mat1 <= serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 <= serial_mat2
+ serial_res = serial_mat1 <= serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 <= mat1
+ serial_res = serial_mat1 <= serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 <= serial_mat1
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat1 <= mat1
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 <= 2
+ serial_res = serial_mat1 <= 2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ def test_lt(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 < mat2
+ serial_res = serial_mat1 < serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 < serial_mat2
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 < mat1
+ serial_res = serial_mat1 < serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 < serial_mat1
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat1 < mat1
+
+ def test_ge(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 >= mat2
+ serial_res = serial_mat1 >= serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 >= serial_mat2
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 >= mat1
+ serial_res = serial_mat1 >= serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 >= serial_mat1
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat1 >= mat1
+
+ def test_gt(self):
+
+ mat1 = self.square_mpi_mat
+ mat2 = self.square_mpi_mat2
+
+ serial_mat1 = self.square_serial_mat
+ serial_mat2 = self.square_serial_mat2
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 > mat2
+ serial_res = serial_mat1 > serial_mat2
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 > serial_mat2
+
+ mat1 = self.rectangular_mpi_mat
+ serial_mat1 = self.rectangular_serial_mat
+
+ with warnings.catch_warnings():
+ warnings.simplefilter("ignore")
+ res = mat1 > mat1
+ serial_res = serial_mat1 > serial_mat1
+
+ self.assertIsInstance(res, MPIBlockMatrix)
+ self.assertTrue(np.allclose(mat1.rank_ownership, res.rank_ownership))
+ rows, columns = np.nonzero(res.ownership_mask)
+ for i, j in zip(rows, columns):
+ if res.get_block(i, j) is not None:
+ self.assertTrue(np.allclose(res.get_block(i, j).toarray(),
+ serial_res.get_block(i, j).toarray()))
+ else:
+ self.assertIsNone(serial_res.get_block(i, j))
+
+ with self.assertRaises(Exception) as context:
+ res = mat1 > serial_mat1
+
+ with self.assertRaises(Exception) as context:
+ res = serial_mat1 > mat1
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py
new file mode 100644
index 00000000000..db6fd4ce836
--- /dev/null
+++ b/pyomo/contrib/pynumero/sparse/tests/test_mpi_block_vector.py
@@ -0,0 +1,1710 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import pyutilib.th as unittest
+
+from pyomo.contrib.pynumero.dependencies import (
+ numpy_available, scipy_available, numpy as np
+)
+
+# Collect human-readable reasons to skip the MPI tests; an empty list
+# means all prerequisites are satisfied.
+SKIPTESTS=[]
+if numpy_available and scipy_available:
+    from scipy.sparse import coo_matrix, bmat
+else:
+    SKIPTESTS.append(
+        "Pynumero needs scipy and numpy>=1.13.0 to run BlockMatrix tests"
+    )
+
+# mpi4py and at least 3 MPI processes are required by these tests.
+try:
+    from mpi4py import MPI
+    comm = MPI.COMM_WORLD
+    if comm.Get_size() < 3:
+        SKIPTESTS.append(
+            "Pynumero needs at least 3 processes to run BlockVector MPI tests"
+        )
+except ImportError:
+    SKIPTESTS.append("Pynumero needs mpi4py to run BlockVector MPI tests")
+
+# Only import the pynumero classes when the tests will actually run.
+if not SKIPTESTS:
+    from pyomo.contrib.pynumero.sparse import BlockVector
+    from pyomo.contrib.pynumero.sparse.mpi_block_vector import MPIBlockVector
+
+
+@unittest.category("mpi")
+class TestMPIBlockVector(unittest.TestCase):
+
+ # Because the setUpClass is called before decorators around the
+ # class itself, we need to put the skipIf on the class setup and not
+ # the class.
+
+    @classmethod
+    @unittest.skipIf(SKIPTESTS, SKIPTESTS)
+    def setUpClass(cls):
+        # Build the two class-level fixture vectors used by all tests.
+        # test problem 1
+
+        # v1: 4 blocks, ownership alternating between ranks 0 and 1.
+        v1 = MPIBlockVector(4, [0,1,0,1], comm)
+
+        rank = comm.Get_rank()
+        if rank == 0:
+            v1.set_block(0, np.ones(3))
+            v1.set_block(2, np.ones(3))
+        if rank == 1:
+            v1.set_block(1, np.zeros(2))
+            v1.set_block(3, np.ones(2))
+
+        cls.v1 = v1
+        # Called on every rank (all tests invoke it after filling blocks).
+        cls.v1.broadcast_block_sizes()
+        # v2: 7 blocks across 3 ranks; block 6 has ownership -1 (shared).
+        # NOTE(review): block 6 is populated only on rank 2 here --
+        # confirm against MPIBlockVector's shared-block semantics.
+        v2 = MPIBlockVector(7, [0,0,1,1,2,2,-1], comm)
+
+        rank = comm.Get_rank()
+        if rank == 0:
+            v2.set_block(0, np.ones(2))
+            v2.set_block(1, np.ones(2))
+        if rank == 1:
+            v2.set_block(2, np.zeros(3))
+            v2.set_block(3, np.zeros(3))
+        if rank == 2:
+            v2.set_block(4, np.ones(4) * 2.0)
+            v2.set_block(5, np.ones(4) * 2.0)
+            v2.set_block(6, np.ones(2) * 3)
+
+        cls.v2 = v2
+        cls.v2.broadcast_block_sizes()
+
+ def test_nblocks(self):
+ v1 = self.v1
+ self.assertEqual(v1.nblocks, 4)
+ v2 = self.v2
+ self.assertEqual(v2.nblocks, 7)
+
+ def test_bshape(self):
+ v1 = self.v1
+ self.assertEqual(v1.bshape[0], 4)
+ v2 = self.v2
+ self.assertEqual(v2.bshape[0], 7)
+
+ def test_size(self):
+ v1 = self.v1
+ self.assertEqual(v1.size, 10)
+ v2 = self.v2
+ self.assertEqual(v2.size, 20)
+
+ def test_shape(self):
+ v1 = self.v1
+ self.assertEqual(v1.shape[0], 10)
+ v2 = self.v2
+ self.assertEqual(v2.shape[0], 20)
+
+ def test_ndim(self):
+ v1 = self.v1
+ self.assertEqual(v1.ndim, 1)
+
+ def test_has_none(self):
+ v = MPIBlockVector(4, [0,1,0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3))
+ v.set_block(2, np.ones(3))
+ self.assertTrue(v.has_none)
+ self.assertFalse(self.v1.has_none)
+
+ def test_any(self):
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3))
+ if rank == 1:
+ v.set_block(1, np.zeros(3))
+ v.broadcast_block_sizes()
+ self.assertTrue(v.any())
+ self.assertTrue(self.v1.any())
+ self.assertTrue(self.v2.any())
+
+ def test_all(self):
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3))
+ if rank == 1:
+ v.set_block(1, np.zeros(3))
+ v.broadcast_block_sizes()
+ self.assertFalse(v.all())
+ if rank == 1:
+ v.set_block(1, np.ones(3))
+ self.assertTrue(v.all())
+ self.assertFalse(self.v1.all())
+ self.assertFalse(self.v2.all())
+
+    def test_min(self):
+        # min() reduces over all blocks of the distributed vector.
+        v = MPIBlockVector(2, [0, 1], comm)
+        rank = comm.Get_rank()
+        if rank == 0:
+            v.set_block(0, np.arange(3) + 10)
+        if rank == 1:
+            v.set_block(1, np.arange(3))
+        v.broadcast_block_sizes()
+        self.assertEqual(v.min(), 0.0)
+        # Mutating one rank's block changes the global minimum.
+        if rank == 1:
+            v.set_block(1, -np.arange(3))
+        self.assertEqual(v.min(), -2.0)
+
+        # NOTE(review): block 2 has ownership -1 (shared) but is only
+        # populated on rank 1 before the broadcast -- confirm against
+        # MPIBlockVector's shared-block semantics.
+        v = MPIBlockVector(3, [0, 1, -1], comm)
+        rank = comm.Get_rank()
+        if rank == 0:
+            v.set_block(0, np.arange(3) + 10)
+        if rank == 1:
+            v.set_block(1, np.arange(3))
+            v.set_block(2, -np.arange(6))
+        v.broadcast_block_sizes()
+        self.assertEqual(v.min(), -5.0)
+        self.assertEqual(self.v1.min(), 0.0)
+        self.assertEqual(self.v2.min(), 0.0)
+
+ def test_max(self):
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 10)
+ if rank == 1:
+ v.set_block(1, np.arange(3))
+ v.broadcast_block_sizes()
+ self.assertEqual(v.max(), 12.0)
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 10)
+ if rank == 1:
+ v.set_block(1, np.arange(3))
+ v.set_block(2, np.arange(60))
+ v.broadcast_block_sizes()
+ self.assertEqual(v.max(), 59.0)
+ self.assertEqual(self.v1.max(), 1.0)
+ self.assertEqual(self.v2.max(), 3.0)
+
+ def test_sum(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(3) + 3)
+ v.set_block(2, np.arange(3) + 6)
+ v.broadcast_block_sizes()
+
+ b = np.arange(9)
+ self.assertEqual(b.sum(), v.sum())
+ self.assertEqual(self.v1.sum(), 8)
+ self.assertEqual(self.v2.sum(), 26)
+
+ def test_prod(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(2))
+ if rank == 1:
+ v.set_block(1, np.ones(3))
+ v.set_block(2, np.ones(3))
+ v.broadcast_block_sizes()
+ self.assertEqual(1.0, v.prod())
+ if rank == 1:
+ v.set_block(1, np.ones(3) * 2)
+ self.assertEqual(8.0, v.prod())
+ if rank == 0:
+ v.set_block(0, np.ones(2) * 3)
+ self.assertEqual(72.0, v.prod())
+ self.assertEqual(0.0, self.v1.prod())
+ self.assertEqual(0.0, self.v2.prod())
+
+ def test_conj(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(2))
+ if rank == 1:
+ v.set_block(1, np.ones(3))
+ v.set_block(2, np.ones(3))
+ v.broadcast_block_sizes()
+ res = v.conj()
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ for j in v.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(j), v.get_block(j).conj()))
+
+ def test_conjugate(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(2))
+ if rank == 1:
+ v.set_block(1, np.ones(3))
+ v.set_block(2, np.ones(3))
+ v.broadcast_block_sizes()
+ res = v.conjugate()
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ for j in v._owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(j), v.get_block(j).conjugate()))
+
+ def test_nonzero(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.array([0,1,2]))
+ if rank == 1:
+ v.set_block(1, np.array([0,0,2]))
+ v.set_block(2, np.ones(3))
+ v.broadcast_block_sizes()
+ res = v.nonzero()[0]
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(res.get_block(0), np.array([1,2])))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.get_block(1), np.array([2])))
+ self.assertTrue(np.allclose(res.get_block(2), np.arange(3)))
+
+ res = self.v1.nonzero()[0]
+ if rank == 0:
+ self.assertTrue(np.allclose(res.get_block(0), np.arange(3)))
+ self.assertTrue(np.allclose(res.get_block(2), np.arange(3)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.get_block(1), np.arange(0)))
+ self.assertTrue(np.allclose(res.get_block(3), np.arange(2)))
+
+ def test_round(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 0.01)
+ if rank == 1:
+ v.set_block(1, np.arange(3) + 3 + 0.01)
+ v.set_block(2, np.arange(3) + 6 + 0.01)
+ v.broadcast_block_sizes()
+
+ res = v.round()
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(3)+3, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(3)+6, res.get_block(2)))
+
+ def test_clip(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(3) + 3)
+ v.set_block(2, np.arange(3) + 6)
+ v.broadcast_block_sizes()
+
+ res = v.clip(min=2.0)
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.array([2,2,2]), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(3)+3, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(3)+6, res.get_block(2)))
+
+ res = v.clip(min=2.0, max=5.0)
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.array([2,2,2]), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.array([3,4,5]), res.get_block(1)))
+ self.assertTrue(np.allclose(np.array([5,5,5]), res.get_block(2)))
+
+ v1 = self.v1
+ res = v1.clip(max=0.5)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.ones(3) * 0.5, res.get_block(0)))
+ self.assertTrue(np.allclose(np.ones(3) * 0.5, res.get_block(2)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.zeros(2), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2) * 0.5, res.get_block(3)))
+
+ def test_compress(self):
+
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ cond = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ cond.set_block(0, np.array([False, False, True]))
+ if rank == 1:
+ cond.set_block(1, np.array([True, True, True, False]))
+ cond.set_block(2, np.array([True, True]))
+ cond.broadcast_block_sizes()
+
+ res = v.compress(cond)
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(res.nblocks, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.array([2]), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.array([0, 1, 2]), res.get_block(1)))
+ self.assertTrue(np.allclose(np.array([0, 1]), res.get_block(2)))
+
+ cond = BlockVector(3)
+ cond.set_block(0, np.array([False, False, True]))
+ cond.set_block(1, np.array([True, True, True, False]))
+ cond.set_block(2, np.array([True, True]))
+
+ with self.assertRaises(Exception) as context:
+ res = v.compress(cond)
+
+ with self.assertRaises(Exception) as context:
+ res = v.compress(cond.flatten())
+
+ def test_owned_blocks(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+
+ owned = v.owned_blocks
+ rank = comm.Get_rank()
+ if rank == 0:
+ self.assertTrue(np.allclose(np.array([0, 2]), owned))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.array([1, 2]), owned))
+
+ owned = self.v1.owned_blocks
+ if rank == 0:
+ self.assertTrue(np.allclose(np.array([0, 2]), owned))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.array([1, 3]), owned))
+
+ def test_shared_blocks(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+
+ shared = v.shared_blocks
+ self.assertTrue(np.allclose(np.array([2]), shared))
+
+ def test_clone(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+
+ vv = v.clone()
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ def test_copy(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+
+ vv = v.copy()
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ def test_copyto(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ vv = MPIBlockVector(3, [0, 1, -1], comm)
+ v.copyto(vv)
+
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ def test_fill(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v.fill(7.0)
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ self.assertTrue(np.allclose(np.array([2]), v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(np.ones(3)*7.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.ones(4)*7.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2)*7.0, v.get_block(2)))
+
+ def test_dot(self):
+
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ all_v = np.concatenate([np.arange(3), np.arange(4), np.arange(2)])
+ expected = all_v.dot(all_v)
+
+ self.assertAlmostEqual(expected, v.dot(v))
+ vv = BlockVector(3)
+ vv.set_blocks([np.arange(3), np.arange(4), np.arange(2)])
+ self.assertAlmostEqual(expected, v.dot(vv))
+ self.assertAlmostEqual(expected, v.dot(vv.flatten()))
+
+ def test_add(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ res = v + v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3)*2, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4)*2, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2)*2, res.get_block(2)))
+
+ res = v + 5.0
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) + 5.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) + 5.0, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) + 5.0, res.get_block(2)))
+
+ res = 5.0 + v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) + 5.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) + 5.0, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) + 5.0, res.get_block(2)))
+
+ def test_sub(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ res = v - v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2), res.get_block(2)))
+
+ res = 5.0 - v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(5.0 - np.arange(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(5.0 - np.arange(4), res.get_block(1)))
+ self.assertTrue(np.allclose(5.0 - np.arange(2), res.get_block(2)))
+
+ res = v - 5.0
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) - 5.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) - 5.0, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) - 5.0, res.get_block(2)))
+
+ def test_mul(self):
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ res = v * v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) * np.arange(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) * np.arange(4), res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * np.arange(2), res.get_block(2)))
+
+ res = v * 2.0
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) * 2.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) * 2.0, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * 2.0, res.get_block(2)))
+
+ res = 2.0 * v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(3) * 2.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.arange(4) * 2.0, res.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * 2.0, res.get_block(2)))
+
+ def test_truediv(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 1.0)
+ if rank == 1:
+ v.set_block(1, np.arange(4) + 1.0)
+ v.set_block(2, np.arange(2) + 1.0)
+ v.broadcast_block_sizes()
+
+ res = v / v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2), res.get_block(2)))
+
+ res = v / 2.0
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose((np.arange(3) + 1.0)/2.0, res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose((np.arange(4) + 1.0)/2.0, res.get_block(1)))
+ self.assertTrue(np.allclose((np.arange(2) + 1.0)/2.0, res.get_block(2)))
+
+ res = 2.0 / v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(2.0/(np.arange(3) + 1.0), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(2.0/(np.arange(4) + 1.0), res.get_block(1)))
+ self.assertTrue(np.allclose(2.0/(np.arange(2) + 1.0), res.get_block(2)))
+
+ def test_floordiv(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 1.0)
+ if rank == 1:
+ v.set_block(1, np.arange(4) + 1.0)
+ v.set_block(2, np.arange(2) + 1.0)
+ v.broadcast_block_sizes()
+
+ res = v // v
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.arange(3) + 1.0,
+ np.arange(4) + 1.0,
+ np.arange(2) + 1.0])
+
+ res1 = v // 2.0
+ res2 = bv // 2.0
+ self.assertTrue(isinstance(res1, MPIBlockVector))
+ self.assertEqual(3, res1.nblocks)
+ self.assertTrue(np.allclose(res1.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res1.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(res1.get_block(0), res2.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res1.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(res1.get_block(1), res2.get_block(1)))
+ self.assertTrue(np.allclose(res1.get_block(2), res2.get_block(2)))
+
+ res1 = 2.0 // v
+ res2 = 2.0 // bv
+ self.assertTrue(isinstance(res1, MPIBlockVector))
+ self.assertEqual(3, res1.nblocks)
+ self.assertTrue(np.allclose(res1.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res1.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(res1.get_block(0), res2.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res1.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(res1.get_block(1), res2.get_block(1)))
+ self.assertTrue(np.allclose(res1.get_block(2), res2.get_block(2)))
+
+ def test_isum(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v += v
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) * 2.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) * 2.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * 2.0, v.get_block(2)))
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3, dtype='d'))
+ if rank == 1:
+ v.set_block(1, np.arange(4, dtype='d'))
+ v.set_block(2, np.arange(2, dtype='d'))
+ v.broadcast_block_sizes()
+
+ v += 7.0
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) + 7.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) + 7.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) + 7.0, v.get_block(2)))
+
+ def test_isub(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v -= v
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.zeros(3), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.zeros(4), v.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2), v.get_block(2)))
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3, dtype='d'))
+ if rank == 1:
+ v.set_block(1, np.arange(4, dtype='d'))
+ v.set_block(2, np.arange(2, dtype='d'))
+ v.broadcast_block_sizes()
+
+ v -= 7.0
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) - 7.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) - 7.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) - 7.0, v.get_block(2)))
+
+ def test_imul(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v *= v
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) * np.arange(3), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) * np.arange(4), v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * np.arange(2), v.get_block(2)))
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3, dtype='d'))
+ if rank == 1:
+ v.set_block(1, np.arange(4, dtype='d'))
+ v.set_block(2, np.arange(2, dtype='d'))
+ v.broadcast_block_sizes()
+
+ v *= 7.0
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) * 7.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) * 7.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) * 7.0, v.get_block(2)))
+
+ def test_itruediv(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 1.0)
+ if rank == 1:
+ v.set_block(1, np.arange(4) + 1.0)
+ v.set_block(2, np.arange(2) + 1.0)
+ v.broadcast_block_sizes()
+
+ v /= v
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.ones(3), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.ones(4), v.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2), v.get_block(2)))
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3) + 1.0)
+ if rank == 1:
+ v.set_block(1, np.arange(4) + 1.0)
+ v.set_block(2, np.arange(2) + 1.0)
+ v.broadcast_block_sizes()
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3, dtype='d'))
+ if rank == 1:
+ v.set_block(1, np.arange(4, dtype='d'))
+ v.set_block(2, np.arange(2, dtype='d'))
+ v.broadcast_block_sizes()
+
+ v /= 2.0
+ self.assertTrue(isinstance(v, MPIBlockVector))
+ self.assertEqual(3, v.nblocks)
+ if rank == 0:
+ self.assertTrue(np.allclose(np.arange(3) / 2.0, v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(np.arange(4) / 2.0, v.get_block(1)))
+ self.assertTrue(np.allclose(np.arange(2) / 2.0, v.get_block(2)))
+
+ def test_le(self):
+ v = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v <= v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v <= bv
+
+ with self.assertRaises(Exception) as context:
+ res = bv >= v
+
+ with self.assertRaises(Exception) as context:
+ res = v <= bv.flatten()
+
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() >= v
+
+ res = v <= 3.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ res = 3.0 >= v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ def test_lt(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v < v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v < bv
+
+ with self.assertRaises(Exception) as context:
+ res = bv > v
+
+ with self.assertRaises(Exception) as context:
+ res = v < bv.flatten()
+
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() > v
+
+ res = v < 3.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ res = 3.0 > v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ def test_ge(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v >= v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v >= bv
+
+ with self.assertRaises(Exception) as context:
+ res = bv <= v
+
+ with self.assertRaises(Exception) as context:
+ res = v >= bv.flatten()
+
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() <= v
+
+ res = v >= 3.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ res = 3.0 <= v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ def test_gt(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v > v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v > bv
+
+ with self.assertRaises(Exception) as context:
+ res = bv < v
+
+ with self.assertRaises(Exception) as context:
+ res = v > bv.flatten()
+
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() < v
+
+ res = v > 3.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ res = 3.0 < v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ def test_eq(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v == v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v == bv
+
+ with self.assertRaises(Exception) as context:
+ res = bv == v
+
+ with self.assertRaises(Exception) as context:
+ res = v == bv.flatten()
+
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() == v
+
+ res = v == 8.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ res = 8.0 == v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ def test_ne(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 8)
+ if rank == 1:
+ v.set_block(1, np.ones(4) * 2)
+ v.set_block(2, np.ones(2) * 4)
+ v.broadcast_block_sizes()
+
+ v1 = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v1.set_block(0, np.ones(3) * 2)
+ if rank == 1:
+ v1.set_block(1, np.ones(4) * 8)
+ v1.set_block(2, np.ones(2) * 4)
+ v1.broadcast_block_sizes()
+
+ res = v != v1
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.zeros(2, dtype=bool), res.get_block(2)))
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.ones(3) * 2,
+ np.ones(4) * 8,
+ np.ones(2) * 4])
+
+ with self.assertRaises(Exception) as context:
+ res = v != bv
+ with self.assertRaises(Exception) as context:
+ res = bv != v
+ with self.assertRaises(Exception) as context:
+ res = v != bv.flatten()
+ with self.assertRaises(Exception) as context:
+ res = bv.flatten() != v
+
+ res = v != 8.0
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ res = 8.0 != v
+
+ self.assertTrue(isinstance(res, MPIBlockVector))
+ self.assertEqual(3, res.nblocks)
+ self.assertTrue(np.allclose(res.shared_blocks, v.shared_blocks))
+
+ if rank == 0:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.zeros(3, dtype=bool), res.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(res.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(np.ones(4, dtype=bool), res.get_block(1)))
+ self.assertTrue(np.allclose(np.ones(2, dtype=bool), res.get_block(2)))
+
+ def test_unary_ufuncs(self):
+
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 0.5)
+ if rank == 1:
+ v.set_block(1, np.ones(2) * 0.8)
+
+ bv = BlockVector(2)
+ a = np.ones(3) * 0.5
+ b = np.ones(2) * 0.8
+ bv.set_block(0, a)
+ bv.set_block(1, b)
+
+ unary_funcs = [np.log10, np.sin, np.cos, np.exp, np.ceil,
+ np.floor, np.tan, np.arctan, np.arcsin,
+ np.arccos, np.sinh, np.cosh, np.abs,
+ np.tanh, np.arcsinh, np.arctanh,
+ np.fabs, np.sqrt, np.log, np.log2,
+ np.absolute, np.isfinite, np.isinf, np.isnan,
+ np.log1p, np.logical_not, np.exp2, np.expm1,
+ np.sign, np.rint, np.square, np.positive,
+ np.negative, np.rad2deg, np.deg2rad,
+ np.conjugate, np.reciprocal]
+
+ bv2 = BlockVector(2)
+ for fun in unary_funcs:
+ bv2.set_block(0, fun(bv.get_block(0)))
+ bv2.set_block(1, fun(bv.get_block(1)))
+ res = fun(v)
+ self.assertIsInstance(res, MPIBlockVector)
+ self.assertEqual(res.nblocks, 2)
+ for i in res.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(i), bv2.get_block(i)))
+
+ with self.assertRaises(Exception) as context:
+ np.cbrt(v)
+
+ with self.assertRaises(Exception) as context:
+ np.cumsum(v)
+
+ with self.assertRaises(Exception) as context:
+ np.cumprod(v)
+
+ with self.assertRaises(Exception) as context:
+ np.cumproduct(v)
+
+ def test_reduce_ufuncs(self):
+
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 0.5)
+ if rank == 1:
+ v.set_block(1, np.ones(2) * 0.8)
+ v.broadcast_block_sizes()
+
+ bv = BlockVector(2)
+ bv.set_block(0, np.ones(3) * 0.5)
+ bv.set_block(1, np.ones(2) * 0.8)
+
+ reduce_funcs = [np.sum, np.max, np.min, np.prod, np.mean, np.all, np.any]
+ for fun in reduce_funcs:
+ self.assertAlmostEqual(fun(v), fun(bv.flatten()))
+
+ def test_binary_ufuncs(self):
+
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3) * 0.5)
+ if rank == 1:
+ v.set_block(1, np.ones(2) * 0.8)
+
+ v2 = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v2.set_block(0, np.ones(3) * 3.0)
+ if rank == 1:
+ v2.set_block(1, np.ones(2) * 2.8)
+
+ bv = BlockVector(2)
+ bv.set_block(0, np.ones(3) * 0.5)
+ bv.set_block(1, np.ones(2) * 0.8)
+
+ bv2 = BlockVector(2)
+ bv2.set_block(0, np.ones(3) * 3.0)
+ bv2.set_block(1, np.ones(2) * 2.8)
+
+ binary_ufuncs = [np.add, np.multiply, np.divide, np.subtract,
+ np.greater, np.greater_equal, np.less,
+ np.less_equal, np.not_equal,
+ np.maximum, np.minimum,
+ np.fmax, np.fmin, np.equal,
+ np.logaddexp, np.logaddexp2, np.remainder,
+ np.heaviside, np.hypot]
+
+ for fun in binary_ufuncs:
+ serial_res = fun(bv, bv2)
+ res = fun(v, v2)
+
+ self.assertIsInstance(res, MPIBlockVector)
+ self.assertEqual(res.nblocks, 2)
+ for i in res.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(i), serial_res.get_block(i)))
+
+ serial_res = fun(bv, bv2)
+ with self.assertRaises(Exception) as context:
+ res = fun(v, bv2)
+
+ serial_res = fun(bv, bv2)
+ with self.assertRaises(Exception) as context:
+ res = fun(bv, v2)
+
+ serial_res = fun(bv, 2.0)
+ res = fun(v, 2.0)
+
+ self.assertIsInstance(res, MPIBlockVector)
+ self.assertEqual(res.nblocks, 2)
+ for i in res.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(i), serial_res.get_block(i)))
+
+ serial_res = fun(2.0, bv)
+ res = fun(2.0, v)
+
+ self.assertIsInstance(res, MPIBlockVector)
+ self.assertEqual(res.nblocks, 2)
+ for i in res.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(i), serial_res.get_block(i)))
+
+
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3, dtype=bool))
+ if rank == 1:
+ v.set_block(1, np.ones(2, dtype=bool))
+
+ v2 = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v2.set_block(0, np.zeros(3, dtype=bool))
+ if rank == 1:
+ v2.set_block(1, np.zeros(2, dtype=bool))
+
+ bv = BlockVector(2)
+ bv.set_block(0, np.ones(3, dtype=bool))
+ bv.set_block(1, np.ones(2, dtype=bool))
+
+ bv2 = BlockVector(2)
+ bv2.set_block(0, np.zeros(3, dtype=bool))
+ bv2.set_block(1, np.zeros(2, dtype=bool))
+
+ binary_ufuncs = [np.logical_and, np.logical_or, np.logical_xor]
+ for fun in binary_ufuncs:
+ serial_res = fun(bv, bv2)
+ res = fun(v, v2)
+ self.assertIsInstance(res, MPIBlockVector)
+ self.assertEqual(res.nblocks, 2)
+ for i in res.owned_blocks:
+ self.assertTrue(np.allclose(res.get_block(i), serial_res.get_block(i)))
+
+ serial_res = fun(bv, bv2)
+ with self.assertRaises(Exception) as context:
+ res = fun(v, bv2)
+
+ serial_res = fun(bv, bv2)
+ with self.assertRaises(Exception) as context:
+ res = fun(bv, v2)
+
+ def test_contains(self):
+
+ v = MPIBlockVector(2, [0,1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3))
+ if rank == 1:
+ v.set_block(1, np.zeros(2))
+ v.broadcast_block_sizes()
+
+ self.assertTrue(0 in v)
+ self.assertFalse(3 in v)
+
+ def test_len(self):
+
+ v = MPIBlockVector(2, [0,1], comm)
+
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.ones(3))
+ if rank == 1:
+ v.set_block(1, np.zeros(2))
+ v.broadcast_block_sizes()
+ self.assertEqual(len(v), 2)
+
+ def test_copyfrom(self):
+
+ v = MPIBlockVector(3, [0,1,-1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ v.set_block(0, np.arange(3))
+ if rank == 1:
+ v.set_block(1, np.arange(4))
+ v.set_block(2, np.arange(2))
+ v.broadcast_block_sizes()
+
+ bv = BlockVector(3)
+ bv.set_blocks([np.arange(3), np.arange(4), np.arange(2)])
+ vv = MPIBlockVector(3, [0, 1, -1], comm)
+ vv.copyfrom(v)
+
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ vv = MPIBlockVector(3, [0, 1, -1], comm)
+ vv.copyfrom(bv)
+
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ vv = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ vv.set_block(0, np.arange(3) + 1)
+ if rank == 1:
+ vv.set_block(1, np.arange(4) + 1)
+ vv.set_block(2, np.arange(2) + 1)
+
+ vv.copyfrom(bv)
+
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+ vv = MPIBlockVector(3, [0, 1, -1], comm)
+ rank = comm.Get_rank()
+ if rank == 0:
+ vv.set_block(0, np.arange(3) + 1)
+ if rank == 1:
+ vv.set_block(1, np.arange(4) + 1)
+ vv.set_block(2, np.arange(2) + 1)
+
+ vv.copyfrom(v)
+
+ self.assertTrue(isinstance(vv, MPIBlockVector))
+ self.assertEqual(vv.nblocks, v.nblocks)
+ self.assertTrue(np.allclose(vv.shared_blocks, v.shared_blocks))
+ if rank == 0:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(0), v.get_block(0)))
+ if rank == 1:
+ self.assertTrue(np.allclose(vv.owned_blocks, v.owned_blocks))
+ self.assertTrue(np.allclose(vv.get_block(1), v.get_block(1)))
+ self.assertTrue(np.allclose(vv.get_block(2), v.get_block(2)))
+
+
+if __name__ == '__main__':
+ unittest.main()
diff --git a/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py b/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py
index 8be61212357..c3a4fb6ad7a 100644
--- a/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py
+++ b/pyomo/contrib/pynumero/sparse/tests/test_sparse_utils.py
@@ -9,14 +9,14 @@
# ___________________________________________________________________________
import pyutilib.th as unittest
-from .. import numpy_available, scipy_available
+from pyomo.contrib.pynumero.dependencies import (
+ numpy as np, numpy_available, scipy_available
+)
if not (numpy_available and scipy_available):
raise unittest.SkipTest("Pynumero needs scipy and numpy to run NLP tests")
from scipy.sparse import coo_matrix, bmat
-import numpy as np
-from pyomo.contrib.pynumero.sparse import BlockSymMatrix
from pyomo.contrib.pynumero.sparse.utils import is_symmetric_dense, is_symmetric_sparse
class TestSparseUtils(unittest.TestCase):
@@ -49,13 +49,6 @@ def setUp(self):
self.block11 = m
- bm = BlockSymMatrix(2)
- bm.name = 'basic_matrix'
- bm[0, 0] = self.block00
- bm[1, 0] = self.block10
- bm[1, 1] = self.block11
- self.basic_m = bm
-
def test_is_symmetric_dense(self):
m = self.block00.toarray()
@@ -69,8 +62,6 @@ def test_is_symmetric_sparse(self):
self.assertTrue(is_symmetric_sparse(m))
m = self.block00.toarray()
self.assertTrue(is_symmetric_sparse(m))
- m = self.basic_m
- self.assertTrue(is_symmetric_sparse(m))
m = self.block11
self.assertTrue(is_symmetric_sparse(m))
m = self.block10
@@ -85,6 +76,3 @@ def test_is_symmetric_sparse(self):
with self.assertRaises(Exception) as context:
self.assertTrue(is_symmetric_sparse(range(5)))
-
-
-
diff --git a/pyomo/contrib/pynumero/src/AmplInterface.cpp b/pyomo/contrib/pynumero/src/AmplInterface.cpp
new file mode 100644
index 00000000000..001899205fa
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/AmplInterface.cpp
@@ -0,0 +1,587 @@
+/**___________________________________________________________________________
+ *
+ * Pyomo: Python Optimization Modeling Objects
+ * Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+ * Under the terms of Contract DE-NA0003525 with National Technology and
+ * Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+ * rights in this software.
+ * This software is distributed under the 3-clause BSD License.
+ * ___________________________________________________________________________
+**/
+#include "AmplInterface.hpp"
+#include "AssertUtils.hpp"
+#include "asl_pfgh.h"
+#include "getstub.h"
+
+#include <string>
+#include <vector>
+
+AmplInterface::AmplInterface()
+ : _p_asl(NULL), // pointer to the ASL struct
+ _obj_direction(1), // minimize by default
+ nnz_hes_lag_(-1) // cache this since sphsetup called only once
+{}
+
+char* new_char_p_from_std_str(std::string str)
+{
+ size_t len = str.size();
+ char* ret = new char[len + 1];
+ //strcpy(ret, str.c_str());
+ std::copy(str.begin(), str.end(), ret);
+ ret[len] = '\0';
+ return ret;
+    //return const_cast<char *>(str.c_str());
+}
+
+void AmplInterface::initialize(const char *nlfilename)
+{
+ // The includes from the Ampl Solver Library
+ // have a number of macros that expand to include
+ // the local variable "asl".
+ // For example:
+ // #define X0 asl->i.X0_
+ // Therefore, in many of these methods, you will
+ // often see the assignment the asl pointer followed
+ // by calls to the macros from the ASL.
+
+ // TODO: add possible options later
+    std::vector<std::string> options;
+    typedef std::vector<std::string>::iterator iter;
+
+ std::string cp_nlfilename(nlfilename);
+
+ // translate options to command input
+    std::vector<std::string> arguments;
+ arguments.push_back("pynumero");
+ arguments.push_back(cp_nlfilename);
+ for (iter it=options.begin(); it != options.end(); ++it) {
+ arguments.push_back(*it);
+ }
+
+    std::vector<const char *> argv;
+
+ for (iter it=arguments.begin(); it != arguments.end(); ++it) {
+ argv.push_back(it->data());
+ }
+ argv.push_back(NULL);
+
+ // Allocate memory for the asl structure
+ ASL_pfgh *asl = (ASL_pfgh *) ASL_alloc(ASL_read_pfgh);
+ _p_asl = asl; // store this pointer to write back to "asl" when necessary
+ _ASSERT_(_p_asl);
+
+ // Create the Option_Info structure - see getstub.h (more entries
+ // than in hooking.pdf)
+ //
+ // TODO: should allow many of these to be passed in to initialize (so
+ // different solvers can set them appropriately).
+ oi = new Option_Info;
+ oi->sname = new_char_p_from_std_str("solver_exe_name_not_set");
+ oi->bsname = new_char_p_from_std_str("Solver_name_not_set");
+ oi->opname = new_char_p_from_std_str("solver_options_env_var_not_set");
+ oi->keywds = NULL;
+ oi->n_keywds = 0;
+ oi->flags = 0;
+ oi->version = NULL;
+ oi->usage = NULL;
+ oi->kwf = NULL;
+ oi->feq = NULL;
+ oi->options = NULL;
+ oi->n_options = 0;
+ oi->driver_date = 0;
+ oi->wantsol = 0;
+ oi->nS = 0;
+ oi->S = NULL;
+ oi->uinfo = NULL;
+ oi->asl = NULL;
+ oi->eqsign = NULL;
+ oi->n_badopts = 0;
+ oi->option_echo = 0;
+ oi->nnl = 0;
+
+ // read the options and get the name of the .nl file (stub)
+    char *stub = getstops(const_cast<char **>(argv.data()), oi);
+
+ delete[] oi->sname;
+ oi->sname = NULL;
+ delete[] oi->bsname;
+ oi->bsname = NULL;
+ delete[] oi->opname;
+ oi->opname = NULL;
+ // this pointer may need to be stored for the call to write_sol
+ //delete oi;
+
+ FILE *nl = this->open_nl(asl, stub);
+ _ASSERT_(nl != NULL);
+
+ // want initial values for the variables and the
+ // multipliers
+ want_xpi0 = 1 | 2;
+ // allocate space in the ASL structure for the initial values
+ X0 = new double[n_var];
+ havex0 = new char[n_var];
+ pi0 = new double[n_con];
+ havepi0 = new char[n_con];
+
+ _ASSERT_EXIT_(n_var > 0,
+ "Problem does not have any continuous variables");
+ _ASSERT_EXIT_(nbv == 0 && niv == 0,
+ "PyNumero does not support discrete variables");
+ _ASSERT_EXIT_(nwv == 0 && nlnc == 0 && lnc == 0,
+ "PyNumero does not support network constraints");
+ _ASSERT_EXIT_(n_cc == 0,
+ "PyNumero does not support complementarities");
+
+ // call ASL to parse the nl file
+ int retcode = pfgh_read(nl, ASL_findgroups);
+ _ASSERT_EXIT_(retcode == ASL_readerr_none,
+ "Error reading the ASL .nl file");
+
+ // determine maximization or minimization
+ _ASSERT_EXIT_(n_obj == 1,
+ "PyNumero supports single objective problems only");
+ _obj_direction = 1;
+ if (objtype[0] != 0) {
+ _obj_direction = -1;
+ }
+
+ // see comments in https://github.com/ampl/mp/blob/master/src/asl/solvers/changes
+ // void hesset(int flags, int obj, int nnobj, int con, int nncon)
+ // tells AMPL which objectives and constraints to include when building the
+ // Hessian structure. Seems like:
+ // obj is the obj. number to start,
+ // nnobj is the number past that to include
+ // con is the constraint number to start
+ // nncon is the number past that to include
+ // we only support single objective problems
+ hesset(1, 0, 1, 0, nlc);
+
+ // setup the structure for the Hessian of the Lagrangian
+ nnz_hes_lag_ = sphsetup(-1, 1, 1, 1); // num obj, factor on obj, flag
+ // to indicate if multipliers
+ // supplied, and flag for upper
+ // triangular
+}
+
+AmplInterface::~AmplInterface() {
+ ASL_pfgh *asl = _p_asl;
+ delete[] X0;
+ X0 = NULL;
+ delete[] havex0;
+ havex0 = NULL;
+ delete[] pi0;
+ pi0 = NULL;
+ delete[] havepi0;
+ havepi0 = NULL;
+ delete oi;
+
+ if (asl) {
+ ASL *p_asl_to_free = (ASL *) _p_asl;
+ ASL_free(&p_asl_to_free);
+ _p_asl = NULL;
+ }
+}
+
+int AmplInterface::get_n_vars() const {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ int n_x;
+ n_x = n_var;
+ return n_x;
+}
+
+int AmplInterface::get_n_constraints() const {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ int n_c;
+ n_c = n_con;
+ return n_c;
+}
+
+int AmplInterface::get_nnz_jac_g() const {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ int nnz_jac_g;
+ nnz_jac_g = nzc;
+ return nnz_jac_g;
+}
+
+int AmplInterface::get_nnz_hessian_lag() const {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(asl);
+ int nnz_hes_lag;
+ nnz_hes_lag = nnz_hes_lag_;
+ return nnz_hes_lag;
+}
+
+void AmplInterface::get_lower_bounds_x(double *invec, int n) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(n == n_var);
+ for (int i = 0; i < n; i++) {
+ invec[i] = LUv[2 * i];
+ }
+}
+
+void AmplInterface::get_upper_bounds_x(double *invec, int n) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(n == n_var);
+
+ for (int i = 0; i < n; i++) {
+ invec[i] = LUv[2 * i + 1];
+ }
+}
+
+void AmplInterface::get_lower_bounds_g(double *invec, int m) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(m == n_con);
+ for (int i = 0; i < m; i++) {
+ invec[i] = LUrhs[2 * i];
+ }
+}
+
+void AmplInterface::get_upper_bounds_g(double *invec, int m) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(m == n_con);
+
+ for (int i = 0; i < m; i++) {
+ invec[i] = LUrhs[2 * i + 1];
+ }
+}
+
+void AmplInterface::get_init_x(double *invec, int n) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(n == n_var);
+
+ for (int i = 0; i < n; i++) {
+ if (havex0[i]) {
+ invec[i] = X0[i];
+ } else {
+ invec[i] = 0.0;
+ }
+ }
+}
+
+void AmplInterface::get_init_multipliers(double *invec, int n) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+
+ // get dual starting point
+ if (n_con == 0) { return; } // unconstrained problem or do not want
+                                // to use the existing dual values
+ _ASSERT_(n == n_con);
+
+ for (int i = 0; i < n; i++) {
+ if (havepi0[i]) {
+ invec[i] = pi0[i];
+ } else {
+ invec[i] = 0.0;
+ }
+ }
+}
+
+bool AmplInterface::eval_f(double *const_x, int nx, double& f) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(n_obj == 1 && "AMPL problem must have a single objective function");
+
+ fint nerror = 1;
+ double retval = objval(obj_no, (double *) const_x, &nerror);
+
+ if (nerror != 0) {
+ return false;
+ }
+ f = _obj_direction * retval;
+ return true;
+
+}
+
+bool AmplInterface::eval_deriv_f(double *const_x, double *deriv_f, int nx) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(n_obj == 1 && "AMPL problem must have a single objective function");
+
+ fint nerror = 1;
+ objgrd(obj_no, (double *) const_x, deriv_f, &nerror);
+
+ if (nerror != 0) {
+ return false;
+ }
+
+ if (_obj_direction == -1) {
+ for (int i = 0; i < nx; i++) {
+ deriv_f[i] *= -1.0;
+ }
+ }
+ return true;
+}
+
+bool AmplInterface::eval_g(double *const_x, int nx, double *g, int ng) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(nx == n_var);
+ _ASSERT_(ng == n_con);
+
+ fint nerror = 1;
+ conval((double *) const_x, g, &nerror);
+ if (nerror != 0) {
+ return false;
+ }
+ return true;
+}
+
+void AmplInterface::struct_jac_g(int *irow, int *jcol, int nnz_jac_g) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(nnz_jac_g == nzc);
+ _ASSERT_(irow && jcol);
+
+ // get the non zero structure of the Jacobian of g wrt x
+ for (int i = 0; i < n_con; i++) {
+ for (cgrad *cg = Cgrad[i]; cg; cg = cg->next) {
+ irow[cg->goff] = i + 1;
+ jcol[cg->goff] = cg->varno + 1;
+ }
+ }
+}
+
+bool AmplInterface::eval_jac_g(double *const_x, int nx, double *jac_g_values, int nnz_jac_g) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(nx == n_var);
+ _ASSERT_(nnz_jac_g == nzc);
+ _ASSERT_(jac_g_values);
+
+ fint nerror = 1;
+ jacval((double *) const_x, jac_g_values, &nerror);
+ if (nerror != 0) {
+ return false;
+ }
+ return true;
+}
+
+void AmplInterface::struct_hes_lag(int *irow, int *jcol, int nnz_hes_lag) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(nnz_hes_lag_ == nnz_hes_lag);
+
+ int idx = 0;
+ for (int i = 0; i < n_var; i++) {
+ for (int j = sputinfo->hcolstarts[i]; j < sputinfo->hcolstarts[i + 1]; j++) {
+ irow[idx] = i + 1;
+ jcol[idx] = sputinfo->hrownos[j] + 1;
+ idx++;
+ }
+ }
+}
+
+bool AmplInterface::eval_hes_lag(double *const_x,
+ int nx,
+ double *const_lam,
+ int nc,
+ double *hes_lag,
+ int nnz_hes_lag,
+ double obj_factor) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(_p_asl);
+ _ASSERT_(nx == n_var);
+ _ASSERT_(nc == n_con);
+ _ASSERT_(n_obj == 1);
+ _ASSERT_(nnz_hes_lag_ == nnz_hes_lag);
+
+ double OW = _obj_direction * obj_factor;
+ sphes(hes_lag, -1, &OW, (double *) const_lam);
+ return true;
+}
+
+void AmplInterface::finalize_solution(int ampl_solve_result_num, char* msg, double *const_x, int nx, double *const_lam, int nc) {
+ ASL_pfgh *asl = _p_asl;
+ _ASSERT_(asl);
+ _ASSERT_(const_x && const_lam);
+
+    // set the AMPL solver status
+ _ASSERT_MSG_(ampl_solve_result_num >= 0 && ampl_solve_result_num < 600,
+ "ampl_solve_result_num must be between 0 and 599 in AmplInterface::finalize_solution");
+
+    write_sol(msg, const_cast<double *>(const_x), const_cast<double *>(const_lam), 0);
+}
+
+AmplInterfaceFile::AmplInterfaceFile()
+ : AmplInterface()
+{}
+
+FILE* AmplInterfaceFile::open_nl(ASL_pfgh *asl, char* stub)
+{
+#if defined(_WIN32) || defined(_WIN64)
+#else
+ _ASSERT_EXIT_(stub, "No .nl file was specified.");
+#endif
+ return jac0dim(stub, (int) strlen(stub));
+}
+
+AmplInterfaceStr::AmplInterfaceStr(char* nl, size_t size)
+ : AmplInterface(),
+ nl_content(nl),
+ nl_size(size)
+{}
+
+// THIS METHOD IS DISABLED FOR NOW
+FILE* AmplInterfaceStr::open_nl(ASL_pfgh *asl, char* stub)
+{
+ // Ignore the stub and use the cached NL file content
+ //#if defined(__APPLE__) && defined(__MACH__)
+ //FILE* nl = fmemopen(this->nl_content, this->nl_size, "rb");
+ //return jac0dim_FILE(nl);
+ return NULL;
+ // #elif defined(_WIN32)
+ //return NULL;
+ //#else
+ //FILE* nl = fmemopen(this->nl_content, this->nl_size, "rb");
+ //return jac0dim_FILE(nl);
+ //return NULL;
+ //#endif
+
+}
+
+extern "C" {
+ PYNUMERO_ASL_EXPORT AmplInterface*
+ EXTERNAL_AmplInterface_new_file(char *nlfilename) {
+ AmplInterface* ans = new AmplInterfaceFile();
+ ans->initialize(nlfilename);
+ return ans;
+ }
+
+ PYNUMERO_ASL_EXPORT AmplInterface*
+ EXTERNAL_AmplInterface_new_str(char *nl, size_t size) {
+ AmplInterface* ans = new AmplInterfaceStr(nl, size);
+ ans->initialize("membuf.nl");
+ return ans;
+ }
+
+ PYNUMERO_ASL_EXPORT AmplInterface*
+ EXTERNAL_AmplInterface_new(char *nlfilename) {
+ return EXTERNAL_AmplInterface_new_file(nlfilename);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ int EXTERNAL_AmplInterface_n_vars(AmplInterface *p_ai) {
+ return p_ai->get_n_vars();
+ }
+
+ PYNUMERO_ASL_EXPORT
+ int EXTERNAL_AmplInterface_n_constraints(AmplInterface *p_ai) {
+ return p_ai->get_n_constraints();
+ }
+
+ PYNUMERO_ASL_EXPORT
+ int EXTERNAL_AmplInterface_nnz_jac_g(AmplInterface *p_ai) {
+ return p_ai->get_nnz_jac_g();
+ }
+
+ PYNUMERO_ASL_EXPORT
+ int EXTERNAL_AmplInterface_nnz_hessian_lag(AmplInterface *p_ai) {
+ return p_ai->get_nnz_hessian_lag();
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_x_lower_bounds
+ ( AmplInterface *p_ai, double *invec, int n ) {
+ p_ai->get_lower_bounds_x(invec, n);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_x_upper_bounds
+ ( AmplInterface *p_ai, double *invec, int n ) {
+ p_ai->get_upper_bounds_x(invec, n);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_g_lower_bounds
+ ( AmplInterface *p_ai, double *invec, int m ) {
+ p_ai->get_lower_bounds_g(invec, m);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_g_upper_bounds
+ ( AmplInterface *p_ai, double *invec, int m ) {
+ p_ai->get_upper_bounds_g(invec, m);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_get_init_x
+ ( AmplInterface *p_ai, double *invec, int n ) {
+ p_ai->get_init_x(invec, n);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_get_init_multipliers
+ ( AmplInterface *p_ai, double *invec, int n ) {
+ p_ai->get_init_multipliers(invec, n);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ bool EXTERNAL_AmplInterface_eval_f
+ ( AmplInterface *p_ai, double *invec, int n, double& f ) {
+ return p_ai->eval_f(invec, n, f);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ bool EXTERNAL_AmplInterface_eval_deriv_f
+ ( AmplInterface *p_ai, double *const_x, double *deriv_f, int nx ) {
+ return p_ai->eval_deriv_f(const_x, deriv_f, nx);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ bool EXTERNAL_AmplInterface_eval_g
+ ( AmplInterface *p_ai, double *const_x, int nx, double *g, int ng ) {
+ return p_ai->eval_g(const_x, nx, g, ng);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_struct_jac_g
+ ( AmplInterface *p_ai, int *irow, int *jcol, int nnz_jac_g ) {
+ p_ai->struct_jac_g(irow, jcol, nnz_jac_g);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ bool EXTERNAL_AmplInterface_eval_jac_g
+ ( AmplInterface *p_ai, double *const_x, int nx, double *jac_g_values,
+ int nnz_jac_g ) {
+ return p_ai->eval_jac_g(const_x, nx, jac_g_values, nnz_jac_g);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_struct_hes_lag
+ ( AmplInterface *p_ai, int *irow, int *jcol, int nnz_hes_lag ) {
+ p_ai->struct_hes_lag(irow, jcol, nnz_hes_lag);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ bool EXTERNAL_AmplInterface_eval_hes_lag
+ ( AmplInterface *p_ai, double *const_x, int nx, double *const_lam,
+ int nc, double *hes_lag, int nnz_hes_lag, double obj_factor ) {
+ return p_ai->eval_hes_lag(const_x, nx, const_lam, nc, hes_lag,
+ nnz_hes_lag, obj_factor);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_finalize_solution
+ ( AmplInterface *p_ai, int ampl_solve_result_num, char* msg,
+ double *const_x, int nx, double *const_lam, int nc ) {
+ p_ai->finalize_solution(ampl_solve_result_num, msg,
+ const_x, nx, const_lam, nc);
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_free_memory(AmplInterface *p_ai) {
+ p_ai->~AmplInterface();
+ }
+
+ PYNUMERO_ASL_EXPORT
+ void EXTERNAL_AmplInterface_dummy(AmplInterface *p_ai) {
+ std::cout<<"hola\n";
+ }
+}
diff --git a/pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.hpp b/pyomo/contrib/pynumero/src/AmplInterface.hpp
similarity index 91%
rename from pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.hpp
rename to pyomo/contrib/pynumero/src/AmplInterface.hpp
index 49c491a3ded..fccd143bdc9 100644
--- a/pyomo/contrib/pynumero/cmake/asl_interface/src/AmplInterface.hpp
+++ b/pyomo/contrib/pynumero/src/AmplInterface.hpp
@@ -13,13 +13,23 @@
#include
+#if defined(_WIN32) || defined(_WIN64)
+# if defined(BUILDING_PYNUMERO_ASL)
+# define PYNUMERO_ASL_EXPORT __declspec(dllexport)
+# else
+# define PYNUMERO_ASL_EXPORT __declspec(dllimport)
+# endif
+#else
+# define PYNUMERO_ASL_EXPORT
+#endif
+
// Forward declaration for ASL structure
struct ASL_pfgh;
struct Option_Info;
// This class provides the C++ side of the
// PyNumero interface to AMPL
-class AmplInterface {
+class PYNUMERO_ASL_EXPORT AmplInterface {
public:
AmplInterface();
virtual ~AmplInterface();
@@ -123,7 +133,7 @@ class AmplInterface {
};
// File-based specialization of AmplInterface
-class AmplInterfaceFile : public AmplInterface {
+class PYNUMERO_ASL_EXPORT AmplInterfaceFile : public AmplInterface {
public:
AmplInterfaceFile();
@@ -131,7 +141,7 @@ class AmplInterfaceFile : public AmplInterface {
};
// String-based specialization of AmplInterface
-class AmplInterfaceStr : public AmplInterface {
+class PYNUMERO_ASL_EXPORT AmplInterfaceStr : public AmplInterface {
public:
AmplInterfaceStr(char* nl, size_t size);
diff --git a/pyomo/contrib/pynumero/cmake/asl_interface/src/AssertUtils.hpp b/pyomo/contrib/pynumero/src/AssertUtils.hpp
similarity index 85%
rename from pyomo/contrib/pynumero/cmake/asl_interface/src/AssertUtils.hpp
rename to pyomo/contrib/pynumero/src/AssertUtils.hpp
index 4b63ed9957f..f64ccd6e7eb 100644
--- a/pyomo/contrib/pynumero/cmake/asl_interface/src/AssertUtils.hpp
+++ b/pyomo/contrib/pynumero/src/AssertUtils.hpp
@@ -13,6 +13,7 @@
#include
#include
+#include
#define _ASSERT_ assert
#define _ASSERT_MSG_ assert_msg
@@ -20,22 +21,31 @@
#define _ASSERTION_FAILURE_ assertion_failure
inline void assert_msg(bool cond, const std::string &msg) {
- if (!cond) {
+ #if defined(_WIN32) || defined(_WIN64)
+ #else
+ if (!cond) {
std::cout << "Assertion Failed: " << msg.c_str() << std::endl;
}
assert(msg.c_str() && cond);
+ #endif
}
inline void assert_exit(bool cond, const std::string &msg, int exitcode = 1) {
+ #if defined(_WIN32) || defined(_WIN64)
+ #else
if (!(cond)) {
std::cout << msg << std::endl;
exit(exitcode);
}
+ #endif
}
inline void assertion_failure(const std::string &msg) {
+ #if defined(_WIN32) || defined(_WIN64)
+ #else
std::cout << "Assertion Failed: " << msg.c_str() << std::endl;
assert(msg.c_str() && false);
+ #endif
}
#endif
diff --git a/pyomo/contrib/pynumero/src/CMakeLists.txt b/pyomo/contrib/pynumero/src/CMakeLists.txt
new file mode 100644
index 00000000000..001e1319175
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/CMakeLists.txt
@@ -0,0 +1,213 @@
+cmake_minimum_required(VERSION 3.0)
+# CMake 3.0 added GIT_SUBMODULES to ExternalProject_ADD, and without it
+# the Ampl/MP checkout fails because one of the submodules (gecode) is a
+# private repository.
+
+PROJECT( pynumero )
+
+include(ExternalProject)
+
+# Targets in this project
+OPTION(BUILD_ASL "Build the PyNumero ASL interface" ON)
+OPTION(BUILD_MA27 "Build the PyNumero ma27 interface" OFF)
+OPTION(BUILD_MA57 "Build the PyNumero ma57 interface" OFF)
+
+# Dependencies that we manage / can install
+SET(AMPLMP_TAG "3.1.0" CACHE STRING
+ "AMPL/MP git tag/branch to checkout and build")
+OPTION(BUILD_AMPLMP
+ "Download and build AMPL/MP ${AMPLMP_TAG} from GitHub" OFF)
+
+# Other build / environment options
+OPTION(BUILD_AMPLMP_IF_NEEDED
+ "Automatically enable AMPLMP build if ASL not found" OFF)
+MARK_AS_ADVANCED(BUILD_AMPLMP_IF_NEEDED)
+
+#OPTION(STATIC_LINK "STATIC_LINK" OFF)
+
+# If we build AMPLMP, then we will get a dependency on dlopen
+FIND_LIBRARY(DL_LIBRARY dl)
+
+# We need the ASL and HSL libraries. We can get them from Ipopt,
+# AMPL/MP, or ASL (netlib)
+SET(IPOPT_DIR "" CACHE PATH "Path to compiled Ipopt installation")
+SET(AMPLMP_DIR "" CACHE PATH "Path to compiled AMPL/MP installation")
+#SET(ASL_NETLIB_DIR "" CACHE PATH "Path to compiled ASL (netlib) installation")
+SET(MA27_OBJECT "" CACHE FILEPATH
+ "Path to compiled ma27d.o object. Must be compiled with -fPIC.")
+
+# Use pkg-config to get the ASL/HSL directories from the Ipopt/COIN-OR build
+FIND_PACKAGE(PkgConfig)
+IF( PKG_CONFIG_FOUND )
+ SET(_TMP "$ENV{PKG_CONFIG_PATH}")
+ SET(ENV{PKG_CONFIG_PATH} "${IPOPT_DIR}/lib/pkgconfig:$ENV{PKG_CONFIG_PATH}")
+ pkg_check_modules(PC_COINASL QUIET coinasl)
+ pkg_check_modules(PC_COINHSL QUIET coinhsl)
+ SET(ENV{PKG_CONFIG_PATH} "${_TMP}")
+ENDIF()
+
+# cmake does not search LD_LIBRARY_PATH by default. So that libraries
+# like HSL can be added through mechanisms like 'environment modules',
+# we will explicitly add LD_LIBRARY_PATH to teh search path
+string(REPLACE ":" ";" LD_LIBRARY_DIR_LIST
+ $ENV{LD_LIBRARY_PATH}:$ENV{DYLD_LIBRARY_PATH}
+ )
+
+# Note: the directory search order is intentional: first the modules we
+# are creating, then directories specifically set by the user, and
+# finally automatically located installations (e.g., from pkg-config)
+FIND_PATH(ASL_INCLUDE_DIR asl_pfgh.h
+ HINTS "${CMAKE_INSTALL_PREFIX}/include"
+ "${IPOPT_DIR}/include/coin-or/asl"
+ "${IPOPT_DIR}/include/coin/ThirdParty"
+ "${AMPLMP_DIR}/include"
+ "${PC_COINASL_INCLUDEDIR}"
+ "${PC_COINASL_INCLUDE_DIRS}"
+ PATH_SUFFIXES asl
+)
+FIND_LIBRARY(ASL_LIBRARY NAMES coinasl asl
+ HINTS "${CMAKE_INSTALL_PREFIX}/lib"
+ "${IPOPT_DIR}/lib"
+ "${AMPLMP_DIR}/lib"
+ "${PC_COINASL_LIBDIR}"
+ "${PC_COINASL_LIBRARY_DIRS}"
+ ${LD_LIBRARY_DIR_LIST}
+)
+FIND_LIBRARY(MA27_LIBRARY NAMES coinhsl libcoinhsl ma27 libma27
+ HINTS "${CMAKE_INSTALL_PREFIX}/lib"
+ "${IPOPT_DIR}/lib"
+ "${PC_COINHSL_LIBDIR}"
+ "${PC_COINHSL_LIBRARY_DIRS}"
+ "${MA27_DIR}"
+ "${MA27_DIR}/lib"
+ ${LD_LIBRARY_DIR_LIST}
+)
+FIND_LIBRARY(MA57_LIBRARY NAMES coinhsl libcoinhsl ma57 libma57
+ HINTS "${CMAKE_INSTALL_PREFIX}/lib"
+ "${IPOPT_DIR}/lib"
+ "${PC_COINHSL_LIBDIR}"
+ "${PC_COINHSL_LIBRARY_DIRS}"
+ "${MA57_DIR}"
+ "${MA57_DIR}/lib"
+ ${LD_LIBRARY_DIR_LIST}
+)
+
+# If we were able to find the HSL, we will automatically enable the ma27
+# interface, as all versions of the HSL library contain ma27.
+IF( MA27_LIBRARY OR MA27_OBJECT )
+ set_property(CACHE BUILD_MA27 PROPERTY VALUE ON)
+ENDIF()
+
+# If BUILD_AMPLMP_IF_NEEDED is set and we couldn't find / weren't
+# pointed to an ASL build, then we will forcibly enable the AMPLMP build
+# to provide the ASL.
+IF( BUILD_AMPLMP_IF_NEEDED AND (NOT ASL_LIBRARY OR NOT ASL_INCLUDE_DIR) )
+ set_property(CACHE BUILD_AMPLMP PROPERTY VALUE ON)
+ENDIF()
+
+IF( BUILD_AMPLMP )
+ get_filename_component(ABS_INSTALL_PREFIX "${CMAKE_INSTALL_PREFIX}" ABSOLUTE)
+ ExternalProject_Add(amplmp
+ GIT_TAG ${AMPLMP_TAG}
+ GIT_REPOSITORY https://github.com/ampl/mp.git
+ # We don't need *any* submodules, but leaving it as an empty string
+ # doesn't disable it as suggested by the documentation. A
+ # "workaround" from the web is to specify an existing directory that
+ # is *not* a submodule
+ GIT_SUBMODULES test
+ CMAKE_CACHE_ARGS -DCMAKE_INSTALL_PREFIX:STRING=${ABS_INSTALL_PREFIX}
+ UPDATE_DISCONNECTED TRUE
+ # 3.1.0 needs to be patched to compile with recent compilers,
+ # notably ubuntu 18.04. The patch applies a backport of fmtlib/fmt
+ # abbefd7; see https://github.com/fmtlib/fmt/issues/398
+ # The patch also disables AMPL/MP tests to speed up compilation.
+ PATCH_COMMAND git apply
+ ${CMAKE_CURRENT_SOURCE_DIR}/amplmp-${AMPLMP_TAG}.patch
+ )
+ # Update the ASL paths (if necessary). Since these do not (yet)
+ # exist, we need to bypass find_path / find_library and explicitly set
+ # the directories that this build will create. However, we will only
+ # do this if the paths have not already been set (so users can always
+ # override what we do here)
+ IF(NOT ASL_INCLUDE_DIR OR NOT ASL_LIBRARY)
+ set_property(CACHE ASL_INCLUDE_DIR PROPERTY VALUE
+ "${ABS_INSTALL_PREFIX}/include/asl")
+ IF( WIN32 )
+ set_property(CACHE ASL_LIBRARY PROPERTY VALUE
+ "${ABS_INSTALL_PREFIX}/lib/asl.lib")
+ ELSE()
+ set_property(CACHE ASL_LIBRARY PROPERTY VALUE
+ "${ABS_INSTALL_PREFIX}/lib/libasl.a")
+ ENDIF()
+ ENDIF()
+ENDIF()
+
+set(PYNUMERO_ASL_SOURCES
+ "AmplInterface.cpp"
+ "AmplInterface.hpp"
+ "AssertUtils.hpp"
+)
+
+IF( BUILD_ASL )
+ ADD_LIBRARY( pynumero_ASL SHARED ${PYNUMERO_ASL_SOURCES} )
+ TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${ASL_LIBRARY} )
+ if ( DL_LIBRARY )
+ TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${DL_LIBRARY} )
+ ENDIF()
+ TARGET_INCLUDE_DIRECTORIES( pynumero_ASL
+ PUBLIC ${ASL_INCLUDE_DIR}
+ INTERFACE . )
+ TARGET_COMPILE_DEFINITIONS( pynumero_ASL PRIVATE BUILDING_PYNUMERO_ASL )
+ SET_TARGET_PROPERTIES( pynumero_ASL PROPERTIES ENABLE_EXPORTS 1 )
+ INSTALL( TARGETS pynumero_ASL LIBRARY DESTINATION lib
+ RUNTIME DESTINATION lib )
+ IF( BUILD_AMPLMP )
+ # If we are building AMPL/MP, it is possible that we are linking
+ # against it, so we will add the appropriate dependency
+ add_dependencies(pynumero_ASL amplmp)
+ ENDIF()
+ENDIF()
+
+#
+# build hsl interfaces
+#
+set(PYNUMERO_MA27_SOURCES
+ "ma27Interface.cpp"
+)
+
+IF( BUILD_MA27 )
+ ADD_LIBRARY( pynumero_MA27 SHARED ${PYNUMERO_MA27_SOURCES} )
+ IF( MA27_OBJECT )
+ TARGET_LINK_LIBRARIES( pynumero_MA27 ${MA27_OBJECT} )
+ ELSE()
+ TARGET_LINK_LIBRARIES( pynumero_MA27 ${MA27_LIBRARY} )
+ ENDIF()
+ if ( DL_LIBRARY )
+ TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${DL_LIBRARY} )
+ ENDIF()
+ TARGET_COMPILE_DEFINITIONS( pynumero_MA27 PRIVATE BUILDING_PYNUMERO_MA27 )
+ SET_TARGET_PROPERTIES( pynumero_MA27 PROPERTIES ENABLE_EXPORTS 1 )
+ INSTALL(TARGETS pynumero_MA27 LIBRARY DESTINATION lib
+ RUNTIME DESTINATION lib )
+ENDIF()
+
+set(PYNUMERO_MA57_SOURCES
+ "ma57Interface.cpp"
+)
+
+IF( BUILD_MA57 )
+ ADD_LIBRARY( pynumero_MA57 SHARED ${PYNUMERO_MA57_SOURCES} )
+ TARGET_LINK_LIBRARIES( pynumero_MA57 ${MA57_LIBRARY} )
+ if ( DL_LIBRARY )
+ TARGET_LINK_LIBRARIES( pynumero_ASL PUBLIC ${DL_LIBRARY} )
+ ENDIF()
+ TARGET_COMPILE_DEFINITIONS( pynumero_MA27 PRIVATE BUILDING_PYNUMERO_MA57 )
+ SET_TARGET_PROPERTIES( pynumero_MA57 PROPERTIES ENABLE_EXPORTS 1 )
+ INSTALL(TARGETS pynumero_MA57 LIBRARY DESTINATION lib
+ RUNTIME DESTINATION lib )
+ENDIF()
+
+#
+# build the tests for the interfaces
+#
+add_subdirectory(tests)
diff --git a/pyomo/contrib/pynumero/src/amplmp-3.1.0.patch b/pyomo/contrib/pynumero/src/amplmp-3.1.0.patch
new file mode 100644
index 00000000000..e0746624f00
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/amplmp-3.1.0.patch
@@ -0,0 +1,48 @@
+diff --git a/CMakeLists.txt b/CMakeLists.txt
+index 523faa7..2523b22 100644
+--- a/CMakeLists.txt
++++ b/CMakeLists.txt
+@@ -388,9 +388,6 @@ ** Disable AMPL testing
+ enable_cxx11(benchmark)
+ endif ()
+
+-enable_testing()
+-add_subdirectory(test)
+-
+ install(DIRECTORY include/mp DESTINATION include)
+ install(TARGETS mp DESTINATION lib RUNTIME DESTINATION bin)
+ install(FILES LICENSE.rst DESTINATION share/mp)
+diff --git a/include/mp/format.h b/include/mp/format.h
+index c5d09b5..4f5f20e 100644
+--- a/include/mp/format.h
++++ b/include/mp/format.h
+@@ -1747,21 +1747,21 @@ backport of fmtlib/fmt abbefd7; see fmtlib/fmt#398
+ typedef typename BasicWriter::CharPtr CharPtr;
+ Char fill = internal::CharTraits::cast(spec_.fill());
+ CharPtr out = CharPtr();
+- const unsigned CHAR_WIDTH = 1;
+- if (spec_.width_ > CHAR_WIDTH) {
++ const unsigned CHAR_SIZE = 1;
++ if (spec_.width_ > CHAR_SIZE) {
+ out = writer_.grow_buffer(spec_.width_);
+ if (spec_.align_ == ALIGN_RIGHT) {
+- std::uninitialized_fill_n(out, spec_.width_ - CHAR_WIDTH, fill);
+- out += spec_.width_ - CHAR_WIDTH;
++ std::uninitialized_fill_n(out, spec_.width_ - CHAR_SIZE, fill);
++ out += spec_.width_ - CHAR_SIZE;
+ } else if (spec_.align_ == ALIGN_CENTER) {
+ out = writer_.fill_padding(out, spec_.width_,
+- internal::check(CHAR_WIDTH), fill);
++ internal::check(CHAR_SIZE), fill);
+ } else {
+- std::uninitialized_fill_n(out + CHAR_WIDTH,
+- spec_.width_ - CHAR_WIDTH, fill);
++ std::uninitialized_fill_n(out + CHAR_SIZE,
++ spec_.width_ - CHAR_SIZE, fill);
+ }
+ } else {
+- out = writer_.grow_buffer(CHAR_WIDTH);
++ out = writer_.grow_buffer(CHAR_SIZE);
+ }
+ *out = internal::CharTraits::cast(value);
+ }
diff --git a/pyomo/contrib/pynumero/src/ma27Interface.cpp b/pyomo/contrib/pynumero/src/ma27Interface.cpp
new file mode 100644
index 00000000000..624c7edd6f3
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/ma27Interface.cpp
@@ -0,0 +1,285 @@
+#include
+#include
+#include
+#include
+
+// This would normally be in a header file, but as we do not need one,
+// we will explicitly include it here.
+#if defined(_WIN32) || defined(_WIN64)
+# if defined(BUILDING_PYNUMERO_MA27)
+# define PYNUMERO_HSL_EXPORT __declspec(dllexport)
+# else
+# define PYNUMERO_HSL_EXPORT __declspec(dllimport)
+# endif
+#else
+# define PYNUMERO_HSL_EXPORT
+#endif
+
+// Forward declaration of MA27 fortran routines
+extern "C" {
+ void ma27id_(int* ICNTL, double* CNTL);
+ void ma27ad_(int *N, int *NZ, int *IRN, int* ICN,
+ int *IW, int* LIW, int* IKEEP, int *IW1,
+ int* NSTEPS, int* IFLAG, int* ICNTL,
+ double* CNTL, int *INFO, double* OPS);
+ void ma27bd_(int *N, int *NZ, int *IRN, int* ICN,
+ double* A, int* LA, int* IW, int* LIW,
+ int* IKEEP, int* NSTEPS, int* MAXFRT,
+ int* IW1, int* ICNTL, double* CNTL,
+ int* INFO);
+ void ma27cd_(int *N, double* A, int* LA, int* IW,
+ int* LIW, double* W, int* MAXFRT,
+ double* RHS, int* IW1, int* NSTEPS,
+ int* ICNTL, int* INFO);
+} // extern "C"
+
+void abort_bad_memory(int status) {
+ printf("Bad memory allocation in MA27 C interface. Aborting.");
+ exit(status);
+}
+
+
+struct MA27_struct {
+ // Constructor: set defaults, initialize cached arrays to NULL
+ MA27_struct():
+ LA(0),
+ LIW_a(0),
+ LIW_b(0),
+ NSTEPS(0),
+ IFLAG(0),
+ MAXFRT(0),
+ IW_factor(1.2),
+ A_factor(2.0),
+ OPS(0),
+ IW_a(NULL),
+ IW_b(NULL),
+ IKEEP(NULL),
+ A(NULL)
+ {
+ ma27id_(this->ICNTL, this->CNTL);
+ }
+ // Destructor: delete all cached arrays
+ virtual ~MA27_struct() {
+ if ( this->A ) {
+ delete[] this->A;
+ }
+ if ( this->IW_a ) {
+ delete[] this->IW_a;
+ }
+ if ( this->IW_b ) {
+ delete[] this->IW_b;
+ }
+ if ( this->IKEEP ) {
+ delete[] this->IKEEP;
+ }
+ }
+
+ int LA, LIW_a, LIW_b, NSTEPS, IFLAG, MAXFRT;
+ double IW_factor, A_factor, OPS;
+ int* IW_a;
+ int* IW_b;
+ // Use different arrays for IW that is sent to MA27A and that sent to
+ // MA27B because IW must be discarded after MA27A but kept after MA27B.
+ // If these arrays are the same, and a symbolic factorization is performed
+ // after a numeric factorization (e.g. on a new matrix), user-defined
+ // and MA27B-defined allocations of IW can be conflated.
+ int* IKEEP;
+ double* A;
+ int ICNTL[30], INFO[20];
+ double CNTL[5];
+};
+
+extern "C" {
+
+ PYNUMERO_HSL_EXPORT
+ MA27_struct* new_MA27_struct(void) {
+ MA27_struct* ma27 = new MA27_struct;
+ if (ma27 == NULL) { abort_bad_memory(1); }
+ // Return pointer to ma27 that Python program can pass to other
+ // functions in this code
+ return ma27;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void free_MA27_struct(MA27_struct* ma27) {
+ delete ma27;
+ }
+
+ // Functions for setting/accessing INFO/CNTL arrays:
+ PYNUMERO_HSL_EXPORT
+ void set_icntl(MA27_struct* ma27, int i, int val) {
+ ma27->ICNTL[i] = val;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ int get_icntl(MA27_struct* ma27, int i) {
+ return ma27->ICNTL[i];
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void set_cntl(MA27_struct* ma27, int i, double val) {
+ ma27->CNTL[i] = val;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ double get_cntl(MA27_struct* ma27, int i) {
+ return ma27->CNTL[i];
+ }
+
+ PYNUMERO_HSL_EXPORT
+ int get_info(MA27_struct* ma27, int i) {
+ return ma27->INFO[i];
+ }
+
+ // Functions for allocating WORK/FACT arrays:
+ PYNUMERO_HSL_EXPORT
+ void alloc_iw_a(MA27_struct* ma27, int l) {
+ if ( ma27->IW_a ) {
+ delete[] ma27->IW_a;
+ }
+ ma27->LIW_a = l;
+ ma27->IW_a = new int[l];
+ if (ma27->IW_a == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void alloc_iw_b(MA27_struct* ma27, int l) {
+ if ( ma27->IW_b ) {
+ delete[] ma27->IW_b;
+ }
+ ma27->LIW_b = l;
+ ma27->IW_b = new int[l];
+ if (ma27->IW_b == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void alloc_a(MA27_struct* ma27, int l) {
+ if ( ma27->A ) {
+ delete[] ma27->A;
+ }
+ ma27->LA = l;
+ ma27->A = new double[l];
+ if (ma27->A == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void do_symbolic_factorization(MA27_struct* ma27, int N, int NZ,
+ int* IRN, int* ICN) {
+ // Arrays, presumably supplied from Python, are assumed to have base-
+ // zero indices. Convert to base-one before sending to Fortran.
+ for (int i=0; iIW_a ) {
+ int min_size = 2*NZ + 3*N + 1;
+ int size = (int)(ma27->IW_factor*min_size);
+ alloc_iw_a(ma27, size);
+ }
+
+ if ( ma27->IKEEP ) {
+ delete[] ma27->IKEEP;
+ }
+ ma27->IKEEP = new int[3*N];
+ if (ma27->IKEEP == NULL) { abort_bad_memory(1); }
+ int* IW1 = new int[2*N];
+ if (IW1 == NULL) { abort_bad_memory(1); }
+
+ ma27ad_(&N,
+ &NZ,
+ IRN,
+ ICN,
+ ma27->IW_a,
+ &(ma27->LIW_a),
+ ma27->IKEEP,
+ IW1,
+ &(ma27->NSTEPS),
+ &(ma27->IFLAG),
+ ma27->ICNTL,
+ ma27->CNTL,
+ ma27->INFO,
+ &(ma27->OPS));
+
+ delete[] IW1;
+ delete[] ma27->IW_a;
+ ma27->IW_a = NULL;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void do_numeric_factorization(MA27_struct* ma27, int N, int NZ,
+ int* IRN, int* ICN, double* A) {
+
+ // Convert indices to base-one for Fortran
+ for (int i=0; iA ) {
+ int info5 = ma27->INFO[5-1];
+ int size = (int)(ma27->A_factor*info5);
+ alloc_a(ma27, size);
+ // A is now allocated
+ }
+ // Regardless of ma27->A's previous allocation status, copy values from A.
+ memcpy(ma27->A, A, NZ*sizeof(double));
+
+ if ( ! ma27->IW_b ) {
+ int info6 = ma27->INFO[6-1];
+ int size = (int)(ma27->IW_factor*info6);
+ alloc_iw_b(ma27, size);
+ }
+
+ int* IW1 = new int[N];
+ if (IW1 == NULL) { abort_bad_memory(1); }
+
+ ma27bd_(&N,
+ &NZ,
+ IRN,
+ ICN,
+ ma27->A,
+ &(ma27->LA),
+ ma27->IW_b,
+ &(ma27->LIW_b),
+ ma27->IKEEP,
+ &(ma27->NSTEPS),
+ &(ma27->MAXFRT),
+ IW1,
+ ma27->ICNTL,
+ ma27->CNTL,
+ ma27->INFO);
+
+ delete[] IW1;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void do_backsolve(MA27_struct* ma27, int N, double* RHS) {
+
+ double* W = new double[ma27->MAXFRT];
+ if (W == NULL) { abort_bad_memory(1); }
+ int* IW1 = new int[ma27->NSTEPS];
+ if (IW1 == NULL) { abort_bad_memory(1); }
+
+ ma27cd_(
+ &N,
+ ma27->A,
+ &(ma27->LA),
+ ma27->IW_b,
+ &(ma27->LIW_b),
+ W,
+ &(ma27->MAXFRT),
+ RHS,
+ IW1,
+ &(ma27->NSTEPS),
+ ma27->ICNTL,
+ ma27->INFO
+ );
+
+ delete[] IW1;
+ delete[] W;
+ }
+
+} // extern "C"
diff --git a/pyomo/contrib/pynumero/src/ma57Interface.cpp b/pyomo/contrib/pynumero/src/ma57Interface.cpp
new file mode 100644
index 00000000000..99b98ef6215
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/ma57Interface.cpp
@@ -0,0 +1,411 @@
+#include
+#include
+#include
+
+// This would normally be in a header file, but as we do not need one,
+// we will explicitly include it here.
+#if defined(_WIN32) || defined(_WIN64)
+# if defined(BUILDING_PYNUMERO_MA57)
+# define PYNUMERO_HSL_EXPORT __declspec(dllexport)
+# else
+# define PYNUMERO_HSL_EXPORT __declspec(dllimport)
+# endif
+#else
+# define PYNUMERO_HSL_EXPORT
+#endif
+
+// Forward declaration of MA57 fortran routines
+extern "C" {
+ void ma57id_(double* CNTL, int* ICNTL);
+ void ma57ad_(int *N, int *NE, const int *IRN, const int* JCN,
+ int *LKEEP, int* KEEP, int* IWORK, int *ICNTL,
+ int* INFO, double* RINFO);
+ void ma57bd_(int *N, int *NE, double* A, double* FACT, int* LFACT,
+ int* IFACT, int* LIFACT, int* LKEEP, int* KEEP, int* IWORK,
+ int* ICNTL, double* CNTL, int* INFO, double* RINFO);
+ void ma57cd_(int* JOB, int *N, double* FACT, int* LFACT,
+ int* IFACT, int* LIFACT, int* NRHS, double* RHS,
+ int* LRHS, double* WORK, int* LWORK, int* IWORK,
+ int* ICNTL, int* INFO);
+ void ma57dd_(int* JOB, int *N, int *NE, int *IRN, int *JCN,
+ double *FACT, int *LFACT, int *IFACT, int *LIFACT,
+ double *RHS, double *X, double *RESID, double *WORK,
+ int *IWORK, int *ICNTL, double *CNTL, int *INFO,
+ double *RINFO);
+ void ma57ed_(int *N, int* IC, int* KEEP, double* FACT, int* LFACT,
+ double* NEWFAC, int* LNEW, int* IFACT, int* LIFACT,
+ int* NEWIFC, int* LINEW, int* INFO);
+} // extern "C"
+
+void abort_bad_memory(int status){
+ printf("Bad memory allocation in MA57 C interface. Aborting.");
+ exit(status);
+}
+
+
+struct MA57_struct {
+ MA57_struct():
+ LKEEP(0), LIFACT(0), LWORK(0), LFACT(0),
+ LRHS(0), NRHS(0), JOB(0),
+ NRHS_set(false),
+ LRHS_set(false),
+ JOB_set(false),
+ WORK_factor(1.2),
+ FACT_factor(2.0),
+ IFACT_factor(2.0),
+ KEEP(NULL),
+ IFACT(NULL),
+ WORK(NULL),
+ FACT(NULL)
+ {
+ ma57id_(this->CNTL, this->ICNTL);
+ }
+ virtual ~MA57_struct() {
+ if ( this->WORK ) {
+ delete[] this->WORK;
+ }
+ if ( this->FACT ) {
+ delete[] this->FACT;
+ }
+ if ( this->IFACT ) {
+ delete[] this->IFACT;
+ }
+ if ( this->KEEP ) {
+ delete[] this->KEEP;
+ }
+ }
+
+ int LKEEP, LIFACT, LWORK, LFACT, LRHS, NRHS, JOB;
+ bool NRHS_set, LRHS_set, JOB_set;
+ double WORK_factor, FACT_factor, IFACT_factor;
+ int* KEEP;
+ int* IFACT;
+ double* WORK;
+ double* FACT;
+ int ICNTL[20], INFO[40];
+ double CNTL[5], RINFO[20];
+};
+
+extern "C" {
+
+ PYNUMERO_HSL_EXPORT
+ MA57_struct* new_MA57_struct(void){
+
+ MA57_struct* ma57 = new MA57_struct;
+ if (ma57 == NULL) { abort_bad_memory(1); }
+ // Return pointer to ma57 that Python program can pass to other
+ // functions in this code
+ return ma57;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void free_MA57_struct(MA57_struct* ma57) {
+ delete ma57;
+ }
+
+ // Functions for setting/accessing INFO/CNTL arrays:
+ PYNUMERO_HSL_EXPORT
+ void set_icntl(MA57_struct* ma57, int i, int val) {
+ ma57->ICNTL[i] = val;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ int get_icntl(MA57_struct* ma57, int i) {
+ return ma57->ICNTL[i];
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void set_cntl(MA57_struct* ma57, int i, double val) {
+ ma57->CNTL[i] = val;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ double get_cntl(MA57_struct* ma57, int i) {
+ return ma57->CNTL[i];
+ }
+
+ PYNUMERO_HSL_EXPORT
+ int get_info(MA57_struct* ma57, int i) {
+ return ma57->INFO[i];
+ }
+
+ PYNUMERO_HSL_EXPORT
+ double get_rinfo(MA57_struct* ma57, int i) {
+ return ma57->RINFO[i];
+ }
+
+ // Functions for allocating WORK/FACT arrays:
+ PYNUMERO_HSL_EXPORT
+ void alloc_keep(MA57_struct* ma57, int l) {
+ if ( ma57->KEEP ) {
+ delete[] ma57->KEEP;
+ }
+ ma57->LKEEP = l;
+ ma57->KEEP = new int[l];
+ if (ma57->KEEP == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void alloc_work(MA57_struct* ma57, int l) {
+ if ( ma57->WORK ) {
+ delete[] ma57->WORK;
+ }
+ ma57->LWORK = l;
+ ma57->WORK = new double[l];
+ if (ma57->WORK == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void alloc_fact(MA57_struct* ma57, int l) {
+ if ( ma57->FACT ) {
+ delete[] ma57->FACT;
+ }
+ ma57->LFACT = l;
+ ma57->FACT = new double[l];
+ if (ma57->FACT == NULL) { abort_bad_memory(1); }
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void alloc_ifact(MA57_struct* ma57, int l) {
+ if ( ma57->IFACT ) {
+ delete[] ma57->IFACT;
+ }
+ ma57->LIFACT = l;
+ ma57->IFACT = new int[l];
+ if (ma57->IFACT == NULL) { abort_bad_memory(1); }
+ }
+
+ // Functions for specifying dimensions of RHS:
+ PYNUMERO_HSL_EXPORT
+ void set_nrhs(MA57_struct* ma57, int n) {
+ ma57->NRHS = n;
+ ma57->NRHS_set = true;
+ }
+
+ PYNUMERO_HSL_EXPORT
+ void set_lrhs(MA57_struct* ma57, int l) {
+ ma57->LRHS = l;
+ ma57->LRHS_set = true;
+ }
+
+ // Specify what job to be performed - maybe make an arg to functions
+ PYNUMERO_HSL_EXPORT
+ void set_job(MA57_struct* ma57, int j) {
+ ma57->JOB = j;
+ ma57->JOB_set = true;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void do_symbolic_factorization(MA57_struct* ma57, int N, int NE,
+ int* IRN, int* JCN) {
+
+ // Arrays, presumably supplied from Python, are assumed to have base-
+ // zero indices. Convert to base-one before sending to Fortran.
+ for (int i=0; iKEEP ) {
+ // KEEP must be >= 5*N+NE+MAX(N,NE)+42
+ int size = 5*N + NE + (NE + N) + 42;
+ alloc_keep(ma57, size);
+ }
+
+ // This is a hard requirement, no need to give the user the option
+ // to change
+ int* IWORK = new int[5*N];
+ if (IWORK == NULL) { abort_bad_memory(1); }
+
+ ma57ad_(&N, &NE, IRN, JCN,
+ &(ma57->LKEEP), ma57->KEEP,
+ IWORK, ma57->ICNTL,
+ ma57->INFO, ma57->RINFO);
+
+ delete[] IWORK;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void do_numeric_factorization(MA57_struct* ma57, int N, int NE,
+ double* A) {
+
+ // Get memory estimates from INFO, allocate FACT and IFACT
+ if ( ! ma57->FACT ) {
+ int info9 = ma57->INFO[9-1];
+ int size = (int)(ma57->FACT_factor*info9);
+ alloc_fact(ma57, size);
+ }
+ if ( ! ma57->IFACT ) {
+ int info10 = ma57->INFO[10-1];
+ int size = (int)(ma57->IFACT_factor*info10);
+ alloc_ifact(ma57, size);
+ }
+
+ // Again, length of IWORK is a hard requirement
+ int* IWORK = new int[N];
+ if (IWORK == NULL) { abort_bad_memory(1); }
+
+ ma57bd_(&N, &NE, A,
+ ma57->FACT, &(ma57->LFACT),
+ ma57->IFACT, &(ma57->LIFACT),
+ &(ma57->LKEEP), ma57->KEEP,
+ IWORK, ma57->ICNTL,
+ ma57->CNTL, ma57->INFO,
+ ma57->RINFO);
+
+ delete[] IWORK;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void do_backsolve(MA57_struct* ma57, int N, double* RHS) {
+
+ // Set number and length (principal axis) of RHS if not already set
+ if (!ma57->NRHS_set) {
+ set_nrhs(ma57, 1);
+ }
+ if (!ma57->LRHS_set) {
+ set_lrhs(ma57, N);
+ }
+
+ // Set JOB. Default is to perform full factorization
+ if (!ma57->JOB_set) {
+ set_job(ma57, 1);
+ }
+
+ // Allocate WORK if not done. Should be >= N
+ if ( ! ma57->WORK ) {
+ int size = (int)(ma57->WORK_factor*ma57->NRHS*N);
+ alloc_work(ma57, size);
+ }
+
+ // IWORK should always be length N
+ int* IWORK = new int[N];
+ if (IWORK == NULL) { abort_bad_memory(1); }
+
+ ma57cd_(
+ &(ma57->JOB),
+ &N,
+ ma57->FACT,
+ &(ma57->LFACT),
+ ma57->IFACT,
+ &(ma57->LIFACT),
+ &(ma57->NRHS),
+ RHS,
+ &(ma57->LRHS),
+ ma57->WORK,
+ &(ma57->LWORK),
+ IWORK,
+ ma57->ICNTL,
+ ma57->INFO
+ );
+
+ delete[] IWORK;
+ delete[] ma57->WORK;
+ ma57->WORK = NULL;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void do_iterative_refinement(MA57_struct* ma57, int N, int NE,
+ double* A, int* IRN, int* JCN, double* RHS, double* X, double* RESID) {
+ // Number of steps of iterative refinement can be controlled with ICNTL[9-1]
+
+ // Set JOB if not set. Controls how (whether) X and RESID will be used
+ if (!ma57->JOB_set) {
+ set_job(ma57, 1);
+ }
+
+ // Need to allocate WORK differently depending on ICNTL options
+ if ( ! ma57->WORK ) {
+ int icntl9 = ma57->ICNTL[9-1];
+ int icntl10 = ma57->ICNTL[10-1];
+ int size;
+ if (icntl9 == 1) {
+ size = (int)(ma57->WORK_factor*N);
+ } else if (icntl9 > 1 && icntl10 == 0) {
+ size = (int)(ma57->WORK_factor*3*N);
+ } else if (icntl9 > 1 && icntl10 > 0) {
+ size = (int)(ma57->WORK_factor*4*N);
+ }
+ alloc_work(ma57, size);
+ }
+
+ int* IWORK = new int[N];
+ if (IWORK == NULL) { abort_bad_memory(1); }
+
+ ma57dd_(
+ &(ma57->JOB),
+ &N,
+ &NE,
+ IRN,
+ JCN,
+ ma57->FACT,
+ &(ma57->LFACT),
+ ma57->IFACT,
+ &(ma57->LIFACT),
+ RHS,
+ X,
+ RESID,
+ ma57->WORK,
+ IWORK,
+ ma57->ICNTL,
+ ma57->CNTL,
+ ma57->INFO,
+ ma57->RINFO
+ );
+
+ delete[] IWORK;
+ delete[] ma57->WORK;
+ ma57->WORK = NULL;
+ }
+
+
+ PYNUMERO_HSL_EXPORT
+ void do_reallocation(MA57_struct* ma57, int N, double realloc_factor, int IC) {
+ // Need realloc_factor > 1 here
+
+ // MA57 seems to require that both LNEW and LINEW are larger than the old
+ // values, regardless of which is being reallocated (set by IC)
+ int LNEW = (int)(realloc_factor*ma57->LFACT);
+ double* NEWFAC = new double[LNEW];
+ if (NEWFAC == NULL) { abort_bad_memory(1); }
+
+ int LINEW = (int)(realloc_factor*ma57->LIFACT);
+ int* NEWIFC = new int[LINEW];
+ if (NEWIFC == NULL) { abort_bad_memory(1); }
+
+ ma57ed_(
+ &N,
+ &IC,
+ ma57->KEEP,
+ ma57->FACT,
+ &(ma57->LFACT),
+ NEWFAC,
+ &LNEW,
+ ma57->IFACT,
+ &(ma57->LIFACT),
+ NEWIFC,
+ &LINEW,
+ ma57->INFO
+ );
+
+ if (IC <= 0) {
+ // Copied real array; new int array is garbage
+ delete[] ma57->FACT;
+ ma57->LFACT = LNEW;
+ ma57->FACT = NEWFAC;
+ delete[] NEWIFC;
+ } else if (IC >= 1) {
+ // Copied int array; new real array is garbage
+ delete[] ma57->IFACT;
+ ma57->LIFACT = LINEW;
+ ma57->IFACT = NEWIFC;
+ delete[] NEWFAC;
+ } // Now either FACT or IFACT, whichever was specified by IC, can be used
+ // as normal in MA57B/C/D
+ }
+
+} // extern "C"
diff --git a/pyomo/contrib/pynumero/src/tests/CMakeLists.txt b/pyomo/contrib/pynumero/src/tests/CMakeLists.txt
new file mode 100644
index 00000000000..391d4e6ddbe
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/tests/CMakeLists.txt
@@ -0,0 +1,5 @@
+
+ADD_EXECUTABLE(pynumero_asl_test simple_test.cpp)
+TARGET_LINK_LIBRARIES( pynumero_asl_test pynumero_ASL)
+INSTALL(TARGETS pynumero_asl_test DESTINATION bin/tests )
+INSTALL(FILES simple_nlp.nl DESTINATION bin/tests )
diff --git a/pyomo/contrib/pynumero/cmake/tests/simple_nlp.nl b/pyomo/contrib/pynumero/src/tests/simple_nlp.nl
similarity index 100%
rename from pyomo/contrib/pynumero/cmake/tests/simple_nlp.nl
rename to pyomo/contrib/pynumero/src/tests/simple_nlp.nl
diff --git a/pyomo/contrib/pynumero/src/tests/simple_test.cpp b/pyomo/contrib/pynumero/src/tests/simple_test.cpp
new file mode 100644
index 00000000000..4edbbb67a35
--- /dev/null
+++ b/pyomo/contrib/pynumero/src/tests/simple_test.cpp
@@ -0,0 +1,11 @@
+#include
+#include "AmplInterface.hpp"
+
+int main()
+{
+ AmplInterface* ans = new AmplInterfaceFile();
+ ans->initialize("simple_nlp.nl");
+ delete ans;
+ std::cout << "Done\n";
+ return 0;
+}
diff --git a/pyomo/contrib/satsolver/satsolver.py b/pyomo/contrib/satsolver/satsolver.py
index fd5d7aa7420..8352353eb91 100644
--- a/pyomo/contrib/satsolver/satsolver.py
+++ b/pyomo/contrib/satsolver/satsolver.py
@@ -22,11 +22,6 @@
from pyomo.core.expr.visitor import (
StreamBasedExpressionVisitor,
)
-from pyomo.core.kernel.set_types import (
- RealSet,
- IntegerSet,
- BooleanSet
-)
from pyomo.gdp import Disjunction
_z3_available = True
@@ -132,17 +127,17 @@ def _add_bound(self, var):
def add_var(self, var):
label = self.variable_label_map.getSymbol(var)
domain = var.domain
- if isinstance(domain, RealSet):
+ if var.is_continuous():
self.variable_list.append("(declare-fun " + label + "() Real)\n")
self._add_bound(var)
- elif isinstance(domain, IntegerSet):
+ elif var.is_binary():
self.variable_list.append("(declare-fun " + label + "() Int)\n")
self._add_bound(var)
- elif isinstance(domain, BooleanSet):
+ elif var.is_integer():
self.variable_list.append("(declare-fun " + label + "() Int)\n")
self._add_bound(var)
else:
- raise NotImplementedError("SMT cannot handle" + str(domain) + "variables")
+ raise NotImplementedError("SMT cannot handle " + str(domain) + " variables")
return label
# Defines SMT expression from pyomo expression
@@ -282,20 +277,20 @@ def exitNode(self, node, data):
raise NotImplementedError(str(type(node)) + " expression not handled by z3 interface")
return ans
- def beforeChild(self, node, child):
+ def beforeChild(self, node, child, child_idx):
if type(child) in nonpyomo_leaf_types:
# This means the child is POD
# i.e., int, float, string
return False, str(child)
- elif child.is_variable_type():
- return False, str(self.variable_label_map.getSymbol(child))
- elif child.is_parameter_type():
- return False, str(value(child))
- elif not child.is_expression_type():
- return False, str(child)
- else:
- # this is an expression node
+ elif child.is_expression_type():
return True, ""
+ elif child.is_numeric_type():
+ if child.is_fixed():
+ return False, str(value(child))
+ else:
+ return False, str(self.variable_label_map.getSymbol(child))
+ else:
+ return False, str(child)
def finalizeResult(self, node_result):
return node_result
diff --git a/pyomo/contrib/satsolver/test_satsolver.py b/pyomo/contrib/satsolver/test_satsolver.py
index 4f3c06c59b4..f92a2b17fe5 100644
--- a/pyomo/contrib/satsolver/test_satsolver.py
+++ b/pyomo/contrib/satsolver/test_satsolver.py
@@ -4,7 +4,7 @@
from pyutilib.misc import import_file
from pyomo.contrib.satsolver.satsolver import satisfiable, _z3_available
-from pyomo.core.kernel.set_types import PositiveIntegers, NonNegativeReals, Binary
+from pyomo.core.base.set_types import PositiveIntegers, NonNegativeReals, Binary
from pyomo.environ import (
ConcreteModel, Var, Constraint, Objective, sin, cos, tan, asin, acos, atan, sqrt, log,
minimize)
diff --git a/pyomo/contrib/sensitivity_toolbox/sens.py b/pyomo/contrib/sensitivity_toolbox/sens.py
index 6485adf110a..c3467235334 100644
--- a/pyomo/contrib/sensitivity_toolbox/sens.py
+++ b/pyomo/contrib/sensitivity_toolbox/sens.py
@@ -74,7 +74,7 @@ def sipopt(instance,paramSubList,perturbList,cloneModel=True,
"length of perturbList")
for pp in paramSubList:
- if pp.type() is not Param:
+ if pp.ctype is not Param:
raise ValueError("paramSubList argument is expecting a list of Params")
for pp in paramSubList:
@@ -83,7 +83,7 @@ def sipopt(instance,paramSubList,perturbList,cloneModel=True,
for pp in perturbList:
- if pp.type() is not Param:
+ if pp.ctype is not Param:
raise ValueError("perturbList argument is expecting a list of Params")
#Add model block to compartmentalize all sipopt data
b=Block()
diff --git a/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py b/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py
index b96fa206c52..1193388f2b4 100644
--- a/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py
+++ b/pyomo/contrib/sensitivity_toolbox/tests/test_sens.py
@@ -97,56 +97,56 @@ def test_clonedModel_soln(self):
self.assertFalse(m_sipopt == m_orig)
self.assertTrue(hasattr(m_sipopt,'_sipopt_data') and
- m_sipopt._sipopt_data.type() is Block)
+ m_sipopt._sipopt_data.ctype is Block)
self.assertFalse(hasattr(m_orig,'_sipopt_data'))
self.assertFalse(hasattr(m_orig,'b'))
#verify variable declaration
self.assertTrue(hasattr(m_sipopt._sipopt_data,'a') and
- m_sipopt._sipopt_data.a.type() is Var)
+ m_sipopt._sipopt_data.a.ctype is Var)
self.assertTrue(hasattr(m_sipopt._sipopt_data,'H') and
- m_sipopt._sipopt_data.H.type() is Var)
+ m_sipopt._sipopt_data.H.ctype is Var)
#verify suffixes
self.assertTrue(hasattr(m_sipopt,'sens_state_0') and
- m_sipopt.sens_state_0.type() is Suffix and
+ m_sipopt.sens_state_0.ctype is Suffix and
m_sipopt.sens_state_0[m_sipopt._sipopt_data.H]==2 and
m_sipopt.sens_state_0[m_sipopt._sipopt_data.a]==1)
self.assertTrue(hasattr(m_sipopt,'sens_state_1') and
- m_sipopt.sens_state_1.type() is Suffix and
+ m_sipopt.sens_state_1.ctype is Suffix and
m_sipopt.sens_state_1[m_sipopt._sipopt_data.H]==2 and
m_sipopt.sens_state_1[m_sipopt._sipopt_data.a]==1)
self.assertTrue(hasattr(m_sipopt,'sens_state_value_1') and
- m_sipopt.sens_state_value_1.type() is Suffix and
+ m_sipopt.sens_state_value_1.ctype is Suffix and
m_sipopt.sens_state_value_1[
m_sipopt._sipopt_data.H]==0.55 and
m_sipopt.sens_state_value_1[
m_sipopt._sipopt_data.a]==-0.25)
self.assertTrue(hasattr(m_sipopt,'sens_init_constr') and
- m_sipopt.sens_init_constr.type() is Suffix and
+ m_sipopt.sens_init_constr.ctype is Suffix and
m_sipopt.sens_init_constr[
m_sipopt._sipopt_data.paramConst[1]]==1 and
m_sipopt.sens_init_constr[
m_sipopt._sipopt_data.paramConst[2]]==2)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1') and
- m_sipopt.sens_sol_state_1.type() is Suffix)
+ m_sipopt.sens_sol_state_1.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1[
m_sipopt.F[15]],-0.00102016765,8)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_L') and
- m_sipopt.sens_sol_state_1_z_L.type() is Suffix)
+ m_sipopt.sens_sol_state_1_z_L.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1_z_L[
m_sipopt.u[15]],-2.181712e-09,13)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_U') and
- m_sipopt.sens_sol_state_1_z_U.type() is Suffix)
+ m_sipopt.sens_sol_state_1_z_U.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1_z_U[
m_sipopt.u[15]],6.580899e-09,13)
@@ -191,53 +191,53 @@ def test_noClone_soln(self):
#test _sipopt_data block exists
self.assertTrue(hasattr(m_orig,'_sipopt_data') and
- m_orig._sipopt_data.type() is Block)
+ m_orig._sipopt_data.ctype is Block)
#test variable declaration
self.assertTrue(hasattr(m_sipopt._sipopt_data,'a') and
- m_sipopt._sipopt_data.a.type() is Var)
+ m_sipopt._sipopt_data.a.ctype is Var)
self.assertTrue(hasattr(m_sipopt._sipopt_data,'H') and
- m_sipopt._sipopt_data.H.type() is Var)
+ m_sipopt._sipopt_data.H.ctype is Var)
#test for suffixes
self.assertTrue(hasattr(m_sipopt,'sens_state_0') and
- m_sipopt.sens_state_0.type() is Suffix and
+ m_sipopt.sens_state_0.ctype is Suffix and
m_sipopt.sens_state_0[m_sipopt._sipopt_data.H]==2 and
m_sipopt.sens_state_0[m_sipopt._sipopt_data.a]==1)
self.assertTrue(hasattr(m_sipopt,'sens_state_1') and
- m_sipopt.sens_state_1.type() is Suffix and
+ m_sipopt.sens_state_1.ctype is Suffix and
m_sipopt.sens_state_1[m_sipopt._sipopt_data.H]==2 and
m_sipopt.sens_state_1[m_sipopt._sipopt_data.a]==1)
self.assertTrue(hasattr(m_sipopt,'sens_state_value_1') and
- m_sipopt.sens_state_value_1.type() is Suffix and
+ m_sipopt.sens_state_value_1.ctype is Suffix and
m_sipopt.sens_state_value_1[
m_sipopt._sipopt_data.H]==0.55 and
m_sipopt.sens_state_value_1[
m_sipopt._sipopt_data.a]==-0.25)
self.assertTrue(hasattr(m_sipopt,'sens_init_constr') and
- m_sipopt.sens_init_constr.type() is Suffix and
+ m_sipopt.sens_init_constr.ctype is Suffix and
m_sipopt.sens_init_constr[
m_sipopt._sipopt_data.paramConst[1]]==1 and
m_sipopt.sens_init_constr[
m_sipopt._sipopt_data.paramConst[2]]==2)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1') and
- m_sipopt.sens_sol_state_1.type() is Suffix)
+ m_sipopt.sens_sol_state_1.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1[
m_sipopt.F[15]],-0.00102016765,8)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_L') and
- m_sipopt.sens_sol_state_1_z_L.type() is Suffix)
+ m_sipopt.sens_sol_state_1_z_L.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1_z_L[
m_sipopt.u[15]],-2.181712e-09,13)
self.assertTrue(hasattr(m_sipopt,'sens_sol_state_1_z_U') and
- m_sipopt.sens_sol_state_1_z_U.type() is Suffix)
+ m_sipopt.sens_sol_state_1_z_U.ctype is Suffix)
self.assertAlmostEqual(
m_sipopt.sens_sol_state_1_z_U[
m_sipopt.u[15]],6.580899e-09,13)
@@ -317,8 +317,8 @@ def test_constraintSub(self):
m_sipopt = sipopt(m,[m.a,m.b], [m.pert_a,m.pert_b])
#verify substitutions in equality constraint
- self.assertTrue(m_sipopt.C_equal.lower.type() is Param and
- m_sipopt.C_equal.upper.type() is Param)
+ self.assertTrue(m_sipopt.C_equal.lower.ctype is Param and
+ m_sipopt.C_equal.upper.ctype is Param)
self.assertFalse(m_sipopt.C_equal.active)
self.assertTrue(m_sipopt._sipopt_data.constList[3].lower == 0.0 and
@@ -328,7 +328,7 @@ def test_constraintSub(self):
#verify substitutions in one-sided bounded constraint
self.assertTrue(m_sipopt.C_singleBnd.lower is None and
- m_sipopt.C_singleBnd.upper.type() is Param)
+ m_sipopt.C_singleBnd.upper.ctype is Param)
self.assertFalse(m_sipopt.C_singleBnd.active)
self.assertTrue(m_sipopt._sipopt_data.constList[4].lower is None and
@@ -337,8 +337,8 @@ def test_constraintSub(self):
m_sipopt._sipopt_data.constList[4].body))) == 2)
#verify substitutions in ranged inequality constraint
- self.assertTrue(m_sipopt.C_rangedIn.lower.type() is Param and
- m_sipopt.C_rangedIn.upper.type() is Param)
+ self.assertTrue(m_sipopt.C_rangedIn.lower.ctype is Param and
+ m_sipopt.C_rangedIn.upper.ctype is Param)
self.assertFalse(m_sipopt.C_rangedIn.active)
self.assertTrue(m_sipopt._sipopt_data.constList[1].lower is None and
diff --git a/pyomo/contrib/simplemodel/__init__.py b/pyomo/contrib/simplemodel/__init__.py
index 9b0d49f07c1..cbfa0ba549e 100644
--- a/pyomo/contrib/simplemodel/__init__.py
+++ b/pyomo/contrib/simplemodel/__init__.py
@@ -1,11 +1,11 @@
from pyomo.common.deprecation import deprecation_warning
try:
- deprecation_warning("The use of pyomo.contrib.simple model is deprecated. "
- "This capability is now supported in the "
- "pyomo_simplemodel package, which is included in the "
- "pyomo_community distribution.", version='TBD',
- remove_in='TBD')
+ deprecation_warning(
+ "The use of pyomo.contrib.simple model is deprecated. "
+ "This capability is now supported in the pyomo_simplemodel "
+ "package, which is included in the pyomo_community distribution.",
+ version='5.6.9')
from pyomocontrib_simplemodel import *
except:
# Only raise exception if nose is NOT running
diff --git a/pyomo/contrib/trustregion/GeometryGenerator.py b/pyomo/contrib/trustregion/GeometryGenerator.py
index 7809d6852c3..d973838e29d 100644
--- a/pyomo/contrib/trustregion/GeometryGenerator.py
+++ b/pyomo/contrib/trustregion/GeometryGenerator.py
@@ -3,7 +3,6 @@
# This is an auto geometry generator for quadratic ROM
import numpy as np
from six import StringIO
-import pyomo.common
from pyomo.contrib.trustregion.cache import GeometryCache
logger = logging.getLogger('pyomo.contrib.trustregion')
diff --git a/pyomo/contrib/trustregion/PyomoInterface.py b/pyomo/contrib/trustregion/PyomoInterface.py
index f7f6021bd5c..dcde80b7dc8 100644
--- a/pyomo/contrib/trustregion/PyomoInterface.py
+++ b/pyomo/contrib/trustregion/PyomoInterface.py
@@ -152,7 +152,7 @@ def transformForTrustRegion(self,model,eflist):
TRF = Block()
# Get all varibles
- seenVar = Set()
+ seenVar = set()
allVariables = []
for var in model.component_data_objects(Var):
if id(var) not in seenVar:
@@ -186,7 +186,7 @@ def transformForTrustRegion(self,model,eflist):
# xvars and zvars are lists of x and z varibles as in the paper
TRF.xvars = []
TRF.zvars = []
- seenVar = Set()
+ seenVar = set()
for varss in TRF.exfn_xvars:
for var in varss:
if id(var) not in seenVar:
diff --git a/pyomo/contrib/trustregion/getGJH.py b/pyomo/contrib/trustregion/getGJH.py
index f9ffb294cde..2dcb644ca2d 100644
--- a/pyomo/contrib/trustregion/getGJH.py
+++ b/pyomo/contrib/trustregion/getGJH.py
@@ -33,7 +33,7 @@
def get_gjh(downloader):
system, bits = downloader.get_sysinfo()
- url = downloader.get_url(urlmap)
+ url = downloader.get_platform_url(urlmap)
downloader.set_destination_filename(
os.path.join('bin', 'gjh'+exemap[system]))
diff --git a/pyomo/contrib/trustregion/tests/TestTRConfig.py b/pyomo/contrib/trustregion/tests/TestTRConfig.py
index 347deb7fbb1..461f7881f60 100644
--- a/pyomo/contrib/trustregion/tests/TestTRConfig.py
+++ b/pyomo/contrib/trustregion/tests/TestTRConfig.py
@@ -5,17 +5,12 @@
from pyutilib.misc.config import ConfigBlock, ConfigValue, ConfigList
from pyomo.common.config import (
PositiveInt, PositiveFloat, NonNegativeFloat, In)
+from pyomo.common.dependencies import numpy_available
from pyomo.core import Var, value
from pyomo.environ import *
from pyomo.opt import SolverFactory, SolverStatus, TerminationCondition
-try:
- import numpy
- numpy_available = True
-except ImportError:
- numpy_available = False
-
@unittest.skipIf(not SolverFactory('ipopt').available(False), "The IPOPT solver is not available")
@unittest.skipIf(not SolverFactory('gjh').available(False), "The GJH solver is not available")
@unittest.skipIf(not numpy_available, "Cannot test the trustregion solver without numpy")
diff --git a/pyomo/contrib/viewer/tests/pytest_qt.py b/pyomo/contrib/viewer/tests/pytest_qt.py
index 9245775d1e5..8049f58549e 100644
--- a/pyomo/contrib/viewer/tests/pytest_qt.py
+++ b/pyomo/contrib/viewer/tests/pytest_qt.py
@@ -83,10 +83,10 @@ def test_model_information(qtbot):
text = mw._dialog.text()
mw._dialog.close()
text = text.split("\n")
- assert(text[0].startswith("8")) # Active constraints
- assert(text[1].startswith("7")) # Active equalities
- assert(text[2].startswith("7")) # Free vars in active equalities
- assert(text[3].startswith("0")) # degrees of feedom
+ assert(str(text[0]).startswith("8")) # Active constraints
+ assert(str(text[1]).startswith("7")) # Active equalities
+ assert(str(text[2]).startswith("7")) # Free vars in active equalities
+    assert(str(text[3]).startswith("0")) # degrees of freedom
# Main window has parts it is supposed to
assert(hasattr(mw, "menuBar"))
assert(isinstance(mw.variables, ModelBrowser))
diff --git a/pyomo/core/base/PyomoModel.py b/pyomo/core/base/PyomoModel.py
index 5e662adb192..81768ccc783 100644
--- a/pyomo/core/base/PyomoModel.py
+++ b/pyomo/core/base/PyomoModel.py
@@ -22,39 +22,30 @@
from collections import OrderedDict
except ImportError: #pragma:nocover
from ordereddict import OrderedDict
-try:
- from pympler import muppy
- from pympler import summary
- pympler_available = True
-except ImportError: #pragma:nocover
- pympler_available = False
-except AttributeError: #pragma:nocover
- pympler_available = False
-
from pyutilib.math import *
-from pyutilib.misc import tuplize, Container, PauseGC, Bunch
+from pyutilib.misc import Container, PauseGC, Bunch
import pyomo.common
+from pyomo.common.dependencies import pympler, pympler_available
from pyomo.common.deprecation import deprecation_warning
from pyomo.common.plugin import ExtensionPoint
from pyomo.common._task import pyomo_api
-from pyomo.common.deprecation import deprecation_warning
from pyomo.core.expr import expr_common
from pyomo.core.expr.symbol_map import SymbolMap
-from pyomo.core.base.var import _VarData, Var
+from pyomo.core.base.var import Var
from pyomo.core.base.constraint import Constraint
from pyomo.core.base.objective import Objective
from pyomo.core.base.set_types import *
from pyomo.core.base.suffix import active_import_suffix_generator
from pyomo.core.base.indexed_component import IndexedComponent
-from pyomo.dataportal import DataPortal
+from pyomo.dataportal.DataPortal import DataPortal
from pyomo.core.base.plugin import *
from pyomo.core.base.numvalue import *
from pyomo.core.base.block import SimpleBlock
-from pyomo.core.base.sets import Set
+from pyomo.core.base.set import Set, UnknownSetDimen
from pyomo.core.base.component import Component, ComponentUID
from pyomo.core.base.plugin import ModelComponentFactory, TransformationFactory
from pyomo.core.base.label import CNameLabeler, CuidLabeler
@@ -791,19 +782,6 @@ def load(self, arg, namespaces=[None], profile_memory=0, report_timing=None):
namespaces,
profile_memory=profile_memory)
- def _tuplize(self, data, setobj):
- if data is None: #pragma:nocover
- return None
- if setobj.dimen == 1:
- return data
- if len(list(data.keys())) == 1 and list(data.keys())[0] is None and len(data[None]) == 0: # dlw december 2017
- return None
- ans = {}
- for key in data:
- if type(data[key][0]) is tuple:
- return data
- ans[key] = tuplize(data[key], setobj.dimen, setobj.local_name)
- return ans
def _load_model_data(self, modeldata, namespaces, **kwds):
"""
@@ -824,17 +802,17 @@ def _load_model_data(self, modeldata, namespaces, **kwds):
#
profile_memory = kwds.get('profile_memory', 0)
- if (pympler_available is True) and (profile_memory >= 2):
- mem_used = muppy.get_size(muppy.get_objects())
+ if profile_memory >= 2 and pympler_available:
+            mem_used = pympler.muppy.get_size(pympler.muppy.get_objects())
print("")
print(" Total memory = %d bytes prior to model "
"construction" % mem_used)
- if (pympler_available is True) and (profile_memory >= 3):
- gc.collect()
- mem_used = muppy.get_size(muppy.get_objects())
- print(" Total memory = %d bytes prior to model "
- "construction (after garbage collection)" % mem_used)
+ if profile_memory >= 3:
+ gc.collect()
+ mem_used = pympler.muppy.get_size(muppy.get_objects())
+ print(" Total memory = %d bytes prior to model "
+ "construction (after garbage collection)" % mem_used)
#
# Do some error checking
@@ -850,7 +828,7 @@ def _load_model_data(self, modeldata, namespaces, **kwds):
for component_name, component in iteritems(self.component_map()):
- if component.type() is Model:
+ if component.ctype is Model:
continue
self._initialize_component(modeldata, namespaces, component_name, profile_memory)
@@ -875,29 +853,26 @@ def _load_model_data(self, modeldata, namespaces, **kwds):
#connector_expander = ConnectorExpander()
#connector_expander.apply(instance=self)
- if (pympler_available is True) and (profile_memory >= 2):
+ if profile_memory >= 2 and pympler_available:
print("")
print(" Summary of objects following instance construction")
- post_construction_summary = summary.summarize(muppy.get_objects())
- summary.print_(post_construction_summary, limit=100)
+ post_construction_summary = pympler.summary.summarize(
+ pympler.muppy.get_objects())
+ pympler.summary.print_(post_construction_summary, limit=100)
print("")
def _initialize_component(self, modeldata, namespaces, component_name, profile_memory):
declaration = self.component(component_name)
if component_name in modeldata._default:
- if declaration.type() is not Set:
+ if declaration.ctype is not Set:
declaration.set_default(modeldata._default[component_name])
data = None
for namespace in namespaces:
if component_name in modeldata._data.get(namespace,{}):
- if declaration.type() is Set:
- data = self._tuplize(modeldata._data[namespace][component_name],
- declaration)
- else:
- data = modeldata._data[namespace][component_name]
- if not data is None:
+ data = modeldata._data[namespace][component_name]
+ if data is not None:
break
if __debug__ and logger.isEnabledFor(logging.DEBUG):
@@ -922,14 +897,14 @@ def _initialize_component(self, modeldata, namespaces, component_name, profile_m
logger.debug("Constructed component '%s':\n %s"
% ( declaration.name, _out.getvalue()))
- if (pympler_available is True) and (profile_memory >= 2):
- mem_used = muppy.get_size(muppy.get_objects())
+ if profile_memory >= 2 and pympler_available:
+ mem_used = pympler.muppy.get_size(pympler.muppy.get_objects())
print(" Total memory = %d bytes following construction of component=%s" % (mem_used, component_name))
- if (pympler_available is True) and (profile_memory >= 3):
- gc.collect()
- mem_used = muppy.get_size(muppy.get_objects())
- print(" Total memory = %d bytes following construction of component=%s (after garbage collection)" % (mem_used, component_name))
+ if profile_memory >= 3:
+ gc.collect()
+ mem_used = pympler.muppy.get_size(pympler.muppy.get_objects())
+ print(" Total memory = %d bytes following construction of component=%s (after garbage collection)" % (mem_used, component_name))
def create(self, filename=None, **kwargs):
diff --git a/pyomo/core/base/__init__.py b/pyomo/core/base/__init__.py
index 57e2945d4a1..bc6b70350a7 100644
--- a/pyomo/core/base/__init__.py
+++ b/pyomo/core/base/__init__.py
@@ -21,9 +21,12 @@
# Components
#
from pyomo.core.base.component import *
+import pyomo.core.base.indexed_component
from pyomo.core.base.action import *
from pyomo.core.base.check import *
-from pyomo.core.base.sets import *
+from pyomo.core.base.set import (
+ Set, SetOf, simple_set_rule, RangeSet,
+)
from pyomo.core.base.param import *
from pyomo.core.base.var import *
from pyomo.core.base.constraint import *
@@ -45,10 +48,14 @@
import pyomo.core.base._pyomo
#
import pyomo.core.base.util
-from pyomo.core.base.rangeset import *
from pyomo.core.base.instance2dat import *
+# These APIs are deprecated and should be removed in the near future
+from pyomo.core.base.set import (
+ set_options, RealSet, IntegerSet, BooleanSet,
+)
+
#
# This is a hack to strip out modules, which shouldn't have been included in these imports
#
diff --git a/pyomo/core/base/_pyomo.py b/pyomo/core/base/_pyomo.py
index eb7d9ea4071..e217eeaae37 100644
--- a/pyomo/core/base/_pyomo.py
+++ b/pyomo/core/base/_pyomo.py
@@ -8,15 +8,14 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+from six import iteritems
from pyomo.core.base.plugin import *
def predefined_sets():
- from pyomo.core.base.set_types import _virtual_sets
- ans = []
- for item in _virtual_sets:
- ans.append( (item.name, item.doc) )
- return ans
+ from pyomo.core.base.set import GlobalSets
+ return list((name, obj.doc) for name,obj in iteritems(GlobalSets))
def model_components():
- return [(name,ModelComponentFactory.doc(name)) for name in ModelComponentFactory]
+ return [ (name, ModelComponentFactory.doc(name))
+ for name in ModelComponentFactory ]
diff --git a/pyomo/core/base/alias.py b/pyomo/core/base/alias.py
index b84f783fafb..29ab613ec8b 100644
--- a/pyomo/core/base/alias.py
+++ b/pyomo/core/base/alias.py
@@ -56,12 +56,12 @@ def __init__(self, obj):
self._aliased_object = weakref.ref(obj)
ctype = Alias
if isinstance(obj, Component):
- ctype = obj.type()
+ ctype = obj.ctype
else:
if not isinstance(obj, ComponentData):
raise TypeError("Aliased object must be an "
"instance of Component or ComponentData")
- ctype = obj.parent_component().type()
+ ctype = obj.parent_component().ctype
Component.__init__(self, ctype=ctype)
@property
diff --git a/pyomo/core/base/block.py b/pyomo/core/base/block.py
index 34f75f1719f..543b391830e 100644
--- a/pyomo/core/base/block.py
+++ b/pyomo/core/base/block.py
@@ -12,28 +12,35 @@
'active_components', 'components', 'active_components_data',
'components_data', 'SimpleBlock']
+import collections
import copy
+import logging
import sys
import weakref
-import logging
+import textwrap
+
from inspect import isclass
from operator import itemgetter, attrgetter
from six import iteritems, iterkeys, itervalues, StringIO, string_types, \
advance_iterator, PY3
+if PY3:
+ from collections.abc import Mapping as collections_Mapping
+else:
+ from collections import Mapping as collections_Mapping
+
from pyutilib.misc.indent_io import StreamIndenter
from pyomo.common.timing import ConstructionTimer
from pyomo.core.base.plugin import * # ModelComponentFactory
from pyomo.core.base.component import Component, ActiveComponentData, \
ComponentUID
-from pyomo.core.base.sets import Set, _SetDataBase
+from pyomo.core.base.set import Set, RangeSet, GlobalSetBase, _SetDataBase
from pyomo.core.base.var import Var
from pyomo.core.base.misc import apply_indexed_rule
from pyomo.core.base.suffix import ComponentMap
from pyomo.core.base.indexed_component import IndexedComponent, \
ActiveIndexedComponent, UnindexedComponent_set
-import collections
from pyomo.opt.base import ProblemFormat, guess_format
from pyomo.opt import WriterFactory
@@ -41,41 +48,6 @@
logger = logging.getLogger('pyomo.core')
-# Monkey-patch for deepcopying weakrefs
-# Only required on Python <= 2.6
-#
-# TODO: can we verify that this is really needed? [JDS 7/8/14]
-if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
- copy._copy_dispatch[weakref.ref] = copy._copy_immutable
- copy._deepcopy_dispatch[weakref.ref] = copy._deepcopy_atomic
- copy._deepcopy_dispatch[weakref.KeyedRef] = copy._deepcopy_atomic
-
- def dcwvd(self, memo):
- """Deepcopy implementation for WeakValueDictionary class"""
- from copy import deepcopy
- new = self.__class__()
- for key, wr in self.data.items():
- o = wr()
- if o is not None:
- new[deepcopy(key, memo)] = o
- return new
- weakref.WeakValueDictionary.__copy__ = \
- weakref.WeakValueDictionary.copy
- weakref.WeakValueDictionary.__deepcopy__ = dcwvd
-
- def dcwkd(self, memo):
- """Deepcopy implementation for WeakKeyDictionary class"""
- from copy import deepcopy
- new = self.__class__()
- for key, value in self.data.items():
- o = key()
- if o is not none:
- new[o] = deepcopy(value, memo)
- return new
- weakref.WeakKeyDictionary.__copy__ = weakref.WeakKeyDictionary.copy
- weakref.WeakKeyDictionary.__deepcopy__ = dcwkd
-
-
class _generic_component_decorator(object):
"""A generic decorator that wraps Block.__setattr__()
@@ -99,6 +71,7 @@ def __call__(self, rule):
rule.__name__,
self._component(*self._args, rule=rule, **(self._kwds))
)
+ return rule
class _component_decorator(object):
@@ -291,7 +264,7 @@ def __getitem__(self, key):
"""
if key in self._block._decl:
x = self._block._decl_order[self._block._decl[key]]
- if self._ctypes is None or x[0].type() in self._ctypes:
+ if self._ctypes is None or x[0].ctype in self._ctypes:
if self._active is None or x[0].active == self._active:
return x[0]
msg = ""
@@ -359,7 +332,7 @@ def __contains__(self, key):
# component matches those flags
if key in self._block._decl:
x = self._block._decl_order[self._block._decl[key]]
- if self._ctypes is None or x[0].type() in self._ctypes:
+ if self._ctypes is None or x[0].ctype in self._ctypes:
return self._active is None or x[0].active == self._active
return False
@@ -702,17 +675,118 @@ def __delattr__(self, name):
#
super(_BlockData, self).__delattr__(name)
+ def _compact_decl_storage(self):
+ idxMap = {}
+ _new_decl_order = []
+ j = 0
+ # Squeeze out the None entries
+ for i, entry in enumerate(self._decl_order):
+ if entry[0] is not None:
+ idxMap[i] = j
+ j += 1
+ _new_decl_order.append(entry)
+ # Update the _decl map
+ self._decl = {k:idxMap[idx] for k,idx in iteritems(self._decl)}
+ # Update the ctypes, _decl_order linked lists
+ for ctype, info in iteritems(self._ctypes):
+ idx = info[0]
+ entry = self._decl_order[idx]
+ while entry[0] is None:
+ idx = entry[1]
+ entry = self._decl_order[idx]
+ info[0] = last = idxMap[idx]
+ while entry[1] is not None:
+ idx = entry[1]
+ entry = self._decl_order[idx]
+ if entry[0] is not None:
+ this = idxMap[idx]
+ _new_decl_order[last] = (_new_decl_order[last][0], this)
+ last = this
+ info[1] = last
+ _new_decl_order[last] = (_new_decl_order[last][0], None)
+ self._decl_order = _new_decl_order
+
def set_value(self, val):
- for k in list(getattr(self, '_decl', {})):
- self.del_component(k)
- self._ctypes = {}
- self._decl = {}
- self._decl_order = []
- if val:
- for k in sorted(iterkeys(val)):
- self.add_component(k,val[k])
-
- def _add_temporary_set(self, val):
+ raise RuntimeError(textwrap.dedent(
+ """\
+ Block components do not support assignment or set_value().
+ Use the transfer_attributes_from() method to transfer the
+ components and public attributes from one block to another:
+ model.b[1].transfer_attributes_from(other_block)
+ """))
+
+ def clear(self):
+ for name in iterkeys(self.component_map()):
+ if name not in self._Block_reserved_words:
+ self.del_component(name)
+ for attr in tuple(self.__dict__):
+ if attr not in self._Block_reserved_words:
+ delattr(self, attr)
+ self._compact_decl_storage()
+
+ def transfer_attributes_from(self, src):
+ """Transfer user-defined attributes from src to this block
+
+ This transfers all components and user-defined attributes from
+ the block or dictionary `src` and places them on this Block.
+ Components are transferred in declaration order.
+
+ If a Component on `src` is also declared on this block as either
+ a Component or attribute, the local Component or attribute is
+ replaced by the incoming component. If an attribute name on
+ `src` matches a Component declared on this block, then the
+ incoming attribute is passed to the local Component's
+ `set_value()` method. Attribute names appearing in this block's
+ `_Block_reserved_words` set will not be transferred (although
+ Components will be).
+
+ Parameters
+ ----------
+ src: _BlockData or dict
+ The Block or mapping that contains the new attributes to
+ assign to this block.
+ """
+ if isinstance(src, _BlockData):
+ # There is a special case where assigning a parent block to
+ # this block creates a circular hierarchy
+ if src is self:
+ return
+ p_block = self.parent_block()
+ while p_block is not None:
+ if p_block is src:
+ raise ValueError(
+ "_BlockData.transfer_attributes_from(): Cannot set a "
+ "sub-block (%s) to a parent block (%s): creates a "
+ "circular hierarchy" % (self, src))
+ p_block = p_block.parent_block()
+ # record the components and the non-component objects added
+ # to the block
+ src_comp_map = src.component_map()
+ src_raw_dict = {k:v for k,v in iteritems(src.__dict__)
+ if k not in src_comp_map}
+ elif isinstance(src, collections_Mapping):
+ src_comp_map = {}
+ src_raw_dict = src
+ else:
+ raise ValueError(
+ "_BlockData.transfer_attributes_from(): expected a "
+ "Block or dict; received %s" % (type(src).__name__,))
+
+ # Use component_map for the components to preserve decl_order
+ for k,v in iteritems(src_comp_map):
+ if k in self._decl:
+ self.del_component(k)
+ src.del_component(k)
+ self.add_component(k,v)
+ # Because Blocks are not slotized and we allow the
+ # assignment of arbitrary data to Blocks, we will move over
+ # any other unrecognized entries in the object's __dict__:
+ for k in sorted(iterkeys(src_raw_dict)):
+ if k not in self._Block_reserved_words or not hasattr(self, k) \
+ or k in self._decl:
+ setattr(self, k, src_raw_dict[k])
+
+ def _add_implicit_sets(self, val):
"""TODO: This method has known issues (see tickets) and needs to be
reviewed. [JDS 9/2014]"""
@@ -723,40 +797,24 @@ def _add_temporary_set(self, val):
#
if _component_sets is not None:
for ctr, tset in enumerate(_component_sets):
- if tset.parent_component()._name == "_unknown_":
- self._construct_temporary_set(
- tset,
- val.local_name + "_index_" + str(ctr)
- )
- if isinstance(val._index, _SetDataBase) and \
- val._index.parent_component().local_name == "_unknown_":
- self._construct_temporary_set(val._index, val.local_name + "_index")
- if isinstance(getattr(val, 'initialize', None), _SetDataBase) and \
- val.initialize.parent_component().local_name == "_unknown_":
- self._construct_temporary_set(val.initialize, val.local_name + "_index_init")
- if getattr(val, 'domain', None) is not None and \
- getattr(val.domain, 'local_name', None) == "_unknown_":
- self._construct_temporary_set(val.domain, val.local_name + "_domain")
-
- def _construct_temporary_set(self, obj, name):
- """TODO: This method has known issues (see tickets) and needs to be
- reviewed. [JDS 9/2014]"""
- if type(obj) is tuple:
- if len(obj) == 1: # pragma:nocover
- raise Exception(
- "Unexpected temporary set construction for set "
- "%s on block %s" % (name, self.name))
- else:
- tobj = obj[0]
- for t in obj[1:]:
- tobj = tobj * t
- self.add_component(name, tobj)
- tobj.virtual = True
- return tobj
- elif isinstance(obj, Set):
- self.add_component(name, obj)
- return obj
- raise Exception("BOGUS")
+ if tset.parent_component().parent_block() is None \
+ and not isinstance(tset.parent_component(), GlobalSetBase):
+ self.add_component("%s_index_%d" % (val.local_name, ctr), tset)
+ if getattr(val, '_index', None) is not None \
+ and isinstance(val._index, _SetDataBase) \
+ and val._index.parent_component().parent_block() is None \
+ and not isinstance(val._index.parent_component(), GlobalSetBase):
+ self.add_component("%s_index" % (val.local_name,), val._index.parent_component())
+ if getattr(val, 'initialize', None) is not None \
+ and isinstance(val.initialize, _SetDataBase) \
+ and val.initialize.parent_component().parent_block() is None \
+ and not isinstance(val.initialize.parent_component(), GlobalSetBase):
+ self.add_component("%s_index_init" % (val.local_name,), val.initialize.parent_component())
+ if getattr(val, 'domain', None) is not None \
+ and isinstance(val.domain, _SetDataBase) \
+ and val.domain.parent_block() is None \
+ and not isinstance(val.domain, GlobalSetBase):
+ self.add_component("%s_domain" % (val.local_name,), val.domain)
def _flag_vars_as_stale(self):
"""
@@ -853,7 +911,7 @@ def add_component(self, name, val):
if not val.valid_model_component():
raise RuntimeError(
"Cannot add '%s' as a component to a block" % str(type(val)))
- if name in self._Block_reserved_words:
+ if name in self._Block_reserved_words and hasattr(self, name):
raise ValueError("Attempting to declare a block component using "
"the name of a reserved attribute:\n\t%s"
% (name,))
@@ -867,7 +925,7 @@ def add_component(self, name, val):
# component type that is suppressed.
#
_component = self.parent_component()
- _type = val.type()
+ _type = val.ctype
if _type in _component._suppress_ctypes:
return
#
@@ -892,6 +950,18 @@ def add_component(self, name, val):
component, use the block del_component() and add_component() methods.
""" % (msg.strip(),))
#
+ # If the new component is a Block, then there is the chance that
+ # it is the model(), and assigning it would create a circular
+ # hierarchy. Note that we only have to check the model as the
+ # check immediately above would catch any "internal" blocks in
+ # the block hierarchy
+ #
+ if isinstance(val, Block) and val is self.model():
+ raise ValueError(
+ "Cannot assign the top-level block as a subblock of one of "
+ "its children (%s): creates a circular hierarchy"
+ % (self,))
+ #
# Set the name and parent pointer of this component.
#
val._name = name
@@ -906,8 +976,7 @@ def add_component(self, name, val):
# kind of thing to an "update_parent()" method on the
# components.
#
- if hasattr(val, '_index'):
- self._add_temporary_set(val)
+ self._add_implicit_sets(val)
#
# Add the component to the underlying Component store
#
@@ -981,9 +1050,11 @@ def add_component(self, name, val):
if getattr(_component, '_constructed', False):
# NB: we don't have to construct the temporary / implicit
# sets here: if necessary, that happens when
- # _add_temporary_set() calls add_component().
- if id(self) in _BlockConstruction.data:
- data = _BlockConstruction.data[id(self)].get(name, None)
+ # _add_implicit_sets() calls add_component().
+ if _BlockConstruction.data:
+ data = _BlockConstruction.data.get(id(self), None)
+ if data is not None:
+ data = data.get(name, None)
else:
data = None
if __debug__ and logger.isEnabledFor(logging.DEBUG):
@@ -1046,10 +1117,10 @@ def del_component(self, name_or_object):
self._decl_order[idx] = (None, self._decl_order[idx][1])
# Update the ctype linked lists
- ctype_info = self._ctypes[obj.type()]
+ ctype_info = self._ctypes[obj.ctype]
ctype_info[2] -= 1
if ctype_info[2] == 0:
- del self._ctypes[obj.type()]
+ del self._ctypes[obj.ctype]
# Clear the _parent attribute
obj._parent = None
@@ -1074,7 +1145,7 @@ def reclassify_component_type(self, name_or_object, new_ctype,
if obj is None:
return
- if obj._type is new_ctype:
+ if obj.ctype is new_ctype:
return
name = obj.local_name
@@ -1083,22 +1154,22 @@ def reclassify_component_type(self, name_or_object, new_ctype,
# easiest (and fastest) thing to do is just delete it and
# re-add it.
self.del_component(name)
- obj._type = new_ctype
+ obj._ctype = new_ctype
self.add_component(name, obj)
return
idx = self._decl[name]
# Update the ctype linked lists
- ctype_info = self._ctypes[obj.type()]
+ ctype_info = self._ctypes[obj.ctype]
ctype_info[2] -= 1
if ctype_info[2] == 0:
- del self._ctypes[obj.type()]
+ del self._ctypes[obj.ctype]
elif ctype_info[0] == idx:
ctype_info[0] = self._decl_order[idx][1]
else:
prev = None
- tmp = self._ctypes[obj.type()][0]
+ tmp = self._ctypes[obj.ctype][0]
while tmp < idx:
prev = tmp
tmp = self._decl_order[tmp][1]
@@ -1108,7 +1179,7 @@ def reclassify_component_type(self, name_or_object, new_ctype,
if ctype_info[1] == idx:
ctype_info[1] = prev
- obj._type = new_ctype
+ obj._ctype = new_ctype
# Insert into the new ctype list
if new_ctype not in self._ctypes:
@@ -1248,27 +1319,22 @@ def _component_data_iter(self, ctype=None, active=None, sort=False):
_sort_indices = SortComponents.sort_indices(sort)
_subcomp = PseudoMap(self, ctype, active, sort)
for name, comp in _subcomp.iteritems():
- # _NOTE_: Suffix has a dict interface (something other
- # derived non-indexed Components may do as well),
- # so we don't want to test the existence of
- # iteritems as a check for components. Also,
- # the case where we test len(comp) after seeing
- # that comp.is_indexed is False is a hack for a
- # SimpleConstraint whose expression resolved to
- # Constraint.skip or Constraint.feasible (in which
- # case its data is empty and iteritems would have
- # been empty as well)
- # try:
- # _items = comp.iteritems()
- # except AttributeError:
- # _items = [ (None, comp) ]
+ # NOTE: Suffix has a dict interface (something other derived
+ # non-indexed Components may do as well), so we don't want
+ # to test the existence of iteritems as a check for
+ # component datas. We will rely on is_indexed() to catch
+ # all the indexed components. Then we will do special
+ # processing for the scalar components to catch the case
+ # where there are "sparse scalar components"
if comp.is_indexed():
_items = comp.iteritems()
- # This is a hack (see _NOTE_ above).
- elif len(comp) or not hasattr(comp, '_data'):
- _items = ((None, comp),)
+ elif hasattr(comp, '_data'):
+ # This may be an empty Scalar component (e.g., from
+ # Constraint.Skip on a scalar Constraint)
+ assert len(comp._data) <= 1
+ _items = iteritems(comp._data)
else:
- _items = tuple()
+ _items = ((None, comp),)
if _sort_indices:
_items = sorted(_items, key=itemgetter(0))
@@ -1441,7 +1507,7 @@ def _tree_iterator(self,
# "descend_into" argument in public calling functions: callers
# expect that the called thing will be iterated over.
#
- # if self.parent_component().type() not in ctype:
+ # if self.parent_component().ctype not in ctype:
# return ().__iter__()
if traversal is None or \
@@ -1765,7 +1831,35 @@ def __init__(self, *args, **kwargs):
self.construct()
def _getitem_when_not_present(self, idx):
- return self._setitem_when_not_present(idx, None)
+ _block = self._setitem_when_not_present(idx)
+ if self._rule is None:
+ return _block
+
+ if _BlockConstruction.data:
+ data = _BlockConstruction.data.get(id(self), None)
+ if data is not None:
+ data = data.get(idx, None)
+ if data is not None:
+ _BlockConstruction.data[id(_block)] = data
+ else:
+ data = None
+
+ try:
+ obj = apply_indexed_rule(
+ self, self._rule, _block, idx, self._options)
+ finally:
+ if data is not None:
+ del _BlockConstruction.data[id(_block)]
+
+ if obj is not _block and isinstance(obj, _BlockData):
+ # If the user returns a block, transfer over everything
+ # they defined into the empty one we created.
+ _block.transfer_attributes_from(obj)
+
+ # TBD: Should we allow skipping Blocks???
+ # if obj is Block.Skip and idx is not None:
+ # del self._data[idx]
+ return _block
def find_component(self, label_or_component):
"""
@@ -1785,61 +1879,63 @@ def construct(self, data=None):
timer = ConstructionTimer(self)
self._constructed = True
- # We must check that any pre-existing components are
- # constructed. This catches the case where someone is building
- # a Concrete model by building (potentially pseudo-abstract)
- # sub-blocks and then adding them to a Concrete model block.
- for idx in self._data:
- _block = self[idx]
- for name, obj in iteritems(_block.component_map()):
- if not obj._constructed:
- if data is None:
- _data = None
- else:
- _data = data.get(name, None)
- obj.construct(_data)
-
- if self._rule is None:
- # Ensure the _data dictionary is populated for singleton
- # blocks
- if not self.is_indexed():
- self[None]
+ # Constructing blocks is tricky. Scalar blocks are already
+ # partially constructed (they have _data[None] == self) in order
+ # to support Abstract blocks. The block may therefore already
+ # have components declared on it. In order to preserve
+ # decl_order, we must construct those components *first* before
+ # firing any rule. Indexed blocks should be empty, so we only
+ # need to fire the rule in order.
+ #
+ # Since the rule does not pass any "data" on, we build a scalar
+ # "stack" of pointers to block data (_BlockConstruction.data)
+ # that the individual blocks' add_component() can refer back to
+ # to handle component construction.
+ if data is not None:
+ _BlockConstruction.data[id(self)] = data
+ try:
+ if self.is_indexed():
+ # We can only populate Blocks with finite indexing sets
+ if self._rule is not None and self.index_set().isfinite():
+ for _idx in self.index_set():
+ # Trigger population & call the rule
+ self._getitem_when_not_present(_idx)
+ else:
+ # We must check that any pre-existing components are
+ # constructed. This catches the case where someone is
+ # building a Concrete model by building (potentially
+ # pseudo-abstract) sub-blocks and then adding them to a
+ # Concrete model block.
+ _idx = next(iter(UnindexedComponent_set))
+ if _idx not in self._data:
+ # Derived block classes may not follow the scalar
+ # Block convention of initializing _data to point to
+ # itself (i.e., they are not set up to support
+ # Abstract models)
+ self._data[_idx] = self
+ _block = self
+ for name, obj in iteritems(_block.component_map()):
+ if not obj._constructed:
+ if data is None:
+ _data = None
+ else:
+ _data = data.get(name, None)
+ obj.construct(_data)
+ if self._rule is not None:
+ obj = apply_indexed_rule(
+ self, self._rule, _block, _idx, self._options)
+ if obj is not _block and isinstance(obj, _BlockData):
+ # If the user returns a block, transfer over
+ # everything they defined into the empty one we
+ # created.
+ _block.transfer_attributes_from(obj)
+ finally:
+ # We must check if data is still in the dictionary, as
+ # scalar blocks will have already removed the entry (as
+ # the _data and the component are the same object)
+ if data is not None and id(self) in _BlockConstruction.data:
+ del _BlockConstruction.data[id(self)]
timer.report()
- return
- # If we have a rule, fire the rule for all indices.
- # Notes:
- # - Since this block is now concrete, any components added to
- # it will be immediately constructed by
- # block.add_component().
- # - Since the rule does not pass any "data" on, we build a
- # scalar "stack" of pointers to block data
- # (_BlockConstruction.data) that the individual blocks'
- # add_component() can refer back to to handle component
- # construction.
- for idx in self._index:
- _block = self[idx]
- if data is not None and idx in data:
- _BlockConstruction.data[id(_block)] = data[idx]
- obj = apply_indexed_rule(
- self, self._rule, _block, idx, self._options)
- if id(_block) in _BlockConstruction.data:
- del _BlockConstruction.data[id(_block)]
-
- if isinstance(obj, _BlockData) and obj is not _block:
- # If the user returns a block, use their block instead
- # of the empty one we just created.
- for c in list(obj.component_objects(descend_into=False)):
- obj.del_component(c)
- _block.add_component(c.local_name, c)
- # transfer over any other attributes that are not components
- for name, val in iteritems(obj.__dict__):
- if not hasattr(_block, name) and not hasattr(self, name):
- super(_BlockData, _block).__setattr__(name, val)
-
- # TBD: Should we allow skipping Blocks???
- # if obj is Block.Skip and idx is not None:
- # del self._data[idx]
- timer.report()
def _pprint_callback(self, ostream, idx, data):
if not self.is_indexed():
@@ -1882,6 +1978,10 @@ class SimpleBlock(_BlockData, Block):
def __init__(self, *args, **kwds):
_BlockData.__init__(self, component=self)
Block.__init__(self, *args, **kwds)
+ # Initialize the data dict so that (abstract) attribute
+ # assignment will work. Note that we do not trigger
+ # get/setitem_when_not_present so that we do not (implicitly)
+ # trigger the Block rule
self._data[None] = self
def display(self, filename=None, ostream=None, prefix=""):
diff --git a/pyomo/core/base/blockutil.py b/pyomo/core/base/blockutil.py
index 3869ee844d6..a98c5eecc17 100644
--- a/pyomo/core/base/blockutil.py
+++ b/pyomo/core/base/blockutil.py
@@ -17,7 +17,8 @@
from pyomo.core.base import Var
-@deprecated("This function has been moved to `pyomo.util.blockutil`", version='TBD', remove_in='TBD')
+@deprecated("This function has been moved to `pyomo.util.blockutil`",
+ version='5.6.9')
def has_discrete_variables(block):
from pyomo.util.blockutil import has_discrete_variables
return has_discrete_variables(block)
diff --git a/pyomo/core/base/component.py b/pyomo/core/base/component.py
index 220927f1004..6f10aaefb42 100644
--- a/pyomo/core/base/component.py
+++ b/pyomo/core/base/component.py
@@ -22,6 +22,7 @@
import pyomo.common
from pyomo.common import deprecated
+from pyomo.core.pyomoobject import PyomoObject
from pyomo.core.base.misc import tabular_writer, sorted_robust
logger = logging.getLogger('pyomo.core')
@@ -66,12 +67,16 @@ def name(component, index=None, fully_qualified=False, relative_to=None):
return base + _name_index_generator( index )
-@deprecated(msg="The cname() function has been renamed to name()", version='TBD', remove_in='TBD')
+@deprecated(msg="The cname() function has been renamed to name()",
+ version='5.6.9')
def cname(*args, **kwds):
return name(*args, **kwds)
-class _ComponentBase(object):
+class CloneError(pyomo.common.errors.PyomoException):
+ pass
+
+class _ComponentBase(PyomoObject):
"""A base class for Component and ComponentData
This class defines some fundamental methods and properties that are
@@ -82,6 +87,10 @@ class _ComponentBase(object):
_PPRINT_INDENT = " "
+ def is_component_type(self):
+ """Return True if this class is a Pyomo component"""
+ return True
+
def __deepcopy__(self, memo):
# The problem we are addressing is when we want to clone a
# sub-block in a model. In that case, the block can have
@@ -195,6 +204,8 @@ def __deepcopy__(self, memo):
if paranoid:
saved_memo = dict(memo)
new_state[k] = deepcopy(v, memo)
+ except CloneError:
+ raise
except:
if paranoid:
memo.clear()
@@ -217,16 +228,31 @@ def __deepcopy__(self, memo):
"Unable to clone Pyomo component attribute.\n"
"%s '%s' contains an uncopyable field '%s' (%s)"
% ( what, self.name, k, type(v) ))
+ # If this is an abstract model, then we are probably
+ # in the middle of create_instance, and the model
+ # that will eventually become the concrete model is
+ # missing initialization data. This is an
+ # exceptional event worthy of a stronger (and more
+ # informative) error.
+ if not self.parent_component()._constructed:
+ raise CloneError(
+ "Uncopyable attribute (%s) encountered when "
+ "cloning component %s on an abstract block. "
+ "The resulting instance is therefore "
+ "missing data from the original abstract model "
+ "and likely will not construct correctly. "
+ "Consider changing how you initialize this "
+ "component or using a ConcreteModel."
+ % ( k, self.name ))
ans.__setstate__(new_state)
return ans
+ @deprecated("""The cname() method has been renamed to getname().
+ The preferred method of obtaining a component name is to use the
+ .name property, which returns the fully qualified component name.
+ The .local_name property will return the component name only within
+ the context of the immediate parent container.""", version='5.0')
def cname(self, *args, **kwds):
- logger.warning(
- """DEPRECATED: The cname() method has been renamed to getname().
-The preferred method of obtaining a component name is to use the .name
-property, which returns the fully qualified component name. The
-.local_name property will return the component name only within the
-context of the immediate parent container.""")
return self.getname(*args, **kwds)
def pprint(self, ostream=None, verbose=False, prefix=""):
@@ -360,15 +386,15 @@ class Component(_ComponentBase):
_constructed A boolean that is true if this component has been
constructed
_parent A weakref to the parent block that owns this component
- _type The class type for the derived subclass
+ _ctype The class type for the derived subclass
"""
def __init__ (self, **kwds):
#
# Get arguments
#
- self._type = kwds.pop('ctype', None)
- self.doc = kwds.pop('doc', None)
+ self._ctype = kwds.pop('ctype', None)
+ self.doc = kwds.pop('doc', None)
self._name = kwds.pop('name', str(type(self).__name__))
if kwds:
raise ValueError(
@@ -377,7 +403,7 @@ def __init__ (self, **kwds):
#
# Verify that ctype has been specified.
#
- if self._type is None:
+ if self._ctype is None:
raise pyomo.common.DeveloperError(
"Must specify a component type for class %s!"
% ( type(self).__name__, ) )
@@ -439,9 +465,16 @@ def __setstate__(self, state):
# of setting self.__dict__[key] = val.
object.__setattr__(self, key, val)
+ @property
+ def ctype(self):
+ """Return the class type for this component"""
+ return self._ctype
+
+ @deprecated("Component.type() method has been replaced by the "
+ ".ctype property.", version='TBD')
def type(self):
"""Return the class type for this component"""
- return self._type
+ return self.ctype
def construct(self, data=None): #pragma:nocover
"""API definition for constructing components"""
@@ -566,10 +599,6 @@ def is_indexed(self):
"""Return true if this component is indexed"""
return False
- def is_component_type(self):
- """Return True if this class is a Pyomo component"""
- return True
-
def clear_suffix_value(self, suffix_or_name, expand=True):
"""Clear the suffix value for this component data"""
if isinstance(suffix_or_name, six.string_types):
@@ -743,12 +772,19 @@ class owns weakrefs for '_component', which must be restored
# of setting self.__dict__[key] = val.
object.__setattr__(self, key, val)
- def type(self):
+ @property
+ def ctype(self):
"""Return the class type for this component"""
_parent = self.parent_component()
if _parent is None:
- return _parent
- return _parent._type
+ return None
+ return _parent._ctype
+
+ @deprecated("Component.type() method has been replaced by the "
+ ".ctype property.", version='TBD')
+ def type(self):
+ """Return the class type for this component"""
+ return self.ctype
def parent_component(self):
"""Returns the component associated with this object."""
@@ -877,10 +913,6 @@ def is_indexed(self):
"""Return true if this component is indexed"""
return False
- def is_component_type(self):
- """Return True if this class is a Pyomo component"""
- return True
-
def clear_suffix_value(self, suffix_or_name, expand=True):
"""Set the suffix value for this component data"""
if isinstance(suffix_or_name, six.string_types):
diff --git a/pyomo/core/base/component_order.py b/pyomo/core/base/component_order.py
index 0e9bf4bca66..a6e9bfd87cc 100644
--- a/pyomo/core/base/component_order.py
+++ b/pyomo/core/base/component_order.py
@@ -11,8 +11,7 @@
__all__ = ['items', 'display_items', 'display_name']
-from pyomo.core.base.sets import Set
-from pyomo.core.base.rangeset import RangeSet
+from pyomo.core.base.set import Set, RangeSet
from pyomo.core.base.param import Param
from pyomo.core.base.var import Var
from pyomo.core.base.expression import Expression
diff --git a/pyomo/core/base/config.py b/pyomo/core/base/config.py
index 0835b964618..5fef3ff6726 100644
--- a/pyomo/core/base/config.py
+++ b/pyomo/core/base/config.py
@@ -1,16 +1,12 @@
import appdirs
import os
import json
-try:
- import yaml
- yaml_available = True
-except ImportError:
- yaml_available = False
from pyutilib.misc.config import ConfigBase
from pyomo.common.config import (
ConfigBlock, ConfigValue, ADVANCED_OPTION, PYOMO_CONFIG_DIR,
)
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
import logging
logger = logging.getLogger('pyomo.core')
@@ -21,20 +17,26 @@ def __init__(self):
self._options_stack = [ default_pyomo_config() ]
# Load the user's configuration
- sources = [(json.load, 'json')]
- if yaml_available:
- sources.append( (yaml.load, 'yml') )
- sources.append( (yaml.load, 'yaml') )
- for parser, suffix in sources:
+ sources = [(json, 'json', True, 'json', {}),
+ (json, 'jsn', True, 'json', {})]
+ sources.append((yaml, 'yml', yaml_available, 'yaml', yaml_load_args))
+ sources.append((yaml, 'yaml', yaml_available, 'yaml', yaml_load_args))
+ for parser, suffix, available, library, parser_args in sources:
cfg_file = os.path.join( PYOMO_CONFIG_DIR, 'config.'+suffix)
- if os.path.exists(cfg_file):
- fp = open(cfg_file)
- try:
- data = parser(fp)
- except:
- logger.error("Error parsing the user's default "
- "configuration file\n\t%s." % (cfg_file,))
- self._options_stack[0].set_value(data)
+ if not os.path.exists(cfg_file):
+ continue
+ if not available:
+ logger.warning("Default configuration file (%s) cannot be "
+ "loaded; %s is not available"
+ % (cfg_file, library))
+ continue
+ fp = open(cfg_file)
+ try:
+ data = parser.load(fp, **parser_args)
+ except:
+ logger.error("Error parsing the user's default "
+ "configuration file\n\t%s." % (cfg_file,))
+ self._options_stack[0].set_value(data)
def active_config(self):
return self._options_stack[-1]
diff --git a/pyomo/core/base/connector.py b/pyomo/core/base/connector.py
index 532a17620f9..94968de5666 100644
--- a/pyomo/core/base/connector.py
+++ b/pyomo/core/base/connector.py
@@ -152,7 +152,7 @@ def __new__(cls, *args, **kwds):
@deprecated(
"Use of pyomo.connectors is deprecated. "
"Its functionality has been replaced by pyomo.network.",
- version='TBD', remove_in='TBD',)
+ version='5.6.9')
def __init__(self, *args, **kwd):
kwd.setdefault('ctype', Connector)
self._rule = kwd.pop('rule', None)
@@ -285,7 +285,7 @@ class ConnectorExpander(Plugin):
@deprecated(
"Use of pyomo.connectors is deprecated. "
"Its functionality has been replaced by pyomo.network.",
- version='TBD', remove_in='TBD', )
+ version='5.6.9')
def apply(self, **kwds):
instance = kwds.pop('instance')
xform = TransformationFactory('core.expand_connectors')
diff --git a/pyomo/core/base/constraint.py b/pyomo/core/base/constraint.py
index 0be2a26acfb..7f748953619 100644
--- a/pyomo/core/base/constraint.py
+++ b/pyomo/core/base/constraint.py
@@ -33,7 +33,7 @@
_get_indexed_component_data_name, )
from pyomo.core.base.misc import (apply_indexed_rule,
tabular_writer)
-from pyomo.core.base.sets import Set
+from pyomo.core.base.set import Set
from six import StringIO, iteritems
@@ -1101,7 +1101,7 @@ class ConstraintList(IndexedConstraint):
def __init__(self, **kwargs):
"""Constructor"""
- args = (Set(),)
+ args = (Set(dimen=1),)
if 'expr' in kwargs:
raise ValueError(
"ConstraintList does not accept the 'expr' keyword")
@@ -1120,6 +1120,7 @@ def construct(self, data=None):
if self._constructed:
return
self._constructed=True
+ self.index_set().construct()
assert self._init_expr is None
_init_rule = self.rule
diff --git a/pyomo/core/base/external.py b/pyomo/core/base/external.py
index c23649eeddf..7fb001ab1bc 100644
--- a/pyomo/core/base/external.py
+++ b/pyomo/core/base/external.py
@@ -46,6 +46,8 @@ def __new__(cls, *args, **kwds):
return AMPLExternalFunction.__new__(AMPLExternalFunction)
def __init__(self, *args, **kwds):
+ self._units = kwds.pop('units', None)
+ self._arg_units = kwds.pop('arg_units', None)
kwds.setdefault('ctype', ExternalFunction)
Component.__init__(self, **kwds)
self._constructed = True
@@ -54,6 +56,14 @@ def __init__(self, *args, **kwds):
# block._add_temporary_set assumes ALL components define an
# index. Sigh.
self._index = None
+
+ def get_units(self):
+ """Return the units for this ExternalFunction"""
+ return self._units
+
+ def get_arg_units(self):
+ """Return the units for this ExternalFunctions arguments"""
+ return self._arg_units
def __call__(self, *args):
args_ = []
@@ -192,12 +202,12 @@ def __init__(self, *args, **kwds):
"single positional positional arguments" )
if not args:
self._fcn = kwds.pop('function')
- if kwds:
- raise ValueError(
- "PythonCallbackFunction constructor does not support "
- "keyword arguments" )
+
self._library = 'pyomo_ampl.so'
self._function = 'pyomo_socket_server'
+ arg_units = kwds.get('arg_units', None)
+ if arg_units is not None:
+ kwds['arg_units'] = [None]+list(arg_units)
ExternalFunction.__init__(self, *args, **kwds)
self._fcn_id = PythonCallbackFunction.register_instance(self)
diff --git a/pyomo/core/base/global_set.py b/pyomo/core/base/global_set.py
new file mode 100644
index 00000000000..f335b129a73
--- /dev/null
+++ b/pyomo/core/base/global_set.py
@@ -0,0 +1,67 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+GlobalSets = {}
+def _get_global_set(name):
+ return GlobalSets[name]
+_get_global_set.__safe_for_unpickling__ = True
+
+class GlobalSetBase(object):
+ """The base class for all Global sets"""
+ __slots__ = ()
+
+ def __reduce__(self):
+ # Cause pickle to preserve references to this object
+ return _get_global_set, (self.local_name,)
+
+ def __deepcopy__(self, memo):
+ # Prevent deepcopy from duplicating this object
+ return self
+
+ def __str__(self):
+ # Override str() to always print out the global set name
+ return self.name
+
+# FIXME: This mocks up part of the Set API until we can break up the set
+# module to resolve circular dependencies and can make this a proper
+# GlobalSet (Scalar IndexedComponent objects are indexed by
+# UnindexedComponent_set, but we would like UnindexedComponent_set to be
+# a proper scalar IndexedComponent).
+#
+#UnindexedComponent_set = set([None])
+class _UnindexedComponent_set(GlobalSetBase):
+ def __init__(self, name):
+ self.name = name
+ def __contains__(self, val):
+ return val is None
+ def get(self, value, default):
+ if value is None:
+ return value
+ return default
+ def __iter__(self):
+ return (None,).__iter__()
+ def subsets(self):
+ return [ self ]
+ def construct(self):
+ pass
+ def __len__(self):
+ return 1
+ def __eq__(self, other):
+ return self is other
+ def __ne__(self, other):
+ return self is not other
+ def isdiscrete(self):
+ return True
+ def isfinite(self):
+ return True
+ def isordered(self):
+ # As this set only has a single element, it is implicitly "ordered"
+ return True
+UnindexedComponent_set = _UnindexedComponent_set('UnindexedComponent_set')
diff --git a/pyomo/core/base/indexed_component.py b/pyomo/core/base/indexed_component.py
index 2bc9e3f0f77..97c26e7c87b 100644
--- a/pyomo/core/base/indexed_component.py
+++ b/pyomo/core/base/indexed_component.py
@@ -14,9 +14,10 @@
from pyomo.core.expr.expr_errors import TemplateExpressionError
from pyomo.core.expr.numvalue import native_types
-from pyomo.core.base.indexed_component_slice import _IndexedComponent_slice
+from pyomo.core.base.indexed_component_slice import IndexedComponent_slice
from pyomo.core.base.component import Component, ActiveComponent
from pyomo.core.base.config import PyomoOptions
+from pyomo.core.base.global_set import UnindexedComponent_set
from pyomo.common import DeveloperError
from six import PY3, itervalues, iteritems, string_types
@@ -26,9 +27,6 @@
else:
from collections import Sequence as collections_Sequence
-
-UnindexedComponent_set = set([None])
-
sequence_types = {tuple, list}
def normalize_index(x):
"""Normalize a component index.
@@ -51,21 +49,13 @@ def normalize_index(x):
# Note that casting a tuple to a tuple is cheap (no copy, no
# new object)
x = tuple(x)
- elif hasattr(x, '__iter__') and isinstance(x, collections_Sequence):
- if isinstance(x, string_types):
- # This is very difficult to get to: it would require a user
- # creating a custom derived string type
- return x
- sequence_types.add(x.__class__)
- x = tuple(x)
else:
- return x
+ x = (x,)
x_len = len(x)
i = 0
while i < x_len:
- _xi = x[i]
- _xi_class = _xi.__class__
+ _xi_class = x[i].__class__
if _xi_class in native_types:
i += 1
elif _xi_class in sequence_types:
@@ -73,10 +63,11 @@ def normalize_index(x):
# Note that casting a tuple to a tuple is cheap (no copy, no
# new object)
x = x[:i] + tuple(x[i]) + x[i + 1:]
- elif _xi_class is not tuple and isinstance(_xi, collections_Sequence):
- if isinstance(_xi, string_types):
+ elif issubclass(_xi_class, collections_Sequence):
+ if issubclass(_xi_class, string_types):
# This is very difficult to get to: it would require a
# user creating a custom derived string type
+ native_types.add(_xi_class)
i += 1
else:
sequence_types.add(_xi_class)
@@ -95,6 +86,8 @@ def normalize_index(x):
class _NotFound(object):
pass
+class _NotSpecified(object):
+ pass
#
# Get the fully-qualified name for this index. If there isn't anything
@@ -183,7 +176,7 @@ class IndexedComponent(Component):
_DEFAULT_INDEX_CHECKING_ENABLED = True
def __init__(self, *args, **kwds):
- from pyomo.core.base.sets import process_setarg
+ from pyomo.core.base.set import process_setarg
#
kwds.pop('noruleinit', None)
Component.__init__(self, **kwds)
@@ -265,7 +258,7 @@ def dim(self):
"""Return the dimension of the index"""
if not self.is_indexed():
return 0
- return getattr(self._index, 'dimen', 0)
+ return self._index.dimen
def __len__(self):
"""
@@ -281,7 +274,7 @@ def __contains__(self, idx):
def __iter__(self):
"""Iterate over the keys in the dictionary"""
- if not getattr(self._index, 'concrete', True):
+ if hasattr(self._index, 'isfinite') and not self._index.isfinite():
#
# If the index set is virtual (e.g., Any) then return the
# data iterator. Note that since we cannot check the length
@@ -316,7 +309,7 @@ def __iter__(self):
where it is empty.
""" % (self.name,) )
- if not hasattr(self._index, 'ordered') or not self._index.ordered:
+ if not hasattr(self._index, 'isordered') or not self._index.isordered():
#
# If the index set is not ordered, then return the
# data iterator. This is in an arbitrary order, which is
@@ -382,7 +375,7 @@ def __getitem__(self, index):
index = TypeError
if index is TypeError:
raise
- if index.__class__ is _IndexedComponent_slice:
+ if index.__class__ is IndexedComponent_slice:
return index
# The index could have contained constant but nonhashable
# objects (e.g., scalar immutable Params).
@@ -408,7 +401,7 @@ def __getitem__(self, index):
# _processUnhashableIndex could have found a slice, or
# _validate could have found an Ellipsis and returned a
# slicer
- if index.__class__ is _IndexedComponent_slice:
+ if index.__class__ is IndexedComponent_slice:
return index
obj = self._data.get(index, _NotFound)
#
@@ -445,7 +438,7 @@ def __setitem__(self, index, val):
# If we didn't find the index in the data, then we need to
# validate it against the underlying set (as long as
# _processUnhashableIndex didn't return a slicer)
- if index.__class__ is not _IndexedComponent_slice:
+ if index.__class__ is not IndexedComponent_slice:
index = self._validate_index(index)
else:
return self._setitem_impl(index, obj, val)
@@ -454,10 +447,10 @@ def __setitem__(self, index, val):
# dictionary and set the value
#
# Note that we need to RECHECK the class against
- # _IndexedComponent_slice, as _validate_index could have found
+ # IndexedComponent_slice, as _validate_index could have found
# an Ellipsis (which is hashable) and returned a slicer
#
- if index.__class__ is _IndexedComponent_slice:
+ if index.__class__ is IndexedComponent_slice:
# support "m.x[:,1] = 5" through a simple recursive call.
#
# Assert that this slice was just generated
@@ -487,11 +480,11 @@ def __delitem__(self, index):
index = self._processUnhashableIndex(index)
if obj is _NotFound:
- if index.__class__ is not _IndexedComponent_slice:
+ if index.__class__ is not IndexedComponent_slice:
index = self._validate_index(index)
# this supports "del m.x[:,1]" through a simple recursive call
- if index.__class__ is _IndexedComponent_slice:
+ if index.__class__ is IndexedComponent_slice:
# Assert that this slice ws just generated
assert len(index._call_stack) == 1
# Make a copy of the slicer items *before* we start
@@ -525,13 +518,14 @@ def _validate_index(self, idx):
# This is only called through __{get,set,del}item__, which has
# already trapped unhashable objects.
- if idx in self._index:
+ validated_idx = self._index.get(idx, _NotFound)
+ if validated_idx is not _NotFound:
# If the index is in the underlying index set, then return it
# Note: This check is potentially expensive (e.g., when the
# indexing set is a complex set operation)!
- return idx
+ return validated_idx
- if idx.__class__ is _IndexedComponent_slice:
+ if idx.__class__ is IndexedComponent_slice:
return idx
if normalize_index.flatten:
@@ -555,7 +549,7 @@ def _validate_index(self, idx):
#
if not self.is_indexed():
raise KeyError(
- "Cannot treat the scalar component '%s'"
+ "Cannot treat the scalar component '%s' "
"as an indexed component" % ( self.name, ))
#
# Raise an exception
@@ -633,7 +627,7 @@ def _processUnhashableIndex(self, idx):
# templatized expression.
#
from pyomo.core.expr import current as EXPR
- return EXPR.GetItemExpression(tuple(idx), self)
+ return EXPR.GetItemExpression((self,) + tuple(idx))
except EXPR.NonConstantExpressionError:
#
@@ -672,7 +666,7 @@ def _processUnhashableIndex(self, idx):
fixed[i - len(idx)] = val
if sliced or ellipsis is not None:
- return _IndexedComponent_slice(self, fixed, sliced, ellipsis)
+ return IndexedComponent_slice(self, fixed, sliced, ellipsis)
elif _found_numeric:
if len(idx) == 1:
return fixed[0]
@@ -712,7 +706,7 @@ def _setitem_impl(self, index, obj, value):
obj.set_value(value)
return obj
- def _setitem_when_not_present(self, index, value):
+ def _setitem_when_not_present(self, index, value=_NotSpecified):
"""Perform the fundamental component item creation and storage.
Components that want to implement a nonstandard storage mechanism
@@ -729,11 +723,12 @@ def _setitem_when_not_present(self, index, value):
else:
obj = self._data[index] = self._ComponentDataClass(component=self)
try:
- obj.set_value(value)
- return obj
+ if value is not _NotSpecified:
+ obj.set_value(value)
except:
del self._data[index]
raise
+ return obj
def set_value(self, value):
"""Set the value of a scalar component."""
diff --git a/pyomo/core/base/indexed_component_slice.py b/pyomo/core/base/indexed_component_slice.py
index 5c7d99e9ae1..76e9e3b8dec 100644
--- a/pyomo/core/base/indexed_component_slice.py
+++ b/pyomo/core/base/indexed_component_slice.py
@@ -8,10 +8,10 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import copy
-from six import PY3, iteritems, advance_iterator
+from six import PY3, iteritems, iterkeys, advance_iterator
from pyomo.common import DeveloperError
-class _IndexedComponent_slice(object):
+class IndexedComponent_slice(object):
"""Special class for slicing through hierarchical component trees
The basic concept is to interrupt the normal slice generation
@@ -23,29 +23,87 @@ class _IndexedComponent_slice(object):
calls to __getitem__ / __getattr__ / __call__ happen *before* the
call to __iter__()
"""
+ ATTR_MASK = 4
+ ITEM_MASK = 8
+ CALL_MASK = 16
+
slice_info = 0
- get_attribute = 1
- set_attribute = 4
- del_attribute = 7
- get_item = 2
- set_item = 5
- del_item = 6
- call = 3
-
- def __init__(self, component, fixed, sliced, ellipsis):
+ get_attribute = ATTR_MASK | 1
+ set_attribute = ATTR_MASK | 2
+ del_attribute = ATTR_MASK | 3
+ get_item = ITEM_MASK | 1
+ set_item = ITEM_MASK | 2
+ del_item = ITEM_MASK | 3
+ call = CALL_MASK
+
+ def __init__(self, component, fixed=None, sliced=None, ellipsis=None):
+ """A "slice" over an _IndexedComponent hierarchy
+
+ This class has two forms for the constructor. The first form is
+ the standard constructor that takes a base component and
+ indexing information. This form takes
+
+ IndexedComponent_slice(component, fixed, sliced, ellipsis)
+
+ The second form is a "copy constructor" that is used internally
+ when building up the "call stack" for the hierarchical slice. The
+ copy constructor takes an IndexedComponent_slice and an
+ optional "next term" in the slice construction (from get/set/del
+ item/attr or call):
+
+ IndexedComponent_slice(slice, next_term=None)
+
+ Parameters
+ ----------
+ component: IndexedComponent
+ The base component for this slice
+
+ fixed: dict
+ A dictionary indicating the fixed indices of component,
+ mapping index position to value
+
+ sliced: dict
+ A dictionary indicating the sliced indices of component
+ mapping the index position to the (python) slice object
+
+ ellipsis: int
+ The position of the ellipsis in the initial component slice
+
+ """
# Note that because we use a custom __setattr__, we need to
# define actual instance attributes using the base class
# __setattr__.
- set_attr = super(_IndexedComponent_slice, self).__setattr__
-
- set_attr('_call_stack', [
- (_IndexedComponent_slice.slice_info,
- (component, fixed, sliced, ellipsis)) ])
- # Since this is an object, users may change these flags between
- # where they declare the slice and iterate over it.
- set_attr('call_errors_generate_exceptions', True)
- set_attr('key_errors_generate_exceptions', True)
- set_attr('attribute_errors_generate_exceptions', True)
+ set_attr = super(IndexedComponent_slice, self).__setattr__
+ if type(component) is IndexedComponent_slice:
+ # Copy constructor
+ _len = component._len
+ # For efficiency, we will only duplicate the call stack
+ # list if this instance is not point to the end of the list.
+ if _len == len(component._call_stack):
+ set_attr('_call_stack', component._call_stack)
+ else:
+ set_attr('_call_stack', component._call_stack[:_len])
+ set_attr('_len', _len)
+ if fixed is not None:
+ self._call_stack.append(fixed)
+ self._len += 1
+ set_attr('call_errors_generate_exceptions',
+ component.call_errors_generate_exceptions)
+ set_attr('key_errors_generate_exceptions',
+ component.key_errors_generate_exceptions)
+ set_attr('attribute_errors_generate_exceptions',
+ component.attribute_errors_generate_exceptions)
+ else:
+ # Normal constructor
+ set_attr('_call_stack', [
+ (IndexedComponent_slice.slice_info,
+ (component, fixed, sliced, ellipsis)) ])
+ set_attr('_len', 1)
+ # Since this is an object, users may change these flags
+ # between where they declare the slice and iterate over it.
+ set_attr('call_errors_generate_exceptions', True)
+ set_attr('key_errors_generate_exceptions', True)
+ set_attr('attribute_errors_generate_exceptions', True)
def __getstate__(self):
"""Serialize this object.
@@ -59,7 +117,7 @@ def __getstate__(self):
def __setstate__(self, state):
"""Deserialize the state into this object. """
- set_attr = super(_IndexedComponent_slice, self).__setattr__
+ set_attr = super(IndexedComponent_slice, self).__setattr__
for k,v in iteritems(state):
set_attr(k,v)
@@ -77,12 +135,11 @@ def __getattr__(self, name):
"""Override the "." operator to defer resolution until iteration.
Creating a slice of a component returns a
- _IndexedComponent_slice object. Subsequent attempts to resolve
+ IndexedComponent_slice object. Subsequent attempts to resolve
attributes hit this method.
"""
- self._call_stack.append( (
- _IndexedComponent_slice.get_attribute, name ) )
- return self
+ return IndexedComponent_slice(self, (
+ IndexedComponent_slice.get_attribute, name ) )
def __setattr__(self, name, value):
"""Override the "." operator implementing attribute assignment
@@ -95,24 +152,23 @@ def __setattr__(self, name, value):
"""
# Don't overload any pre-existing attributes
if name in self.__dict__:
- return super(_IndexedComponent_slice, self).__setattr__(name,value)
+ return super(IndexedComponent_slice, self).__setattr__(name,value)
- self._call_stack.append( (
- _IndexedComponent_slice.set_attribute, name, value ) )
# Immediately evaluate the slice and set the attributes
- for i in self: pass
+ for i in IndexedComponent_slice(self, (
+ IndexedComponent_slice.set_attribute, name, value ) ):
+ pass
return None
def __getitem__(self, idx):
"""Override the "[]" operator to defer resolution until iteration.
Creating a slice of a component returns a
- _IndexedComponent_slice object. Subsequent attempts to query
+ IndexedComponent_slice object. Subsequent attempts to query
items hit this method.
"""
- self._call_stack.append( (
- _IndexedComponent_slice.get_item, idx ) )
- return self
+ return IndexedComponent_slice(self, (
+ IndexedComponent_slice.get_item, idx ) )
def __setitem__(self, idx, val):
"""Override the "[]" operator for setting item values.
@@ -123,10 +179,10 @@ def __setitem__(self, idx, val):
and immediately evaluates the slice.
"""
- self._call_stack.append( (
- _IndexedComponent_slice.set_item, idx, val ) )
# Immediately evaluate the slice and set the attributes
- for i in self: pass
+ for i in IndexedComponent_slice(self, (
+ IndexedComponent_slice.set_item, idx, val ) ):
+ pass
return None
def __delitem__(self, idx):
@@ -138,16 +194,16 @@ def __delitem__(self, idx):
and immediately evaluates the slice.
"""
- self._call_stack.append( (
- _IndexedComponent_slice.del_item, idx ) )
# Immediately evaluate the slice and set the attributes
- for i in self: pass
+ for i in IndexedComponent_slice(self, (
+ IndexedComponent_slice.del_item, idx ) ):
+ pass
return None
def __call__(self, *idx, **kwds):
"""Special handling of the "()" operator for component slices.
- Creating a slice of a component returns a _IndexedComponent_slice
+ Creating a slice of a component returns a IndexedComponent_slice
object. Subsequent attempts to call items hit this method. We
handle the __call__ method separately based on the item (identifier
immediately before the "()") being called:
@@ -164,28 +220,39 @@ def __call__(self, *idx, **kwds):
# called after retrieving an attribute that will be called. I
# don't know why that happens, but we will trap it here and
# remove the getattr(__name__) from the call stack.
- if self._call_stack[-1][0] == _IndexedComponent_slice.get_attribute \
- and self._call_stack[-1][1] == '__name__':
- self._call_stack.pop()
-
- self._call_stack.append( (
- _IndexedComponent_slice.call, idx, kwds ) )
- if self._call_stack[-2][1] == 'component':
- return self
+ _len = self._len
+ if self._call_stack[_len-1][0] == IndexedComponent_slice.get_attribute \
+ and self._call_stack[_len-1][1] == '__name__':
+ self._len -= 1
+
+ ans = IndexedComponent_slice(self, (
+ IndexedComponent_slice.call, idx, kwds ) )
+ # Because we just duplicated the slice and added a new entry, we
+ # know that the _len == len(_call_stack)
+ if ans._call_stack[-2][1] == 'component':
+ return ans
else:
# Note: simply calling "list(self)" results in infinite
# recursion in python2.6
- return list( i for i in self )
+ return list( i for i in ans )
+
+ def __hash__(self):
+ return hash(tuple(_freeze(x) for x in self._call_stack[:self._len]))
+
+ def __eq__(self, other):
+ if other is self:
+ return True
+ if type(other) is not IndexedComponent_slice:
+ return False
+ return tuple(_freeze(x) for x in self._call_stack[:self._len]) \
+ == tuple(_freeze(x) for x in other._call_stack[:other._len])
+
+ def __ne__(self, other):
+ return not self.__eq__(other)
def duplicate(self):
- ans = _IndexedComponent_slice(None,None,None,None)
- ans.call_errors_generate_exceptions \
- = self.call_errors_generate_exceptions
- ans.key_errors_generate_exceptions \
- = self.key_errors_generate_exceptions
- ans.attribute_errors_generate_exceptions \
- = self.attribute_errors_generate_exceptions
- ans._call_stack = list(self._call_stack)
+ ans = IndexedComponent_slice(self)
+ ans._call_stack = ans._call_stack[:ans._len]
return ans
def index_wildcard_keys(self):
@@ -209,6 +276,27 @@ def expanded_items(self):
return ((_iter.get_last_index(), _) for _ in _iter)
+def _freeze(info):
+ if info[0] == IndexedComponent_slice.slice_info:
+ return (
+ info[0],
+ id(info[1][0]), # id of the Component
+ tuple(iteritems(info[1][1])), # {idx: value} for fixed
+ tuple(iterkeys(info[1][2])), # {idx: slice} for slices
+ info[1][3] # elipsis index
+ )
+ elif info[0] & IndexedComponent_slice.ITEM_MASK:
+ return (
+ info[0],
+ tuple( (x.start,x.stop,x.step) if type(x) is slice else x
+ for x in info[1] ),
+ info[2:],
+ )
+ else:
+ return info
+
+
+
class _slice_generator(object):
"""Utility (iterator) for generating the elements of one slice
@@ -270,6 +358,8 @@ def __next__(self):
else:
return None
+# Backwards compatibility
+_IndexedComponent_slice = IndexedComponent_slice
# Mock up a callable object with a "check_complete" method
def _advance_iter(_iter):
@@ -293,12 +383,13 @@ def __init__(self, component_slice, advance_iter=_advance_iter,
self.advance_iter = advance_iter
self._iter_over_index = iter_over_index
call_stack = self._slice._call_stack
- self._iter_stack = [None]*len(call_stack)
- if call_stack[0][0] == _IndexedComponent_slice.slice_info:
+ call_stack_len = self._slice._len
+ self._iter_stack = [None]*call_stack_len
+ if call_stack[0][0] == IndexedComponent_slice.slice_info:
self._iter_stack[0] = _slice_generator(
*call_stack[0][1], iter_over_index=self._iter_over_index)
- elif call_stack[0][0] == _IndexedComponent_slice.set_item:
- assert len(call_stack) == 1
+ elif call_stack[0][0] == IndexedComponent_slice.set_item:
+ assert call_stack_len == 1
# defer creating the iterator until later
self._iter_stack[0] = _NotIterable # Something not None
else:
@@ -338,9 +429,9 @@ def __next__(self):
idx -= 1
continue
# Walk down the hierarchy to get to the final object
- while idx < len(self._slice._call_stack):
+ while idx < self._slice._len:
_call = self._slice._call_stack[idx]
- if _call[0] == _IndexedComponent_slice.get_attribute:
+ if _call[0] == IndexedComponent_slice.get_attribute:
try:
_comp = getattr(_comp, _call[1])
except AttributeError:
@@ -352,7 +443,7 @@ def __next__(self):
and not self._iter_over_index:
raise
break
- elif _call[0] == _IndexedComponent_slice.get_item:
+ elif _call[0] == IndexedComponent_slice.get_item:
try:
_comp = _comp.__getitem__( _call[1] )
except KeyError:
@@ -365,12 +456,12 @@ def __next__(self):
and not self._iter_over_index:
raise
break
- if _comp.__class__ is _IndexedComponent_slice:
+ if _comp.__class__ is IndexedComponent_slice:
# Extract the _slice_generator (for
# efficiency... these are always 1-level slices,
# so we don't need the overhead of the
- # _IndexedComponent_slice object)
- assert len(_comp._call_stack) == 1
+ # IndexedComponent_slice object)
+ assert _comp._len == 1
self._iter_stack[idx] = _slice_generator(
*_comp._call_stack[0][1],
iter_over_index=self._iter_over_index
@@ -387,7 +478,7 @@ def __next__(self):
break
else:
self._iter_stack[idx] = None
- elif _call[0] == _IndexedComponent_slice.call:
+ elif _call[0] == IndexedComponent_slice.call:
try:
_comp = _comp( *(_call[1]), **(_call[2]) )
except:
@@ -400,8 +491,8 @@ def __next__(self):
and not self._iter_over_index:
raise
break
- elif _call[0] == _IndexedComponent_slice.set_attribute:
- assert idx == len(self._slice._call_stack) - 1
+ elif _call[0] == IndexedComponent_slice.set_attribute:
+ assert idx == self._slice._len - 1
try:
_comp = setattr(_comp, _call[1], _call[2])
except AttributeError:
@@ -412,8 +503,8 @@ def __next__(self):
if self._slice.attribute_errors_generate_exceptions:
raise
break
- elif _call[0] == _IndexedComponent_slice.set_item:
- assert idx == len(self._slice._call_stack) - 1
+ elif _call[0] == IndexedComponent_slice.set_item:
+ assert idx == self._slice._len - 1
# We have a somewhat unusual situation when someone
# makes a _ReferenceDict to m.x[:] and then wants to
# set one of the attributes. In that situation,
@@ -455,9 +546,9 @@ def __next__(self):
and not self._iter_over_index:
raise
break
- if _tmp.__class__ is _IndexedComponent_slice:
+ if _tmp.__class__ is IndexedComponent_slice:
# Extract the _slice_generator and evaluate it.
- assert len(_tmp._call_stack) == 1
+ assert _tmp._len == 1
_iter = _IndexedComponent_slice_iter(
_tmp, self.advance_iter)
for _ in _iter:
@@ -472,8 +563,8 @@ def __next__(self):
self.advance_iter.check_complete()
# No try-catch, since we know this key is valid
_comp[_call[1]] = _call[2]
- elif _call[0] == _IndexedComponent_slice.del_item:
- assert idx == len(self._slice._call_stack) - 1
+ elif _call[0] == IndexedComponent_slice.del_item:
+ assert idx == self._slice._len - 1
# The problem here is that _call[1] may be a slice.
# If it is, but we are in something like a
# _ReferenceDict, where the caller actually wants a
@@ -494,9 +585,9 @@ def __next__(self):
if self._slice.key_errors_generate_exceptions:
raise
break
- if _tmp.__class__ is _IndexedComponent_slice:
+ if _tmp.__class__ is IndexedComponent_slice:
# Extract the _slice_generator and evaluate it.
- assert len(_tmp._call_stack) == 1
+ assert _tmp._len == 1
_iter = _IndexedComponent_slice_iter(
_tmp, self.advance_iter)
_idx_to_del = []
@@ -513,8 +604,8 @@ def __next__(self):
else:
# No try-catch, since we know this key is valid
del _comp[_call[1]]
- elif _call[0] == _IndexedComponent_slice.del_attribute:
- assert idx == len(self._slice._call_stack) - 1
+ elif _call[0] == IndexedComponent_slice.del_attribute:
+ assert idx == self._slice._len - 1
try:
_comp = delattr(_comp, _call[1])
except AttributeError:
@@ -527,11 +618,11 @@ def __next__(self):
break
else:
raise DeveloperError(
- "Unexpected entry in _IndexedComponent_slice "
+ "Unexpected entry in IndexedComponent_slice "
"_call_stack: %s" % (_call[0],))
idx += 1
- if idx == len(self._slice._call_stack):
+ if idx == self._slice._len:
# Check to make sure the custom iterator
# (i.e._fill_in_known_wildcards) is complete
self.advance_iter.check_complete()
diff --git a/pyomo/core/base/objective.py b/pyomo/core/base/objective.py
index 1c96157502d..3a90221181b 100644
--- a/pyomo/core/base/objective.py
+++ b/pyomo/core/base/objective.py
@@ -31,7 +31,7 @@
from pyomo.core.base.expression import (_ExpressionData,
_GeneralExpressionDataImpl)
from pyomo.core.base.misc import apply_indexed_rule, tabular_writer
-from pyomo.core.base.sets import Set
+from pyomo.core.base.set import Set
from pyomo.core.base import minimize, maximize
from six import iteritems
@@ -614,7 +614,7 @@ class ObjectiveList(IndexedObjective):
def __init__(self, **kwargs):
"""Constructor"""
- args = (Set(),)
+ args = (Set(dimen=1),)
if 'expr' in kwargs:
raise ValueError(
"ObjectiveList does not accept the 'expr' keyword")
@@ -633,6 +633,7 @@ def construct(self, data=None):
if self._constructed:
return
self._constructed=True
+ self.index_set().construct()
assert self._init_expr is None
_init_rule = self.rule
diff --git a/pyomo/core/base/param.py b/pyomo/core/base/param.py
index 4023767ae42..67340c7e7f7 100644
--- a/pyomo/core/base/param.py
+++ b/pyomo/core/base/param.py
@@ -15,6 +15,7 @@
import logging
from weakref import ref as weakref_ref
+from pyomo.common.deprecation import deprecation_warning
from pyomo.common.modeling import NoArgumentGiven
from pyomo.common.timing import ConstructionTimer
from pyomo.core.base.plugin import ModelComponentFactory
@@ -23,7 +24,7 @@
UnindexedComponent_set
from pyomo.core.base.misc import apply_indexed_rule, apply_parameterized_indexed_rule
from pyomo.core.base.numvalue import NumericValue, native_types, value
-from pyomo.core.base.set_types import Any
+from pyomo.core.base.set_types import Any, Reals
from six import iteritems, iterkeys, next, itervalues
@@ -41,6 +42,46 @@ def _raise_modifying_immutable_error(obj, index):
"declare the parameter as mutable [i.e., Param(mutable=True)]"
% (name,))
+class _ImplicitAny(Any.__class__):
+ """An Any that issues a deprecation warning for non-Real values.
+
+ This is a helper class to implement the deprecation warnings for the
+ change of Param's implicit domain from Any to Reals.
+
+ """
+ def __new__(cls, **kwds):
+ return super(_ImplicitAny, cls).__new__(cls)
+
+ def __init__(self, owner, **kwds):
+ super(_ImplicitAny, self).__init__(**kwds)
+ self._owner = weakref_ref(owner)
+ self._component = weakref_ref(self)
+ self.construct()
+
+ def __getstate__(self):
+ state = super(_ImplicitAny, self).__getstate__()
+ state['_owner'] = None if self._owner is None else self._owner()
+ return state
+
+ def __setstate__(self, state):
+ _owner = state.pop('_owner')
+ super(_ImplicitAny, self).__setstate__(state)
+ self._owner = None if _owner is None else weakref_ref(_owner)
+
+ def __deepcopy__(self, memo):
+ return super(Any.__class__, self).__deepcopy__(memo)
+
+ def __contains__(self, val):
+ if val not in Reals:
+ deprecation_warning(
+ "The default domain for Param objects is 'Any'. However, "
+ "we will be changing that default to 'Reals' in the "
+ "future. If you really intend the domain of this Param (%s) "
+ "to be 'Any', you can suppress this warning by explicitly "
+ "specifying 'within=Any' to the Param constructor."
+ % ('Unknown' if self._owner is None else self._owner().name,),
+ version='5.6.9', remove_in='6.0')
+ return True
class _NotValid(object):
"""A dummy type that is pickle-safe that we can use as the default
@@ -125,6 +166,9 @@ def value(self, val):
"""Set the value for this variable."""
self.set_value(val)
+ def get_units(self):
+ """Return the units for this ParamData"""
+ return self.parent_component()._units
def is_fixed(self):
"""
@@ -144,22 +188,6 @@ def is_parameter_type(self):
"""
return True
- def is_variable_type(self):
- """
- Returns False because this is not a variable object.
- """
- return False
-
- def is_expression_type(self):
- """Returns False because this is not an expression"""
- return False
-
- def is_potentially_variable(self):
- """
- Returns False because this object can never reference variables.
- """
- return False
-
def _compute_polynomial_degree(self, result):
"""
Returns 0 because this object can never reference variables.
@@ -197,6 +225,8 @@ class Param(IndexedComponent):
initialize
A dictionary or rule for setting up this parameter with existing
model data
+ unit: pyomo unit expression
+ An expression containing the units for the parameter
"""
DefaultMutable = False
@@ -213,18 +243,21 @@ def __init__(self, *args, **kwd):
self._rule = kwd.pop('rule', _NotValid )
self._rule = kwd.pop('initialize', self._rule )
self._validate = kwd.pop('validate', None )
- self.domain = kwd.pop('domain', Any )
+ self.domain = kwd.pop('domain', None )
self.domain = kwd.pop('within', self.domain )
self._mutable = kwd.pop('mutable', Param.DefaultMutable )
self._default_val = kwd.pop('default', _NotValid )
self._dense_initialize = kwd.pop('initialize_as_dense', False)
+ self._units = kwd.pop('units', None)
+ if self._units is not None:
+ self._mutable = True
#
if 'repn' in kwd:
logger.error(
"The 'repn' keyword is not a validate keyword argument for Param")
#
if self.domain is None:
- self.domain = Any
+ self.domain = _ImplicitAny(owner=self, name='Any')
#
kwd.setdefault('ctype', Param)
IndexedComponent.__init__(self, *args, **kwd)
@@ -257,10 +290,6 @@ def __iter__(self):
return self._data.__iter__()
return self._index.__iter__()
- def is_expression_type(self):
- """Returns False because this is not an expression"""
- return False
-
#
# These are "sparse equivalent" access / iteration methods that
# only loop over the defined data.
@@ -598,6 +627,7 @@ def _setitem_when_not_present(self, index, value, _check_domain=True):
return value
except:
del self._data[index]
+ raise
def _validate_value(self, index, value, validate_domain=True):
@@ -991,7 +1021,10 @@ def is_constant(self):
"""
return self._constructed and not self._mutable
-
+ def get_units(self):
+ """Return the units expression for this parameter"""
+ return self._units
+
class IndexedParam(Param):
def __call__(self, exception=True):
diff --git a/pyomo/core/base/piecewise.py b/pyomo/core/base/piecewise.py
index 59a12aba9df..193e12f7b20 100644
--- a/pyomo/core/base/piecewise.py
+++ b/pyomo/core/base/piecewise.py
@@ -43,8 +43,8 @@
import itertools
import operator
import types
+import enum
-from pyutilib.enum import Enum
from pyutilib.misc import flatten_tuple
from pyomo.common.timing import ConstructionTimer
@@ -61,19 +61,21 @@
logger = logging.getLogger('pyomo.core')
-PWRepn = Enum('SOS2',
- 'BIGM_BIN',
- 'BIGM_SOS1',
- 'CC',
- 'DCC',
- 'DLOG',
- 'LOG',
- 'MC',
- 'INC')
-
-Bound = Enum('Lower',
- 'Upper',
- 'Equal')
+class PWRepn(str, enum.Enum):
+ SOS2 = 'SOS2'
+ BIGM_BIN = 'BIGM_BIN'
+ BIGM_SOS1 = 'BIGM_SOS1'
+ CC = 'CC'
+ DCC = 'DCC'
+ DLOG = 'DLOG'
+ LOG = 'LOG'
+ MC = 'MC'
+ INC = 'INC'
+
+class Bound(str, enum.Enum):
+ Lower = 'Lower'
+ Upper = 'Upper'
+ Equal = 'Equal'
# BE SURE TO CHANGE THE PIECWISE DOCSTRING
# IF THIS GETS CHANGED
diff --git a/pyomo/core/base/range.py b/pyomo/core/base/range.py
index 27c6874f173..5a4d0dbd666 100644
--- a/pyomo/core/base/range.py
+++ b/pyomo/core/base/range.py
@@ -27,6 +27,8 @@ def remainder(a,b):
ans -= b
return ans
+_inf = float('inf')
+
class RangeDifferenceError(ValueError): pass
class NumericRange(object):
@@ -69,6 +71,10 @@ def __init__(self, start, end, step, closed=(True,True)):
raise ValueError(
"NumericRange step must be int (got %s)" % (step,))
step = int(step)
+ if start == -_inf:
+ start = None
+ if end == _inf:
+ end = None
if start is None:
if step:
raise ValueError("NumericRange: start must not be None "
diff --git a/pyomo/core/base/rangeset.py b/pyomo/core/base/rangeset.py
index 64070534c46..bae47cb1477 100644
--- a/pyomo/core/base/rangeset.py
+++ b/pyomo/core/base/rangeset.py
@@ -10,210 +10,10 @@
__all__ = ['RangeSet']
-import logging
-import math
-from six.moves import xrange
-
-from pyomo.common.timing import ConstructionTimer
-from pyomo.core.expr.numvalue import value
-from pyomo.core.base.sets import OrderedSimpleSet
-from pyomo.core.base.set_types import Integers, Reals
-from pyomo.core.base.misc import apply_indexed_rule
-from pyomo.core.base.plugin import ModelComponentFactory
-
-logger = logging.getLogger('pyomo.core')
-
-
-@ModelComponentFactory.register("A sequence of numeric values. RangeSet(start,end,step) is a sequence starting a value 'start', and increasing in values by 'step' until a value greater than or equal to 'end' is reached.")
-class RangeSet(OrderedSimpleSet):
- """
- A set that represents a list of numeric values.
- """
-
- def __init__(self, *args, **kwds):
- """
- Construct a list of integers
- """
- if len(args) == 0:
- raise RuntimeError("Attempting to construct a RangeSet object with no arguments!")
- super(RangeSet, self).__init__(**kwds)
- self._type=RangeSet
- #
- if len(args) == 1:
- #
- # RangeSet(end) generates the set: 1 ... end
- #
- self._start=1
- self._end=args[0]
- self._step=1
- elif len(args) == 2:
- #
- # RangeSet(start,end) generates the set: start ... end
- #
- self._start=args[0]
- self._end=args[1]
- self._step=1
- else:
- #
- # RangeSet(start,end,step) generates the set: start, start+step, start+2*step, ... end
- #
- self._start=args[0]
- self._end=args[1]
- self._step=args[2]
- #
- self.ordered = True # This is an ordered set
- self.value = None # No internal set data
- self.virtual = True # This is a virtual set
- self.concrete = True # This is a concrete set
- self._len = 0 # This is set by the construct() method
-
- def construct(self, values=None):
- """
- Initialize set data
- """
- if self._constructed:
- return
- timer = ConstructionTimer(self)
- self._constructed=True
- #
- # We call value() here for cases like Expressions, mutable
- # Params and the like
- #
- self._start_val = value(self._start)
- self._end_val = value(self._end)
- self._step_val = value(self._step)
- #
- # The set generates integer values if the starting value,
- # step and end value are all integers. Otherwise, the set
- # generates real values.
- #
- if type(self._start_val) is int and type(self._step) is int and type(self._end_val) is int:
- self.domain = Integers
- else:
- self.domain = Reals
- #
- # Compute the set length and upper bound
- #
- if self.filter is None and self.validate is None:
- #
- # Directly compute the number of elements in the set, from
- # which the upper-bound is computed.
- #
- self._len = int(math.floor((self._end_val-self._start_val+self._step_val+1e-7)//self._step_val))
- ub = self._start_val + (self._len-1)*self._step_val
- else:
- #
- # Iterate through the set to compute the upper bound
- # and number of elements.
- #
- ub = self._start_val
- ctr=0
- for i in self:
- ub = i
- ctr += 1
- self._len = ctr
- #
- # Set the bounds information
- #
- self._bounds = (self._start_val, ub)
- timer.report()
-
- def __len__(self):
- """
- Return the pre-computed set length
- """
- return self._len
-
- def __iter__(self):
- if not self._constructed:
- raise RuntimeError(
- "Cannot iterate over abstract RangeSet '%s' before it has "
- "been constructed (initialized)." % (self.name,) )
- if self.filter is None and self.validate is None:
- #
- # Iterate through all set elements
- #
- for i in xrange(self._len):
- yield self._start_val + i*self._step_val
- else:
- #
- # Iterate through all set elements and filter
- # and/or validate the element values.
- #
- for i in xrange(int((self._end_val-self._start_val+self._step_val+1e-7)//self._step_val)):
- val = self._start_val + i*self._step_val
- if not self.filter is None and not apply_indexed_rule(self, self.filter, self._parent(), val):
- continue
- if not self.validate is None and not apply_indexed_rule(self, self.validate, self._parent(), val):
- continue
- yield val
-
- def data(self):
- """The underlying set data."""
- return set(self)
-
- def first(self):
- """The first element is the lower bound"""
- return self._bounds[0]
-
- def last(self):
- """The last element is the upper bound"""
- return self._bounds[1]
-
- def member(self, key):
- """
- Return the value associated with this key.
- """
- logger.warning("DEPRECATED: The RangeSet method \"x.member(idx)\" "
- "is deprecated and will be removed in Pyomo 5.0. "
- "Use x[idx] instead.")
- return self.__getitem__(key)
-
- def __getitem__(self, key):
- """
- Return the value associated with this key. Valid
- index values are 1 .. len(set), or -1 .. -len(set).
- Negative key values index from the end of the set.
- """
- if key >= 1:
- if key > self._len:
- raise IndexError("Cannot index a RangeSet past the last element")
- return self._start_val + (key-1)*self._step_val
- elif key < 0:
- if self._len+key < 0:
- raise IndexError("Cannot index a RangeSet past the first element")
- return self._start_val + (self._len+key)*self._step_val
- else:
- raise IndexError("Valid index values for sets are 1 .. len(set) or -1 .. -len(set)")
-
- def _set_contains(self, element):
- """
- Test if the specified element in this set.
- """
- try:
- x = element - self._start_val
- if x % self._step_val != 0:
- #
- # If we are doing floating-point arithmetic, there is a
- # chance that we are seeing roundoff error...
- #
- if math.fabs((x + 1e-7) % self._step_val) > 2e-7:
- return False
- if element < self._bounds[0] or element > self._bounds[1]:
- return False
- except:
- #
- # This exception is triggered when type(element) is not int or float.
- #
- return False
- #
- # Now see if the element if filtered or invalid.
- #
- if self.filter is not None and not self.filter(element):
- return False
- if self.validate is not None and not self.validate(self, element):
- return False
- return True
-
-
+from .set import RangeSet
+from pyomo.common.deprecation import deprecation_warning
+deprecation_warning(
+ 'The pyomo.core.base.rangeset module is deprecated. '
+ 'Import RangeSet objects from pyomo.core.base.set or pyomo.core.',
+ version='TBD')
diff --git a/pyomo/core/base/reference.py b/pyomo/core/base/reference.py
index d63011b27f3..ec056da2411 100644
--- a/pyomo/core/base/reference.py
+++ b/pyomo/core/base/reference.py
@@ -10,13 +10,13 @@
from pyutilib.misc import flatten_tuple
from pyomo.common import DeveloperError
-from pyomo.core.base.sets import SetOf, _SetProduct, _SetDataBase
+from pyomo.core.base.set import SetOf, _SetDataBase
from pyomo.core.base.component import Component, ComponentData
from pyomo.core.base.indexed_component import (
IndexedComponent, UnindexedComponent_set
)
from pyomo.core.base.indexed_component_slice import (
- _IndexedComponent_slice, _IndexedComponent_slice_iter
+ IndexedComponent_slice, _IndexedComponent_slice_iter
)
import six
@@ -143,14 +143,14 @@ class _ReferenceDict(collections_MutableMapping):
"""A dict-like object whose values are defined by a slice.
This implements a dict-like object whose keys and values are defined
- by a component slice (:py:class:`_IndexedComponent_slice`). The
+ by a component slice (:py:class:`IndexedComponent_slice`). The
intent behind this object is to replace the normal ``_data``
:py:class:`dict` in :py:class:`IndexedComponent` containers to
create "reference" components.
Parameters
----------
- component_slice : :py:class:`_IndexedComponent_slice`
+ component_slice : :py:class:`IndexedComponent_slice`
The slice object that defines the "members" of this mutable mapping.
"""
def __init__(self, component_slice):
@@ -192,19 +192,19 @@ def __getitem__(self, key):
def __setitem__(self, key, val):
tmp = self._slice.duplicate()
op = tmp._call_stack[-1][0]
- if op == _IndexedComponent_slice.get_item:
+ if op == IndexedComponent_slice.get_item:
tmp._call_stack[-1] = (
- _IndexedComponent_slice.set_item,
+ IndexedComponent_slice.set_item,
tmp._call_stack[-1][1],
val )
- elif op == _IndexedComponent_slice.slice_info:
+ elif op == IndexedComponent_slice.slice_info:
tmp._call_stack[-1] = (
- _IndexedComponent_slice.set_item,
+ IndexedComponent_slice.set_item,
tmp._call_stack[-1][1],
val )
- elif op == _IndexedComponent_slice.get_attribute:
+ elif op == IndexedComponent_slice.get_attribute:
tmp._call_stack[-1] = (
- _IndexedComponent_slice.set_attribute,
+ IndexedComponent_slice.set_attribute,
tmp._call_stack[-1][1],
val )
else:
@@ -218,13 +218,13 @@ def __setitem__(self, key, val):
def __delitem__(self, key):
tmp = self._slice.duplicate()
op = tmp._call_stack[-1][0]
- if op == _IndexedComponent_slice.get_item:
+ if op == IndexedComponent_slice.get_item:
# If the last attribute of the slice gets an item,
# change it to delete the item
tmp._call_stack[-1] = (
- _IndexedComponent_slice.del_item,
+ IndexedComponent_slice.del_item,
tmp._call_stack[-1][1] )
- elif op == _IndexedComponent_slice.slice_info:
+ elif op == IndexedComponent_slice.slice_info:
assert len(tmp._call_stack) == 1
_iter = self._get_iter(tmp, key)
try:
@@ -233,11 +233,11 @@ def __delitem__(self, key):
return
except StopIteration:
raise KeyError("KeyError: %s" % (key,))
- elif op == _IndexedComponent_slice.get_attribute:
+ elif op == IndexedComponent_slice.get_attribute:
# If the last attribute of the slice retrieves an attribute,
# change it to delete the attribute
tmp._call_stack[-1] = (
- _IndexedComponent_slice.del_attribute,
+ IndexedComponent_slice.del_attribute,
tmp._call_stack[-1][1] )
else:
raise DeveloperError(
@@ -300,7 +300,7 @@ class _ReferenceSet(collections_Set):
"""A set-like object whose values are defined by a slice.
This implements a dict-like object whose members are defined by a
- component slice (:py:class:`_IndexedComponent_slice`).
+ component slice (:py:class:`IndexedComponent_slice`).
:py:class:`_ReferenceSet` differs from the
:py:class:`_ReferenceDict` above in that it looks in the underlying
component ``index_set()`` for values that match the slice, and not
@@ -308,7 +308,7 @@ class _ReferenceSet(collections_Set):
Parameters
----------
- component_slice : :py:class:`_IndexedComponent_slice`
+ component_slice : :py:class:`IndexedComponent_slice`
The slice object that defines the "members" of this set
"""
@@ -347,14 +347,6 @@ def _get_iter(self, _slice, key):
)
-def _get_base_sets(_set):
- if isinstance(_set, _SetProduct):
- for subset in _set.set_tuple:
- for _ in _get_base_sets(subset):
- yield _
- else:
- yield _set
-
def _identify_wildcard_sets(iter_stack, index):
# if we have already decided that there isn't a comon index for the
# slices, there is nothing more we can do. Bail.
@@ -368,7 +360,7 @@ def _identify_wildcard_sets(iter_stack, index):
if level is not None:
offset = 0
wildcard_sets = {}
- for j,s in enumerate(_get_base_sets(level.component.index_set())):
+ for j,s in enumerate(level.component.index_set().subsets()):
if s is UnindexedComponent_set:
wildcard_sets[j] = s
offset += 1
@@ -439,7 +431,7 @@ def Reference(reference, ctype=_NotSpecified):
Parameters
----------
- reference : :py:class:`_IndexedComponent_slice`
+ reference : :py:class:`IndexedComponent_slice`
component slice that defines the data to include in the
Reference component
@@ -514,7 +506,7 @@ def Reference(reference, ctype=_NotSpecified):
4 : 1 : 10 : None : False : False : Reals
"""
- if isinstance(reference, _IndexedComponent_slice):
+ if isinstance(reference, IndexedComponent_slice):
pass
elif isinstance(reference, Component):
reference = reference[...]
@@ -535,7 +527,7 @@ def Reference(reference, ctype=_NotSpecified):
ctypes = set((1,2))
index = []
for obj in _iter:
- ctypes.add(obj.type())
+ ctypes.add(obj.ctype)
if not isinstance(obj, ComponentData):
# This object is not a ComponentData (likely it is a pure
# IndexedComponent container). As the Reference will treat
diff --git a/pyomo/core/base/set.py b/pyomo/core/base/set.py
index 24098f796c0..8597c9d8d12 100644
--- a/pyomo/core/base/set.py
+++ b/pyomo/core/base/set.py
@@ -19,14 +19,13 @@
from six import iteritems, iterkeys
from six.moves import xrange
-from pyutilib.misc.misc import flatten_tuple
-
from pyomo.common.deprecation import deprecated, deprecation_warning
-from pyomo.common.errors import DeveloperError
+from pyomo.common.errors import DeveloperError, PyomoException
from pyomo.common.timing import ConstructionTimer
from pyomo.core.expr.numvalue import (
native_types, native_numeric_types, as_numeric, value,
)
+from pyomo.core.base.plugin import ModelComponentFactory
from pyomo.core.base.util import (
disable_methods, InitializerBase, Initializer, ConstantInitializer,
CountedCallInitializer, ItemInitializer, IndexedCallInitializer,
@@ -39,20 +38,78 @@
from pyomo.core.base.indexed_component import (
IndexedComponent, UnindexedComponent_set, normalize_index,
)
+from pyomo.core.base.global_set import (
+ GlobalSets, GlobalSetBase,
+)
from pyomo.core.base.misc import sorted_robust
if six.PY3:
from collections.abc import Sequence as collections_Sequence
+ def formatargspec(fn):
+ return str(inspect.signature(fn))
else:
from collections import Sequence as collections_Sequence
+ def formatargspec(fn):
+ return str(inspect.formatargspec(*inspect.getargspec(fn)))
logger = logging.getLogger('pyomo.core')
_prePython37 = sys.version_info[:2] < (3,7)
+_inf = float('inf')
+
FLATTEN_CROSS_PRODUCT = True
+"""Set objects
+
+Pyomo `Set` objects are designed to be "API-compatible" with Python
+`set` objects. However, not all Set objects implement the full `set`
+API (e.g., only finite discrete Sets support `add()`).
+
+All Sets implement one of the following APIs:
+
+0. `class _SetDataBase(ComponentData)`
+ *(pure virtual interface)*
+
+1. `class _SetData(_SetDataBase)`
+ *(base class for all AML Sets)*
+
+2. `class _FiniteSetMixin(object)`
+ *(pure virtual interface, adds support for discrete/iterable sets)*
+
+4. `class _OrderedSetMixin(object)`
+ *(pure virtual interface, adds support for ordered Sets)*
+
+This is a bit of a change from python set objects. First, the
+lowest-level (non-abstract) Data object supports infinite sets; that is,
+sets that contain an infinite number of values (this includes both
+bounded continuous ranges as well as unbounded discrete ranges). As
+there are an infinite number of values, iteration is *not*
+supported. The base class also implements all Python set operations.
+Note that `_SetData` does *not* implement `len()`, as Python requires
+`len()` to return a non-negative integer.
+
+Finite sets add iteration and support for `len()`. In addition, they
+support access to members through three methods: `data()` returns the
+members as a tuple (in the internal storage order), and may not be
+deterministic. `ordered_data()` returns the members, and is guaranteed
+to be in a deterministic order (in the case of insertion order sets, up
+to the determinism of the script that populated the set). Finally,
+`sorted_data()` returns the members in a sorted order (guaranteed
+deterministic, up to the implementation of < and ==).
+
+..TODO: should these three members all return generators? This would
+further change the implementation of `data()`, but would allow consumers
+to potentially access the members in a more efficient manner.
+
+Ordered sets add support for `ord()` and `__getitem__`, as well as the
+`first`, `last`, `next` and `prev` methods for stepping over set
+members.
+
+Note that the base APIs are all declared (and to the extent possible,
+implemented) through Mixin classes.
+"""
def process_setarg(arg):
if isinstance(arg, _SetDataBase):
@@ -60,7 +117,7 @@ def process_setarg(arg):
elif isinstance(arg, IndexedComponent):
raise TypeError("Cannot apply a Set operator to an "
"indexed %s component (%s)"
- % (arg.type().__name__, arg.name,))
+ % (arg.ctype.__name__, arg.name,))
elif isinstance(arg, Component):
raise TypeError("Cannot apply a Set operator to a non-Set "
"%s component (%s)"
@@ -69,19 +126,23 @@ def process_setarg(arg):
raise TypeError("Cannot apply a Set operator to a non-Set "
"component data (%s)" % (arg.name,))
- # TODO: DEPRECATE this functionality? It has never been documented,
+ # DEPRECATED: This functionality has never been documented,
# and I don't know of a use of it in the wild.
- try:
+ if hasattr(arg, 'set_options'):
# If the argument has a set_options attribute, then use
# it to initialize a set
- args = getattr(arg,'set_options')
+ args = arg.set_options
args.setdefault('initialize', arg)
args.setdefault('ordered', type(arg) not in Set._UnorderedInitializers)
ans = Set(**args)
- ans.construct()
+
+ _init = args['initialize']
+ if not ( inspect.isgenerator(_init)
+ or inspect.isfunction(_init)
+ or ( isinstance(_init, ComponentData)
+ and not _init.parent_component().is_constructed() )):
+ ans.construct()
return ans
- except AttributeError:
- pass
# TBD: should lists/tuples be copied into Sets, or
# should we preserve the reference using SetOf?
@@ -91,19 +152,35 @@ def process_setarg(arg):
# ordered=type(arg) in {tuple, list}))
# ans.construct()
#
- # But this causes problems, especially because Set()'s
- # constructor needs to know if the object is ordered
- # (Set defaults to ordered, and will toss a warning if
- # the underlying data is not ordered)). While we could
- # add checks where we create the Set (like here and in
- # the __r*__ operators) and pass in a reasonable value
- # for ordered, it is starting to make more sense to use
- # SetOf (which has that logic). Alternatively, we could
- # use SetOf to create the Set:
+ # But this causes problems, especially because Set()'s constructor
+ # needs to know if the object is ordered (Set defaults to ordered,
+ # and will toss a warning if the underlying data source is not
+ # ordered)). While we could add checks where we create the Set
+ # (like here and in the __r*__ operators) and pass in a reasonable
+ # value for ordered, it is starting to make more sense to use SetOf
+ # (which has that logic). Alternatively, we could use SetOf to
+ # create the Set:
+ #
+ _defer_construct = False
+ if inspect.isgenerator(arg):
+ _ordered = True
+ _defer_construct = True
+ elif inspect.isfunction(arg):
+ _ordered = True
+ _defer_construct = True
+ else:
+ arg = SetOf(arg)
+ _ordered = arg.isordered()
+
+ ans = Set(initialize=arg, ordered=_ordered)
#
- tmp = SetOf(arg)
- ans = Set(initialize=tmp, ordered=tmp.isordered())
- ans.construct()
+ # Because the resulting set will be attached to the model (at least
+ # for the time being), we will NOT construct it here unless the data
+ # is already determined (either statically provided, or through an
+ # already-constructed component).
+ #
+ if not _defer_construct:
+ ans.construct()
#
# Or we can do the simple thing and just use SetOf:
#
@@ -111,7 +188,9 @@ def process_setarg(arg):
return ans
-@deprecated('The set_options decorator seems nonessential and is deprecated',
+@deprecated('The set_options decorator is deprecated; create Sets from '
+ 'functions explicitly by passing the function to the Set '
+ 'constructor using the "initialize=" keyword argument.',
version='TBD')
def set_options(**kwds):
"""
@@ -147,16 +226,39 @@ def A_rule(model, i, j):
...
"""
- def wrapper_function ( *args, **kwargs ):
- value = fn( *args, **kwargs )
+ # Because some of our processing of initializer functions relies on
+ # knowing the number of positional arguments, we will go to extra
+ # effort here to preserve the original function signature.
+ _funcdef = """def wrapper_function%s:
+ args, varargs, kwds, local_env = inspect.getargvalues(
+ inspect.currentframe())
+ args = tuple(local_env[_] for _ in args) + (varargs or ())
+ value = fn(*args, **(kwds or {}))
+ # Map None -> Set.End
if value is None:
return Set.End
return value
- return wrapper_function
+""" % (formatargspec(fn),)
+ # Create the wrapper in a temporary environment that mimics this
+ # function's environment.
+ _env = dict(globals())
+ _env.update(locals())
+ exec(_funcdef, _env)
+ return _env['wrapper_function']
+
class UnknownSetDimen(object): pass
class SetInitializer(InitializerBase):
+ """An Initializer wrapper for returning Set objects
+
+ This initializer wraps another Initializer and converts the return
+ value to a proper Pyomo Set. If the initializer is None, then Any
+ is returned. This initializer can be 'intersected' with another
+ initializer to return the SetIntersect of the Sets returned by the
+ initializers.
+
+ """
__slots__ = ('_set','verified')
def __init__(self, init, allow_generators=True):
@@ -184,16 +286,32 @@ def __call__(self, parent, idx):
if self._set is None:
return Any
else:
- return self._set(parent, idx)
+ return process_setarg(self._set(parent, idx))
def constant(self):
return self._set is None or self._set.constant()
+ def contains_indices(self):
+ return self._set is not None and self._set.contains_indices()
+
+ def indices(self):
+ if self._set is not None:
+ return self._set.indices()
+ else:
+ super(SetInitializer, self).indices()
+
def setdefault(self, val):
if self._set is None:
- self._set = ConstantInitializer(val)
+ self._set = Initializer(val)
class SetIntersectInitializer(InitializerBase):
+ """An Initializer that returns the intersection of two SetInitializers
+
+ Users will typically not create a SetIntersectInitializer directly.
+ Instead, SetInitializer.intersect() may return a SetInitializer that
+ contains a SetIntersectInitializer instance.
+
+ """
__slots__ = ('_A','_B',)
def __init__(self, setA, setB):
self._A = setA
@@ -205,9 +323,37 @@ def __call__(self, parent, idx):
def constant(self):
return self._A.constant() and self._B.constant()
-class RangeSetInitializer(InitializerBase):
+ def contains_indices(self):
+ return self._A.contains_indices() or self._B.contains_indices()
+
+ def indices(self):
+ if self._A.contains_indices():
+ if self._B.contains_indices():
+ if set(self._A.indices()) != set (self._B.indices()):
+ raise ValueError(
+ "SetIntersectInitializer contains two "
+ "sub-initializers with inconsistent external indices")
+ return self._A.indices()
+ else:
+ # It is OK (and desirable) for this to raise the exception
+ # if B does not contain external indices
+ return self._B.indices()
+
+class BoundsInitializer(InitializerBase):
+ """An Initializer wrapper that converts bounds information to a RangeSet
+
+ The BoundsInitializer wraps another initializer that is expected to
+ return valid arguments to the RangeSet constructor. Nominally, this
+ would be bounds information in the form of (lower bound, upper
+ bound), but could also be a single scalar or a 3-tuple. Calling
+ this initializer will return a RangeSet object.
+
+ BoundsInitializer objects can be intersected with other
+ SetInitializer objects using the SetInitializer.intersect() method.
+
+ """
__slots__ = ('_init', 'default_step',)
- def __init__(self, init, default_step=1):
+ def __init__(self, init, default_step=0):
self._init = Initializer(init, treat_sequences_as_mappings=False)
self.default_step = default_step
@@ -215,10 +361,18 @@ def __call__(self, parent, idx):
val = self._init(parent, idx)
if not isinstance(val, collections_Sequence):
val = (1, val, self.default_step)
- if len(val) < 3:
- val = tuple(val) + (self.default_step,)
+ else:
+ val = tuple(val)
+ if len(val) == 2:
+ val += (self.default_step,)
+ elif len(val) == 1:
+ val = (1, val[0], self.default_step)
+ elif len(val) == 0:
+ val = (None, None, self.default_step)
ans = RangeSet(*tuple(val))
- ans.construct()
+ # We don't need to construct here, as the RangeSet will
+ # automatically construct itself if it can
+ #ans.construct()
return ans
def constant(self):
@@ -228,50 +382,68 @@ def setdefault(self, val):
# This is a real range set... there is no default to set
pass
-#
-# DESIGN NOTES
-#
-# What do sets do?
-#
-# ALL:
-# __contains__
-#
-# Note: FINITE implies DISCRETE. Infinite discrete sets cannot be iterated
-#
-# FINITE: ALL +
-# __len__ (Note: Python len() requires __len__ to return non-negative int)
-# __iter__, __reversed__
-# add()
-# sorted(), ordered_data()
-#
-# ORDERED: FINITE +
-# __getitem__
-# next(), prev(), first(), last()
-# ord()
-#
-# When we do math, the least specific set dictates the API of the resulting set.
-#
-# Note that isfinite and isordered must be resolvable when the class
-# is instantiated (*before* construction). We will key off these fields
-# when performing set operations to know what type of operation to
-# create, and we will allow set operations in Abstract before
-# construction.
+class TuplizeError(PyomoException):
+ pass
-#
-# Set rewrite TODOs:
-#
-# - Test index/ord for equivalence of 1 and (1,)
-#
-# - Make sure that all classes implement the appropriate methods
-# (e.g., bounds)
-#
-# - Sets created with Set.Skip should produce intelligible errors
-#
-# - Resolve nonnumeric range operations on tuples of numeric ranges
-#
-# - Ensure the range operators raise exeptions for unexpected
-# (non-range/non list arguments.
-#
+class TuplizeValuesInitializer(InitializerBase):
+ """An initializer wrapper that will "tuplize" a sequence
+
+ This initializer takes the result of another initializer, and if it
+    is a sequence that does not already contain tuples, will convert it
+ to a sequence of tuples, each of length 'dimen' before returning it.
+
+ """
+ __slots__ = ('_init', '_dimen')
+
+ def __new__(cls, *args):
+ if args == (None,):
+ return None
+ else:
+ return super(TuplizeValuesInitializer, cls).__new__(cls)
+
+ def __init__(self, _init):
+ self._init = _init
+ self._dimen = UnknownSetDimen
+
+ def __call__(self, parent, index):
+ _val = self._init(parent, index)
+ if self._dimen in {1, None, UnknownSetDimen}:
+ return _val
+ elif _val is Set.Skip:
+ return _val
+ elif not _val:
+ return _val
+
+ if not isinstance(_val, collections_Sequence):
+ _val = tuple(_val)
+ if len(_val) == 0:
+ return _val
+ if isinstance(_val[0], tuple):
+ return _val
+ return self._tuplize(_val, parent, index)
+
+ def constant(self):
+ return self._init.constant()
+
+ def contains_indices(self):
+ return self._init.contains_indices()
+
+ def indices(self):
+ return self._init.indices()
+
+ def _tuplize(self, _val, parent, index):
+ d = self._dimen
+ if len(_val) % d:
+ raise TuplizeError(
+ "Cannot tuplize list data for set %%s%%s because its "
+ "length %s is not a multiple of dimen=%s" % (len(_val), d))
+
+ return list(tuple(_val[d*i:d*(i+1)]) for i in xrange(len(_val)//d))
+
+
+class _NotFound(object):
+ "Internal type flag used to indicate if an object is not found in a set"
+ pass
# A trivial class that we can use to test if an object is a "legitimate"
@@ -291,8 +463,28 @@ class _SetData(_SetDataBase):
__slots__ = ()
def __contains__(self, value):
+ try:
+ ans = self.get(value, _NotFound)
+ except TypeError:
+ # In Python 3.x, Sets are unhashable
+ if isinstance(value, _SetData):
+ ans = _NotFound
+ else:
+ raise
+
+ if ans is _NotFound:
+ if isinstance(value, _SetData):
+ deprecation_warning(
+ "Testing for set subsets with 'a in b' is deprecated. "
+ "Use 'a.issubset(b)'.", version='TBD')
+ return value.issubset(self)
+ else:
+ return False
+ return True
+
+ def get(self, value, default=None):
raise DeveloperError("Derived set class (%s) failed to "
- "implement __contains__" % (type(self).__name__,))
+ "implement get()" % (type(self).__name__,))
def isdiscrete(self):
"""Returns True if this set admits only discrete members"""
@@ -306,12 +498,36 @@ def isordered(self):
"""Returns True if this is an ordered finite discrete (iterable) Set"""
return False
+ def subsets(self, expand_all_set_operators=None):
+ return [ self ]
+
+ def __iter__(self):
+ """Iterate over the set members
+
+ Raises AttributeError for non-finite sets. This must be
+ declared for non-finite sets because scalar sets inherit from
+ IndexedComponent, which provides an iterator (over the
+ underlying indexing set).
+ """
+ raise TypeError(
+ "'%s' object is not iterable (non-finite Set '%s' "
+ "is not iterable)" % (self.__class__.__name__, self.name))
+
def __eq__(self, other):
if self is other:
return True
- try:
+ # Special case: non-finite range sets that only contain finite
+ # ranges (or no ranges). We will re-generate non-finite sets to
+ # make sure we get an accurate "finiteness" flag.
+ if hasattr(other, 'isfinite'):
other_isfinite = other.isfinite()
- except:
+ if not other_isfinite:
+ try:
+ other = RangeSet(ranges=list(other.ranges()))
+ other_isfinite = other.isfinite()
+ except TypeError:
+ pass
+ elif hasattr(other, '__contains__'):
# we assume that everything that does not implement
# isfinite() is a discrete set.
other_isfinite = True
@@ -321,6 +537,13 @@ def __eq__(self, other):
other = set(other)
except:
pass
+ else:
+ return False
+ if not self.isfinite():
+ try:
+ self = RangeSet(ranges=list(self.ranges()))
+ except TypeError:
+ pass
if self.isfinite():
if not other_isfinite:
return False
@@ -346,6 +569,11 @@ def dimen(self):
raise DeveloperError("Derived set class (%s) failed to "
"implement dimen" % (type(self).__name__,))
+ @property
+ def domain(self):
+ raise DeveloperError("Derived set class (%s) failed to "
+ "implement domain" % (type(self).__name__,))
+
def ranges(self):
raise DeveloperError("Derived set class (%s) failed to "
"implement ranges" % (type(self).__name__,))
@@ -375,6 +603,12 @@ def bounds(self):
break
else:
ub = max(ub, _ub)
+ if lb is not None:
+ if int(lb) == lb:
+ lb = int(lb)
+ if ub is not None:
+ if int(ub) == ub:
+ ub = int(ub)
return lb, ub
def get_interval(self):
@@ -400,7 +634,7 @@ def _get_discrete_interval(self):
# Note: I'd like to use set() for ranges, since we will be
# randomly removing elelments from the list; however, since we
# do it by enumerating over ranges, using set() would make this
- # routine nondeterministic. Not a hoge issue for the result,
+ # routine nondeterministic. Not a huge issue for the result,
# but problemmatic for code coverage.
ranges = list(self.ranges())
try:
@@ -569,16 +803,50 @@ def _get_continuous_interval(self):
return (interval.start, interval.end, interval.step)
@property
- @deprecated("The 'virtual' flag is no longer supported", version='TBD')
+ @deprecated("The 'virtual' attribute is no longer supported", version='TBD')
def virtual(self):
- return False
+ return isinstance(self, (_AnySet, SetOperator, _InfiniteRangeSetData))
+
+ @virtual.setter
+ def virtual(self, value):
+ if value != self.virtual:
+ raise ValueError(
+ "Attempting to set the (deprecated) 'virtual' attribute on %s "
+ "to an invalid value (%s)" % (self.name, value))
@property
- @deprecated("The 'concrete' flag is no longer supported. "
+ @deprecated("The 'concrete' attribute is no longer supported. "
"Use isdiscrete() or isfinite()", version='TBD')
def concrete(self):
return self.isfinite()
+ @concrete.setter
+ def concrete(self, value):
+ if value != self.concrete:
+ raise ValueError(
+ "Attempting to set the (deprecated) 'concrete' attribute on %s "
+ "to an invalid value (%s)" % (self.name, value))
+
+ @property
+ @deprecated("The 'ordered' attribute is no longer supported. "
+ "Use isordered()", version='TBD')
+ def ordered(self):
+ return self.isordered()
+
+ @property
+ @deprecated("'filter' is no longer a public attribute.",
+ version='TBD')
+ def filter(self):
+ return None
+
+ @deprecated("check_values() is deprecated: Sets only contain valid members",
+ version='TBD')
+ def check_values(self):
+ """
+ Verify that the values in this set are valid.
+ """
+ return True
+
def isdisjoint(self, other):
"""Test if this Set is disjoint from `other`
@@ -591,9 +859,9 @@ def isdisjoint(self, other):
-------
bool : True if this set is disjoint from `other`
"""
- try:
+ if hasattr(other, 'isfinite'):
other_isfinite = other.isfinite()
- except:
+ elif hasattr(other, '__contains__'):
# we assume that everything that does not implement
# isfinite() is a discrete set.
other_isfinite = True
@@ -603,6 +871,10 @@ def isdisjoint(self, other):
other = set(other)
except:
pass
+ else:
+ # Raise an exception consistent with Python's set.isdisjoint()
+ raise TypeError(
+ "'%s' object is not iterable" % (type(other).__name__,))
if self.isfinite():
for x in self:
if x in other:
@@ -628,9 +900,18 @@ def issubset(self, other):
-------
bool : True if this set is a subset of `other`
"""
- try:
+ # Special case: non-finite range sets that only contain finite
+ # ranges (or no ranges). We will re-generate non-finite sets to
+ # make sure we get an accurate "finiteness" flag.
+ if hasattr(other, 'isfinite'):
other_isfinite = other.isfinite()
- except:
+ if not other_isfinite:
+ try:
+ other = RangeSet(ranges=list(other.ranges()))
+ other_isfinite = other.isfinite()
+ except TypeError:
+ pass
+ elif hasattr(other, '__contains__'):
# we assume that everything that does not implement
# isfinite() is a discrete set.
other_isfinite = True
@@ -640,6 +921,15 @@ def issubset(self, other):
other = set(other)
except:
pass
+ else:
+ # Raise an exception consistent with Python's set.issubset()
+ raise TypeError(
+ "'%s' object is not iterable" % (type(other).__name__,))
+ if not self.isfinite():
+ try:
+ self = RangeSet(ranges=list(self.ranges()))
+ except TypeError:
+ pass
if self.isfinite():
for x in self:
if x not in other:
@@ -660,9 +950,29 @@ def issubset(self, other):
return True
def issuperset(self, other):
- try:
+ """Test if this Set is a superset of `other`
+
+ Parameters
+ ----------
+ other : ``Set`` or ``iterable``
+ The Set or iterable object to compare this Set against
+
+ Returns
+ -------
+ bool : True if this set is a superset of `other`
+ """
+ # Special case: non-finite range sets that only contain finite
+ # ranges (or no ranges). We will re-generate non-finite sets to
+ # make sure we get an accurate "finiteness" flag.
+ if hasattr(other, 'isfinite'):
other_isfinite = other.isfinite()
- except:
+ if not other_isfinite:
+ try:
+ other = RangeSet(ranges=list(other.ranges()))
+ other_isfinite = other.isfinite()
+ except TypeError:
+ pass
+ elif hasattr(other, '__contains__'):
# we assume that everything that does not implement
# isfinite() is a discrete set.
other_isfinite = True
@@ -672,6 +982,10 @@ def issuperset(self, other):
other = set(other)
except:
pass
+ else:
+ # Raise an exception consistent with Python's set.issuperset()
+ raise TypeError(
+ "'%s' object is not iterable" % (type(other).__name__,))
if other_isfinite:
for x in other:
# Other may contain elements that are not representable
@@ -683,7 +997,12 @@ def issuperset(self, other):
except TypeError:
return False
return True
- elif self.isfinite():
+ if not self.isfinite():
+ try:
+ self = RangeSet(ranges=list(self.ranges()))
+ except TypeError:
+ pass
+ if self.isfinite():
return False
else:
return other.issubset(self)
@@ -808,9 +1127,20 @@ def __len__(self):
raise DeveloperError("Derived finite set class (%s) failed to "
"implement __len__" % (type(self).__name__,))
- def __iter__(self):
+ def _iter_impl(self):
raise DeveloperError("Derived finite set class (%s) failed to "
- "implement __iter__" % (type(self).__name__,))
+ "implement _iter_impl" % (type(self).__name__,))
+
+ def __iter__(self):
+ """Iterate over the finite set
+
+ Note: derived classes should NOT reimplement this method, and
+ should instead overload _iter_impl. The expression template
+ system relies on being able to replace this method for all Sets
+ during template generation.
+
+ """
+ return self._iter_impl()
def __reversed__(self):
return reversed(self.data())
@@ -826,6 +1156,19 @@ def isfinite(self):
def data(self):
return tuple(self)
+ @property
+ @deprecated("The 'value' attribute is deprecated. Use .data() to "
+ "retrieve the values in a finite set.", version='TBD')
+ def value(self):
+ return set(self)
+
+ @property
+ @deprecated("The 'value_list' attribute is deprecated. Use "
+ ".ordered_data() to retrieve the values from a finite set "
+ "in a deterministic order.", version='TBD')
+ def value_list(self):
+ return list(self.ordered_data())
+
def sorted_data(self):
return tuple(sorted_robust(self.data()))
@@ -880,7 +1223,7 @@ def __init__(self, component):
# storage
if not hasattr(self, '_values'):
self._values = set()
- self._domain = None
+ self._domain = Any
self._validate = None
self._filter = None
self._dimen = UnknownSetDimen
@@ -897,7 +1240,7 @@ def __getstate__(self):
# Note: because none of the slots on this class need to be edited,
# we don't need to implement a specialized __setstate__ method.
- def __contains__(self, value):
+ def get(self, value, default=None):
"""
Return True if the set contains a given value.
@@ -906,9 +1249,11 @@ def __contains__(self, value):
if normalize_index.flatten:
value = normalize_index(value)
- return value in self._values
+ if value in self._values:
+ return value
+ return default
- def __iter__(self):
+ def _iter_impl(self):
return iter(self._values)
def __len__(self):
@@ -926,74 +1271,93 @@ def __str__(self):
@property
def dimen(self):
+ if self._dimen is UnknownSetDimen:
+ # Special case: abstract Sets with constant dimen
+ # initializers have a known dimen before construction
+ _comp = self.parent_component()
+ if not _comp._constructed and _comp._init_dimen.constant():
+ return _comp._init_dimen.val
return self._dimen
- def add(self, value):
- if normalize_index.flatten:
- _value = normalize_index(value)
- if _value.__class__ is tuple:
- _d = len(_value)
- else:
- _d = 1
- else:
- # If we are not normalizing indices, then we cannot reliably
- # infer the set dimen
- _value = value
- _d = None
-
- if _value not in self._domain:
- raise ValueError("Cannot add value %s to Set %s.\n"
- "\tThe value is not in the domain %s"
- % (value, self.name, self._domain))
-
- # We wrap this check in a try-except because some values (like lists)
- # are not hashable and can raise exceptions.
- try:
- if _value in self:
- logger.warning(
- "Element %s already exists in Set %s; no action taken"
- % (value, self.name))
- return False
- except:
- exc = sys.exc_info()
- raise TypeError("Unable to insert '%s' into Set %s:\n\t%s: %s"
- % (value, self.name, exc[0].__name__, exc[1]))
+ @property
+ def domain(self):
+ return self._domain
- if self._filter is not None:
- if not self._filter(self, _value):
- return False
+ @property
+ @deprecated("'filter' is no longer a public attribute.",
+ version='TBD')
+ def filter(self):
+ return self._filter
+
+ def add(self, *values):
+ count = 0
+ _block = self.parent_block()
+ for value in values:
+ if normalize_index.flatten:
+ _value = normalize_index(value)
+ if _value.__class__ is tuple:
+ _d = len(_value)
+ else:
+ _d = 1
+ else:
+ # If we are not normalizing indices, then we cannot reliably
+ # infer the set dimen
+ _value = value
+ _d = None
+ if _value not in self._domain:
+ raise ValueError("Cannot add value %s to Set %s.\n"
+ "\tThe value is not in the domain %s"
+ % (value, self.name, self._domain))
- if self._validate is not None:
+ # We wrap this check in a try-except because some values
+ # (like lists) are not hashable and can raise exceptions.
try:
- flag = self._validate(self, _value)
+ if _value in self:
+ logger.warning(
+ "Element %s already exists in Set %s; no action taken"
+ % (value, self.name))
+ continue
except:
- logger.error(
- "Exception raised while validating element '%s' for Set %s"
- % (value, self.name))
- raise
- if not flag:
- raise ValueError(
- "The value=%s violates the validation rule of Set %s"
- % (value, self.name))
-
- # If the Set has a fixed dimension, check that this element is
- # compatible.
- if self._dimen is not None:
- if _d != self._dimen:
- if self._dimen is UnknownSetDimen:
- # The first thing added to a Set with unknown
- # dimension sets its dimension
- self._dimen = _d
- else:
+ exc = sys.exc_info()
+ raise TypeError("Unable to insert '%s' into Set %s:\n\t%s: %s"
+ % (value, self.name, exc[0].__name__, exc[1]))
+
+ if self._filter is not None:
+ if not self._filter(_block, _value):
+ continue
+
+ if self._validate is not None:
+ try:
+ flag = self._validate(_block, _value)
+ except:
+ logger.error(
+ "Exception raised while validating element '%s' "
+ "for Set %s" % (value, self.name))
+ raise
+ if not flag:
raise ValueError(
- "The value=%s has dimension %s and is not valid for "
- "Set %s which has dimen=%s"
- % (value, _d, self.name, self._dimen))
+ "The value=%s violates the validation rule of Set %s"
+ % (value, self.name))
+
+ # If the Set has a fixed dimension, check that this element is
+ # compatible.
+ if self._dimen is not None:
+ if _d != self._dimen:
+ if self._dimen is UnknownSetDimen:
+ # The first thing added to a Set with unknown
+ # dimension sets its dimension
+ self._dimen = _d
+ else:
+ raise ValueError(
+ "The value=%s has dimension %s and is not "
+ "valid for Set %s which has dimen=%s"
+ % (value, _d, self.name, self._dimen))
- # Add the value to this object (this last redirection allows
- # derived classes to implement a different storage mmechanism)
- self._add_impl(_value)
- return True
+ # Add the value to this object (this last redirection allows
+ # derived classes to implement a different storage mechanism)
+ self._add_impl(_value)
+ count += 1
+ return count
def _add_impl(self, value):
self._values.add(value)
@@ -1165,7 +1529,7 @@ def __getstate__(self):
# Note: because none of the slots on this class need to be edited,
# we don't need to implement a specialized __setstate__ method.
- def __iter__(self):
+ def _iter_impl(self):
"""
Return an iterator for the set.
"""
@@ -1256,7 +1620,7 @@ class _InsertionOrderSetData(_OrderedSetData):
__slots__ = ()
def set_value(self, val):
- if type(val) in self._UnorderedInitializers:
+ if type(val) in Set._UnorderedInitializers:
logger.warning(
"Calling set_value() on an insertion order Set with "
"a fundamentally unordered data source (type: %s). "
@@ -1265,7 +1629,7 @@ def set_value(self, val):
super(_InsertionOrderSetData, self).set_value(val)
def update(self, values):
- if type(values) in self._UnorderedInitializers:
+ if type(values) in Set._UnorderedInitializers:
logger.warning(
"Calling update() on an insertion order Set with "
"a fundamentally unordered data source (type: %s). "
@@ -1308,13 +1672,13 @@ def __getstate__(self):
# Note: because none of the slots on this class need to be edited,
# we don't need to implement a specialized __setstate__ method.
- def __iter__(self):
+ def _iter_impl(self):
"""
Return an iterator for the set.
"""
if not self._is_sorted:
self._sort()
- return super(_SortedSetData, self).__iter__()
+ return super(_SortedSetData, self)._iter_impl()
def __reversed__(self):
if not self._is_sorted:
@@ -1373,7 +1737,7 @@ def _sort(self):
_SET_API = (
('__contains__', 'test membership in'),
- 'ranges', 'bounds',
+ 'get', 'ranges', 'bounds',
)
_FINITESET_API = _SET_API + (
('__iter__', 'iterate over'),
@@ -1386,9 +1750,11 @@ def _sort(self):
'set_value', 'add', 'remove', 'discard', 'clear', 'update', 'pop',
)
+
+@ModelComponentFactory.register(
+ "Set data that is used to define a model instance.")
class Set(IndexedComponent):
- """
- A component used to index other Pyomo components.
+ """A component used to index other Pyomo components.
This class provides a Pyomo component that is API-compatible with
Python `set` objects, with additional features, including:
@@ -1413,8 +1779,9 @@ class Set(IndexedComponent):
constructed. Values passed to `initialize` may be
overridden by `data` passed to the :py:meth:`construct`
method.
- dimen : initializer(int)
- Specify the Set's arity, or None if no arity is enforced
+ dimen : initializer(int), optional
+ Specify the Set's arity (the required tuple length for all
+ members of the Set), or None if no arity is enforced
ordered : bool or Set.InsertionOrder or Set.SortedOrder or function
Specifies whether the set is ordered. Possible values are:
False Unordered
@@ -1429,8 +1796,8 @@ class Set(IndexedComponent):
A set that defines the valid values that can be contained
in this set
bounds : initializer(tuple), optional
- A 2-tuple that specifies the lower and upper bounds for
- valid Set values
+ A tuple that specifies the bounds for valid Set values
+ (accepts 1-, 2-, or 3-tuple RangeSet arguments)
filter : initializer(rule), optional
A rule for determining membership in this set. This has the
functional form:
@@ -1455,6 +1822,7 @@ class Set(IndexedComponent):
valid set values. If more than one is specified, Set values
will be restricted to the intersection of `domain`, `within`,
and `bounds`.
+
"""
class End(object): pass
@@ -1546,15 +1914,14 @@ def __init__(self, *args, **kwds):
self._init_domain.intersect(SetInitializer(_within))
_bounds = kwds.pop('bounds', None)
if _bounds is not None:
- self._init_domain.intersect(RangeSetInitializer(
- _bounds, default_step=0))
+ self._init_domain.intersect(BoundsInitializer(_bounds))
self._init_dimen = Initializer(
kwds.pop('dimen', UnknownSetDimen),
arg_not_specified=UnknownSetDimen)
- self._init_values = Initializer(
- kwds.pop('initialize', ()),
- treat_sequences_as_mappings=False, allow_generators=True)
+ self._init_values = TuplizeValuesInitializer(Initializer(
+ kwds.pop('initialize', None),
+ treat_sequences_as_mappings=False, allow_generators=True))
self._init_validate = Initializer(kwds.pop('validate', None))
self._init_filter = Initializer(kwds.pop('filter', None))
@@ -1569,8 +1936,27 @@ def __init__(self, *args, **kwds):
# HACK to make the "counted call" syntax work. We wait until
# after the base class is set up so that is_indexed() is
# reliable.
- if self._init_values.__class__ is IndexedCallInitializer:
- self._init_values = CountedCallInitializer(self, self._init_values)
+ if self._init_values is not None \
+ and self._init_values._init.__class__ is IndexedCallInitializer:
+ self._init_values._init = CountedCallInitializer(
+ self, self._init_values._init)
+ # HACK: the DAT parser needs to know the domain of a set in
+ # order to correctly parse the data stream.
+ if not self.is_indexed():
+ if self._init_domain.constant():
+ self._domain = self._init_domain(self.parent_block(), None)
+ if self._init_dimen.constant():
+ self._dimen = self._init_dimen(self.parent_block(), None)
+
+
+ @deprecated("check_values() is deprecated: Sets only contain valid members",
+ version='TBD')
+ def check_values(self):
+ """
+ Verify that the values in this set are valid.
+ """
+ return True
+
def construct(self, data=None):
if self._constructed:
@@ -1583,15 +1969,22 @@ def construct(self, data=None):
if data is not None:
# Data supplied to construct() should override data provided
# to the constructor
- tmp_init, self._init_values = self._init_values, Initializer(
- data, treat_sequences_as_mappings=False)
+ tmp_init, self._init_values \
+ = self._init_values, TuplizeValuesInitializer(
+ Initializer(data, treat_sequences_as_mappings=False))
try:
- if type(self._init_values) is ItemInitializer:
- for index in iterkeys(self._init_values._dict):
- # The index is coming in externally; we need to
- # validate it
+ if self._init_values is None:
+ if not self.is_indexed():
+ # This ensures backwards compatibility by causing all
+ # scalar sets (including set operators) to be
+ # initialized (and potentially empty) after construct().
+ self._getitem_when_not_present(None)
+ elif self._init_values.contains_indices():
+ # The index is coming in externally; we need to validate it
+ for index in self._init_values.indices():
IndexedComponent.__getitem__(self, index)
else:
+ # Bypass the index validation and create the member directly
for index in self.index_set():
self._getitem_when_not_present(index)
finally:
@@ -1606,35 +1999,58 @@ def construct(self, data=None):
#
def _getitem_when_not_present(self, index):
"""Returns the default component data value."""
+ # Because we allow sets within an IndexedSet to have different
+ # dimen, we have moved the tuplization logic from PyomoModel
+ # into Set (because we cannot know the dimen of a _SetData until
+ # we are actually constructing that index). This also means
+ # that we need to potentially communicate the dimen to the
+ # (wrapped) value initializer. So, we will get the dimen first,
+ # then get the values. Only then will we know that this index
+ # will actually be constructed (and not Skipped).
+ _block = self.parent_block()
+
+ #Note: _init_dimen and _init_domain are guaranteed to be non-None
+ _d = self._init_dimen(_block, index)
+ if ( not normalize_index.flatten and _d is not UnknownSetDimen
+ and _d is not None ):
+ logger.warning(
+ "Ignoring non-None dimen (%s) for set %s%s "
+ "(normalize_index.flatten is False, so dimen "
+ "verification is not available)." % (
+ _d, self.name,
+ ("[%s]" % (index,) if self.is_indexed() else "") ))
+ _d = None
+
+ domain = self._init_domain(_block, index)
+ if _d is UnknownSetDimen and domain is not None \
+ and domain.dimen is not None:
+ _d = domain.dimen
+
if self._init_values is not None:
- _values = self._init_values(self, index)
+ self._init_values._dimen = _d
+ try:
+ _values = self._init_values(_block, index)
+ except TuplizeError as e:
+ raise ValueError( str(e) % (
+ self._name, "[%s]" % index if self.is_indexed() else ""))
+
if _values is Set.Skip:
return
elif _values is None:
raise ValueError(
"Set rule or initializer returned None instead of Set.Skip")
-
if index is None and not self.is_indexed():
obj = self._data[index] = self
else:
obj = self._data[index] = self._ComponentDataClass(component=self)
- if self._init_dimen is not None:
- _d = self._init_dimen(self, index)
- if _d is not UnknownSetDimen and (not normalize_index.flatten) \
- and _d is not None:
- logger.warning(
- "Ignoring non-None dimen (%s) for set %s "
- "(normalize_index.flatten is False, so dimen "
- "verification is not available)." % (_d, obj.name))
- _d = None
+ if _d is not UnknownSetDimen:
obj._dimen = _d
- if self._init_domain is not None:
- obj._domain = self._init_domain(self, index)
- if isinstance(obj._domain, _SetOperator):
- obj._domain.construct()
+ if domain is not None:
+ obj._domain = domain
+ domain.parent_component().construct()
if self._init_validate is not None:
try:
- obj._validate = Initializer(self._init_validate(self, index))
+ obj._validate = Initializer(self._init_validate(_block, index))
if obj._validate.constant():
# _init_validate was the actual validate function; use it.
obj._validate = self._init_validate
@@ -1645,7 +2061,7 @@ def _getitem_when_not_present(self, index):
obj._validate = self._init_validate
if self._init_filter is not None:
try:
- _filter = Initializer(self._init_filter(self, index))
+ _filter = Initializer(self._init_filter(_block, index))
if _filter.constant():
# _init_filter was the actual filter function; use it.
_filter = self._init_filter
@@ -1659,20 +2075,31 @@ def _getitem_when_not_present(self, index):
if self._init_values is not None:
# _values was initialized above...
if obj.isordered() \
- and type(_values) in self._UnorderedInitializers:
+ and type(_values) in Set._UnorderedInitializers:
logger.warning(
- "Initializing an ordered Set with a fundamentally "
+ "Initializing ordered Set %s with a fundamentally "
"unordered data source (type: %s). This WILL potentially "
"lead to nondeterministic behavior in Pyomo"
- % (type(_values).__name__,))
+ % (self.name, type(_values).__name__,))
# Special case: set operations that are not first attached
# to the model must be constructed.
- if isinstance(_values, _SetOperator):
+ if isinstance(_values, SetOperator):
_values.construct()
- for val in _values:
+ try:
+ val_iter = iter(_values)
+ except TypeError:
+ logger.error(
+ "Initializer for Set %s%s returned non-iterable object "
+ "of type %s." % (
+ self.name,
+ ("[%s]" % (index,) if self.is_indexed() else ""),
+ _values if _values.__class__ is type
+ else type(_values).__name__ ))
+ raise
+ for val in val_iter:
if val is Set.End:
break
- if _filter is None or _filter(self, val):
+ if _filter is None or _filter(_block, val):
obj.add(val)
# We defer adding the filter until now so that add() doesn't
# call it a second time.
@@ -1701,7 +2128,7 @@ def _pprint_dimen(x):
@staticmethod
def _pprint_domain(x):
- if x._domain is x:
+ if x._domain is x and isinstance(x, SetOperator):
return x._expression_str()
else:
return x._domain
@@ -1773,11 +2200,19 @@ def __init__(self, **kwds):
class OrderedSimpleSet(_InsertionOrderSetData, Set):
def __init__(self, **kwds):
+ # In case someone inherits from us, we will provide a rational
+ # default for the "ordered" flag
+ kwds.setdefault('ordered', Set.InsertionOrder)
+
_InsertionOrderSetData.__init__(self, component=self)
Set.__init__(self, **kwds)
class SortedSimpleSet(_SortedSetData, Set):
def __init__(self, **kwds):
+ # In case someone inherits from us, we will provide a rational
+ # default for the "ordered" flag
+ kwds.setdefault('ordered', Set.SortedOrder)
+
_SortedSetData.__init__(self, component=self)
Set.__init__(self, **kwds)
@@ -1813,20 +2248,22 @@ def __init__(self, reference, **kwds):
Component.__init__(self, **kwds)
self._ref = reference
- def __contains__(self, value):
+ def get(self, value, default=None):
# Note that the efficiency of this depends on the reference object
#
# The bulk of single-value set members were stored as scalars.
# Check that first.
if value.__class__ is tuple and len(value) == 1:
if value[0] in self._ref:
- return True
- return value in self._ref
+ return value[0]
+ if value in self._ref:
+ return value
+ return default
def __len__(self):
return len(self._ref)
- def __iter__(self):
+ def _iter_impl(self):
return iter(self._ref)
def __str__(self):
@@ -1861,6 +2298,10 @@ def dimen(self):
return None
return ans
+ @property
+ def domain(self):
+ return self
+
def _pprint(self):
"""
Return data that will be printed for this component.
@@ -1903,6 +2344,7 @@ def ord(self, item):
############################################################################
+
class _InfiniteRangeSetData(_SetData):
"""Data class for a infinite set.
@@ -1930,14 +2372,16 @@ def __getstate__(self):
# Note: because none of the slots on this class need to be edited,
# we don't need to implement a specialized __setstate__ method.
- def __contains__(self, value):
+ def get(self, value, default=None):
# The bulk of single-value set members were stored as scalars.
# Check that first.
if value.__class__ is tuple and len(value) == 1:
v = value[0]
if any(v in r for r in self._ranges):
- return True
- return any(value in r for r in self._ranges)
+ return v
+ if any(value in r for r in self._ranges):
+ return value
+ return default
def isdiscrete(self):
"""Returns True if this set admits only discrete members"""
@@ -1947,6 +2391,13 @@ def isdiscrete(self):
def dimen(self):
return 1
+ @property
+ def domain(self):
+ return Reals
+
+ def clear(self):
+ self._ranges = ()
+
def ranges(self):
return iter(self._ranges)
@@ -1971,7 +2422,7 @@ def _range_gen(r):
i += 1
n = start + i*step
- def __iter__(self):
+ def _iter_impl(self):
# If there is only a single underlying range, then we will
# iterate over it
nIters = len(self._ranges) - 1
@@ -2049,16 +2500,71 @@ def ord(self, item):
"Cannot identify position of %s in Set %s: item not in Set"
% (item, self.name))
- # We must redefine ranges() and bounds() so that we get the
+ # We must redefine ranges(), bounds(), and domain so that we get the
# _InfiniteRangeSetData version and not the one from
# _FiniteSetMixin.
bounds = _InfiniteRangeSetData.bounds
ranges = _InfiniteRangeSetData.ranges
+ domain = _InfiniteRangeSetData.domain
+@ModelComponentFactory.register(
+ "A sequence of numeric values. RangeSet(start,end,step) is a sequence "
+ "starting a value 'start', and increasing in values by 'step' until a "
+ "value greater than or equal to 'end' is reached.")
class RangeSet(Component):
- """
- A set object that represents a set of numeric values
+ """A set object that represents a set of numeric values
+
+ `RangeSet` objects are based around `NumericRange` objects, which
+ include support for non-finite ranges (both continuous and
+ unbounded). Similarly, boutique ranges (like semi-continuous
+ domains) can be represented, e.g.:
+
+ ..code:
+ RangeSet(ranges=(NumericRange(0,0,0), NumericRange(1,100,0)))
+
+ The `RangeSet` object continues to support the notation for
+ specifying discrete ranges using "[first=1], last, [step=1]" values:
+
+ ..code:
+ RangeSet(3) # [1, 2, 3]
+ RangeSet(2,5) # [2, 3, 4, 5]
+ RangeSet(2,5,2) # [2, 4]
+ RangeSet(2.5,4,0.5) # [2.5, 3, 3.5, 4]
+
+ By implementing RangeSet using NumericRanges, the global Sets (like
+ `Reals`, `Integers`, `PositiveReals`, etc.) are trivial
+ instances of a RangeSet and support all Set operations.
+
+ Parameters
+ ----------
+ *args: int | float | None
+ The range defined by ([start=1], end, [step=1]). If only a
+ single positional parameter, `end` is supplied, then the
+ RangeSet will be the integers starting at 1 up through and
+ including end. Providing two positional arguments, `x` and `y`,
+ will result in a range starting at x up to and including y,
+ incrementing by 1. Providing a 3-tuple enables the
+ specification of a step other than 1.
+
+ finite: bool, optional
+ This sets if this range is finite (discrete and bounded) or infinite
+
+ ranges: iterable, optional
+ The list of range objects that compose this RangeSet
+
+ bounds: tuple, optional
+ The lower and upper bounds of values that are admissible in this
+ RangeSet
+
+ filter: function, optional
+ Function (rule) that returns True if the specified value is in
+ the RangeSet or False if it is not.
+
+ validate: function, optional
+ Data validation function (rule). The function will be called
+ for every data member of the set, and if it returns False, a
+ ValueError will be raised.
"""
@@ -2071,10 +2577,28 @@ def __new__(cls, *args, **kwds):
if 'ranges' in kwds:
if any(not r.isfinite() for r in kwds['ranges']):
finite = False
- if all(type(_) in native_types for _ in args):
- if None in args or (len(args) > 2 and args[2] == 0):
+ for i,_ in enumerate(args):
+ if type(_) not in native_types:
+ # Strange nosetest coverage issue: if the logic is
+ # negated and the continue is in the "else", that
+ # line is not caught as being covered.
+ if not isinstance(_, ComponentData) \
+ or not _.parent_component().is_constructed():
+ continue
+ else:
+ # "Peek" at constructed components to try and
+ # infer if this component will be Infinite
+ _ = value(_)
+ if i < 2:
+ if _ in {None, _inf, -_inf}:
+ finite = False
+ break
+ elif _ == 0 and args[0] is not args[1]:
finite = False
if finite is None:
+ # Assume "undetermined" RangeSets will be finite. If a
+ # user wants them to be infinite, they can always
+ # specify finite=False
finite = True
if finite:
@@ -2094,6 +2618,12 @@ def __init__(self, *args, **kwds):
args,
kwds.pop('ranges', ()),
)
+ self._init_validate = Initializer(kwds.pop('validate', None))
+ self._init_filter = Initializer(kwds.pop('filter', None))
+ self._init_bounds = kwds.pop('bounds', None)
+ if self._init_bounds is not None:
+ self._init_bounds = BoundsInitializer(self._init_bounds)
+
Component.__init__(self, **kwds)
# Shortcut: if all the relevant construction information is
# simple (hard-coded) values, then it is safe to go ahead and
@@ -2102,15 +2632,25 @@ def __init__(self, *args, **kwds):
# NOTE: We will need to revisit this if we ever allow passing
# data into the construct method (which would override the
# hard-coded values here).
- if all(type(_) in native_types for _ in args):
- self.construct()
+ try:
+ if all( type(_) in native_types
+ or _.parent_component().is_constructed()
+ for _ in args ):
+ self.construct()
+ except AttributeError:
+ pass
def __str__(self):
if self.parent_block() is not None:
return self.name
+ # Unconstructed floating components return their type
if not self._constructed:
return type(self).__name__
+ # Named, constructed components should return their name e.g., Reals
+ if type(self).__name__ != self._name:
+ return self.name
+ # Floating, unnamed constructed components return their ranges()
ans = ' | '.join(str(_) for _ in self.ranges())
if ' | ' in ans:
return "(" + ans + ")"
@@ -2150,7 +2690,7 @@ def construct(self, data=None):
# the old RangeSet implementation, where we did less
# validation of the RangeSet arguments, and allowed the
# creation of 0-length RangeSets
- if args[1] - args[0] != -1:
+ if None in args or args[1] - args[0] != -1:
args = (args[0],args[1],1)
if len(args) == 3:
@@ -2159,19 +2699,37 @@ def construct(self, data=None):
# the NumericRange object. We will just discretize this
# range (mostly for backwards compatability)
start, end, step = args
- if step and int(step) != step:
- if (end >= start) ^ (step > 0):
- raise ValueError(
- "RangeSet: start, end ordering incompatible with "
- "step direction (got [%s:%s:%s])" % (start,end,step))
- n = start
- i = 0
- while (step > 0 and n <= end) or (step < 0 and n >= end):
- ranges = ranges + (NumericRange(n,n,0),)
- i += 1
- n = start + step*i
+ if step:
+ if start is None:
+ start, end = end, start
+ step *= -1
+
+ if start is None:
+ # Backwards compatability: assume unbounded RangeSet
+ # is grounded at 0
+ ranges += ( NumericRange(0, None, step),
+ NumericRange(0, None, -step) )
+ elif int(step) != step:
+ if end is None:
+ raise ValueError(
+ "RangeSet does not support unbounded ranges "
+ "with a non-integer step (got [%s:%s:%s])"
+ % (start, end, step))
+ if (end >= start) ^ (step > 0):
+ raise ValueError(
+ "RangeSet: start, end ordering incompatible with "
+ "step direction (got [%s:%s:%s])"
+ % (start, end, step))
+ n = start
+ i = 0
+ while (step > 0 and n <= end) or (step < 0 and n >= end):
+ ranges += (NumericRange(n,n,0),)
+ i += 1
+ n = start + step*i
+ else:
+ ranges += (NumericRange(start, end, step),)
else:
- ranges = ranges + (NumericRange(*args),)
+ ranges += (NumericRange(*args),)
for r in ranges:
if not isinstance(r, NumericRange):
@@ -2185,10 +2743,100 @@ def construct(self, data=None):
"specify 'finite=False' when declaring the RangeSet"
% (r,))
+ _block = self.parent_block()
+ if self._init_bounds is not None:
+ bnds = self._init_bounds(_block, None)
+ tmp = []
+ for r in ranges:
+ tmp.extend(r.range_intersection(bnds.ranges()))
+ ranges = tuple(tmp)
+
self._ranges = ranges
+ if self._init_filter is not None:
+ if not self.isfinite():
+ raise ValueError(
+ "The 'filter' keyword argument is not valid for "
+ "non-finite RangeSet component (%s)" % (self.name,))
+
+ try:
+ _filter = Initializer(self._init_filter(_block, None))
+ if _filter.constant():
+ # _init_filter was the actual filter function; use it.
+ _filter = self._init_filter
+ except:
+ # We will assume any exceptions raised when getting the
+ # filter for this index indicate that the function
+ # should have been passed directly to the underlying sets.
+ _filter = self._init_filter
+
+ # If this is a finite set, then we can go ahead and filter
+ # all the ranges. This allows pprint and len to be correct,
+ # without special handling
+ new_ranges = []
+ old_ranges = list(self.ranges())
+ old_ranges.reverse()
+ while old_ranges:
+ r = old_ranges.pop()
+ for i,val in enumerate(_FiniteRangeSetData._range_gen(r)):
+ if not _filter(_block, val):
+ split_r = r.range_difference((NumericRange(val,val,0),))
+ if len(split_r) == 2:
+ new_ranges.append(split_r[0])
+ old_ranges.append(split_r[1])
+ elif len(split_r) == 1:
+ if i == 0:
+ old_ranges.append(split_r[0])
+ else:
+ new_ranges.append(split_r[0])
+ i = None
+ break
+ if i is not None:
+ new_ranges.append(r)
+ self._ranges = new_ranges
+
+ if self._init_validate is not None:
+ if not self.isfinite():
+ raise ValueError(
+ "The 'validate' keyword argument is not valid for "
+ "non-finite RangeSet component (%s)" % (self.name,))
+
+ try:
+ _validate = Initializer(self._init_validate(_block, None))
+ if _validate.constant():
+ # _init_validate was the actual validate function; use it.
+ _validate = self._init_validate
+ except:
+ # We will assume any exceptions raised when getting the
+ # validator for this index indicate that the function
+ # should have been passed directly to the underlying set.
+ _validate = self._init_validate
+
+ for val in self:
+ try:
+ flag = _validate(_block, val)
+ except:
+ logger.error(
+ "Exception raised while validating element '%s' "
+ "for Set %s" % (val, self.name))
+ raise
+ if not flag:
+ raise ValueError(
+ "The value=%s violates the validation rule of "
+ "Set %s" % (val, self.name))
+
timer.report()
+ #
+ # Until the time that we support indexed RangeSet objects, we will
+ # mock up some of the IndexedComponent API for consistency with the
+ # previous (<=5.6.7) implementation.
+ #
+ def dim(self):
+ return 0
+ def index_set(self):
+ return UnindexedComponent_set
+
def _pprint(self):
"""
@@ -2236,8 +2884,8 @@ class AbstractFiniteSimpleRangeSet(FiniteSimpleRangeSet):
# Set Operators
############################################################################
-class _SetOperator(_SetData, Set):
- __slots__ = ('_sets','_implicit_subsets')
+class SetOperator(_SetData, Set):
+ __slots__ = ('_sets',)
def __init__(self, *args, **kwds):
_SetData.__init__(self, component=self)
@@ -2251,13 +2899,17 @@ def __init__(self, *args, **kwds):
implicit.append(_new_set)
self._sets = tuple(sets)
self._implicit_subsets = tuple(implicit)
+ # We will implicitly construct all set operators if the operands
+ # are all constructed.
+ if all(_.parent_component()._constructed for _ in self._sets):
+ self.construct()
def __getstate__(self):
"""
This method must be defined because this class uses slots.
"""
- state = super(_SetOperator, self).__getstate__()
- for i in _SetOperator.__slots__:
+ state = super(SetOperator, self).__getstate__()
+ for i in SetOperator.__slots__:
state[i] = getattr(self, i)
return state
@@ -2269,8 +2921,27 @@ def construct(self, data=None):
logger.debug("Constructing SetOperator, name=%s, from data=%r"
% (self.name, data))
for s in self._sets:
- s.construct()
- super(_SetOperator, self).construct(data)
+ s.parent_component().construct()
+ super(SetOperator, self).construct()
+ if data:
+ deprecation_warning(
+ "Providing construction data to SetOperator objects is "
+ "deprecated. This data is ignored and in a future version "
+ "will not be allowed", version='TBD')
+ fail = len(data) > 1 or None not in data
+ if not fail:
+ _data = data[None]
+ if len(_data) != len(self):
+ fail = True
+ else:
+ for v in _data:
+ if v not in self:
+ fail = True
+ break
+ if fail:
+ raise ValueError(
+ "Constructing SetOperator %s with incompatible data "
+ "(data=%s}" % (self.name, data))
timer.report()
# Note: because none of the slots on this class need to be edited,
@@ -2301,11 +2972,45 @@ def __str__(self):
return self.name
return self._expression_str()
+ def __deepcopy__(self, memo):
+ # SetOperators form an expression system. As we allow operators
+ # on abstract Set objects, it is important to *always* deepcopy
+ # SetOperators that have not been assigned to a Block. For
+ # example, consider an abstract indexed model component whose
+ # domain is specified by a Set expression:
+ #
+ # def x_init(m,i):
+ # if i == 2:
+ # return Set.Skip
+ # else:
+ # return []
+ # m.x = Set( [1,2],
+ # domain={1: m.A*m.B, 2: m.A*m.A},
+ # initialize=x_init )
+ #
+ # We do not want to automatically add all the Set operators to
+ # the model at declaration time, as m.x[2] is never actually
+ # created. Plus, doing so would require complex parsing of the
+ # initializers. BUT, we need to ensure that the operators are
+ # deepcopied, otherwise when the model is cloned before
+ # construction the operators will still refer to the sets on the
+ # original abstract model (in particular, the Set x will have an
+ # unknown dimen).
+ #
+ # Our solution is to cause SetOperators to be automatically
+ # cloned if they haven't been assigned to a block.
+ if '__block_scope__' in memo:
+ if self.parent_block() is None:
+ # Hijack the block scope rules to cause this object to
+ # be deepcopied.
+ memo['__block_scope__'][id(self)] = True
+ return super(SetOperator, self).__deepcopy__(memo)
+
def _expression_str(self):
_args = []
for arg in self._sets:
arg_str = str(arg)
- if ' ' in arg_str and isinstance(arg, _SetOperator):
+ if ' ' in arg_str and isinstance(arg, SetOperator):
arg_str = "(" + arg_str + ")"
_args.append(arg_str)
return self._operator.join(_args)
@@ -2314,9 +3019,42 @@ def isdiscrete(self):
"""Returns True if this set admits only discrete members"""
return all(r.isdiscrete() for r in self.ranges())
+ def subsets(self, expand_all_set_operators=None):
+ if not isinstance(self, SetProduct):
+ if expand_all_set_operators is None:
+ logger.warning("""
+ Extracting subsets for Set %s, which is a SetOperator
+ other than a SetProduct. Returning this set and not
+ descending into the set operands. To descend into this
+ operator, specify
+ 'subsets(expand_all_set_operators=True)' or to suppress
+ this warning, specify
+ 'subsets(expand_all_set_operators=False)'""" % ( self.name, ))
+ yield self
+ return
+ elif not expand_all_set_operators:
+ yield self
+ return
+ for s in self._sets:
+ for ss in s.subsets(
+ expand_all_set_operators=expand_all_set_operators):
+ yield ss
+
+ @property
+ @deprecated("SetProduct.set_tuple is deprecated. "
+ "Use SetProduct.subsets() to get the operator arguments.",
+ version='TBD')
+ def set_tuple(self):
+ # Despite its name, in the old SetProduct, set_tuple held a list
+ return list(self.subsets())
+
+ @property
+ def domain(self):
+ return self._domain
+
@property
def _domain(self):
- # We hijack the _domain attribute of _SetOperator so that pprint
+ # We hijack the _domain attribute of SetOperator so that pprint
# prints out the expression as the Set's "domain". Doing this
# as a property prevents the circular reference
return self
@@ -2327,10 +3065,6 @@ def _domain(self, val):
raise ValueError(
"Setting the domain of a Set Operator is not allowed: %s" % val)
- @property
- @deprecated("The 'virtual' flag is no longer supported", version='TBD')
- def virtual(self):
- return True
@staticmethod
def _checkArgs(*sets):
@@ -2346,7 +3080,7 @@ def _checkArgs(*sets):
############################################################################
-class SetUnion(_SetOperator):
+class SetUnion(SetOperator):
__slots__ = tuple()
_operator = " | "
@@ -2355,7 +3089,7 @@ def __new__(cls, *args):
if cls != SetUnion:
return super(SetUnion, cls).__new__(cls)
- set0, set1 = _SetOperator._checkArgs(*args)
+ set0, set1 = SetOperator._checkArgs(*args)
if set0[0] and set1[0]:
cls = SetUnion_OrderedSet
elif set0[1] and set1[1]:
@@ -2384,14 +3118,19 @@ def dimen(self):
class SetUnion_InfiniteSet(SetUnion):
__slots__ = tuple()
- def __contains__(self, val):
- return any(val in s for s in self._sets)
+ def get(self, val, default=None):
+ #return any(val in s for s in self._sets)
+ for s in self._sets:
+ v = s.get(val, default)
+ if v is not default:
+ return v
+ return default
class SetUnion_FiniteSet(_FiniteSetMixin, SetUnion_InfiniteSet):
__slots__ = tuple()
- def __iter__(self):
+ def _iter_impl(self):
set0 = self._sets[0]
return itertools.chain(
set0,
@@ -2457,7 +3196,7 @@ def ord(self, item):
############################################################################
-class SetIntersection(_SetOperator):
+class SetIntersection(SetOperator):
__slots__ = tuple()
_operator = " & "
@@ -2466,7 +3205,7 @@ def __new__(cls, *args):
if cls != SetIntersection:
return super(SetIntersection, cls).__new__(cls)
- set0, set1 = _SetOperator._checkArgs(*args)
+ set0, set1 = SetOperator._checkArgs(*args)
if set0[0] or set1[0]:
cls = SetIntersection_OrderedSet
elif set0[1] or set1[1]:
@@ -2510,14 +3249,19 @@ def dimen(self):
class SetIntersection_InfiniteSet(SetIntersection):
__slots__ = tuple()
- def __contains__(self, val):
- return all(val in s for s in self._sets)
+ def get(self, val, default=None):
+ #return all(val in s for s in self._sets)
+ for s in self._sets:
+ v = s.get(val, default)
+ if v is default:
+ return default
+ return v
class SetIntersection_FiniteSet(_FiniteSetMixin, SetIntersection_InfiniteSet):
__slots__ = tuple()
- def __iter__(self):
+ def _iter_impl(self):
set0, set1 = self._sets
if not set0.isordered():
if set1.isordered():
@@ -2578,7 +3322,7 @@ def ord(self, item):
############################################################################
-class SetDifference(_SetOperator):
+class SetDifference(SetOperator):
__slots__ = tuple()
_operator = " - "
@@ -2587,7 +3331,7 @@ def __new__(cls, *args):
if cls != SetDifference:
return super(SetDifference, cls).__new__(cls)
- set0, set1 = _SetOperator._checkArgs(*args)
+ set0, set1 = SetOperator._checkArgs(*args)
if set0[0]:
cls = SetDifference_OrderedSet
elif set0[1]:
@@ -2608,14 +3352,21 @@ def dimen(self):
class SetDifference_InfiniteSet(SetDifference):
__slots__ = tuple()
- def __contains__(self, val):
- return val in self._sets[0] and not val in self._sets[1]
+ def get(self, val, default=None):
+ #return val in self._sets[0] and not val in self._sets[1]
+ v_l = self._sets[0].get(val, default)
+ if v_l is default:
+ return default
+ v_r = self._sets[1].get(val, default)
+ if v_r is default:
+ return v_l
+ return default
class SetDifference_FiniteSet(_FiniteSetMixin, SetDifference_InfiniteSet):
__slots__ = tuple()
- def __iter__(self):
+ def _iter_impl(self):
set0, set1 = self._sets
return (_ for _ in set0 if _ not in set1)
@@ -2661,7 +3412,7 @@ def ord(self, item):
############################################################################
-class SetSymmetricDifference(_SetOperator):
+class SetSymmetricDifference(SetOperator):
__slots__ = tuple()
_operator = " ^ "
@@ -2670,7 +3421,7 @@ def __new__(cls, *args):
if cls != SetSymmetricDifference:
return super(SetSymmetricDifference, cls).__new__(cls)
- set0, set1 = _SetOperator._checkArgs(*args)
+ set0, set1 = SetOperator._checkArgs(*args)
if set0[0] and set1[0]:
cls = SetSymmetricDifference_OrderedSet
elif set0[1] and set1[1]:
@@ -2704,15 +3455,22 @@ def dimen(self):
class SetSymmetricDifference_InfiniteSet(SetSymmetricDifference):
__slots__ = tuple()
- def __contains__(self, val):
- return (val in self._sets[0]) ^ (val in self._sets[1])
+ def get(self, val, default=None):
+ #return (val in self._sets[0]) ^ (val in self._sets[1])
+ v_l = self._sets[0].get(val, default)
+ v_r = self._sets[1].get(val, default)
+ if v_l is default:
+ return v_r
+ if v_r is default:
+ return v_l
+ return default
class SetSymmetricDifference_FiniteSet(_FiniteSetMixin,
SetSymmetricDifference_InfiniteSet):
__slots__ = tuple()
- def __iter__(self):
+ def _iter_impl(self):
set0, set1 = self._sets
return itertools.chain(
(_ for _ in set0 if _ not in set1),
@@ -2762,7 +3520,7 @@ def ord(self, item):
############################################################################
-class SetProduct(_SetOperator):
+class SetProduct(SetOperator):
__slots__ = tuple()
_operator = "*"
@@ -2771,7 +3529,7 @@ def __new__(cls, *args):
if cls != SetProduct:
return super(SetProduct, cls).__new__(cls)
- _sets = _SetOperator._checkArgs(*args)
+ _sets = SetOperator._checkArgs(*args)
if all(_[0] for _ in _sets):
cls = SetProduct_OrderedSet
elif all(_[1] for _ in _sets):
@@ -2780,24 +3538,14 @@ def __new__(cls, *args):
cls = SetProduct_InfiniteSet
return cls.__new__(cls)
- def flatten_cross_product(self):
- # This is recursive, but the chances of a deeply nested product
- # of Sets is exceptionally low.
- for s in self._sets:
- if isinstance(s, SetProduct):
- for ss in s.flatten_cross_product():
- yield ss
- else:
- yield s
-
def ranges(self):
yield RangeProduct(list(
- list(_.ranges()) for _ in self.flatten_cross_product()
+ list(_.ranges()) for _ in self.subsets(False)
))
def bounds(self):
- return ( tuple(_.bounds()[0] for _ in self.flatten_cross_product()),
- tuple(_.bounds()[1] for _ in self.flatten_cross_product()) )
+ return ( tuple(_.bounds()[0] for _ in self.subsets(False)),
+ tuple(_.bounds()[1] for _ in self.subsets(False)) )
@property
def dimen(self):
@@ -2818,12 +3566,30 @@ def dimen(self):
ans += s_dim
return UnknownSetDimen if _unknown else ans
+ def _flatten_product(self, val):
+ """Flatten any nested set product terms (due to nested products)
+
+ Note that because this is called in a recursive context, this
+ method is assured that there is no more than a single level of
+ nested tuples (so this only needs to check the top-level terms)
+
+ """
+ for i in xrange(len(val)-1, -1, -1):
+ if val[i].__class__ is tuple:
+ val = val[:i] + val[i] + val[i+1:]
+ return val
class SetProduct_InfiniteSet(SetProduct):
__slots__ = tuple()
- def __contains__(self, val):
- return self._find_val(val) is not None
+ def get(self, val, default=None):
+ #return self._find_val(val) is not None
+ v = self._find_val(val)
+ if v is None:
+ return default
+ if normalize_index.flatten:
+ return self._flatten_product(v[0])
+ return v[0]
def _find_val(self, val):
"""Locate a value in this SetProduct
@@ -2863,6 +3629,12 @@ def _find_val(self, val):
# Get the dimentionality of all the component sets
setDims = list(s.dimen for s in self._sets)
+
+ # For this search, if a subset has an unknown dimension, assume
+ # it is "None".
+ for i,d in enumerate(setDims):
+ if d is UnknownSetDimen:
+ setDims[i] = None
# Find the starting index for each subset (based on dimentionality)
index = [None]*len(setDims)
lastIndex = 0
@@ -2884,7 +3656,10 @@ def _find_val(self, val):
# If there were no non-dimentioned sets, then we have checked
# each subset, found a match, and can reach a verdict:
if None not in setDims:
- return val, index
+ if lastIndex == v_len:
+ return val, index
+ else:
+ return None
# If a subset is non-dimentioned, then we will have broken out
# of the forward loop early. Start at the end and work
@@ -2968,13 +3743,13 @@ def _cutPointGenerator(subsets, val_len):
class SetProduct_FiniteSet(_FiniteSetMixin, SetProduct_InfiniteSet):
__slots__ = tuple()
- def __iter__(self):
+ def _iter_impl(self):
_iter = itertools.product(*self._sets)
# Note: if all the member sets are simple 1-d sets, then there
- # is no need to call flatten_tuple.
+ # is no need to call flatten_product.
if FLATTEN_CROSS_PRODUCT and normalize_index.flatten \
and self.dimen != len(self._sets):
- return (flatten_tuple(_) for _ in _iter)
+ return (self._flatten_product(_) for _ in _iter)
return _iter
def __len__(self):
@@ -2983,7 +3758,7 @@ def __len__(self):
"""
ans = 1
for s in self._sets:
- ans *= max(1, len(s))
+ ans *= max(0, len(s))
return ans
@@ -3002,7 +3777,7 @@ def __getitem__(self, index):
ans = tuple(s[i+1] for s,i in zip(self._sets, _ord))
if FLATTEN_CROSS_PRODUCT and normalize_index.flatten \
and self.dimen != len(ans):
- return flatten_tuple(ans)
+ return self._flatten_product(ans)
return ans
def ord(self, item):
@@ -3036,11 +3811,17 @@ def ord(self, item):
class _AnySet(_SetData, Set):
def __init__(self, **kwds):
_SetData.__init__(self, component=self)
+ # There is a chicken-and-egg game here: the SetInitializer uses
+ # Any as part of the processing of the domain/within/bounds
+ # domain restrictions. However, Any has not been declared when
+ # constructing Any, so we need to bypass that logic. This
+ # works, but requires us to declare a special domain setter to
+ # accept (and ignore) this value.
kwds.setdefault('domain', self)
Set.__init__(self, **kwds)
- def __contains__(self, val):
- return True
+ def get(self, val, default=None):
+ return val
def ranges(self):
yield AnyRange()
@@ -3048,12 +3829,74 @@ def ranges(self):
def bounds(self):
return (None, None)
+ # We need to implement this to override the clear() from IndexedComponent
+ def clear(self):
+ return
+
+ # We need to implement this to override __len__ from IndexedComponent
+ def __len__(self):
+ raise TypeError("object of type 'Any' has no len()")
+
@property
def dimen(self):
return None
+ @property
+ def domain(self):
+ return Any
+
+ def __str__(self):
+ if self.parent_block() is not None:
+ return self.name
+ return type(self).__name__
+
+
+class _AnyWithNoneSet(_AnySet):
+ # Note that we put the deprecation warning on contains() and not on
+ # the class because we will always create a global instance for
+ # backwards compatability with the Book.
+ @deprecated("The AnyWithNone set is deprecated. "
+ "Use Any, which includes None", version='TBD')
+ def get(self, val, default=None):
+ return super(_AnyWithNoneSet, self).get(val, default)
+
-def DeclareGlobalSet(obj):
+class _EmptySet(_FiniteSetMixin, _SetData, Set):
+ def __init__(self, **kwds):
+ _SetData.__init__(self, component=self)
+ Set.__init__(self, **kwds)
+
+ def get(self, val, default=None):
+ return default
+
+ # We need to implement this to override clear from IndexedComponent
+ def clear(self):
+ pass
+
+ # We need to implement this to override __len__ from IndexedComponent
+ def __len__(self):
+ return 0
+
+ def _iter_impl(self):
+ return iter(tuple())
+
+ @property
+ def dimen(self):
+ return 0
+
+ @property
+ def domain(self):
+ return EmptySet
+
+ def __str__(self):
+ if self.parent_block() is not None:
+ return self.name
+ return type(self).__name__
+
+
+############################################################################
+
+def DeclareGlobalSet(obj, caller_globals=None):
"""Declare a copy of a set as a global set in the calling module
This takes a Set object and declares a duplicate of it as a
@@ -3067,7 +3910,33 @@ def DeclareGlobalSet(obj):
"""
obj.construct()
- class GlobalSet(obj.__class__):
+ assert obj.parent_component() is obj
+ assert obj.parent_block() is None
+
+ # Build the global set before registering its name so that we don't
+ # run afoul of the logic in GlobalSet.__new__
+ _name = obj.local_name
+ if _name in GlobalSets and obj is not GlobalSets[_name]:
+ raise RuntimeError("Duplicate Global Set declaration, %s"
+ % (_name,))
+
+ # Push this object into the caller's module namespace
+ # Stack: 0: DeclareGlobalSet()
+ # 1: the caller
+ if caller_globals is None:
+ caller_globals = inspect.currentframe().f_back.f_globals
+ if _name in caller_globals and obj is not caller_globals[_name]:
+ raise RuntimeError("Refusing to overwrite global object, %s"
+ % (_name,))
+
+ if _name in GlobalSets:
+ _set = caller_globals[_name] = GlobalSets[_name]
+ return _set
+
+ # Handle duplicate registrations before defining the GlobalSet
+ # object to avoid inconsistent MRO order.
+
+ class GlobalSet(GlobalSetBase, obj.__class__):
__doc__ = """%s
References to this object will not be duplicated by deepcopy
@@ -3075,95 +3944,178 @@ class GlobalSet(obj.__class__):
""" % (obj.doc,)
# Note: a simple docstring does not appear to be picked up (at
- # least in Python 2.7, so we will explicitly set the __doc__
+ # least in Python 2.7), so we will explicitly set the __doc__
# attribute.
__slots__ = ()
- def __init__(self, _obj):
- _obj.__class__.__setstate__(self, _obj.__getstate__())
- self._component = weakref.ref(self)
- self.construct()
- assert _obj.parent_component() is _obj
- assert _obj.parent_block() is None
- caller_globals = inspect.stack()[1][0].f_globals
- assert self.local_name not in caller_globals
- caller_globals[self.local_name] = self
-
- def __reduce__(self):
- # Cause pickle to preserve references to this object
- return self.name
-
- def __deepcopy__(self, memo):
- # Prevent deepcopy from duplicating this object
- return self
+ global_name = None
+
+ def __new__(cls, **kwds):
+ """Hijack __new__ to mock up old RealSet el al. interface
+
+ In the original Set implementation (Pyomo<=5.6.7), the
+ global sets were instances of their own virtual set classes
+ (RealSet, IntegerSet, BooleanSet), and one could create new
+ instances of those sets with modified bounds. Since the
+ GlobalSet mechanism also declares new classes for every
+ GlobalSet, we can mock up the old behavior through how we
+ handle __new__().
+ """
+ if cls is GlobalSet and GlobalSet.global_name \
+ and issubclass(GlobalSet, RangeSet):
+ base_set = GlobalSets[GlobalSet.global_name]
+ bounds = kwds.pop('bounds', None)
+ range_init = SetInitializer(base_set)
+ if bounds is not None:
+ range_init.intersect(BoundsInitializer(bounds))
+ name = name_kwd = kwds.pop('name', None)
+ cls_name = kwds.pop('class_name', None)
+ if name is None:
+ if cls_name is None:
+ name = base_set.name
+ else:
+ name = cls_name
+ ans = RangeSet( ranges=list(range_init(None, None).ranges()),
+ name=name )
+ if name_kwd is None and (
+ cls_name is not None or bounds is not None):
+ ans._name += str(ans.bounds())
+ else:
+ ans = super(GlobalSet, cls).__new__(cls, **kwds)
+ if kwds:
+ raise RuntimeError("Unexpected keyword arguments: %s" % (kwds,))
+ return ans
- def __str__(self):
- # Override str() to always print out the global set name
- return self.name
+ _set = GlobalSet()
+ # TODO: Can GlobalSets be a proper Block?
+ GlobalSets[_name] = caller_globals[_name] = _set
+ GlobalSet.global_name = _name
- return GlobalSet(obj)
+ _set.__class__.__setstate__(_set, obj.__getstate__())
+ _set._component = weakref.ref(_set)
+ _set.construct()
+ return _set
DeclareGlobalSet(_AnySet(
name='Any',
doc="A global Pyomo Set that admits any value",
-))
+), globals())
+DeclareGlobalSet(_AnyWithNoneSet(
+ name='AnyWithNone',
+ doc="A global Pyomo Set that admits any value",
+), globals())
+DeclareGlobalSet(_EmptySet(
+ name='EmptySet',
+ doc="A global Pyomo Set that contains no members",
+), globals())
DeclareGlobalSet(RangeSet(
name='Reals',
doc='A global Pyomo Set that admits any real (floating point) value',
ranges=(NumericRange(None,None,0),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NonNegativeReals',
doc='A global Pyomo Set admitting any real value in [0, +inf]',
ranges=(NumericRange(0,None,0),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NonPositiveReals',
doc='A global Pyomo Set admitting any real value in [-inf, 0]',
ranges=(NumericRange(None,0,0),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NegativeReals',
doc='A global Pyomo Set admitting any real value in [-inf, 0)',
ranges=(NumericRange(None,0,0,(True,False)),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='PositiveReals',
doc='A global Pyomo Set admitting any real value in (0, +inf]',
ranges=(NumericRange(0,None,0,(False,True)),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='Integers',
doc='A global Pyomo Set admitting any integer value',
ranges=(NumericRange(0,None,1), NumericRange(0,None,-1)),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NonNegativeIntegers',
doc='A global Pyomo Set admitting any integer value in [0, +inf]',
ranges=(NumericRange(0,None,1),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NonPositiveIntegers',
doc='A global Pyomo Set admitting any integer value in [-inf, 0]',
ranges=(NumericRange(0,None,-1),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='NegativeIntegers',
doc='A global Pyomo Set admitting any integer value in [-inf, -1]',
ranges=(NumericRange(-1,None,-1),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='PositiveIntegers',
doc='A global Pyomo Set admitting any integer value in [1, +inf]',
ranges=(NumericRange(1,None,1),),
-))
+), globals())
DeclareGlobalSet(RangeSet(
name='Binary',
doc='A global Pyomo Set admitting the integers {0, 1}',
ranges=(NumericRange(0,1,1),),
-))
+), globals())
+
+#TODO: Convert Boolean from an alias for Binary to a proper Boolean Set
+# admitting {True, False})
+DeclareGlobalSet(RangeSet(
+ name='Boolean',
+ doc='A global Pyomo Set admitting the integers {0, 1}',
+ ranges=(NumericRange(0,1,1),),
+), globals())
+
+DeclareGlobalSet(RangeSet(
+ name='PercentFraction',
+ doc='A global Pyomo Set admitting any real value in [0, 1]',
+ ranges=(NumericRange(0,1,0),),
+), globals())
+DeclareGlobalSet(RangeSet(
+ name='UnitInterval',
+ doc='A global Pyomo Set admitting any real value in [0, 1]',
+ ranges=(NumericRange(0,1,0),),
+), globals())
+
+# DeclareGlobalSet(Set(
+# initialize=[None],
+# name='UnindexedComponent_set',
+# doc='A global Pyomo Set for unindexed (scalar) IndexedComponent objects',
+# ), globals())
+
+
+RealSet = Reals.__class__
+IntegerSet = Integers.__class__
+BinarySet = Binary.__class__
+BooleanSet = Boolean.__class__
+
+
+#
+# Backwards compatibility: declare the RealInterval and IntegerInterval
+# classes (leveraging the new global RangeSet objects)
+#
+
+class RealInterval(RealSet):
+ @deprecated("RealInterval has been deprecated. Please use "
+ "RangeSet(lower, upper, 0)", version='TBD')
+ def __new__(cls, **kwds):
+ kwds.setdefault('class_name', 'RealInterval')
+ return super(RealInterval, cls).__new__(RealSet, **kwds)
+
+class IntegerInterval(IntegerSet):
+ @deprecated("IntegerInterval has been deprecated. Please use "
+ "RangeSet(lower, upper, 1)", version='TBD')
+ def __new__(cls, **kwds):
+ kwds.setdefault('class_name', 'IntegerInterval')
+ return super(IntegerInterval, cls).__new__(IntegerSet, **kwds)
diff --git a/pyomo/core/base/set_types.py b/pyomo/core/base/set_types.py
index b2a8a26a3b5..aa2a19f458d 100644
--- a/pyomo/core/base/set_types.py
+++ b/pyomo/core/base/set_types.py
@@ -8,51 +8,12 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from pyomo.core.kernel.set_types import *
-from pyomo.core.kernel.set_types import (_VirtualSet,
- _virtual_sets)
-from pyomo.core.base.sets import SimpleSet
+from pyomo.core.base.set import (
+ Reals, PositiveReals, NonPositiveReals, NegativeReals, NonNegativeReals,
+ Integers, PositiveIntegers, NonPositiveIntegers,
+ NegativeIntegers, NonNegativeIntegers,
+ Boolean, Binary,
+ Any, AnyWithNone, EmptySet, UnitInterval, PercentFraction,
+ RealInterval, IntegerInterval,
+)
-# we probably do not need _VirtualSet as a base class in this case
-class _AMLVirtualSet(SimpleSet, _VirtualSet):
- def __init__(self, *args, **kwds):
- SimpleSet.__init__(self, *args, **kwds)
- self.virtual=True
- self.concrete=False
-
- def data(self):
- raise TypeError("Cannot access data for a virtual set")
-
-class _AnySet(_AMLVirtualSet):
- """A virtual set that allows any value"""
-
- def __init__(self,*args,**kwds):
- """Constructor"""
- _AMLVirtualSet.__init__(self,*args,**kwds)
-
- def __contains__(self, element):
- return True
-
-class _EmptySet(_AMLVirtualSet):
- """A virtual set that allows no values"""
-
- def __init__(self,*args,**kwds):
- """Constructor"""
- _AMLVirtualSet.__init__(self,*args,**kwds)
-
- def __contains__(self, element):
- return False
-
-class _AnySetWithNone(_AnySet):
- """A virtual set that allows any value (including None)"""
-
- def __contains__(self, element):
- logger.warning("DEPRECATION WARNING: Use the Any set instead of AnyWithNone")
- return True
-
-#
-# Concrete instances of the standard sets
-#
-Any=_AnySet(name="Any", doc="A set of any data")
-EmptySet=_EmptySet(name="EmptySet", doc="A set of no data")
-AnyWithNone=_AnySetWithNone(name="AnyWithNone", doc="A set of any data (including None)")
diff --git a/pyomo/core/base/sets.py b/pyomo/core/base/sets.py
index fa0c79802e3..42c2d11e1c2 100644
--- a/pyomo/core/base/sets.py
+++ b/pyomo/core/base/sets.py
@@ -14,1831 +14,13 @@
__all__ = ['Set', 'set_options', 'simple_set_rule', 'SetOf']
-import logging
-import sys
-import types
-import copy
-import itertools
-from weakref import ref as weakref_ref
-
-from pyutilib.misc import flatten_tuple as pyutilib_misc_flatten_tuple
-
-from pyomo.common.timing import ConstructionTimer
-from pyomo.core.base.misc import apply_indexed_rule, \
- apply_parameterized_indexed_rule, sorted_robust
-from pyomo.core.base.plugin import ModelComponentFactory
-from pyomo.core.base.component import Component, ComponentData
-from pyomo.core.base.indexed_component import IndexedComponent, \
- UnindexedComponent_set
-from pyomo.core.base.numvalue import native_numeric_types
-
-from six import itervalues, iteritems, string_types
-from six.moves import xrange
-
-logger = logging.getLogger('pyomo.core')
-
-def process_setarg(arg):
- """
- Process argument and return an associated set object.
-
- This method is used by IndexedComponent
- """
- import pyomo.core.base.set as new_set
- if isinstance(arg, (_SetDataBase, new_set._SetDataBase)):
- # Argument is a non-indexed Set instance
- return arg
- elif isinstance(arg,IndexedSet):
- # Argument is an indexed Set instance
- raise TypeError("Cannot index a component with an indexed set")
- elif isinstance(arg,Component):
- # Argument is some other component
- raise TypeError("Cannot index a component with a non-set "
- "component: %s" % (arg.name))
- else:
- try:
- #
- # If the argument has a set_options attribute, then use
- # it to initialize a set
- #
- options = getattr(arg,'set_options')
- options['initialize'] = arg
- return Set(**options)
- except:
- pass
- # Argument is assumed to be an initialization function
- return Set(initialize=arg)
-
-
-def set_options(**kwds):
- """
- This is a decorator for set initializer functions. This
- decorator allows an arbitrary dictionary of values to passed
- through to the set constructor.
-
- Examples:
- @set_options(dimen=3)
- def B_index(model):
- return [(i,i+1,i*i) for i in model.A]
-
- @set_options(domain=Integers)
- def B_index(model):
- return range(10)
- """
- def decorator(func):
- func.set_options = kwds
- return func
- return decorator
-
-
-def simple_set_rule( fn ):
- """
- This is a decorator that translates None into Set.End.
- This supports a simpler syntax in set rules, though these can be
- more difficult to debug when errors occur.
-
- Example:
-
- @simple_set_rule
- def A_rule(model, i, j):
- ...
- """
-
- def wrapper_function ( *args, **kwargs ):
- value = fn( *args, **kwargs )
- if value is None:
- return Set.End
- return value
- return wrapper_function
-
-
-def _value_sorter(self, obj):
- """Utility to sort the values of a Set.
-
- This returns the values of the Set in a consistent order. For
- ordered Sets, simply return the ordered list. For unordered Sets,
- first try the standard sorted order, and if that fails (for example
- with mixed-type Sets in Python3), use the sorted_robust utility to
- generate sortable keys.
-
- """
- if self.ordered:
- return obj.value_list
- else:
- return sorted_robust(obj)
-
-
-# A trivial class that we can use to test if an object is a "legitimate"
-# set (either SimpleSet, or a member of an IndexedSet)
-class _SetDataBase(ComponentData):
- __slots__ = tuple()
-
-
-class _SetData(_SetDataBase):
- """
- This class defines the data for an unordered set.
-
- Constructor Arguments:
- owner The Set object that owns this data.
- bounds A tuple of bounds for set values: (lower, upper)
-
- Public Class Attributes:
- value_list The list of values
- value The set of values
- _bounds The tuple of bound values
- """
-
- __slots__ = ('value_list', 'value', '_bounds')
-
- def __init__(self, owner, bounds):
- #
- # The following is equivalent to calling
- # the base ComponentData constructor.
- #
- self._component = weakref_ref(owner)
- #
- self._clear()
- self._bounds = bounds
-
- def __getstate__(self):
- """
- This method must be defined because this class uses slots.
- """
- state = super(_SetData, self).__getstate__()
- for i in _SetData.__slots__:
- state[i] = getattr(self, i)
- return state
-
- # Note: because None of the slots on this class need to be edited,
- # we don't need to implement a specialized __setstate__ method.
-
- def __getitem__(self, key):
- """
- Return the specified member of the set.
-
- This method generates an exception because the set is unordered.
- """
- raise ValueError("Cannot index an unordered set '%s'" % self._component().name)
-
- def bounds(self):
- """
- Return bounds information. The default value is 'None', which
- indicates that this set does not contain bounds. Otherwise, this is
- assumed to be a tuple: (lower, upper).
- """
- return self._bounds
-
- def data(self):
- """
- The underlying set data.
-
- Note that this method is preferred to the direct use of the
- 'value' attribute in most cases. The reason is that the
- underlying set values may not be stored as a Python set() object.
- In fact, the underlying set values may not be explicitly stored
- in the Set() object at all!
- """
- return self.value
-
- def _clear(self):
- """
- Reset the set data
- """
- self.value = set()
- self.value_list = []
-
- def _add(self, val, verify=True):
- """
- Add an element, and optionally verify that it is a valid type.
-
- The type verification is done by the owning component.
- """
- if verify:
- self._component()._verify(val)
- if not val in self.value:
- self.value.add(val)
- self.value_list.append(val)
-
- def _discard(self, val):
- """
- Discard an element of this set. This does not return an error
- if the element does not already exist.
-
- NOTE: This operation is probably expensive, as it should require a walk through a list. An
- OrderedDict object might be more efficient, but it's notoriously slow in Python 2.x
-
- NOTE: We could make this more efficient by mimicing the logic in the _OrderedSetData class.
- But that would make the data() method expensive (since it is creating a set). It's
- not obvious which is the better choice.
- """
- try:
- self.value.remove(val)
- self.value_list.remove(val)
- except KeyError:
- pass
-
- def __len__(self):
- """
- Return the number of elements in the set.
- """
- return len(self.value)
-
- def __iter__(self):
- """
- Return an iterator for the set.
- """
- return self.value_list.__iter__()
-
- def __contains__(self, val):
- """
- Return True if the set contains a given value.
- """
- return val in self.value
-
-
-class _OrderedSetData(_SetDataBase):
- """
- This class defines the data for an ordered set.
-
- Constructor Arguments:
- owner The Set object that owns this data.
- bounds A tuple of bounds for set values: (lower, upper)
-
- Public Class Attributes:
- value The set values
- _bounds The tuple of bound values
- order_dict A dictionary that maps from element value to element id.
- Indices in this dictionary start with 1 (not 0).
-
- The ordering supported in this class depends on the 'ordered' attribute
- of the owning component:
- InsertionOrder The order_dict maps from the insertion order
- back to the member of the value array.
- SortedOrder The ordered attribute of the owning component can
- be used to define the sort order. By default,
- the Python ordering of the set types is used.
- Note that a _stable_ sort method is required
- if the discard method is used.
- """
-
- __slots__ = ('value', 'value_list', 'order_dict', '_bounds', '_is_sorted')
-
- def __init__(self, owner, bounds):
- #
- # The following is equivalent to calling
- # the base ComponentData constructor.
- #
- self._component = weakref_ref(owner)
- #
- self._bounds = bounds
- if self.parent_component().ordered is Set.InsertionOrder:
- self._is_sorted = 0
- else:
- self._is_sorted = 1
- self._clear()
-
- def __getstate__(self):
- """
- This method must be defined because this class uses slots.
- """
- state = super(_OrderedSetData, self).__getstate__()
- for i in _OrderedSetData.__slots__:
- state[i] = getattr(self, i)
- return state
-
- # Note: because None of the slots on this class need to be edited,
- # we don't need to implement a specialized __setstate__ method.
-
- def bounds(self):
- """
- Return bounds information. The default value is 'None', which
- indicates that this set does not contain bounds. Otherwise, this is
- assumed to be a tuple: (lower, upper).
- """
- return self._bounds
-
- def data(self):
- """
- Return the underlying set data.
-
- Note that this method returns a value that is different from the
- 'value' attribute. The underlying set values are not be stored
- as a Python set() object.
- """
- return self.value
-
- def _sort(self):
- """
- Sort the set using the 'ordered' attribute of the owning
- component. This recreates the order_dict dictionary, which indicates
- that the set is sorted.
- """
- _sorter = self.parent_component().ordered
- self.value_list = sorted(
- self.value_list,
- key=None if _sorter is Set.SortedOrder else _sorter
- )
- self.order_dict = {j:i for i,j in enumerate(self.value_list)}
- self._is_sorted = 1
-
- def _clear(self):
- """
- Reset the set data
- """
- self.value = set()
- self.value_list = []
- self.order_dict = {}
- if self._is_sorted:
- self._is_sorted = 1
-
- def _add(self, val, verify=True):
- """
- Add an element, and optionally verify that it is a valid type.
-
- The type verification is done by the owning component.
- """
- if verify:
- self._component()._verify(val)
- self.order_dict[val] = len(self.value_list)
- self.value_list.append(val)
- self.value.add(val)
- if self._is_sorted:
- self._is_sorted = 2
-
- def _discard(self, val):
- """
- Discard an element of this set. This does not return an error
- if the element does not already exist.
- """
- try:
- _id = self.order_dict.pop(val)
- except KeyError:
- return
- del self.value_list[_id]
- self.value.remove(val)
- #
- # Update the order_dict: this assumes the user-specified sorter
- # (if one was used) is stable.
- #
- for i in xrange(_id,len(self.value_list)):
- self.order_dict[self.value_list[i]] = i
-
- def __len__(self):
- """
- Return the number of elements in the set.
- """
- return len(self.value_list)
-
- def __iter__(self):
- """
- Return an iterator for the set.
- """
- if self._is_sorted == 2:
- self._sort()
- return self.value_list.__iter__()
-
- def __contains__(self, val):
- """
- Return True if the set contains a given value.
- """
- return val in self.order_dict
-
- def first(self):
- """
- Return the first element of the set.
- """
- if self._is_sorted == 2:
- self._sort()
- return self[1]
-
- def last(self):
- """
- Return the last element of the set.
- """
- if self._is_sorted == 2:
- self._sort()
- return self[len(self)]
-
- def __getitem__(self, idx):
- """
- Return the specified member of the set.
-
- The public Set API is 1-based, even though the
- internal order_dict is (pythonically) 0-based.
- """
- if self._is_sorted == 2:
- self._sort()
- if idx >= 1:
- if idx > len(self):
- raise IndexError("Cannot index a RangeSet past the last element")
- return self.value_list[idx-1]
- elif idx < 0:
- if len(self)+idx < 0:
- raise IndexError("Cannot index a RangeSet past the first element")
- return self.value_list[idx]
- else:
- raise IndexError("Valid index values for sets are 1 .. len(set) or -1 .. -len(set)")
-
-
- def ord(self, match_element):
- """
- Return the position index of the input value. The
- position indices start at 1.
- """
- if self._is_sorted == 2:
- self._sort()
- try:
- return self.order_dict[match_element] + 1
- except IndexError:
- raise IndexError("Unknown input element="+str(match_element)+" provided as input to ord() method for set="+self.name)
-
- def next(self, match_element, k=1):
- """
- Return the next element in the set. The default
- behavior is to return the very next element. The k
- option can specify how many steps are taken to get
- the next element.
-
- If the next element is beyond the end of the set,
- then an exception is raised.
- """
- try:
- element_position = self.ord(match_element)
- except IndexError:
- raise KeyError("Cannot obtain next() member of set="+self.name+"; input element="+str(match_element)+" is not a member of the set!")
- #
- try:
- return self[element_position+k]
- except KeyError:
- raise KeyError("Cannot obtain next() member of set="+self.name+"; failed to access item in position="+str(element_position+k))
-
- def nextw(self, match_element, k=1):
- """
- Return the next element in the set. The default
- behavior is to return the very next element. The k
- option can specify how many steps are taken to get
- the next element.
-
- If the next element goes beyond the end of the list
- of elements in the set, then this wraps around to
- the beginning of the list.
- """
- try:
- element_position = self.ord(match_element)
- except KeyError:
- raise KeyError("Cannot obtain nextw() member of set="+self.name+"; input element="+str(match_element)+" is not a member of the set!")
- #
- return self[(element_position+k-1) % len(self.value_list) + 1]
-
- def prev(self, match_element, k=1):
- """
- Return the previous element in the set. The default
- behavior is to return the element immediately prior
- to the specified element. The k option can specify
- how many steps are taken to get the previous
- element.
-
- If the previous element is before the start of the
- set, then an exception is raised.
- """
- return self.next(match_element, k=-k)
-
- def prevw(self, match_element, k=1):
- """
- Return the previous element in the set. The default
- behavior is to return the element immediately prior
- to the specified element. The k option can specify
- how many steps are taken to get the previous
- element.
-
- If the previous element is before the start of the
- set, then this wraps around to the end of the list.
- """
- return self.nextw(match_element, k=-k)
-
-class _IndexedSetData(_SetData):
- """
- This class adds the __call__ method, which is expected
- for indexed component data. But we omit this from
- _SetData because we do not want to treat scalar sets as
- functors.
- """
-
- __slots__ = tuple()
-
- def __call__(self):
- """
- Return the underlying set data.
- """
- return self.data()
-
- def clear(self):
- """
- Reset this data.
- """
- self._clear()
-
- def add(self, val):
- """
- Add an element to the set.
- """
- self._add(val)
-
- def discard(self, val):
- """
- Discard an element from the set.
- """
- self._discard(val)
-
-
-class _IndexedOrderedSetData(_OrderedSetData):
- """
- This class adds the __call__ method, which is expected
- for indexed component data. But we omit this from
- _OrderedSetData because we do not want to treat scalar
- sets as functors.
- """
-
- __slots__ = tuple()
-
- def __call__(self):
- """
- Return the underlying set data.
- """
- return self.data()
-
- def clear(self):
- """
- Reset this data.
- """
- self._clear()
-
- def add(self, val):
- """
- Add an element to the set.
- """
- self._add(val)
-
- def discard(self, val):
- """
- Discard an element from the set.
- """
- self._discard(val)
-
-
-@ModelComponentFactory.register("Set data that is used to define a model instance.")
-class Set(IndexedComponent):
- """
- A set object that is used to index other Pyomo objects.
-
- This class has a similar look-and-feel as a Python set class.
- However, the set operations defined in this class return another
- abstract Set object. This class contains a concrete set, which
- can be initialized by the load() method.
-
- Constructor Arguments:
- name
- The name of the set
- doc
- A text string describing this component
- within
- A set that defines the type of values that can be
- contained in this set
- domain
- A set that defines the type of values that can be
- contained in this set
- initialize
- A dictionary or rule for setting up this set with
- existing model data
- validate
- A rule for validating membership in this set. This
- has the functional form: f(data) -> bool, and
- returns true if the data belongs in the set
- dimen
- Specify the set's arity, or None if no arity is enforced
- virtual
- If true, then this is a virtual set that does not
- store data using the class dictionary
- bounds
- A 2-tuple that specifies the range of possible set values.
- ordered
- Specifies whether the set is ordered. Possible values are
-
- * False: Unordered
- * True: Ordered by insertion order
- * InsertionOrder: Ordered by insertion order
- * SortedOrder: Ordered by sort order
- * : Ordered with this comparison function
- filter
- A function that is used to filter set entries.
-
- Public class attributes:
- concrete
- If True, then this set contains elements.(TODO)
- dimen
- The dimension of the data in this set.
- doc
- A text string describing this component
- domain
- A set that defines the type of values that can be
- contained in this set
- filter
- A function that is used to filter set entries.
- initialize
- A dictionary or rule for setting up this set with
- existing model data
- ordered
- Specifies whether the set is ordered.
- validate
- A rule for validating membership in this set.
- virtual
- If True, then this set does not store data using
- the class dictionary
- """
-
- End = (1003,)
- InsertionOrder = (1004,)
- SortedOrder = (1005,)
-
- def __new__(cls, *args, **kwds):
- if cls != Set:
- return super(Set, cls).__new__(cls)
- if not args or (args[0] is UnindexedComponent_set and len(args)==1):
- if kwds.get('ordered',False) is False:
- return SimpleSet.__new__(SimpleSet)
- else:
- return OrderedSimpleSet.__new__(OrderedSimpleSet)
- else:
- return IndexedSet.__new__(IndexedSet)
-
- def __init__(self, *args, **kwds):
- #
- # Default keyword values
- #
- kwds.setdefault("name", "_unknown_")
- self.initialize = kwds.pop("rule", None)
- self.initialize = kwds.pop("initialize", self.initialize)
- self.validate = kwds.pop("validate", None)
- self.ordered = kwds.pop("ordered", False)
- self.filter = kwds.pop("filter", None)
- self.domain = kwds.pop("within", None)
- self.domain = kwds.pop('domain', self.domain )
- #
- if self.ordered is True:
- self.ordered = Set.InsertionOrder
-
- # We can't access self.dimen after its been written, so we use
- # tmp_dimen until the end of __init__
- tmp_dimen = 0
-
- # Get dimen from domain, if possible
- if self.domain is not None:
- tmp_dimen = getattr(self.domain, 'dimen', 0)
- if self._bounds is None and not self.domain is None:
- self._bounds = copy.copy(self.domain._bounds)
-
- # Make sure dimen and implied dimensions don't conflict
- kwd_dimen = kwds.pop("dimen", 0)
- if kwd_dimen != 0:
- if self.domain is not None and tmp_dimen != kwd_dimen:
- raise ValueError(\
- ("Value of keyword 'dimen', %s, differs from the " + \
- "dimension of the superset '%s', %s") % \
- (str(kwd_dimen), str(self.domain.name), str(tmp_dimen)))
- else:
- tmp_dimen = kwd_dimen
-
- kwds.setdefault('ctype', Set)
- IndexedComponent.__init__(self, *args, **kwds)
-
- if tmp_dimen == 0:
- # We set the default to 1
- tmp_dimen = 1
- if self.initialize is not None:
- #
- # Convert initialization value to a list (which are
- # copyable). There are subtlies here: dict should be left
- # alone (as dict's are used for initializing indezed Sets),
- # and lists should be left alone (for efficiency). tuples,
- # generators, and iterators like dict.keys() [in Python 3.x]
- # should definitely be converted to lists.
- #
- if type(self.initialize) is tuple \
- or ( hasattr(self.initialize, "__iter__")
- and not hasattr(self.initialize, "__getitem__") ):
- self.initialize = list(self.initialize)
- #
- # Try to guess dimen from the initialize list
- #
- if not tmp_dimen is None:
- tmp=0
- if type(self.initialize) is tuple:
- tmp = len(self.initialize)
- elif type(self.initialize) is list and len(self.initialize) > 0 \
- and type(self.initialize[0]) is tuple:
- tmp = len(self.initialize[0])
- else:
- tmp = getattr(self.initialize, 'dimen', tmp)
- if tmp != 0:
- if kwd_dimen != 0 and tmp != kwd_dimen:
- raise ValueError("Dimension argument differs from the data in the initialize list")
- tmp_dimen = tmp
-
- self.dimen = tmp_dimen
-
- def _verify(self, element):
- """
- Verify that the element is valid for this set.
- """
- if self.domain is not None and element not in self.domain:
- raise ValueError(
- "The value=%s is not valid for set=%s\n"
- "because it is not within the domain=%s"
- % ( element, self.name, self.domain.name ) )
- if self.validate is not None:
- flag = False
- try:
- if self._parent is not None:
- flag = apply_indexed_rule(self, self.validate, self._parent(), element)
- else:
- flag = apply_indexed_rule(self, self.validate, None, element)
- except:
- pass
- if not flag:
- raise ValueError("The value="+str(element)+" violates the validation rule of set="+self.name)
- if not self.dimen is None:
- if self.dimen > 1 and type(element) is not tuple:
-
- raise ValueError("The value="+str(element)+" is not a tuple for set="+self.name+", which has dimen="+str(self.dimen))
- elif self.dimen == 1 and type(element) is tuple:
- raise ValueError("The value="+str(element)+" is a tuple for set="+self.name+", which has dimen="+str(self.dimen))
- elif type(element) is tuple and len(element) != self.dimen:
- raise ValueError("The value="+str(element)+" does not have dimension="+str(self.dimen)+", which is needed for set="+self.name)
- return True
-
-
-class SimpleSetBase(Set):
- """
- A derived Set object that contains a single set.
- """
-
- def __init__(self, *args, **kwds):
- self.virtual = kwds.pop("virtual", False)
- self.concrete = not self.virtual
- Set.__init__(self, *args, **kwds)
-
- def valid_model_component(self):
- """
- Return True if this can be used as a model component.
- """
- if self.virtual and not self.concrete:
- return False
- return True
-
- def clear(self):
- """
- Clear that data in this component.
- """
- if self.virtual:
- raise TypeError("Cannot clear virtual set object `"+self.name+"'")
- self._clear()
-
- def check_values(self):
- """
- Verify that the values in this set are valid.
- """
- if not self.concrete:
- return
- for val in self:
- self._verify(val)
-
- def add(self, *args):
- """
- Add one or more elements to a set.
- """
- if self.virtual:
- raise TypeError("Cannot add elements to virtual set `"+self.name+"'")
- for val in args:
- tmp = pyutilib_misc_flatten_tuple(val)
- self._verify(tmp)
- try:
- if tmp in self:
- #
- # Generate a warning, since we expect that users will not plan to
- # re-add the same element to a set.
- #
- logger.warning("Element "+str(tmp)+" already exists in set "+self.name+"; no action taken.")
- continue
- self._add(tmp, False)
- except TypeError:
- raise TypeError("Problem inserting "+str(tmp)+" into set "+self.name)
-
- def remove(self, element):
- """
- Remove an element from the set.
-
- If the element is not a member, raise an error.
- """
- if self.virtual:
- raise KeyError("Cannot remove element `"+str(element)+"' from virtual set "+self.name)
- if element not in self:
- raise KeyError("Cannot remove element `"+str(element)+"' from set "+self.name)
- self._discard(element)
-
- def discard(self, element):
- """
- Remove an element from the set.
-
- If the element is not a member, do nothing.
- """
- if self.virtual:
- raise KeyError("Cannot discard element `"+str(element)+"' from virtual set "+self.name)
- self._discard(element)
-
- def _pprint(self):
- """
- Return data that will be printed for this component.
- """
- _ordered = self.ordered
- if type(_ordered) is bool:
- pass
- elif _ordered is Set.InsertionOrder:
- _ordered = 'Insertion'
- elif _ordered is Set.SortedOrder:
- _ordered = 'Sorted'
- else:
- _ordered = '{user}'
- return (
- [("Dim", self.dim()),
- ("Dimen", self.dimen),
- ("Size", len(self)),
- ("Domain", None if self.domain is None else self.domain.name),
- ("Ordered", _ordered),
- ("Bounds", self._bounds)],
- iteritems( {None: self} ),
- None, # ("Members",),
- lambda os, k, v: os.write(str(
- "Virtual" if not self.concrete or v.virtual \
- else v.value_list if v.ordered \
- else sorted(v), )+"\n"),
- )
-
- def _set_repn(self, other):
- """
- Return a Set subset for 'other'
- """
- if isinstance(other, SimpleSet):
- return other
- if isinstance(other, OrderedSimpleSet):
- return other
- return SetOf(other)
-
- def __len__(self):
- """
- Return the number of elements in this set.
- """
- if not self.concrete:
- raise ValueError("The size of a non-concrete set is unknown")
- return len(self.value_list)
-
- def __iter__(self):
- """
- Return an iterator for the underlying set
- """
- if not self._constructed:
- raise RuntimeError(
- "Cannot iterate over abstract Set '%s' before it has "
- "been constructed (initialized)." % (self.name,) )
- if not self.concrete:
- raise TypeError("Cannot iterate over a non-concrete set '%s'" % self.name)
- return self.value_list.__iter__()
- #return super(SimpleSetBase, self).__iter__()
-
- def __reversed__(self):
- """
- Return a reversed iterator
- """
- return reversed(self.__iter__())
-
- def __hash__(self):
- """
- Hash this object
- """
- return Set.__hash__(self)
-
- def __eq__(self,other):
- """
- Equality comparison
- """
- # the obvious test: two references to the same set are the same
- if id(self) == id(other):
- return True
- # easy cases: if other isn't a Set-like thing, then we aren't equal
- if other is None:
- return False
- try:
- tmp = self._set_repn(other)
- except:
- return False
- # if we are both concrete, then we should compare elements
- if self.concrete and tmp.concrete:
- if self.dimen != tmp.dimen:
- return False
- if self.virtual or tmp.virtual:
- # optimization: usually len() is faster than checking
- # all elements... if the len() are different, then we
- # are obviously not equal. We only do this test here
- # because we assume that the __eq__() method for native
- # types (in the case of non-virtual sets) is already
- # smart enough to do this optimization internally if it
- # is applicable.
- if len(self) != len(other):
- return False
- for i in other:
- if not i in self:
- return False
- return True
- else:
- return self.data().__eq__( tmp.data() )
-
- # if we are both virtual, compare hashes
- if self.virtual and tmp.virtual:
- return hash(self) == hash(tmp)
-
- # I give... not equal!
- return False
-
- def __ne__(self,other):
- """
- Inequality comparison
- """
- return not self.__eq__(other)
-
- def __contains__(self, element):
- """
- Return True if element is a member of this set.
- """
- #
- # If the element is a set, then see if this is a subset.
- # We first test if the element is a number or tuple, before
- # doing the expensive calls to isinstance().
- #
- element_t = type(element)
- if not element_t in native_numeric_types and element_t is not tuple:
- if isinstance(element,SimpleSet) or isinstance(element,OrderedSimpleSet):
- return element.issubset(self)
- # else:
- # set_ = SetOf(element)
- # return set_.issubset(self)
-
- #
- # When dealing with a concrete set, just check if the element is
- # in the set. There is no need for extra validation.
- #
- if self._constructed and self.concrete is True:
- return self._set_contains(element)
- #
- # If this is not a valid element, then return False
- #
- try:
- self._verify(element)
- except:
- return False
- #
- # If the validation rule is used then we do not actually
- # check whether the data is in self.value.
- #
- if self.validate is not None and not self.concrete:
- return True
- #
- # The final check: return true if self.concrete is False, since we should
- # have already validated this value. The following, or at least one of
- # the execution paths - is probably redundant with the above.
- #
- return not self.concrete or self._set_contains(element)
-
- def isdisjoint(self, other):
- """
- Return True if the set has no elements in common with 'other'.
- Sets are disjoint if and only if their intersection is the empty set.
- """
- other = self._set_repn(other)
- tmp = self & other
- for elt in tmp:
- return False
- return True
-
- def issubset(self,other):
- """
- Return True if the set is a subset of 'other'.
- """
- if not self.concrete:
- raise TypeError("ERROR: cannot perform \"issubset\" test because the current set is not a concrete set.")
- other = self._set_repn(other)
- if self.dimen != other.dimen:
- raise ValueError("Cannot perform set operation with sets "+self.name+" and "+other.name+" that have different element dimensions: "+str(self.dimen)+" "+str(other.dimen))
- for val in self:
- if val not in other:
- return False
- return True
-
- def issuperset(self, other):
- """
- Return True if the set is a superset of 'other'.
-
- Note that we do not simply call other.issubset(self) because
- 'other' may not be a Set instance.
- """
- other = self._set_repn(other)
- if self.dimen != other.dimen:
- raise ValueError("Cannot perform set operation with sets "+self.name+" and "+other.name+" that have different element dimensions: "+str(self.dimen)+" "+str(other.dimen))
- if not other.concrete:
- raise TypeError("ERROR: cannot perform \"issuperset\" test because the target set is not a concrete set.")
- for val in other:
- if val not in self:
- return False
- return True
-
- def union(self, *args):
- """
- Return the union of this set with one or more sets.
- """
- tmp = self
- for arg in args:
- tmp = _SetUnion(tmp, arg)
- return tmp
-
- def intersection(self, *args):
- """
- Return the intersection of this set with one or more sets
- """
- tmp = self
- for arg in args:
- tmp = _SetIntersection(tmp, arg)
- return tmp
-
- def difference(self, *args):
- """
- Return the difference between this set with one or more sets
- """
- tmp = self
- for arg in args:
- tmp = _SetDifference(tmp, arg)
- return tmp
-
- def symmetric_difference(self, *args):
- """
- Return the symmetric difference of this set with one or more sets
- """
- tmp = self
- for arg in args:
- tmp = _SetSymmetricDifference(tmp, arg)
- return tmp
-
- def cross(self, *args):
- """
- Return the cross-product between this set and one or more sets
- """
- tmp = self
- for arg in args:
- tmp = _SetProduct(tmp, arg)
- return tmp
-
- # <= is equivalent to issubset
- # >= is equivalent to issuperset
- # | is equivalent to union
- # & is equivalent to intersection
- # - is equivalent to difference
- # ^ is equivalent to symmetric_difference
- # * is equivalent to cross
-
- __le__ = issubset
- __ge__ = issuperset
- __or__ = union
- __and__ = intersection
- __sub__ = difference
- __xor__ = symmetric_difference
- __mul__ = cross
-
- def __lt__(self,other):
- """
- Return True if the set is a strict subset of 'other'
-
- TODO: verify that this is more efficient than an explicit implimentation.
- """
- return self <= other and not self == other
-
- def __gt__(self,other):
- """
- Return True if the set is a strict superset of 'other'
-
- TODO: verify that this is more efficient than an explicit implimentation.
- """
- return self >= other and not self == other
-
- def construct(self, values=None):
- """
- Apply the rule to construct values in this set
-
- TODO: rework to avoid redundant code
- """
- if __debug__ and logger.isEnabledFor(logging.DEBUG):
- logger.debug("Constructing SimpleSet, name="+self.name+", from data="+repr(values))
- if self._constructed:
- return
- timer = ConstructionTimer(self)
- self._constructed=True
-
- if self.initialize is None: # TODO: deprecate this functionality
- self.initialize = getattr(self,'rule',None)
- if not self.initialize is None:
- logger.warning("DEPRECATED: The set 'rule' attribute cannot be used to initialize component "+self.name+". Use the 'initialize' attribute")
- #
- # Construct using the input values list
- #
- if values is not None:
- if type(self._bounds) is tuple:
- first=self._bounds[0]
- last=self._bounds[1]
- else:
- first=None
- last=None
- all_numeric=True
- #
- # TODO: verify that values is not a list
- #
- for val in values[None]:
- #
- # Skip the value if it is filtered
- #
- if not self.filter is None and not apply_indexed_rule(self, self.filter, self._parent(), val):
- continue
- self.add(val)
- if type(val) in native_numeric_types:
- if first is None or vallast:
- last=val
- else:
- all_numeric=False
- if all_numeric:
- self._bounds = (first, last)
- #
- # Construct using the initialize rule
- #
- elif type(self.initialize) is types.FunctionType:
- if self._parent is None:
- raise ValueError("Must pass the parent block in to initialize with a function")
- if self.initialize.__code__.co_argcount == 1:
- #
- # Using a rule of the form f(model) -> iterator
- #
- tmp = self.initialize(self._parent())
- for val in tmp:
- if self.dimen == 0:
- if type(val) in [tuple,list]:
- self.dimen=len(val)
- else:
- self.dimen=1
- if not self.filter is None and \
- not apply_indexed_rule(self, self.filter, self._parent(), val):
- continue
- self.add(val)
- else:
- #
- # Using a rule of the form f(model, z) -> element
- #
- ctr=1
- val = apply_indexed_rule(self, self.initialize, self._parent(), ctr)
- if val is None:
- raise ValueError("Set rule returned None instead of Set.Skip")
- if self.dimen == 0:
- if type(val) in [tuple,list] and not val == Set.End:
- self.dimen=len(val)
- else:
- self.dimen=1
- while not (val.__class__ is tuple and val == Set.End):
- # Add the value if the filter is None or the filter return value is True
- if self.filter is None or \
- apply_indexed_rule(self, self.filter, self._parent(), val):
- self.add(val)
- ctr += 1
- val = apply_indexed_rule(self, self.initialize, self._parent(), ctr)
- if val is None:
- raise ValueError("Set rule returned None instead of Set.Skip")
-
- # Update the bounds if after using the rule, the set is
- # a one dimensional list of all numeric values
- if self.dimen == 1:
- if type(self._bounds) is tuple:
- first=self._bounds[0]
- last=self._bounds[1]
- else:
- first=None
- last=None
- all_numeric=True
- for val in self.value:
- if type(val) in native_numeric_types:
- if first is None or vallast:
- last=val
- else:
- all_numeric=False
- break
- if all_numeric:
- self._bounds = (first, last)
-
- #
- # Construct using the default values
- #
- elif self.initialize is not None:
- if type(self.initialize) is dict:
- raise ValueError("Cannot initialize set "+self.name+" with dictionary data")
- if type(self._bounds) is tuple:
- first=self._bounds[0]
- last=self._bounds[1]
- else:
- first=None
- last=None
- all_numeric=True
- for val in self.initialize:
- # Skip the value if it is filtered
- if not self.filter is None and \
- not apply_indexed_rule(self, self.filter, self._parent(), val):
- continue
- if type(val) in native_numeric_types:
- if first is None or vallast:
- last=val
- else:
- all_numeric=False
- self.add(val)
- if all_numeric:
- self._bounds = (first,last)
- timer.report()
-
-
-class SimpleSet(SimpleSetBase,_SetData):
-
- def __init__(self, *args, **kwds):
- self._bounds = kwds.pop('bounds', None)
- SimpleSetBase.__init__(self, *args, **kwds)
- _SetData.__init__(self, self, self._bounds)
-
- def __getitem__(self, key):
- """
- Return the specified member of the set.
-
- This method generates an exception because the set is unordered.
- """
- return _SetData.__getitem__(self, key)
-
- def _set_contains(self, element):
- """
- A wrapper function that tests if the element is in
- the data associated with a concrete set.
- """
- return element in self.value
-
-
-class OrderedSimpleSet(SimpleSetBase,_OrderedSetData):
-
- def __init__(self, *args, **kwds):
- self._bounds = kwds.pop('bounds', None)
- SimpleSetBase.__init__(self, *args, **kwds)
- _OrderedSetData.__init__(self, self, self._bounds)
-
- def __getitem__(self, key):
- """
- Return the specified member of the set.
- """
- return _OrderedSetData.__getitem__(self, key)
-
- def _set_contains(self, element):
- """
- A wrapper function that tests if the element is in
- the data associated with a concrete set.
- """
- return element in self.order_dict
-
-
-# REVIEW - START
-
-@ModelComponentFactory.register("Define a Pyomo Set component using an iterable data object.")
-class SetOf(SimpleSet):
- """
- A derived SimpleSet object that creates a set from external
- data without duplicating it.
- """
-
- def __init__(self, *args, **kwds):
- if len(args) > 1:
- raise TypeError("Only one set data argument can be specified")
- self.dimen = 0
- SimpleSet.__init__(self,**kwds)
- if len(args) == 1:
- self._elements = args[0]
- else:
- self._elements = self.initialize
- self.value = None
- self._constructed = True
- self._bounds = (None, None) # We cannot determine bounds, since the data may change
- self.virtual = False
- try:
- len(self._elements)
- self.concrete = True
- except:
- self.concrete = False
- #
- if self.dimen == 0:
- try:
- for i in self._elements:
- if type(i) is tuple:
- self.dimen = len(i)
- else:
- self.dimen = 1
- break
- except TypeError:
- e = sys.exc_info()[1]
- raise TypeError("Cannot create a Pyomo set: "+e)
-
- def construct(self, values=None):
- """
- Disabled construction method
- """
- ConstructionTimer(self).report()
-
- def __len__(self):
- """
- The number of items in the set.
- """
- try:
- return len(self._elements)
- except:
- pass
- #
- # If self._elements cannot provide size information,
- # then we need to iterate through all set members.
- #
- ctr = 0
- for i in self:
- ctr += 1
- return ctr
-
- def __iter__(self):
- """
- Return an iterator for the underlying set
- """
- for i in self._elements:
- yield i
-
- def _set_contains(self, element):
- """
- A wrapper function that tests if the element is in
- the data associated with a concrete set.
- """
- return element in self._elements
-
- def data(self):
- """
- Return the underlying set data by constructing
- a python set() object explicitly.
- """
- return set(self)
-
-
-class _SetOperator(SimpleSet):
- """A derived SimpleSet object that contains a concrete virtual single set."""
-
- def __init__(self, *args, **kwds):
- if len(args) != 2:
- raise TypeError("Two arguments required for a binary set operator")
- dimen_test = kwds.get('dimen_test',True)
- if 'dimen_test' in kwds:
- del kwds['dimen_test']
- SimpleSet.__init__(self,**kwds)
- self.value = None
- self._constructed = True
- self.virtual = True
- self.concrete = True
- #
- self._setA = args[0]
- if not self._setA.concrete:
- raise TypeError("Cannot perform set operations with non-concrete set '"+self._setA.name+"'")
- if isinstance(args[1],Set):
- self._setB = args[1]
- else:
- self._setB = SetOf(args[1])
- if not self._setB.concrete:
- raise TypeError("Cannot perform set operations with non-concrete set '"+self._setB.name+"'")
- if dimen_test and self._setA.dimen != self._setB.dimen:
- raise ValueError("Cannot perform set operation with sets "+self._setA.name+" and "+self._setB.name+" that have different element dimensions: "+str(self._setA.dimen)+" "+str(self._setB.dimen))
- self.dimen = self._setA.dimen
- #
- self.ordered = self._setA.ordered and self._setB.ordered
-
- #
- # This line is critical in order for nested set expressions to
- # properly clone (e.g., m.D = m.A | m.B | m.C). The intermediate
- # _SetOperation constructs must be added to the model, so we
- # highjack the hack in block.py for IndexedComponent to
- # deal with multiple indexing arguments.
- #
- self._implicit_subsets = [self._setA, self._setB]
-
- def construct(self, values=None):
- """ Disabled construction method """
- timer = ConstructionTimer(self).report()
-
- def __len__(self):
- """The number of items in the set."""
- ctr = 0
- for i in self:
- ctr += 1
- return ctr
-
- def __iter__(self):
- """Return an iterator for the underlying set"""
- raise IOError("Undefined set iterator")
-
- def _set_contains(self, element):
- raise IOError("Undefined set operation")
-
- def data(self):
- """The underlying set data."""
- return set(self)
-
-class _SetUnion(_SetOperator):
-
- def __init__(self, *args, **kwds):
- _SetOperator.__init__(self, *args, **kwds)
-
- def __iter__(self):
- for elt in self._setA:
- yield elt
- for elt in self._setB:
- if not elt in self._setA:
- yield elt
-
- def _set_contains(self, elt):
- return elt in self._setA or elt in self._setB
-
-class _SetIntersection(_SetOperator):
-
- def __init__(self, *args, **kwds):
- _SetOperator.__init__(self, *args, **kwds)
-
- def __iter__(self):
- for elt in self._setA:
- if elt in self._setB:
- yield elt
-
- def _set_contains(self, elt):
- return elt in self._setA and elt in self._setB
-
-class _SetDifference(_SetOperator):
-
- def __init__(self, *args, **kwds):
- _SetOperator.__init__(self, *args, **kwds)
-
- def __iter__(self):
- for elt in self._setA:
- if not elt in self._setB:
- yield elt
-
- def _set_contains(self, elt):
- return elt in self._setA and not elt in self._setB
-
-class _SetSymmetricDifference(_SetOperator):
-
- def __init__(self, *args, **kwds):
- _SetOperator.__init__(self, *args, **kwds)
-
- def __iter__(self):
- for elt in self._setA:
- if not elt in self._setB:
- yield elt
- for elt in self._setB:
- if not elt in self._setA:
- yield elt
-
- def _set_contains(self, elt):
- return (elt in self._setA) ^ (elt in self._setB)
-
-class _SetProduct(_SetOperator):
-
- def __init__(self, *args, **kwd):
- kwd['dimen_test'] = False
-
- # every input argument in a set product must be iterable.
- for arg in args:
- # obviouslly, if the object has an '__iter__' method, then
- # it is iterable. Checking for this prevents us from trying
- # to iterate over unconstructed Sets (which would result in
- # an exception)
- if not hasattr(arg, '__iter__'):
- try:
- iter(arg)
- except TypeError:
- raise TypeError("Each input argument to a _SetProduct constructor must be iterable")
-
- _SetOperator.__init__(self, *args, **kwd)
- # the individual index sets definining the product set.
- if isinstance(self._setA,_SetProduct):
- self.set_tuple = list(self._setA.set_tuple)
- else:
- self.set_tuple = [self._setA]
- if isinstance(self._setB,_SetProduct):
- self.set_tuple += self._setB.set_tuple
- else:
- self.set_tuple.append(self._setB)
- self._setA = self._setB = None
- # set the "dimen" instance attribute.
- self._compute_dimen()
-
- def __iter__(self):
- if self.is_flat_product():
- for i in itertools.product(*self.set_tuple):
- yield i
- else:
- for i in itertools.product(*self.set_tuple):
- yield pyutilib_misc_flatten_tuple(i)
-
- def _set_contains(self, element):
- # Do we really need to check if element is a tuple???
- # if type(element) is not tuple:
- # return False
- try:
- ctr = 0
- for subset in self.set_tuple:
- d = subset.dimen
- if d == 1:
- if not subset._set_contains(element[ctr]):
- return False
- elif d is None:
- for dlen in range(len(element), ctr, -1):
- if subset._set_contains(element[ctr:dlen]):
- d = dlen - ctr
- break
- if d is None:
- if subset._set_contains(element[ctr]):
- d = 1
- else:
- return False
- else:
- # cast to tuple is not needed: slices of tuples
- # return tuples!
- if not subset._set_contains(element[ctr:ctr+d]):
- return False
- ctr += d
- return ctr == len(element)
- except:
- return False
-
- def __len__(self):
- ans = 1
- for _set in self.set_tuple:
- ans *= len(_set)
- return ans
-
- def _compute_dimen(self):
- ans=0
- for _set in self.set_tuple:
- if _set.dimen is None:
- self.dimen=None
- return
- else:
- ans += _set.dimen
- self.dimen = ans
-
- def is_flat_product(self):
- """
- a simple utility to determine if each of the composite sets is
- of dimension one. Knowing this can significantly reduce the
- cost of iteration, as you don't have to call flatten_tuple.
- """
-
- for s in self.set_tuple:
- if s.dimen != 1:
- return False
- return True
-
- def _verify(self, element):
- """
- If this set is virtual, then an additional check is made
- to ensure that the element is in each of the underlying sets.
- """
- tmp = SimpleSet._verify(self, element)
- return tmp
-
- # WEH - when is this needed?
- if not tmp or not self.virtual:
- return tmp
-
- next_tuple_index = 0
- member_set_index = 0
- for member_set in self.set_tuple:
- tuple_slice = element[next_tuple_index:next_tuple_index + member_set.dimen]
- if member_set.dimen == 1:
- tuple_slice = tuple_slice[0]
- if tuple_slice not in member_set:
- return False
- member_set_index += 1
- next_tuple_index += member_set.dimen
- return True
-
-# REVIEW - END
-
-class IndexedSet(Set):
- """
- An array of sets, which are indexed by other sets
- """
-
- def __init__(self, *args, **kwds): #pragma:nocover
- self._bounds = kwds.pop("bounds", None)
- Set.__init__(self, *args, **kwds)
- if 'virtual' in kwds: #pragma:nocover
- raise TypeError("It doesn't make sense to create a virtual set array")
- if self.ordered:
- self._SetData = _IndexedOrderedSetData
- else:
- self._SetData = _IndexedSetData
-
- def size(self):
- """
- Return the number of elements in all of the indexed sets.
- """
- ans = 0
- for cdata in itervalues(self):
- ans += len(cdata)
- return ans
-
- def data(self):
- """
- Return the dictionary of sets
- """
- return self._data
-
- def clear(self):
- """
- Clear that data in this component.
- """
- if self.is_indexed():
- self._data = {}
- else:
- #
- # TODO: verify that this could happen
- #
- pass
-
- def _getitem_when_not_present(self, index):
- """
- Return the default component data value
-
- This returns an exception.
- """
- tmp = self._data[index] = self._SetData(self, self._bounds)
- return tmp
-
- def __setitem__(self, key, vals):
- """
- Add a set to the index.
- """
- if key not in self._index:
- raise KeyError("Cannot set index "+str(key)+" in array set "+self.name)
- #
- # Create a _SetData object if one doesn't already exist
- #
- if key in self._data:
- self._data[key].clear()
- else:
- self._data[key] = self._SetData(self, self._bounds)
- #
- # Add the elements in vals to the _SetData object
- #
- _set = self._data[key]
- for elt in vals:
- _set.add(elt)
-
- def check_values(self):
- """
- Verify the values of all indexed sets.
-
- TODO: document when unverified values could be set.
- """
- for cdata in itervalues(self):
- for val in cdata.value:
- self._verify(val)
-
- def _pprint(self):
- """
- Return data that will be printed for this component.
- """
- _ordered = self.ordered
- if type(_ordered) is bool:
- pass
- elif _ordered is Set.InsertionOrder:
- _ordered = 'Insertion'
- elif _ordered is Set.SortedOrder:
- _ordered = 'Sorted'
- else:
- _ordered = '{user}'
- return (
- [("Dim", self.dim()),
- ("Dimen", self.dimen),
- ("Size", self.size()),
- ("Domain", None if self.domain is None else self.domain.name),
- ("ArraySize", len(self._data)),
- ("Ordered", _ordered),
- ("Bounds", self._bounds)],
- iteritems(self._data),
- ("Members",),
- lambda k, v: [ _value_sorter(self, v) ]
- )
-
- def construct(self, values=None):
- """
- Apply the rule to construct values in each set
- """
- if __debug__ and logger.isEnabledFor(logging.DEBUG):
- logger.debug("Constructing IndexedSet, name="+self.name+", from data="+repr(values))
- if self._constructed:
- return
- timer = ConstructionTimer(self)
- self._constructed=True
- #
- if self.initialize is None: # TODO: deprecate this functionality
- self.initialize = getattr(self,'rule',None)
- if not self.initialize is None:
- logger.warning("DEPRECATED: The set 'rule' attribute cannot be used to initialize component "+self.name+". Use the 'initialize' attribute")
- #
- # Construct using the values dictionary
- #
- if values is not None:
- for key in values:
- if type(key) is tuple and len(key)==1:
- tmpkey=key[0]
- else:
- tmpkey=key
- if tmpkey not in self._index:
- raise KeyError("Cannot construct index "+str(tmpkey)+" in array set "+self.name)
- tmp = self._SetData(self, self._bounds)
- for val in values[key]:
- tmp._add(val)
- self._data[tmpkey] = tmp
- #
- # Construct using the rule
- #
- elif type(self.initialize) is types.FunctionType:
- if self._parent is None:
- raise ValueError("Need parent block to construct a set array with a function")
- for key in self._index:
- tmp = self._SetData(self, self._bounds)
- self._data[key] = tmp
- #
- if isinstance(key,tuple):
- tmpkey = key
- else:
- tmpkey = (key,)
- #
- # self.initialize: model, index -> list
- #
- if self.initialize.__code__.co_argcount == len(tmpkey)+1:
- rule_list = apply_indexed_rule(self, self.initialize, self._parent(), tmpkey)
- for val in rule_list:
- tmp._add( val )
- #
- # self.initialize: model, counter, index -> val
- #
- else:
- ctr=1
- val = apply_parameterized_indexed_rule(self, self.initialize, self._parent(), ctr, tmpkey)
- if val is None:
- raise ValueError("Set rule returned None instead of Set.Skip")
- while not (val.__class__ is tuple and val == Set.End):
- tmp._add( val )
- ctr += 1
- val = apply_parameterized_indexed_rule(self, self.initialize, self._parent(), ctr, tmpkey)
- if val is None:
- raise ValueError("Set rule returned None instead of Set.Skip")
- #
- # Treat self.initialize as an iterable
- #
- elif self.initialize is not None:
- if type(self.initialize) is not dict:
- for key in self._index:
- tmp = self._SetData(self, self._bounds)
- for val in self.initialize:
- tmp._add(val)
- self._data[key] = tmp
- else:
- for key in self.initialize:
- tmp = self._SetData(self, self._bounds)
- for val in self.initialize[key]:
- tmp._add(val)
- self._data[key] = tmp
- timer.report()
-
-
-
+from .set import (
+ process_setarg, set_options, simple_set_rule,
+ _SetDataBase, _SetData, Set, SetOf, IndexedSet,
+)
+
+from pyomo.common.deprecation import deprecation_warning
+deprecation_warning(
+ 'The pyomo.core.base.sets module is deprecated. '
+ 'Import Set objects from pyomo.core.base.set or pyomo.core.',
+ version='TBD')
diff --git a/pyomo/core/base/sos.py b/pyomo/core/base/sos.py
index 811d664cb61..5ac761f0506 100644
--- a/pyomo/core/base/sos.py
+++ b/pyomo/core/base/sos.py
@@ -21,7 +21,6 @@
from pyomo.core.base.component import ActiveComponentData
from pyomo.core.base.indexed_component import ActiveIndexedComponent, UnindexedComponent_set
from pyomo.core.base.set_types import PositiveIntegers
-from pyomo.core.base.sets import Set, _IndexedOrderedSetData
logger = logging.getLogger('pyomo.core')
@@ -236,7 +235,7 @@ def construct(self, data=None):
else:
if not self.is_indexed():
if self._sosSet is None:
- if getattr(self._sosVars.index_set(), 'ordered', False):
+ if getattr(self._sosVars.index_set(), 'isordered', lambda *x: False)():
_sosSet = {None: list(self._sosVars.index_set())}
else:
_sosSet = {None: set(self._sosVars.index_set())}
@@ -256,9 +255,7 @@ def construct(self, data=None):
ordered=False
if type(sosSet) is list or sosSet is UnindexedComponent_set or len(sosSet) == 1:
ordered=True
- if hasattr(sosSet, 'ordered') and sosSet.ordered:
- ordered=True
- if type(sosSet) is _IndexedOrderedSetData:
+ if hasattr(sosSet, 'isordered') and sosSet.isordered():
ordered=True
if not ordered:
raise ValueError("Cannot define a SOS over an unordered index.")
@@ -323,7 +320,7 @@ def pprint(self, ostream=None, verbose=False, prefix=""):
ostream.write(self.doc+'\n')
ostream.write(" ")
ostream.write("\tSize="+str(len(self._data.keys()))+' ')
- if isinstance(self._index,Set):
+ if self.is_indexed():
ostream.write("\tIndex= "+self._index.name+'\n')
else:
ostream.write("\n")
diff --git a/pyomo/core/base/template_expr.py b/pyomo/core/base/template_expr.py
index faf6a29f599..6d7b80e6c92 100644
--- a/pyomo/core/base/template_expr.py
+++ b/pyomo/core/base/template_expr.py
@@ -2,232 +2,18 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-import copy
-import logging
-from pyomo.core.expr import current as EXPR
-from pyomo.core.expr.numvalue import (
- NumericValue, native_numeric_types, as_numeric, value )
-import pyomo.core.base
-from pyomo.core.expr.expr_errors import TemplateExpressionError
+from pyomo.core.expr.template_expr import (
+ IndexTemplate, _GetItemIndexer, TemplateExpressionError
+)
-class IndexTemplate(NumericValue):
- """A "placeholder" for an index value in template expressions.
-
- This class is a placeholder for an index value within a template
- expression. That is, given the expression template for "m.x[i]",
- where `m.z` is indexed by `m.I`, the expression tree becomes:
-
- _GetItem:
- - m.x
- - IndexTemplate(_set=m.I, _value=None)
-
- Constructor Arguments:
- _set: the Set from which this IndexTemplate can take values
- """
-
- __slots__ = ('_set', '_value')
-
- def __init__(self, _set):
- self._set = _set
- self._value = None
-
- def __getstate__(self):
- """
- This method must be defined because this class uses slots.
- """
- state = super(IndexTemplate, self).__getstate__()
- for i in IndexTemplate.__slots__:
- state[i] = getattr(self, i)
- return state
-
- def __deepcopy__(self, memo):
- # Because we leverage deepcopy for expression cloning, we need
- # to see if this is a clone operation and *not* copy the
- # template.
- #
- # TODO: JDS: We should consider converting the IndexTemplate to
- # a proper Component: that way it could leverage the normal
- # logic of using the parent_block scope to dictate the behavior
- # of deepcopy.
- if '__block_scope__' in memo:
- memo[id(self)] = self
- return self
- #
- # "Normal" deepcopying outside the context of pyomo.
- #
- ans = memo[id(self)] = self.__class__.__new__(self.__class__)
- ans.__setstate__(copy.deepcopy(self.__getstate__(), memo))
- return ans
-
- # Note: because NONE of the slots on this class need to be edited,
- # we don't need to implement a specialized __setstate__ method.
-
- def __call__(self, exception=True):
- """
- Return the value of this object.
- """
- if self._value is None:
- if exception:
- raise TemplateExpressionError(self)
- return None
- else:
- return self._value
-
- def is_fixed(self):
- """
- Returns True because this value is fixed.
- """
- return True
-
- def is_constant(self):
- """
- Returns False because this cannot immediately be simplified.
- """
- return False
-
- def is_potentially_variable(self):
- """Returns False because index values cannot be variables.
-
- The IndexTemplate represents a placeholder for an index value
- for an IndexedComponent, and at the moment, Pyomo does not
- support variable indirection.
- """
- return False
-
- def __str__(self):
- return self.getname()
-
- def getname(self, fully_qualified=False, name_buffer=None, relative_to=None):
- return "{"+self._set.getname(fully_qualified, name_buffer, relative_to)+"}"
-
- def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False):
- return self.name
-
- def set_value(self, value):
- # It might be nice to check if the value is valid for the base
- # set, but things are tricky when the base set is not dimention
- # 1. So, for the time being, we will just "trust" the user.
- self._value = value
-
-
-class ReplaceTemplateExpression(EXPR.ExpressionReplacementVisitor):
-
- def __init__(self, substituter, *args):
- super(ReplaceTemplateExpression, self).__init__()
- self.substituter = substituter
- self.substituter_args = args
-
- def visiting_potential_leaf(self, node):
- if type(node) is EXPR.GetItemExpression or type(node) is IndexTemplate:
- return True, self.substituter(node, *self.substituter_args)
-
- return super(
- ReplaceTemplateExpression, self).visiting_potential_leaf(node)
-
-
-def substitute_template_expression(expr, substituter, *args):
- """Substitute IndexTemplates in an expression tree.
-
- This is a general utility function for walking the expression tree
- and subtituting all occurances of IndexTemplate and
- _GetItemExpression nodes.
-
- Args:
- substituter: method taking (expression, *args) and returning
- the new object
- *args: these are passed directly to the substituter
-
- Returns:
- a new expression tree with all substitutions done
- """
- visitor = ReplaceTemplateExpression(substituter, *args)
- return visitor.dfs_postorder_stack(expr)
-
-
-class _GetItemIndexer(object):
- # Note that this class makes the assumption that only one template
- # ever appears in an expression for a single index
-
- def __init__(self, expr):
- self._base = expr._base
- self._args = []
- _hash = [ id(self._base) ]
- for x in expr.args:
- try:
- logging.disable(logging.CRITICAL)
- val = value(x)
- self._args.append(val)
- _hash.append(val)
- except TemplateExpressionError as e:
- if x is not e.template:
- raise TypeError(
- "Cannot use the param substituter with expression "
- "templates\nwhere the component index has the "
- "IndexTemplate in an expression.\n\tFound in %s"
- % ( expr, ))
- self._args.append(e.template)
- _hash.append(id(e.template._set))
- finally:
- logging.disable(logging.NOTSET)
-
- self._hash = tuple(_hash)
-
- def nargs(self):
- return len(self._args)
-
- def arg(self, i):
- return self._args[i]
-
- def __hash__(self):
- return hash(self._hash)
-
- def __eq__(self, other):
- if type(other) is _GetItemIndexer:
- return self._hash == other._hash
- else:
- return False
-
- def __str__(self):
- return "%s[%s]" % (
- self._base.name, ','.join(str(x) for x in self._args) )
-
-
-def substitute_getitem_with_param(expr, _map):
- """A simple substituter to replace _GetItem nodes with mutable Params.
-
- This substituter will replace all _GetItemExpression nodes with a
- new Param. For example, this method will create expressions
- suitable for passing to DAE integrators
- """
- if type(expr) is IndexTemplate:
- return expr
-
- _id = _GetItemIndexer(expr)
- if _id not in _map:
- _map[_id] = pyomo.core.base.param.Param(mutable=True)
- _map[_id].construct()
- _args = []
- _map[_id]._name = "%s[%s]" % (
- expr._base.name, ','.join(str(x) for x in _id._args) )
- return _map[_id]
-
-
-def substitute_template_with_value(expr):
- """A simple substituter to expand expression for current template
-
- This substituter will replace all _GetItemExpression / IndexTemplate
- nodes with the actual _ComponentData based on the current value of
- the IndexTamplate(s)
-
- """
-
- if type(expr) is IndexTemplate:
- return as_numeric(expr())
- else:
- return expr.resolve_template()
+from pyomo.common.deprecation import deprecation_warning
+deprecation_warning(
+ 'The pyomo.core.base.template_expr module is deprecated. '
+ 'Import expression template objects from pyomo.core.expr.template_expr.',
+ version='TBD')
diff --git a/pyomo/core/base/units_container.py b/pyomo/core/base/units_container.py
index c80e2359d62..627f6c207d2 100644
--- a/pyomo/core/base/units_container.py
+++ b/pyomo/core/base/units_container.py
@@ -12,89 +12,109 @@
"""Pyomo Units Container Module
-.. warning:: This module is in beta and is not yet complete.
+This module provides support for including units within Pyomo expressions. This module
+can be used to define units on a model, and to check the consistency of units
+within the underlying constraints and expressions in the model. The module also
+supports conversion of units within expressions to support construction of constraints
+that contain embedded unit conversions.
-This module provides support for including units within Pyomo expressions, and provides
-methods for checking the consistency of units within those expresions.
+To use this package within your Pyomo model, you first need an instance of a
+PyomoUnitsContainer. You can use the module level instance already defined as
+'units'. This object 'contains' the units - that is, you can access units on
+this module using common notation.
-To use this package within your Pyomo model, you first need an instance of a PyomoUnitsContainer.
-You can use the module level instance called `units` and use the pre-defined units in expressions or
-components.
+ .. doctest::
+
+ >>> from pyomo.environ import units as u
+ >>> print(3.0*u.kg)
+ 3.0*kg
-Examples:
- To use a unit within an expression, simply reference the desired unit as an attribute on the
- module singleton `units`.
+Units can be assigned to Var, Param, and ExternalFunction components, and can
+be used directly in expressions (e.g., defining constraints). You can also
+verify that the units are consistent on a model, or on individual components
+like the objective function, constraint, or expression using
+`assert_units_consistent` (from pyomo.util.check_units).
+There are other methods there that may be helpful for verifying correct units on a model.
.. doctest::
- >>> from pyomo.environ import ConcreteModel, Var, Objective, units # import components and 'units' instance
+ >>> from pyomo.environ import ConcreteModel, Var, Objective
+ >>> from pyomo.environ import units as u
+ >>> from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent, check_units_equivalent
>>> model = ConcreteModel()
- >>> model.acc = Var()
- >>> model.obj = Objective(expr=(model.acc*units.m/units.s**2 - 9.81*units.m/units.s**2)**2)
- >>> print(units.get_units(model.obj.expr))
+ >>> model.acc = Var(initialize=5.0, units=u.m/u.s**2)
+ >>> model.obj = Objective(expr=(model.acc - 9.81*u.m/u.s**2)**2)
+ >>> assert_units_consistent(model.obj) # raise exc if units invalid on obj
+ >>> assert_units_consistent(model) # raise exc if units invalid anywhere on the model
+ >>> assert_units_equivalent(model.obj.expr, u.m**2/u.s**4) # raise exc if units not equivalent
+ >>> print(u.get_units(model.obj.expr)) # print the units on the objective
m ** 2 / s ** 4
-
-.. note:: This module has a module level instance of a PyomoUnitsContainer called `units` that you
- should use for creating, retreiving, and checking units
-
-.. note:: This is a work in progress. Once the components units implementations are complete, the units will eventually
- work similar to the following.
-
- .. code-block:: python
-
- from pyomo.environ import ConcreteModel, Var, Objective, units
- model = ConcreteModel()
- model.x = Var(units=units.kg/units.m)
- model.obj = Objective(expr=(model.x - 97.2*units.kg/units.m)**2)
-
-Notes:
- * The implementation is currently based on the `pint `_
- package and supports all the units that are supported by pint.
- * The list of units that are supported by pint can be found at
- the following url: https://github.com/hgrecco/pint/blob/master/pint/default_en.txt
- * Currently, we do NOT test units of unary functions that include native data types
- e.g. explicit float (3.0) since these are removed by the expression system
- before getting to the code that checks the units.
-
-.. note:: In this implementation of units, "offset" units for temperature are not supported within
- expressions (i.e. the non-absolute temperature units including degrees C and degrees F).
- This is because there are many non-obvious combinations that are not allowable. This
- concern becomes clear if you first convert the non-absolute temperature units to absolute
- and then perform the operation. For example, if you write 30 degC + 30 degC == 60 degC,
- but convert each entry to Kelvin, the expression is not true (i.e., 303.15 K + 303.15 K
- is not equal to 333.15 K). Therefore, there are several operations that are not allowable
- with non-absolute units, including addition, multiplication, and division.
-
- Please see the pint documentation `here `_
- for more discussion. While pint implements "delta" units (e.g., delta_degC) to support correct
- unit conversions, it can be difficult to identify and guarantee valid operations in a general
- algebraic modeling environment. While future work may support units with relative scale, the current
- implementation requires use of absolute temperature units (i.e. K and R) within expressions and
- a direct conversion of numeric values using specific functions for converting input data and reporting.
+ >>> print(check_units_equivalent(model.acc, u.m/u.s**2))
+ True
+
+The implementation is currently based on the `pint
+`_ package and supports all the units that
+are supported by pint. The list of units that are supported by pint
+can be found at the following url:
+https://github.com/hgrecco/pint/blob/master/pint/default_en.txt.
+
+If you need a unit that is not in the standard set of defined units,
+you can create your own units by adding to the unit definitions within
+pint. See :py:meth:`PyomoUnitsContainer.load_definitions_from_file` or
+:py:meth:`PyomoUnitsContainer.load_definitions_from_strings` for more
+information.
+
+.. note:: In this implementation of units, "offset" units for
+ temperature are not supported within expressions (i.e. the
+ non-absolute temperature units including degrees C and
+ degrees F). This is because there are many non-obvious
+ combinations that are not allowable. This concern becomes
+ clear if you first convert the non-absolute temperature
+ units to absolute and then perform the operation. For
+ example, if you write 30 degC + 30 degC == 60 degC, but
+ convert each entry to Kelvin, the expression is not true
+ (i.e., 303.15 K + 303.15 K is not equal to 333.15
+ K). Therefore, there are several operations that are not
+ allowable with non-absolute units, including addition,
+ multiplication, and division.
+
+ This module does support conversion of offset units to
+ absolute units numerically, using convert_value_K_to_C,
+ convert_value_C_to_K, convert_value_R_to_F,
+ convert_value_F_to_R. These are useful for converting input
+ data to absolute units, and for converting data to
+ convenient units for reporting.
+
+ Please see the pint documentation `here
+ `_ for more
+ discussion. While pint implements "delta" units (e.g.,
+ delta_degC) to support correct unit conversions, it can be
+ difficult to identify and guarantee valid operations in a
+ general algebraic modeling environment. While future work
+ may support units with relative scale, the current
+ implementation requires use of absolute temperature units
+ (i.e. K and R) within expressions and a direct conversion of
+ numeric values using specific functions for converting input
+ data and reporting.
"""
# TODO
-# * implement specific functions for converting numeric values of absolute temperatures
-# * implement convert functionality
# * create a new pint unit definition file (and load from that file)
-# since the precision in pint seems insufficient for 1e-8 constraint tolerances
-# * clean up use of unit and units in the naming
-# * implement and test pickling and un-pickling
-# * implement ignore_unit(x, expected_unit) that returns a dimensionless version of the expression
-# (Note that this may need to be a special expression object that may appear in the tree)
-# * Add units capabilities to Var and Param
-# * Investigate issues surrounding absolute and relative temperatures (delta units)
-# * Implement external function interface that specifies units for the arguments and the function itself
-
-
-from pyomo.core.expr.numvalue import NumericValue, nonpyomo_leaf_types, value
-from pyomo.core.base.template_expr import IndexTemplate
-from pyomo.core.expr import current as expr
+# since the precision in pint seems insufficient for 1e-8 constraint tolerances
+# * Investigate when we can and cannot handle offset units and expand capabilities if possible
+# * Further investigate issues surrounding absolute and relative temperatures (delta units)
+# * Extend external function interface to support units for the arguments in addition to the function itself
+
import six
-try:
- import pint as pint_module
-except ImportError:
- pint_module = None
+
+from pyomo.common.dependencies import attempt_import
+from pyomo.core.expr.numvalue import NumericValue, nonpyomo_leaf_types, value, native_numeric_types
+from pyomo.core.expr.template_expr import IndexTemplate
+from pyomo.core.expr import current as EXPR
+
+pint_module, pint_available = attempt_import(
+ 'pint', defer_check=True, error_message='The "pint" package failed '
+ 'to import. This package is necessary to use Pyomo units.')
class UnitsError(Exception):
"""
@@ -111,7 +131,7 @@ class InconsistentUnitsError(UnitsError):
"""
An exception indicating that inconsistent units are present on an expression.
- E.g., x == y, where x is in units of units.kg and y is in units of units.meter
+ E.g., x == y, where x is in units of kg and y is in units of meter
"""
def __init__(self, exp1, exp2, msg):
msg = '{}: {} not compatible with {}.'.format(str(msg), str(exp1), str(exp2))
@@ -123,7 +143,7 @@ class _PyomoUnit(NumericValue):
Users should not create instances of _PyomoUnit directly, but rather access
units as attributes on an instance of a :class:`PyomoUnitsContainer`.
- This module contains a global PyomoUnitContainer :py:data:`units`.
+ This module contains a global PyomoUnitsContainer object :py:data:`units`.
See module documentation for more information.
"""
def __init__(self, pint_unit, pint_registry):
@@ -368,12 +388,13 @@ def pprint(self, ostream=None, verbose=False):
# ostream.write('{:!~s}'.format(self._pint_unit))
-class _UnitExtractionVisitor(expr.StreamBasedExpressionVisitor):
+class UnitExtractionVisitor(EXPR.StreamBasedExpressionVisitor):
def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12):
"""
Visitor class used to determine units of an expression. Do not use
- this class directly, but rather use :func:`get_units` or
- :func:`check_units_consistency`.
+ this class directly, but rather use
+ "py:meth:`PyomoUnitsContainer.assert_units_consistent`
+ or :py:meth:`PyomoUnitsContainer.get_units`
Parameters
----------
@@ -384,7 +405,7 @@ def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12):
units_equivalence_tolerance : float (default 1e-12)
Floating point tolerance used when deciding if units are equivalent
- or not. (It can happen that units
+ or not.
Notes
-----
@@ -396,7 +417,7 @@ def __init__(self, pyomo_units_container, units_equivalence_tolerance=1e-12):
particular method that should be called to return the units of the node based
on the units of its child arguments. This map is used in exitNode.
"""
- super(_UnitExtractionVisitor, self).__init__()
+ super(UnitExtractionVisitor, self).__init__()
self._pyomo_units_container = pyomo_units_container
self._pint_registry = self._pyomo_units_container._pint_registry
self._units_equivalence_tolerance = units_equivalence_tolerance
@@ -437,7 +458,7 @@ def _pint_units_equivalent(self, lhs, rhs):
: bool
True if they are equivalent, and False otherwise
"""
- if lhs == rhs:
+ if lhs is rhs:
# units are the same objects (or both None)
return True
elif lhs is None:
@@ -737,16 +758,53 @@ def _get_unit_for_single_child(self, node, list_of_unit_tuples):
"""
assert len(list_of_unit_tuples) == 1
- pyomo_unit = list_of_unit_tuples[0][0]
- pint_unit = list_of_unit_tuples[0][1]
+ pyomo_unit, pint_unit = list_of_unit_tuples[0]
return (pyomo_unit, pint_unit)
+ def _get_units_ExternalFunction(self, node, list_of_unit_tuples):
+ """
+ Check to make sure that any child arguments are consistent with
+ arg_units return the value from node.get_units() This
+ was written for ExternalFunctionExpression where the external
+ function has units assigned to its return value and arguments
+
+ Parameters
+ ----------
+ node : Pyomo expression node
+ The parent node of the children
+
+ list_of_unit_tuples : list
+ This is a list of tuples (one for each of the children) where each tuple
+ is a PyomoUnit, pint unit pair
+
+ Returns
+ -------
+ : tuple (pyomo_unit, pint_unit)
+
+ """
+ # get the list of arg_units
+ arg_units = node.get_arg_units()
+ if arg_units is None:
+ # they should all be dimensionless
+ arg_units = [None]*len(list_of_unit_tuples)
+
+ for (arg_unit, unit_tuple) in zip(arg_units, list_of_unit_tuples):
+ pyomo_arg_unit, pint_arg_unit = self._pyomo_units_container._get_units_tuple(arg_unit)
+ pint_child_unit = unit_tuple[1]
+ print(pint_arg_unit, pint_child_unit)
+ if not self._pint_units_equivalent(pint_arg_unit, pint_child_unit):
+ raise InconsistentUnitsError(arg_unit, unit_tuple[0], 'Inconsistent units found in ExternalFunction.')
+
+ # now return the units in node.get_units
+ return self._pyomo_units_container._get_units_tuple(node.get_units())
+
def _get_dimensionless_with_dimensionless_children(self, node, list_of_unit_tuples):
"""
- Check to make sure that any child arguments are unitless / dimensionless (for functions like exp())
- and return (None, None) if successful. Although odd that this does not just return
- a boolean, it is done this way to match the signature of the other methods used to get
- units for expressions.
+ Check to make sure that any child arguments are unitless /
+ dimensionless (for functions like exp()) and return (None,
+ None) if successful. Although odd that this does not just
+ return a boolean, it is done this way to match the signature
+ of the other methods used to get units for expressions.
Parameters
----------
@@ -760,6 +818,7 @@ def _get_dimensionless_with_dimensionless_children(self, node, list_of_unit_tupl
Returns
-------
: tuple (None, None)
+
"""
for (pyomo_unit, pint_unit) in list_of_unit_tuples:
if not self._pint_unit_equivalent_to_dimensionless(pint_unit):
@@ -884,8 +943,7 @@ def _get_dimensionless_with_radians_child(self, node, list_of_unit_tuples):
"""
assert len(list_of_unit_tuples) == 1
- pyomo_unit = list_of_unit_tuples[0][0]
- pint_unit = list_of_unit_tuples[0][1]
+ pyomo_unit, pint_unit = list_of_unit_tuples[0]
if pint_unit is None:
assert pyomo_unit is None
# unitless, all is OK
@@ -920,8 +978,7 @@ def _get_radians_with_dimensionless_child(self, node, list_of_unit_tuples):
"""
assert len(list_of_unit_tuples) == 1
- pyomo_unit = list_of_unit_tuples[0][0]
- pint_unit = list_of_unit_tuples[0][1]
+ pyomo_unit, pint_unit = list_of_unit_tuples[0]
if not self._pint_unit_equivalent_to_dimensionless(pint_unit):
raise UnitsError('Expected dimensionless argument to function in expression {},'
' but found {}'.format(
@@ -957,32 +1014,32 @@ def _get_unit_sqrt(self, node, list_of_unit_tuples):
return (list_of_unit_tuples[0][0]**0.5, list_of_unit_tuples[0][1]**0.5)
node_type_method_map = {
- expr.EqualityExpression: _get_unit_for_equivalent_children,
- expr.InequalityExpression: _get_unit_for_equivalent_children,
- expr.RangedExpression: _get_unit_for_equivalent_children,
- expr.SumExpression: _get_unit_for_equivalent_children,
- expr.NPV_SumExpression: _get_unit_for_equivalent_children,
- expr.ProductExpression: _get_unit_for_product,
- expr.MonomialTermExpression: _get_unit_for_product,
- expr.NPV_ProductExpression: _get_unit_for_product,
- expr.DivisionExpression: _get_unit_for_division,
- expr.NPV_DivisionExpression: _get_unit_for_division,
- expr.ReciprocalExpression: _get_unit_for_reciprocal,
- expr.NPV_ReciprocalExpression: _get_unit_for_reciprocal,
- expr.PowExpression: _get_unit_for_pow,
- expr.NPV_PowExpression: _get_unit_for_pow,
- expr.NegationExpression: _get_unit_for_single_child,
- expr.NPV_NegationExpression: _get_unit_for_single_child,
- expr.AbsExpression: _get_unit_for_single_child,
- expr.NPV_AbsExpression: _get_unit_for_single_child,
- expr.UnaryFunctionExpression: _get_unit_for_unary_function,
- expr.NPV_UnaryFunctionExpression: _get_unit_for_unary_function,
- expr.Expr_ifExpression: _get_unit_for_expr_if,
+ EXPR.EqualityExpression: _get_unit_for_equivalent_children,
+ EXPR.InequalityExpression: _get_unit_for_equivalent_children,
+ EXPR.RangedExpression: _get_unit_for_equivalent_children,
+ EXPR.SumExpression: _get_unit_for_equivalent_children,
+ EXPR.NPV_SumExpression: _get_unit_for_equivalent_children,
+ EXPR.ProductExpression: _get_unit_for_product,
+ EXPR.MonomialTermExpression: _get_unit_for_product,
+ EXPR.NPV_ProductExpression: _get_unit_for_product,
+ EXPR.DivisionExpression: _get_unit_for_division,
+ EXPR.NPV_DivisionExpression: _get_unit_for_division,
+ EXPR.ReciprocalExpression: _get_unit_for_reciprocal,
+ EXPR.NPV_ReciprocalExpression: _get_unit_for_reciprocal,
+ EXPR.PowExpression: _get_unit_for_pow,
+ EXPR.NPV_PowExpression: _get_unit_for_pow,
+ EXPR.NegationExpression: _get_unit_for_single_child,
+ EXPR.NPV_NegationExpression: _get_unit_for_single_child,
+ EXPR.AbsExpression: _get_unit_for_single_child,
+ EXPR.NPV_AbsExpression: _get_unit_for_single_child,
+ EXPR.UnaryFunctionExpression: _get_unit_for_unary_function,
+ EXPR.NPV_UnaryFunctionExpression: _get_unit_for_unary_function,
+ EXPR.Expr_ifExpression: _get_unit_for_expr_if,
IndexTemplate: _get_dimensionless_no_children,
- expr.GetItemExpression: _get_dimensionless_with_dimensionless_children,
- expr.ExternalFunctionExpression: _get_dimensionless_with_dimensionless_children,
- expr.NPV_ExternalFunctionExpression: _get_dimensionless_with_dimensionless_children,
- expr.LinearExpression: _get_unit_for_linear_expression
+ EXPR.GetItemExpression: _get_dimensionless_with_dimensionless_children,
+ EXPR.ExternalFunctionExpression: _get_units_ExternalFunction,
+ EXPR.NPV_ExternalFunctionExpression: _get_units_ExternalFunction,
+ EXPR.LinearExpression: _get_unit_for_linear_expression
}
unary_function_method_map = {
@@ -1013,13 +1070,27 @@ def exitNode(self, node, data):
# first check if the node is a leaf
if type(node) in nonpyomo_leaf_types \
or not node.is_expression_type():
- if isinstance(node, _PyomoUnit):
+ if type(node) in native_numeric_types:
+ # this is a number - return dimensionless
+ return (None, None)
+ elif isinstance(node, _PyomoUnit):
return (node, node._get_pint_unit())
-
- # TODO: Check for Var or Param and return their units...
+ # CDL using the hasattr code below since it is more general
+ #elif isinstance(node, _VarData) or \
+ # isinstance(node, _ParamData):
+ # pyomo_unit, pint_unit = self._pyomo_units_container._get_units_tuple(node.get_units())
+ # return (pyomo_unit, pint_unit)
+ elif hasattr(node, 'get_units'):
+ pyomo_unit, pint_unit = self._pyomo_units_container._get_units_tuple(node.get_units())
+ return (pyomo_unit, pint_unit)
+
# I have a leaf, but this is not a PyomoUnit - (treat as dimensionless)
return (None, None)
+ # not a leaf - check if it is a named expression
+ if hasattr(node, 'is_named_expression_type') and node.is_named_expression_type():
+ return self._get_unit_for_single_child(node, data)
+
# not a leaf - get the appropriate function for type of the node
node_func = self.node_type_method_map.get(type(node), None)
if node_func is not None:
@@ -1042,40 +1113,93 @@ def exitNode(self, node, data):
class PyomoUnitsContainer(object):
"""Class that is used to create and contain units in Pyomo.
- This is the class that is used to create, contain, and interact with units in Pyomo.
- The module (:mod:`pyomo.core.base.units_container`) also contains a module attribute
- called `units` that is a singleton instance of a PyomoUnitsContainer. This singleton should be
- used instead of creating your own instance of a :py:class:`PyomoUnitsContainer`.
- For an overview of the usage of this class, see the module documentation
+ This is the class that is used to create, contain, and interact
+ with units in Pyomo. The module
+ (:mod:`pyomo.core.base.units_container`) also contains a module
+ level units container :py:data:`units` that is an instance of a
+ PyomoUnitsContainer. This module instance should typically be used
+ instead of creating your own instance of a
+ :py:class:`PyomoUnitsContainer`. For an overview of the usage of
+ this class, see the module documentation
(:mod:`pyomo.core.base.units_container`)
- This class is based on the "pint" module. Documentation for available units can be found
- at the following url: https://github.com/hgrecco/pint/blob/master/pint/default_en.txt
+ This class is based on the "pint" module. Documentation for
+ available units can be found at the following url:
+ https://github.com/hgrecco/pint/blob/master/pint/default_en.txt
+
+ .. note::
+
+ Pre-defined units can be accessed through attributes on the
+ PyomoUnitsContainer class; however, these attributes are created
+ dynamically through the __getattr__ method, and are not present
+ on the class until they are requested.
- Note: Pre-defined units can be accessed through attributes on the PyomoUnitsContainer
- class; however, these attributes are created dynamically through the __getattr__ method,
- and are not present on the class until they are requested.
"""
def __init__(self):
- """Create a PyomoUnitsContainer instance. """
- # Developers: Do not interact with this attribute directly, but instead
- # access through the property _pint_registry since that is where the import
- # of the 'pint' module is checked
- self.__pint_registry = None
-
- @property
- def _pint_registry(self):
- """ Return the pint.UnitsRegistry instance corresponding to this container. """
- if pint_module is None:
- # pint was not imported for some reason
- raise RuntimeError("The PyomoUnitsContainer in the units_container module requires"
- " the package 'pint', but this package could not be imported."
- " Please make sure you have 'pint' installed.")
-
- if self.__pint_registry is None:
- self.__pint_registry = pint_module.UnitRegistry()
-
- return self.__pint_registry
+ """Create a PyomoUnitsContainer instance."""
+ self._pint_registry = pint_module.UnitRegistry()
+
+ def load_definitions_from_file(self, definition_file):
+ """Load new units definitions from a file
+
+ This method loads additional units definitions from a user
+ specified definition file. An example of a definitions file
+ can be found at:
+ https://github.com/hgrecco/pint/blob/master/pint/default_en.txt
+
+ If we have a file called ``my_additional_units.txt`` with the
+ following lines::
+
+ USD = [currency]
+
+ Then we can add this to the container with:
+
+ .. doctest::
+ :hide:
+
+ # get a local units object (to avoid duplicate registration
+ # with the example in load_definitions_from_strings)
+ >>> import pyomo.core.base.units_container as _units
+ >>> u = _units.PyomoUnitsContainer()
+ >>> with open('my_additional_units.txt', 'w') as FILE:
+ ... tmp = FILE.write("USD = [currency]\\n")
+
+ .. doctest::
+
+ >>> u.load_definitions_from_file('my_additional_units.txt')
+ >>> print(u.USD)
+ USD
+
+ """
+ self._pint_registry.load_definitions(definition_file)
+
+ def load_definitions_from_strings(self, definition_string_list):
+ """Load new units definitions from strings
+
+ This method loads additional units definitions from a list of
+ strings (one for each line). An example of the definitions
+ strings can be found at:
+ https://github.com/hgrecco/pint/blob/master/pint/default_en.txt
+
+ For example, to add the currency dimension and US dollars as a
+ unit, use
+
+ .. doctest::
+ :hide:
+
+ # get a local units object (to avoid duplicate registration
+ # with the example in load_definitions_from_file)
+ >>> import pyomo.core.base.units_container as _units
+ >>> u = _units.PyomoUnitsContainer()
+
+ .. doctest::
+
+ >>> u.load_definitions_from_strings(['USD = [currency]'])
+ >>> print(u.USD)
+ USD
+
+ """
+ self._pint_registry.load_definitions(definition_string_list)
def __getattr__(self, item):
"""
@@ -1119,10 +1243,7 @@ def __getattr__(self, item):
if pint_unit is None:
raise AttributeError('Attribute {0} not found.'.format(str(item)))
- def create_PyomoUnit(self, pint_unit):
- return _PyomoUnit(pint_unit, self._pint_registry)
-
- # TODO: Add support to specify a units definition file instead of this programatic interface
+ # We added support to specify a units definition file instead of this programmatic interface
# def create_new_base_dimension(self, dimension_name, base_unit_name):
# """
# Use this method to create a new base dimension (e.g. a new dimension other than Length, Mass) for the unit manager.
@@ -1189,13 +1310,21 @@ def _get_units_tuple(self, expr):
-------
: tuple (PyomoUnit, pint unit)
"""
- pyomo_unit, pint_unit = _UnitExtractionVisitor(self).walk_expression(expr=expr)
+ if expr is None:
+ return (None, None)
+ pyomo_unit, pint_unit = UnitExtractionVisitor(self).walk_expression(expr=expr)
+ if pint_unit == self._pint_registry.dimensionless:
+ pint_unit = None
+ if pyomo_unit is self.dimensionless:
+ pyomo_unit = None
+
if pint_unit is not None:
assert pyomo_unit is not None
if type(pint_unit) != type(self._pint_registry.kg):
pint_unit = pint_unit.units
return (_PyomoUnit(pint_unit, self._pint_registry), pint_unit)
+
return (None, None)
def get_units(self, expr):
@@ -1225,93 +1354,155 @@ def get_units(self, expr):
# visitor code to only track the pint units
return pyomo_unit
- def check_units_consistency(self, expr, allow_exceptions=True):
+ def _pint_convert_temp_from_to(self, numerical_value, pint_from_units, pint_to_units):
+ if type(numerical_value) not in native_numeric_types:
+ raise UnitsError('Conversion routines for absolute and relative temperatures require a numerical value only.'
+ ' Pyomo objects (Var, Param, expressions) are not supported. Please use value(x) to'
+ ' extract the numerical value if necessary.')
+
+ src_quantity = self._pint_registry.Quantity(numerical_value, pint_from_units)
+ dest_quantity = src_quantity.to(pint_to_units)
+ return dest_quantity.magnitude
+
+ def convert_temp_K_to_C(self, value_in_K):
+ """
+ Convert a value in Kelvin to degrees Celsius. Note that this method
+ converts a numerical value only. If you need temperature
+ conversions in expressions, please work in absolute
+ temperatures only.
+ """
+ return self._pint_convert_temp_from_to(value_in_K, self._pint_registry.K, self._pint_registry.degC)
+
+ def convert_temp_C_to_K(self, value_in_C):
+ """
+ Convert a value in degrees Celsius to Kelvin. Note that this
+ method converts a numerical value only. If you need
+ temperature conversions in expressions, please work in
+ absolute temperatures only.
+ """
+ return self._pint_convert_temp_from_to(value_in_C, self._pint_registry.degC, self._pint_registry.K)
+
+ def convert_temp_R_to_F(self, value_in_R):
+ """
+ Convert a value in Rankine to degrees Fahrenheit. Note that
+ this method converts a numerical value only. If you need
+ temperature conversions in expressions, please work in
+ absolute temperatures only.
+ """
+ return self._pint_convert_temp_from_to(value_in_R, self._pint_registry.rankine, self._pint_registry.degF)
+
+ def convert_temp_F_to_R(self, value_in_F):
+ """
+ Convert a value in degrees Fahrenheit to Rankine. Note that
+ this method converts a numerical value only. If you need
+ temperature conversions in expressions, please work in
+ absolute temperatures only.
+ """
+ return self._pint_convert_temp_from_to(value_in_F, self._pint_registry.degF, self._pint_registry.rankine)
+
+ def convert(self, src, to_units=None):
"""
- Check the consistency of the units within an expression. IF allow_exceptions is False,
- then this function swallows the exception and returns only True or False. Otherwise,
- it will throw an exception if the units are inconsistent.
+ This method returns an expression that contains the
+ explicit conversion from one unit to another.
Parameters
----------
- expr : Pyomo expression
- The source expression to check.
-
- allow_exceptions: bool
- True if you want any exceptions to be thrown, False if you only want a boolean
- (and the exception is ignored).
+ src : Pyomo expression
+ The source value that will be converted. This could be a
+ Pyomo Var, Pyomo Param, or a more complex expression.
+ to_units : Pyomo units expression
+ The desired target units for the new expression
Returns
-------
- : bool
- True if units are consistent, and False if not
-
- Raises
- ------
- :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`
-
+ ret : Pyomo expression
"""
- try:
- pyomo_unit, pint_unit = self._get_units_tuple(expr=expr)
- except (UnitsError, InconsistentUnitsError):
- if allow_exceptions:
- raise
- return False
+ src_pyomo_unit, src_pint_unit = self._get_units_tuple(src)
+ to_pyomo_unit, to_pint_unit = self._get_units_tuple(to_units)
+
+ if src_pyomo_unit is None and to_pyomo_unit is None:
+ return src
- return True
+ # no offsets, we only need a factor to convert between the two
+ fac_b_src, base_units_src = self._pint_registry.get_base_units(src_pint_unit, check_nonmult=True)
+ fac_b_dest, base_units_dest = self._pint_registry.get_base_units(to_pint_unit, check_nonmult=True)
+
+ if base_units_src != base_units_dest:
+ raise InconsistentUnitsError(src_pint_unit, to_pint_unit,
+ 'Error in convert: units not compatible.')
+ return fac_b_src/fac_b_dest*to_pyomo_unit/src_pyomo_unit*src
- def check_units_equivalent(self, expr1, expr2):
+ def convert_value(self, num_value, from_units=None, to_units=None):
"""
- Check if the units associated with each of the expressions are equivalent.
+ This method performs explicit conversion of a numerical value
+ from one unit to another, and returns the new value.
+
+ The argument "num_value" must be a native numeric type (e.g. float).
+ Note that this method returns a numerical value only, and not an
+ expression with units.
Parameters
----------
- expr1 : Pyomo expression
- The first expression.
- expr2 : Pyomo expression
- The second expression.
+ num_value : float or other native numeric type
+ The value that will be converted
+ from_units : Pyomo units expression
+ The units to convert from
+ to_units : Pyomo units expression
+ The units to convert to
Returns
-------
- : bool
- True if the expressions have equivalent units, False otherwise.
-
- Raises
- ------
- :py:class:`pyomo.core.base.units_container.UnitsError`, :py:class:`pyomo.core.base.units_container.InconsistentUnitsError`
+ float : The converted value
"""
- pyomo_unit1, pint_unit1 = self._get_units_tuple(expr1)
- pyomo_unit2, pint_unit2 = self._get_units_tuple(expr2)
- return _UnitExtractionVisitor(self)._pint_units_equivalent(pint_unit1, pint_unit2)
+ if type(num_value) not in native_numeric_types:
+ raise UnitsError('The argument "num_value" in convert_value must be a native numeric type, but'
+ ' instead type {} was found.'.format(type(num_value)))
+
+ from_pyomo_unit, from_pint_unit = self._get_units_tuple(from_units)
+ to_pyomo_unit, to_pint_unit = self._get_units_tuple(to_units)
- # def convert_value(self, src_value, from_units=None, to_units=None):
- # """
- # This method performs explicit conversion of a numerical value in
- # one unit to a numerical value in another unit.
- #
- # Parameters
- # ----------
- # src_value : float
- # The numeric value that will be converted
- # from_units : Pyomo expression with units
- # The source units for value
- # to_units : Pyomo expression with units
- # The desired target units for the new value
- #
- # Returns
- # -------
- # float : The new value (src_value converted from from_units to to_units)
- # """
- # from_pyomo_unit, from_pint_unit = self._get_units_tuple(from_units)
- # to_pyomo_unit, to_pint_unit = self._get_units_tuple(to_units)
- #
- # src_quantity = src_value * pint_src_unit
- # dest_quantity = src_quantity.to(pint_dest_unit)
- # return dest_quantity.magnitude
+ # ToDo: This check may be overkill - pint will raise an error that may be sufficient
+ fac_b_src, base_units_src = self._pint_registry.get_base_units(from_pint_unit, check_nonmult=True)
+ fac_b_dest, base_units_dest = self._pint_registry.get_base_units(to_pint_unit, check_nonmult=True)
+ if base_units_src != base_units_dest:
+ raise UnitsError('Cannot convert {0:s} to {1:s}. Units are not compatible.'.format(str(from_pyomo_unit), str(to_pyomo_unit)))
-#: Module level instance of a PyomoUnitsContainer to use for all units within a Pyomo model
-# See module level documentation for an example.
-units = PyomoUnitsContainer()
+ # convert the values
+ src_quantity = num_value * from_pint_unit
+ dest_quantity = src_quantity.to(to_pint_unit)
+ return dest_quantity.magnitude
+class DeferredUnitsSingleton(PyomoUnitsContainer):
+ """A class supporting deferred interrogation of pint_available.
+
+ This class supports creating a module-level singleton, but deferring
+ the interrogation of the pint_available flag until the first time
+ the object is actually used. If pint is available, this instance
+ object is replaced by an actual PyomoUnitsContainer. Otherwise this
+ leverages the pint_module to raise an (informative)
+ DeferredImportError exception.
+
+ """
+
+ def __init__(self):
+ # do NOT call the base class __init__ so that the pint_module is
+ # not accessed
+ pass
+
+ def __getattribute__(self, attr):
+ if pint_available:
+ self.__class__ = PyomoUnitsContainer
+ self.__init__()
+ return getattr(self, attr)
+ else:
+ # Generate the ImportError
+ return getattr(pint_module, attr)
+
+# Define a module level instance of a PyomoUnitsContainer to use for
+# all units within a Pyomo model. If pint is not available, this will
+# cause an error at the first usage. See module level documentation for
+# an example.
+units = DeferredUnitsSingleton()
diff --git a/pyomo/core/base/util.py b/pyomo/core/base/util.py
index 516a744c003..24b982646c6 100644
--- a/pyomo/core/base/util.py
+++ b/pyomo/core/base/util.py
@@ -2,8 +2,8 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
@@ -16,7 +16,7 @@
import inspect
import six
-from six import iteritems
+from six import iteritems, iterkeys
if six.PY2:
getargspec = inspect.getargspec
@@ -111,7 +111,7 @@ def disable_methods(methods):
that override key methods to raise exceptions. When the construct()
method is called, the class instance changes type back to the
original scalar component and the full class functionality is
- restored. The prevents most class methods from having to begin with
+ restored. This prevents most class methods from having to begin with
"`if not self.parent_component()._constructed: raise RuntimeError`"
"""
def class_decorator(cls):
@@ -119,6 +119,8 @@ def class_decorator(cls):
base = cls.__bases__[0]
def construct(self, data=None):
+ if hasattr(self, '_name') and self._name == self.__class__.__name__:
+ self._name = base.__name__
self.__class__ = base
return base.construct(self, data)
construct.__doc__ = base.construct.__doc__
@@ -152,6 +154,14 @@ def Initializer(init,
allow_generators=False,
treat_sequences_as_mappings=True,
arg_not_specified=None):
+ """Standardized processing of Component keyword arguments
+
+ Component keyword arguments accept a number of possible inputs, from
+ scalars to dictionaries, to functions (rules) and generators. This
+ function standardizes the processing of keyword arguments and
+ returns "initializer classes" that are specialized to the specific
+ data type provided.
+ """
if init.__class__ in native_types:
if init is arg_not_specified:
return None
@@ -176,27 +186,62 @@ def Initializer(init,
return ItemInitializer(init)
else:
return ConstantInitializer(init)
- elif inspect.isgenerator(init) or hasattr(init, 'next') \
- or hasattr(init, '__next__'):
+ elif inspect.isgenerator(init) or (
+ ( hasattr(init, 'next') or hasattr(init, '__next__') )
+ and not hasattr(init, '__len__')):
+ # This catches generators and iterators (like enumerate()), but
+ # skips "reusable" iterators like range() as well as Pyomo
+ # (finite) Set objects.
if not allow_generators:
raise ValueError("Generators are not allowed")
- return ConstantInitializer(init)
+ # Deepcopying generators is problematic (e.g., it generates a
+ # segfault in pypy3 7.3.0). We will immediately expand the
+ # generator into a tuple and then store it as a constant.
+ return ConstantInitializer(tuple(init))
else:
return ConstantInitializer(init)
+
class InitializerBase(object):
+ """Base class for all Initializer objects"""
__slots__ = ()
verified = False
def __getstate__(self):
+ """Class serializer
+
+ This class must declare __getstate__ because it is slotized.
+ This implementation should be sufficient for simple derived
+ classes (where __slots__ are only declared on the most derived
+ class).
+ """
return {k:getattr(self,k) for k in self.__slots__}
def __setstate__(self, state):
for key, val in iteritems(state):
object.__setattr__(self, key, val)
+ def constant(self):
+ """Return True if this initializer is constant across all indices"""
+ return False
+
+ def contains_indices(self):
+ """Return True if this initializer contains embedded indices"""
+ return False
+
+ def indices(self):
+ """Return a generator over the embedded indices
+
+ This will raise a RuntimeError if this initializer does not
+ contain embedded indices
+ """
+ raise RuntimeError("Initializer %s does not contain embedded indices"
+ % (type(self).__name__,))
+
+
class ConstantInitializer(InitializerBase):
+ """Initializer for constant values"""
__slots__ = ('val','verified')
def __init__(self, val):
@@ -209,7 +254,9 @@ def __call__(self, parent, idx):
def constant(self):
return True
+
class ItemInitializer(InitializerBase):
+ """Initializer for dict-like values supporting __getitem__()"""
__slots__ = ('_dict',)
def __init__(self, _dict):
@@ -218,10 +265,15 @@ def __init__(self, _dict):
def __call__(self, parent, idx):
return self._dict[idx]
- def constant(self):
- return False
+ def contains_indices(self):
+ return True
+
+ def indices(self):
+ return iterkeys(self._dict)
+
class IndexedCallInitializer(InitializerBase):
+ """Initializer for functions and callable objects"""
__slots__ = ('_fcn',)
def __init__(self, _fcn):
@@ -237,23 +289,27 @@ def __call__(self, parent, idx):
else:
return self._fcn(parent, idx)
- def constant(self):
- return False
class CountedCallGenerator(object):
- def __init__(self, fcn, scalar, parent, idx):
+ """Generator implementing the "counted call" initialization scheme
+
+ This generator implements the older "counted call" scheme, where the
+ first argument past the parent block is a monotonically-increasing
+ integer beginning at 1.
+ """
+ def __init__(self, ctype, fcn, scalar, parent, idx):
# Note: this is called by a component using data from a Set (so
# any tuple-like type should have already been checked and
# converted to a tuple; or flattening is turned off and it is
# the user's responsibility to sort things out.
self._count = 0
if scalar:
- self._fcn = lambda c: self._filter(fcn(parent, c))
+ self._fcn = lambda c: self._filter(ctype, fcn(parent, c))
elif idx.__class__ is tuple:
- self._fcn = lambda c: self._filter(fcn(parent, c, *idx))
+ self._fcn = lambda c: self._filter(ctype, fcn(parent, c, *idx))
else:
- self._fcn = lambda c: self._filter(fcn(parent, c, idx))
+ self._fcn = lambda c: self._filter(ctype, fcn(parent, c, idx))
def __iter__(self):
return self
@@ -262,22 +318,24 @@ def __next__(self):
self._count += 1
return self._fcn(self._count)
+ next = __next__
+
@staticmethod
- def _filter(x):
+ def _filter(ctype, x):
if x is None:
raise ValueError(
- """Counted Set rule returned None instead of Set.End.
- Counted Set rules of the form fcn(model, count, *idx) will be called
+ """Counted %s rule returned None instead of %s.End.
+ Counted %s rules of the form fcn(model, count, *idx) will be called
repeatedly with an increasing count parameter until the rule returns
- Set.End. None is not a valid Set member in this case due to the
- likelihood that an error in the rule can incorrectly return None.""")
+ %s.End. None is not a valid return value in this case due to the
+ likelihood that an error in the rule can incorrectly return None."""
+ % ((ctype.__name__,)*4))
return x
- next = __next__
-
-
class CountedCallInitializer(InitializerBase):
+ """Initializer for functions implementing the "counted call" API.
+ """
# Pyomo has a historical feature for some rules, where the number of
# times[*1] the rule was called could be passed as an additional
# argument between the block and the index. This was primarily
@@ -300,12 +358,13 @@ class CountedCallInitializer(InitializerBase):
# consistent form of the original implementation for backwards
# compatability, but I believe that we should deprecate this syntax
# entirely.
- __slots__ = ('_fcn','_is_counted_rule', '_scalar',)
+ __slots__ = ('_fcn','_is_counted_rule', '_scalar','_ctype')
def __init__(self, obj, _indexed_init):
self._fcn = _indexed_init._fcn
self._is_counted_rule = None
self._scalar = not obj.is_indexed()
+ self._ctype = obj.ctype
if self._scalar:
self._is_counted_rule = True
@@ -320,7 +379,8 @@ def __call__(self, parent, idx):
else:
return self._fcn(parent, idx)
if self._is_counted_rule == True:
- return CountedCallGenerator(self._fcn, self._scalar, parent, idx)
+ return CountedCallGenerator(
+ self._ctype, self._fcn, self._scalar, parent, idx)
# Note that this code will only be called once, and only if
# the object is not a scalar.
@@ -332,10 +392,9 @@ def __call__(self, parent, idx):
self._is_counted_rule = False
return self.__call__(parent, idx)
- def constant(self):
- return False
class ScalarCallInitializer(InitializerBase):
+ """Initializer for functions taking only the parent block argument."""
__slots__ = ('_fcn',)
def __init__(self, _fcn):
@@ -343,7 +402,3 @@ def __init__(self, _fcn):
def __call__(self, parent, idx):
return self._fcn(parent)
-
- def constant(self):
- return False
-
diff --git a/pyomo/core/base/var.py b/pyomo/core/base/var.py
index 82e5c788c20..29470d6ed4b 100644
--- a/pyomo/core/base/var.py
+++ b/pyomo/core/base/var.py
@@ -16,12 +16,12 @@
from pyomo.common.modeling import NoArgumentGiven
from pyomo.common.timing import ConstructionTimer
from pyomo.core.base.numvalue import NumericValue, value, is_fixed
-from pyomo.core.base.set_types import BooleanSet, IntegerSet, RealSet, Reals
+from pyomo.core.base.set_types import Reals, Binary
from pyomo.core.base.plugin import ModelComponentFactory
from pyomo.core.base.component import ComponentData
from pyomo.core.base.indexed_component import IndexedComponent, UnindexedComponent_set
from pyomo.core.base.misc import apply_indexed_rule
-from pyomo.core.base.sets import Set
+from pyomo.core.base.set import Set, _SetDataBase
from pyomo.core.base.util import is_functor
from six import iteritems, itervalues
@@ -97,18 +97,25 @@ def bounds(self, val):
raise AttributeError("Assignment not allowed. Use the setub and setlb methods")
def is_integer(self):
- """Returns True when the domain class is IntegerSet."""
- # optimization: this is the most common case
- if self.domain.__class__ is IntegerSet:
+ """Returns True when the domain is a contiguous integer range."""
+ # optimization: Reals and Binary are the most common cases, so
+ # we will explicitly test that before generating the interval
+ if self.domain is Reals:
+ return False
+ elif self.domain is Binary:
return True
- return isinstance(self.domain, IntegerSet)
+ _interval = self.domain.get_interval()
+ return _interval is not None and _interval[2] == 1
def is_binary(self):
- """Returns True when the domain class is BooleanSet."""
- # optimization: this is the most common case
- if self.domain.__class__ is BooleanSet:
+ """Returns True when the domain is restricted to Binary values."""
+ # optimization: Reals and Binary are the most common cases, so
+ # we will explicitly test that before generating the interval
+ if self.domain is Reals:
+ return False
+ elif self.domain is Binary:
return True
- return isinstance(self.domain, BooleanSet)
+ return self.domain.get_interval() == (0,1,1)
# TODO?
# def is_semicontinuous(self):
@@ -123,11 +130,15 @@ def is_binary(self):
# return self.domain.__class__ is SemiIntegerSet
def is_continuous(self):
- """Returns True when the domain is an instance of RealSet."""
- # optimization: this is the most common case
- if self.domain.__class__ is RealSet:
+ """Returns True when the domain is a continuous real range"""
+ # optimization: Reals and Binary are the most common cases, so
+ # we will explicitly test that before generating the interval
+ if self.domain is Reals:
return True
- return isinstance(self.domain, RealSet)
+ elif self.domain is Binary:
+ return False
+ _interval = self.domain.get_interval()
+ return _interval is not None and _interval[2] == 0
def is_fixed(self):
"""Returns True if this variable is fixed, otherwise returns False."""
@@ -137,18 +148,10 @@ def is_constant(self):
"""Returns False because this is not a constant in an expression."""
return False
- def is_parameter_type(self):
- """Returns False because this is not a parameter object."""
- return False
-
def is_variable_type(self):
"""Returns True because this is a variable."""
return True
- def is_expression_type(self):
- """Returns False because this is not an expression"""
- return False
-
def is_potentially_variable(self):
"""Returns True because this is a variable."""
return True
@@ -182,7 +185,8 @@ def _valid_value(self, val, use_exception=True):
ans = val is None or val in self.domain
if not ans and use_exception:
raise ValueError("Numeric value `%s` (%s) is not in "
- "domain %s" % (val, type(val), self.domain))
+ "domain %s for Var %s" %
+ (val, type(val), self.domain, self.name))
return ans
def clear(self):
@@ -332,14 +336,17 @@ def __init__(self, domain=Reals, component=None):
self.stale = True
# don't call the property setter here because
# the SimplVar constructor will fail
- if hasattr(domain, 'bounds'):
+ #
+ # TODO: this should be migrated over to using a SetInitializer
+ # to handle the checking / conversion of the argument to a
+ # proper Pyomo Set and not use isinstance() of a private class.
+ if isinstance(domain, _SetDataBase):
self._domain = domain
elif domain is not None:
raise ValueError(
"%s is not a valid domain. Variable domains must be an "
- "instance of one of %s, or an object that declares a method "
- "for bounds (like a Pyomo Set). Examples: NonNegativeReals, "
- "Integers, Binary" % (domain, (RealSet, IntegerSet, BooleanSet)))
+ "instance of a Pyomo Set. Examples: NonNegativeReals, "
+ "Integers, Binary" % (domain,))
def __getstate__(self):
state = super(_GeneralVarData, self).__getstate__()
@@ -373,14 +380,16 @@ def domain(self):
@domain.setter
def domain(self, domain):
"""Set the domain for this variable."""
- if hasattr(domain, 'bounds'):
+ # TODO: this should be migrated over to using a SetInitializer
+ # to handle the checking / conversion of the argument to a
+ # proper Pyomo Set and not use isinstance() of a private class.
+ if isinstance(domain, _SetDataBase):
self._domain = domain
else:
raise ValueError(
"%s is not a valid domain. Variable domains must be an "
- "instance of one of %s, or an object that declares a method "
- "for bounds (like a Pyomo Set). Examples: NonNegativeReals, "
- "Integers, Binary" % (domain, (RealSet, IntegerSet, BooleanSet)))
+ "instance of a Pyomo Set. Examples: NonNegativeReals, "
+ "Integers, Binary" % (domain,))
@property
def lb(self):
@@ -408,6 +417,12 @@ def ub(self):
def ub(self, val):
raise AttributeError("Assignment not allowed. Use the setub method")
+ def get_units(self):
+ """Return the units for this variable entry."""
+ # parent_component() returns self if this is scalar, or the owning
+ # component if not scalar
+ return self.parent_component()._units
+
# fixed is an attribute
# stale is an attribute
@@ -476,6 +491,8 @@ class Var(IndexedComponent):
`index_set()` when constructing the Var (True) or just the
variables returned by `initialize`/`rule` (False). Defaults
to True.
+ units (pyomo units expression, optional): Set the units corresponding
+ to the entries in this variable.
"""
_ComponentDataClass = _GeneralVarData
@@ -498,7 +515,8 @@ def __init__(self, *args, **kwd):
domain = kwd.pop('domain', domain)
bounds = kwd.pop('bounds', None)
self._dense = kwd.pop('dense', True)
-
+ self._units = kwd.pop('units', None)
+
#
# Initialize the base class
#
@@ -535,10 +553,6 @@ def __init__(self, *args, **kwd):
elif bounds is not None:
raise ValueError("Variable 'bounds' keyword must be a tuple or function")
- def is_expression_type(self):
- """Returns False because this is not an expression"""
- return False
-
def flag_as_stale(self):
"""
Set the 'stale' attribute of every variable data object to True.
@@ -568,6 +582,10 @@ def set_values(self, new_values, valid=False):
for index, new_value in iteritems(new_values):
self[index].set_value(new_value, valid)
+ def get_units(self):
+ """Return the units expression for this Var."""
+ return self._units
+
def construct(self, data=None):
"""Construct this component."""
if __debug__ and logger.isEnabledFor(logging.DEBUG): #pragma:nocover
@@ -967,7 +985,7 @@ class VarList(IndexedVar):
def __init__(self, **kwds):
#kwds['dense'] = False
- args = (Set(),)
+ args = (Set(dimen=1),)
IndexedVar.__init__(self, *args, **kwds)
def construct(self, data=None):
@@ -975,6 +993,12 @@ def construct(self, data=None):
if __debug__ and logger.isEnabledFor(logging.DEBUG):
logger.debug("Constructing variable list %s", self.name)
+ if self._constructed:
+ return
+ # Note: do not set _constructed here, or the super() call will
+ # not actually construct the component.
+ self.index_set().construct()
+
# We need to ensure that the indices needed for initialization are
# added to the underlying implicit set. We *could* verify that the
# indices in the initialization dict are all sequential integers,
diff --git a/pyomo/core/expr/calculus/diff_with_pyomo.py b/pyomo/core/expr/calculus/diff_with_pyomo.py
index 9234975c13d..af6063b463e 100644
--- a/pyomo/core/expr/calculus/diff_with_pyomo.py
+++ b/pyomo/core/expr/calculus/diff_with_pyomo.py
@@ -299,6 +299,22 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict):
raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node)))
+def _diff_ExternalFunctionExpression(node, val_dict, der_dict):
+ """
+
+ Parameters
+ ----------
+ node: pyomo.core.expr.numeric_expr.ProductExpression
+ val_dict: ComponentMap
+ der_dict: ComponentMap
+ """
+ der = der_dict[node]
+ vals = tuple(val_dict[i] for i in node.args)
+ derivs = node._fcn.evaluate_fgh(vals)[1]
+ for ndx, arg in enumerate(node.args):
+ der_dict[arg] += der * derivs[ndx]
+
+
_diff_map = dict()
_diff_map[_expr.ProductExpression] = _diff_ProductExpression
_diff_map[_expr.DivisionExpression] = _diff_DivisionExpression
@@ -308,6 +324,50 @@ def _diff_UnaryFunctionExpression(node, val_dict, der_dict):
_diff_map[_expr.MonomialTermExpression] = _diff_ProductExpression
_diff_map[_expr.NegationExpression] = _diff_NegationExpression
_diff_map[_expr.UnaryFunctionExpression] = _diff_UnaryFunctionExpression
+_diff_map[_expr.ExternalFunctionExpression] = _diff_ExternalFunctionExpression
+
+
+class _NamedExpressionCollector(ExpressionValueVisitor):
+ def __init__(self):
+ self.named_expressions = list()
+
+ def visit(self, node, values):
+ return None
+
+ def visiting_potential_leaf(self, node):
+ if node.__class__ in nonpyomo_leaf_types:
+ return True, None
+
+ if not node.is_expression_type():
+ return True, None
+
+ if node.is_named_expression_type():
+ self.named_expressions.append(node)
+ return False, None
+
+ return False, None
+
+
+def _collect_ordered_named_expressions(expr):
+ """
+ The purpose of this function is to collect named expressions in a
+ particular order. The order is very important. In the resulting
+ list each named expression can only appear once, and any named
+ expressions that are used in other named expressions have to come
+ after the named expression that use them.
+ """
+ visitor = _NamedExpressionCollector()
+ visitor.dfs_postorder_stack(expr)
+ named_expressions = visitor.named_expressions
+ seen = set()
+ res = list()
+ for e in reversed(named_expressions):
+ if id(e) in seen:
+ continue
+ seen.add(id(e))
+ res.append(e)
+ res = list(reversed(res))
+ return res
class _ReverseADVisitorLeafToRoot(ExpressionValueVisitor):
@@ -364,16 +424,15 @@ def visiting_potential_leaf(self, node):
if not node.is_expression_type():
return True, None
+ if node.is_named_expression_type():
+ return True, None
+
if node.__class__ in _diff_map:
_diff_map[node.__class__](node, self.val_dict, self.der_dict)
- elif node.is_named_expression_type():
- der = self.der_dict[node]
- self.der_dict[node.expr] += der
+ return False, None
else:
raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node)))
- return False, None
-
def reverse_ad(expr):
"""
@@ -395,9 +454,13 @@ def reverse_ad(expr):
visitorA = _ReverseADVisitorLeafToRoot(val_dict, der_dict)
visitorA.dfs_postorder_stack(expr)
+ named_expressions = _collect_ordered_named_expressions(expr)
der_dict[expr] = 1
visitorB = _ReverseADVisitorRootToLeaf(val_dict, der_dict)
visitorB.dfs_postorder_stack(expr)
+ for named_expr in named_expressions:
+ der_dict[named_expr.expr] = der_dict[named_expr]
+ visitorB.dfs_postorder_stack(named_expr.expr)
return der_dict
@@ -456,16 +519,15 @@ def visiting_potential_leaf(self, node):
if not node.is_expression_type():
return True, None
+ if node.is_named_expression_type():
+ return True, None
+
if node.__class__ in _diff_map:
_diff_map[node.__class__](node, self.val_dict, self.der_dict)
- elif node.is_named_expression_type():
- der = self.der_dict[node]
- self.der_dict[node.expr] += der
+ return False, None
else:
raise DifferentiationException('Unsupported expression type for differentiation: {0}'.format(type(node)))
- return False, None
-
def reverse_sd(expr):
"""
@@ -487,10 +549,12 @@ def reverse_sd(expr):
visitorA = _ReverseSDVisitorLeafToRoot(val_dict, der_dict)
visitorA.dfs_postorder_stack(expr)
+ named_expressions = _collect_ordered_named_expressions(expr)
der_dict[expr] = 1
visitorB = _ReverseSDVisitorRootToLeaf(val_dict, der_dict)
visitorB.dfs_postorder_stack(expr)
+ for named_expr in named_expressions:
+ der_dict[named_expr.expr] = der_dict[named_expr]
+ visitorB.dfs_postorder_stack(named_expr.expr)
return der_dict
-
-
diff --git a/pyomo/core/expr/current.py b/pyomo/core/expr/current.py
index 874f79b06fb..050561366fd 100755
--- a/pyomo/core/expr/current.py
+++ b/pyomo/core/expr/current.py
@@ -43,6 +43,7 @@ class Mode(object):
_generate_relational_expression,
_chainedInequality,
)
+ from pyomo.core.expr.template_expr import *
from pyomo.core.expr import visitor as _visitor
from pyomo.core.expr.visitor import *
# FIXME: we shouldn't need circular dependencies between modules
diff --git a/pyomo/core/expr/logical_expr.py b/pyomo/core/expr/logical_expr.py
index 1ef5d41aa45..d2844850dca 100644
--- a/pyomo/core/expr/logical_expr.py
+++ b/pyomo/core/expr/logical_expr.py
@@ -33,7 +33,7 @@
)
from .numeric_expr import _LinearOperatorExpression, _process_arg
-if _using_chained_inequality: #pragma: no cover
+if _using_chained_inequality:
class _chainedInequality(object):
prev = None
@@ -70,7 +70,7 @@ def error_message(msg=None):
if value(expression <= 5):
""" % args
-else: #pragma: no cover
+else:
_chainedInequality = None
@@ -185,7 +185,7 @@ def __getstate__(self):
return state
def __nonzero__(self):
- if _using_chained_inequality and not self.is_constant(): #pragma: no cover
+ if _using_chained_inequality and not self.is_constant():
deprecation_warning("Chained inequalities are deprecated. "
"Use the inequality() function to "
"express ranged inequality expressions.") # Remove in Pyomo 6.0
@@ -313,7 +313,7 @@ def is_potentially_variable(self):
if _using_chained_inequality:
- def _generate_relational_expression(etype, lhs, rhs): #pragma: no cover
+ def _generate_relational_expression(etype, lhs, rhs):
# We cannot trust Python not to recycle ID's for temporary POD data
# (e.g., floats). So, if it is a "native" type, we will record the
# value, otherwise we will record the ID. The tuple for native
@@ -406,7 +406,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no
elif etype == _lt:
strict = True
else:
- raise ValueError("Unknown relational expression type '%s'" % etype) #pragma: no cover
+ raise ValueError("Unknown relational expression type '%s'" % etype)
if lhs_is_relational:
if lhs.__class__ is InequalityExpression:
if rhs_is_relational:
@@ -435,7 +435,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no
else:
- def _generate_relational_expression(etype, lhs, rhs): #pragma: no cover
+ def _generate_relational_expression(etype, lhs, rhs):
rhs_is_relational = False
lhs_is_relational = False
@@ -472,7 +472,7 @@ def _generate_relational_expression(etype, lhs, rhs): #pragma: no
elif etype == _lt:
strict = True
else:
- raise ValueError("Unknown relational expression type '%s'" % etype) #pragma: no cover
+ raise ValueError("Unknown relational expression type '%s'" % etype)
if lhs_is_relational:
if lhs.__class__ is InequalityExpression:
if rhs_is_relational:
diff --git a/pyomo/core/expr/numeric_expr.py b/pyomo/core/expr/numeric_expr.py
index 1f015911106..418061dad2c 100644
--- a/pyomo/core/expr/numeric_expr.py
+++ b/pyomo/core/expr/numeric_expr.py
@@ -19,6 +19,7 @@
from pyutilib.math.util import isclose
from pyomo.common.deprecation import deprecated
+from pyomo.common.errors import DeveloperError
from .expr_common import (
_add, _sub, _mul, _div,
@@ -629,10 +630,7 @@ def getname(self, *args, **kwds): #pragma: no cover
return self._fcn.getname(*args, **kwds)
def _compute_polynomial_degree(self, result):
- # If the expression is constant, then
- # this is detected earlier. Hence, we can safely
- # return None.
- return None
+ return 0 if all(arg == 0 for arg in result) else None
def _apply_operation(self, result):
return self._fcn.evaluate( result )
@@ -640,6 +638,13 @@ def _apply_operation(self, result):
def _to_string(self, values, verbose, smap, compute_values):
return "{0}({1})".format(self.getname(), ", ".join(values))
+ def get_arg_units(self):
+ """ Return the units for this external functions arguments """
+ return self._fcn.get_arg_units()
+
+ def get_units(self):
+ """ Get the units of the return value for this external function """
+ return self._fcn.get_units()
class NPV_ExternalFunctionExpression(ExternalFunctionExpression):
__slots__ = ()
@@ -1058,85 +1063,6 @@ def add(self, new_arg):
return self
-class GetItemExpression(ExpressionBase):
- """
- Expression to call :func:`__getitem__` on the base object.
- """
- __slots__ = ('_base',)
- PRECEDENCE = 1
-
- def _precedence(self): #pragma: no cover
- return GetItemExpression.PRECEDENCE
-
- def __init__(self, args, base=None):
- """Construct an expression with an operation and a set of arguments"""
- self._args_ = args
- self._base = base
-
- def nargs(self):
- return len(self._args_)
-
- def create_node_with_local_data(self, args):
- return self.__class__(args, self._base)
-
- def __getstate__(self):
- state = super(GetItemExpression, self).__getstate__()
- for i in GetItemExpression.__slots__:
- state[i] = getattr(self, i)
- return state
-
- def getname(self, *args, **kwds):
- return self._base.getname(*args, **kwds)
-
- def is_potentially_variable(self):
- if any(arg.is_potentially_variable() for arg in self._args_
- if arg.__class__ not in nonpyomo_leaf_types):
- return True
- for x in itervalues(self._base):
- if x.__class__ not in nonpyomo_leaf_types \
- and x.is_potentially_variable():
- return True
- return False
-
- def is_fixed(self):
- if any(self._args_):
- for x in itervalues(self._base):
- if not x.__class__ in nonpyomo_leaf_types and not x.is_fixed():
- return False
- return True
-
- def _is_fixed(self, values):
- for x in itervalues(self._base):
- if not x.__class__ in nonpyomo_leaf_types and not x.is_fixed():
- return False
- return True
-
- def _compute_polynomial_degree(self, result): # TODO: coverage
- if any(x != 0 for x in result):
- return None
- ans = 0
- for x in itervalues(self._base):
- if x.__class__ in nonpyomo_leaf_types:
- continue
- tmp = x.polynomial_degree()
- if tmp is None:
- return None
- elif tmp > ans:
- ans = tmp
- return ans
-
- def _apply_operation(self, result): # TODO: coverage
- return value(self._base.__getitem__( tuple(result) ))
-
- def _to_string(self, values, verbose, smap, compute_values):
- if verbose:
- return "{0}({1})".format(self.getname(), values[0])
- return "%s%s" % (self.getname(), values[0])
-
- def resolve_template(self): # TODO: coverage
- return self._base.__getitem__(tuple(value(i) for i in self._args_))
-
-
class Expr_ifExpression(ExpressionBase):
"""
A logical if-then-else expression::
@@ -1178,11 +1104,13 @@ def getname(self, *args, **kwds):
def _is_fixed(self, args):
assert(len(args) == 3)
- if args[0]: #self._if.is_constant():
+ if args[0]: # self._if.is_fixed():
+ if args[1] and args[2]:
+ return True
if value(self._if):
- return args[1] #self._then.is_constant()
+ return args[1] # self._then.is_fixed()
else:
- return args[2] #self._else.is_constant()
+ return args[2] # self._else.is_fixed()
else:
return False
@@ -1204,6 +1132,8 @@ def is_potentially_variable(self):
def _compute_polynomial_degree(self, result):
_if, _then, _else = result
if _if == 0:
+ if _then == _else:
+ return _then
try:
return _then if value(self._if) else _else
except ValueError:
@@ -1363,18 +1293,16 @@ def getname(self, *args, **kwds):
return 'sum'
def _compute_polynomial_degree(self, result):
- return 1 if len(self.linear_vars) > 0 else 0
+ return 1 if not self.is_fixed() else 0
def is_constant(self):
return len(self.linear_vars) == 0
+ def _is_fixed(self, values=None):
+ return all(v.fixed for v in self.linear_vars)
+
def is_fixed(self):
- if len(self.linear_vars) == 0:
- return True
- for v in self.linear_vars:
- if not v.fixed:
- return False
- return True
+ return self._is_fixed()
def _to_string(self, values, verbose, smap, compute_values):
tmp = []
@@ -1649,23 +1577,20 @@ def _decompose_linear_terms(expr, multiplier=1):
def _process_arg(obj):
- try:
- if obj.is_parameter_type() and not obj._component()._mutable and obj._constructed:
- # Return the value of an immutable SimpleParam or ParamData object
- return obj()
-
- elif obj.__class__ is NumericConstant:
- return obj.value
-
- return obj
- except AttributeError:
- if obj.is_indexed():
- raise TypeError(
- "Argument for expression is an indexed numeric "
- "value\nspecified without an index:\n\t%s\nIs this "
- "value defined over an index that you did not specify?"
- % (obj.name, ) )
- raise
+ # Note: caller is responsible for filtering out native types and
+ # expressions.
+ if obj.is_numeric_type() and obj.is_constant():
+ # Resolve constants (e.g., immutable scalar Params & NumericConstants)
+ return value(obj)
+ # User assistance: provide a helpful exception when using an indexed
+ # object in an expression
+ if obj.is_component_type() and obj.is_indexed():
+ raise TypeError(
+ "Argument for expression is an indexed numeric "
+ "value\nspecified without an index:\n\t%s\nIs this "
+ "value defined over an index that you did not specify?"
+ % (obj.name, ) )
+ return obj
#@profile
diff --git a/pyomo/core/expr/numvalue.py b/pyomo/core/expr/numvalue.py
index ea56cc4963c..250410dcbe7 100644
--- a/pyomo/core/expr/numvalue.py
+++ b/pyomo/core/expr/numvalue.py
@@ -24,6 +24,7 @@
_iadd, _isub, _imul, _idiv,
_ipow, _lt, _le, _eq)
+from pyomo.core.pyomoobject import PyomoObject
from pyomo.core.expr.expr_errors import TemplateExpressionError
logger = logging.getLogger('pyomo.core')
@@ -108,7 +109,7 @@ def __setstate__(self, state):
#: like numpy.
#:
#: :data:`native_types` = :data:`native_numeric_types ` + { str }
-native_types = set([ bool, str, type(None) ])
+native_types = set([ bool, str, type(None), slice ])
if PY3:
native_types.add(bytes)
native_boolean_types.add(bytes)
@@ -532,7 +533,7 @@ def check_if_numeric_type_and_cache(obj):
return retval
-class NumericValue(object):
+class NumericValue(PyomoObject):
"""
This is the base class for numeric values used in Pyomo.
"""
@@ -614,6 +615,10 @@ def cname(self, *args, **kwds):
"DEPRECATED: The cname() method has been renamed to getname()." )
return self.getname(*args, **kwds)
+ def is_numeric_type(self):
+ """Return True if this class is a Pyomo numeric object"""
+ return True
+
def is_constant(self):
"""Return True if this numeric value is a constant value"""
return False
@@ -622,28 +627,8 @@ def is_fixed(self):
"""Return True if this is a non-constant value that has been fixed"""
return False
- def is_parameter_type(self):
- """Return False unless this class is a parameter object"""
- return False
-
- def is_variable_type(self):
- """Return False unless this class is a variable object"""
- return False
-
def is_potentially_variable(self):
"""Return True if variables can appear in this expression"""
- return True
-
- def is_named_expression_type(self):
- """Return True if this numeric value is a named expression"""
- return False
-
- def is_expression_type(self):
- """Return True if this numeric value is an expression"""
- return False
-
- def is_component_type(self):
- """Return True if this class is a Pyomo component"""
return False
def is_relational(self):
@@ -1024,9 +1009,6 @@ def is_constant(self):
def is_fixed(self):
return True
- def is_potentially_variable(self):
- return False
-
def _compute_polynomial_degree(self, result):
return 0
diff --git a/pyomo/core/expr/sympy_tools.py b/pyomo/core/expr/sympy_tools.py
index 687f212d5f9..cae5a0fff5d 100644
--- a/pyomo/core/expr/sympy_tools.py
+++ b/pyomo/core/expr/sympy_tools.py
@@ -12,36 +12,26 @@
import pyutilib.misc
from pyomo.core.expr import current
from pyomo.common import DeveloperError
+from pyomo.common.dependencies import attempt_import
from pyomo.core.expr import current as EXPR, native_types
from pyomo.core.expr.numvalue import value
from pyomo.core.kernel.component_map import ComponentMap
from pyomo.common.errors import NondifferentiableError
-sympy_available = True
-try:
- import sympy
-
- def _prod(*x):
- ans = x[0]
- for i in x[1:]:
- ans *= i
- return ans
+#
+# Sympy takes a significant time to load; defer importing it unless
+# someone actually needs the interface.
+#
- def _sum(*x):
- return sum(x_ for x_ in x)
+_operatorMap = {}
+_pyomo_operator_map = {}
+_functionMap = {}
- def _nondifferentiable(*x):
- if type(x[1]) is tuple:
- # sympy >= 1.3 returns tuples (var, order)
- wrt = x[1][0]
- else:
- # early versions of sympy returned the bare var
- wrt = x[1]
- raise NondifferentiableError(
- "The sub-expression '%s' is not differentiable with respect to %s"
- % (x[0], wrt) )
+def _configure_sympy(sympy, available):
+ if not available:
+ return
- _operatorMap = {
+ _operatorMap.update({
sympy.Add: _sum,
sympy.Mul: _prod,
sympy.Pow: lambda x, y: x**y,
@@ -65,16 +55,16 @@ def _nondifferentiable(*x):
sympy.Abs: lambda x: abs(x),
sympy.Derivative: _nondifferentiable,
sympy.Tuple: lambda *x: x,
- }
+ })
- _pyomo_operator_map = {
+ _pyomo_operator_map.update({
EXPR.SumExpression: sympy.Add,
EXPR.ProductExpression: sympy.Mul,
EXPR.NPV_ProductExpression: sympy.Mul,
EXPR.MonomialTermExpression: sympy.Mul,
- }
+ })
- _functionMap = {
+ _functionMap.update({
'exp': sympy.exp,
'log': sympy.log,
'log10': lambda x: sympy.log(x)/sympy.log(10),
@@ -93,9 +83,30 @@ def _nondifferentiable(*x):
'ceil': sympy.ceiling,
'floor': sympy.floor,
'sqrt': sympy.sqrt,
- }
-except ImportError:
- sympy_available = False
+ })
+
+sympy, sympy_available = attempt_import('sympy', callback=_configure_sympy)
+
+
+def _prod(*x):
+ ans = x[0]
+ for i in x[1:]:
+ ans *= i
+ return ans
+
+def _sum(*x):
+ return sum(x_ for x_ in x)
+
+def _nondifferentiable(*x):
+ if type(x[1]) is tuple:
+ # sympy >= 1.3 returns tuples (var, order)
+ wrt = x[1][0]
+ else:
+ # early versions of sympy returned the bare var
+ wrt = x[1]
+ raise NondifferentiableError(
+ "The sub-expression '%s' is not differentiable with respect to %s"
+ % (x[0], wrt) )
class PyomoSympyBimap(object):
def __init__(self):
@@ -128,9 +139,13 @@ def sympyVars(self):
class Pyomo2SympyVisitor(EXPR.StreamBasedExpressionVisitor):
def __init__(self, object_map):
+ sympy.Add # this ensures _configure_sympy gets run
super(Pyomo2SympyVisitor, self).__init__()
self.object_map = object_map
+ def initializeWalker(self, expr):
+ return self.beforeChild(None, expr, None)
+
def exitNode(self, node, values):
if node.__class__ is EXPR.UnaryFunctionExpression:
return _functionMap[node._name](values[0])
@@ -140,7 +155,7 @@ def exitNode(self, node, values):
else:
return _op(*tuple(values))
- def beforeChild(self, node, child):
+ def beforeChild(self, node, child, child_idx):
#
# Don't replace native or sympy types
#
@@ -164,9 +179,13 @@ def beforeChild(self, node, child):
class Sympy2PyomoVisitor(EXPR.StreamBasedExpressionVisitor):
def __init__(self, object_map):
+ sympy.Add # this ensures _configure_sympy gets run
super(Sympy2PyomoVisitor, self).__init__()
self.object_map = object_map
+ def initializeWalker(self, expr):
+ return self.beforeChild(None, expr, None)
+
def enterNode(self, node):
return (node._args, [])
@@ -180,7 +199,7 @@ def exitNode(self, node, values):
"map" % type(_sympyOp) )
return _op(*tuple(values))
- def beforeChild(self, node, child):
+ def beforeChild(self, node, child, child_idx):
if not child._args:
item = self.object_map.getPyomoSymbol(child, None)
if item is None:
@@ -195,16 +214,9 @@ def sympyify_expression(expr):
#
object_map = PyomoSympyBimap()
visitor = Pyomo2SympyVisitor(object_map)
- is_expr, ans = visitor.beforeChild(None, expr)
- if not is_expr:
- return object_map, ans
-
return object_map, visitor.walk_expression(expr)
def sympy2pyomo_expression(expr, object_map):
visitor = Sympy2PyomoVisitor(object_map)
- is_expr, ans = visitor.beforeChild(None, expr)
- if not is_expr:
- return ans
return visitor.walk_expression(expr)
diff --git a/pyomo/core/expr/taylor_series.py b/pyomo/core/expr/taylor_series.py
index 953b4997152..68f503b4550 100644
--- a/pyomo/core/expr/taylor_series.py
+++ b/pyomo/core/expr/taylor_series.py
@@ -48,7 +48,7 @@ def taylor_series_expansion(expr, diff_mode=differentiate.Modes.reverse_numeric,
res = value(expr)
if order >= 1:
derivs = differentiate(expr=expr, wrt_list=e_vars, mode=diff_mode)
- res += sum(value(derivs[i]) * (e_vars[i] - e_vars[i].value) for i in range(len(e_vars)))
+ res += sum((e_vars[i] - e_vars[i].value) * value(derivs[i]) for i in range(len(e_vars)))
"""
This last bit of code is just for higher order taylor series expansions.
@@ -68,6 +68,6 @@ def taylor_series_expansion(expr, diff_mode=differentiate.Modes.reverse_numeric,
tmp = coef
for ndx in ndx_list:
tmp *= (e_vars[ndx] - e_vars[ndx].value)
- res += tmp * sum(value(_derivs[i]) * (e_vars[i] - e_vars[i].value) for i in range(len(e_vars)))
+ res += tmp * sum((e_vars[i] - e_vars[i].value) * value(_derivs[i]) for i in range(len(e_vars)))
return res
diff --git a/pyomo/core/expr/template_expr.py b/pyomo/core/expr/template_expr.py
new file mode 100644
index 00000000000..04a01e514a7
--- /dev/null
+++ b/pyomo/core/expr/template_expr.py
@@ -0,0 +1,790 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import copy
+import itertools
+import logging
+import sys
+from six import iteritems, itervalues
+from six.moves import builtins
+
+from pyomo.core.expr.expr_errors import TemplateExpressionError
+from pyomo.core.expr.numvalue import (
+ NumericValue, native_numeric_types, native_types, nonpyomo_leaf_types,
+ as_numeric, value,
+)
+from pyomo.core.expr.numeric_expr import ExpressionBase, SumExpression
+from pyomo.core.expr.visitor import (
+ ExpressionReplacementVisitor, StreamBasedExpressionVisitor
+)
+
+logger = logging.getLogger(__name__)
+
+class _NotSpecified(object): pass
+
+class GetItemExpression(ExpressionBase):
+ """
+ Expression to call :func:`__getitem__` on the base object.
+ """
+ PRECEDENCE = 1
+
+ def _precedence(self):
+ return GetItemExpression.PRECEDENCE
+
+ def __init__(self, args):
+ """Construct an expression with an operation and a set of arguments"""
+ self._args_ = args
+
+ def nargs(self):
+ return len(self._args_)
+
+ def __getattr__(self, attr):
+ if attr.startswith('__') and attr.endswith('__'):
+ raise AttributeError()
+ return GetAttrExpression((self, attr))
+
+ def __iter__(self):
+ return iter(value(self))
+
+ def __len__(self):
+ return len(value(self))
+
+ def getname(self, *args, **kwds):
+ return self._args_[0].getname(*args, **kwds)
+
+ def is_potentially_variable(self):
+ _false = lambda: False
+ if any( getattr(arg, 'is_potentially_variable', _false)()
+ for arg in self._args_ ):
+ return True
+ base = self._args_[0]
+ if base.is_expression_type():
+ base = value(base)
+ # TODO: fix value iteration when generating templates
+ #
+ # There is a nasty problem here: we want to iterate over all the
+ # members of the base and see if *any* of them are potentially
+ # variable. Unfortunately, this method is called during
+ # expression generation, and we *could* be generating a
+ # template. When that occurs, iterating over the base will
+ # yield a new IndexTemplate (which will in turn raise an
+ # exception because IndexTemplates are not constant). The real
+ # solution is probably to re-think how we define
+ # is_potentially_variable, but for now we will only handle
+ # members that are explicitly stored in the _data dict. Not
+ # general (because a Component could implement a non-standard
+ # storage scheme), but as of now [30 Apr 20], there are no known
+ # Components where this assumption will cause problems.
+ return any( getattr(x, 'is_potentially_variable', _false)()
+ for x in itervalues(getattr(base, '_data', {})) )
+
+ def _is_fixed(self, values):
+ if not all(values[1:]):
+ return False
+ _true = lambda: True
+ return all( getattr(x, 'is_fixed', _true)()
+ for x in itervalues(values[0]) )
+
+ def _compute_polynomial_degree(self, result):
+ if any(x != 0 for x in result[1:]):
+ return None
+ ans = 0
+ for x in itervalues(result[0]):
+ if x.__class__ in nonpyomo_leaf_types \
+ or not hasattr(x, 'polynomial_degree'):
+ continue
+ tmp = x.polynomial_degree()
+ if tmp is None:
+ return None
+ elif tmp > ans:
+ ans = tmp
+ return ans
+
+ def _apply_operation(self, result):
+ obj = result[0].__getitem__( tuple(result[1:]) )
+ if obj.__class__ in nonpyomo_leaf_types:
+ return obj
+ # Note that because it is possible (likely) that the result
+ # could be an IndexedComponent_slice object, must test "is
+ # True", as the slice will return a list of values.
+ if obj.is_numeric_type() is True:
+ obj = value(obj)
+ return obj
+
+ def _to_string(self, values, verbose, smap, compute_values):
+ values = tuple(_[1:-1] if _[0]=='(' and _[-1]==')' else _
+ for _ in values)
+ if verbose:
+ return "getitem(%s, %s)" % (values[0], ', '.join(values[1:]))
+ return "%s[%s]" % (values[0], ','.join(values[1:]))
+
+ def _resolve_template(self, args):
+ return args[0].__getitem__(tuple(args[1:]))
+
+
+class GetAttrExpression(ExpressionBase):
+ """
+ Expression to call :func:`__getattr__` on the base object.
+ """
+ __slots__ = ()
+ PRECEDENCE = 1
+
+ def _precedence(self):
+ return GetAttrExpression.PRECEDENCE
+
+ def nargs(self):
+ return len(self._args_)
+
+ def __getattr__(self, attr):
+ if attr.startswith('__') and attr.endswith('__'):
+ raise AttributeError()
+ return GetAttrExpression((self, attr))
+
+ def __getitem__(self, *idx):
+ return GetItemExpression((self,) + idx)
+
+ def __iter__(self):
+ return iter(value(self))
+
+ def __len__(self):
+ return len(value(self))
+
+ def getname(self, *args, **kwds):
+ return 'getattr'
+
+ def _compute_polynomial_degree(self, result):
+ if result[1] != 0:
+ return None
+ return result[0]
+
+ def _apply_operation(self, result):
+ assert len(result) == 2
+ obj = getattr(result[0], result[1])
+ if obj.__class__ in nonpyomo_leaf_types:
+ return obj
+ # Note that because it is possible (likely) that the result
+ # could be an IndexedComponent_slice object, must test "is
+ # True", as the slice will return a list of values.
+ if obj.is_numeric_type() is True:
+ obj = value(obj)
+ return obj
+
+ def _to_string(self, values, verbose, smap, compute_values):
+ assert len(values) == 2
+ if verbose:
+ return "getattr(%s, %s)" % tuple(values)
+ # Note that the string argument for getattr comes quoted, so we
+ # need to remove the quotes.
+ attr = values[1]
+ if attr[0] in '\"\'' and attr[0] == attr[-1]:
+ attr = attr[1:-1]
+ return "%s.%s" % (values[0], attr)
+
+ def _resolve_template(self, args):
+ return getattr(*tuple(args))
+
+
+class _TemplateSumExpression_argList(object):
+ """A virtual list to represent the expanded SumExpression args
+
+ This class implements a "virtual args list" for
+ TemplateSumExpressions without actually generating the expanded
+ expression. It can be accessed either in "one-pass" without
+ generating a list of template argument values (more efficient), or
+ as a random-access list (where it will have to create the full list
+ of argument values (less efficient).
+
+ The instance can be used as a context manager to both lock the
+ IndexTemplate values within this context and to restore their original
+ values upon exit.
+
+ It is (intentionally) not iterable.
+
+ """
+ def __init__(self, TSE):
+ self._tse = TSE
+ self._i = 0
+ self._init_vals = None
+ self._iter = self._get_iter()
+ self._lock = None
+
+ def __len__(self):
+ return self._tse.nargs()
+
+ def __getitem__(self, i):
+ if self._i == i:
+ self._set_iter_vals(next(self._iter))
+ self._i += 1
+ elif self._i is not None:
+ # Switch to random-access mode. If we have already
+ # retrieved one of the indices, then we need to regenerate
+ # the iterator from scratch.
+ self._iter = list(self._get_iter() if self._i else self._iter)
+ self._set_iter_vals(self._iter[i])
+ else:
+ self._set_iter_vals(self._iter[i])
+ return self._tse._local_args_[0]
+
+ def __enter__(self):
+ self._lock = self
+ self._lock_iters()
+
+ def __exit__(self, exc_type, exc_value, tb):
+ self._unlock_iters()
+ self._lock = None
+
+ def _get_iter(self):
+ # Note: by definition, all _set pointers within an itergroup
+ # point to the same Set
+ _sets = tuple(iterGroup[0]._set for iterGroup in self._tse._iters)
+ return itertools.product(*_sets)
+
+ def _lock_iters(self):
+ self._init_vals = tuple(
+ tuple(
+ it.lock(self._lock) for it in iterGroup
+ ) for iterGroup in self._tse._iters )
+
+ def _unlock_iters(self):
+ self._set_iter_vals(self._init_vals)
+ for iterGroup in self._tse._iters:
+ for it in iterGroup:
+ it.unlock(self._lock)
+
+ def _set_iter_vals(self, val):
+ for i, iterGroup in enumerate(self._tse._iters):
+ if len(iterGroup) == 1:
+ iterGroup[0].set_value(val[i], self._lock)
+ else:
+ for j, v in enumerate(val[i]):
+ iterGroup[j].set_value(v, self._lock)
+
+
+class TemplateSumExpression(ExpressionBase):
+ """
+ Expression to represent an unexpanded sum over one or more sets.
+ """
+ __slots__ = ('_iters', '_local_args_')
+ PRECEDENCE = 1
+
+ def _precedence(self):
+ return TemplateSumExpression.PRECEDENCE
+
+ def __init__(self, args, _iters):
+ assert len(args) == 1
+ self._args_ = args
+ self._iters = _iters
+
+ def nargs(self):
+ # Note: by definition, all _set pointers within an itergroup
+ # point to the same Set
+ ans = 1
+ for iterGroup in self._iters:
+ ans *= len(iterGroup[0]._set)
+ return ans
+
+ @property
+ def args(self):
+ return _TemplateSumExpression_argList(self)
+
+ @property
+ def _args_(self):
+ return _TemplateSumExpression_argList(self)
+
+ @_args_.setter
+ def _args_(self, args):
+ self._local_args_ = args
+
+ def create_node_with_local_data(self, args):
+ return self.__class__(args, self._iters)
+
+ def __getstate__(self):
+ state = super(TemplateSumExpression, self).__getstate__()
+ for i in TemplateSumExpression.__slots__:
+ state[i] = getattr(self, i)
+ return state
+
+ def getname(self, *args, **kwds):
+ return "SUM"
+
+ def is_potentially_variable(self):
+ if any(arg.is_potentially_variable() for arg in self._local_args_
+ if arg.__class__ not in nonpyomo_leaf_types):
+ return True
+ return False
+
+ def _is_fixed(self, values):
+ return all(values)
+
+ def _compute_polynomial_degree(self, result):
+ if None in result:
+ return None
+ return result[0]
+
+ def _apply_operation(self, result):
+ return sum(result)
+
+ def _to_string(self, values, verbose, smap, compute_values):
+ ans = ''
+ val = values[0]
+ if val[0]=='(' and val[-1]==')' and _balanced_parens(val[1:-1]):
+ val = val[1:-1]
+ iterStrGenerator = (
+ ( ', '.join(str(i) for i in iterGroup),
+ iterGroup[0]._set.to_string(verbose=verbose) )
+ for iterGroup in self._iters
+ )
+ if verbose:
+ iterStr = ', '.join('iter(%s, %s)' % x for x in iterStrGenerator)
+ return 'templatesum(%s, %s)' % (val, iterStr)
+ else:
+ iterStr = ' '.join('for %s in %s' % x for x in iterStrGenerator)
+ return 'SUM(%s %s)' % (val, iterStr)
+
+ def _resolve_template(self, args):
+ return SumExpression(args)
+
+
+class IndexTemplate(NumericValue):
+ """A "placeholder" for an index value in template expressions.
+
+ This class is a placeholder for an index value within a template
+ expression. That is, given the expression template for "m.x[i]",
+ where `m.z` is indexed by `m.I`, the expression tree becomes:
+
+ _GetItem:
+ - m.x
+ - IndexTemplate(_set=m.I, _value=None)
+
+ Constructor Arguments:
+ _set: the Set from which this IndexTemplate can take values
+ """
+
+ __slots__ = ('_set', '_value', '_index', '_id', '_lock')
+
+ def __init__(self, _set, index=0, _id=None):
+ self._set = _set
+ self._value = _NotSpecified
+ self._index = index
+ self._id = _id
+ self._lock = None
+
+ def __getstate__(self):
+ """
+ This method must be defined because this class uses slots.
+ """
+ state = super(IndexTemplate, self).__getstate__()
+ for i in IndexTemplate.__slots__:
+ state[i] = getattr(self, i)
+ return state
+
+ def __deepcopy__(self, memo):
+ # Because we leverage deepcopy for expression cloning, we need
+ # to see if this is a clone operation and *not* copy the
+ # template.
+ #
+ # TODO: JDS: We should consider converting the IndexTemplate to
+ # a proper Component: that way it could leverage the normal
+ # logic of using the parent_block scope to dictate the behavior
+ # of deepcopy.
+ if '__block_scope__' in memo:
+ memo[id(self)] = self
+ return self
+ #
+ # "Normal" deepcopying outside the context of pyomo.
+ #
+ ans = memo[id(self)] = self.__class__.__new__(self.__class__)
+ ans.__setstate__(copy.deepcopy(self.__getstate__(), memo))
+ return ans
+
+ # Note: because NONE of the slots on this class need to be edited,
+ # we don't need to implement a specialized __setstate__ method.
+
+ def __call__(self, exception=True):
+ """
+ Return the value of this object.
+ """
+ if self._value is _NotSpecified:
+ if exception:
+ raise TemplateExpressionError(
+ self, "Evaluating uninitialized IndexTemplate (%s)"
+ % (self,))
+ return None
+ else:
+ return self._value
+
+ def _resolve_template(self, args):
+ assert not args
+ return self()
+
+ def is_fixed(self):
+ """
+ Returns True because this value is fixed.
+ """
+ return True
+
+ def is_constant(self):
+ """
+ Returns False because this cannot immediately be simplified.
+ """
+ return False
+
+ def is_potentially_variable(self):
+ """Returns False because index values cannot be variables.
+
+ The IndexTemplate represents a placeholder for an index value
+ for an IndexedComponent, and at the moment, Pyomo does not
+ support variable indirection.
+ """
+ return False
+
+ def __str__(self):
+ return self.getname()
+
+ def getname(self, fully_qualified=False, name_buffer=None, relative_to=None):
+ if self._id is not None:
+ return "_%s" % (self._id,)
+
+ _set_name = self._set.getname(fully_qualified, name_buffer, relative_to)
+ if self._index is not None and self._set.dimen != 1:
+ _set_name += "(%s)" % (self._index,)
+ return "{"+_set_name+"}"
+
+ def to_string(self, verbose=None, labeler=None, smap=None, compute_values=False):
+ return self.name
+
+ def set_value(self, values=_NotSpecified, lock=None):
+ # It might be nice to check if the value is valid for the base
+ # set, but things are tricky when the base set is not dimention
+ # 1. So, for the time being, we will just "trust" the user.
+ # After all, the actual Set will raise exceptions if the value
+ # is not present.
+ if lock is not self._lock:
+ raise RuntimeError(
+ "The TemplateIndex %s is currently locked by %s and "
+ "cannot be set through lock %s" % (self, self._lock, lock))
+ if values is _NotSpecified:
+ self._value = _NotSpecified
+ return
+ if type(values) is not tuple:
+ values = (values,)
+ if self._index is not None:
+ if len(values) == 1:
+ self._value = values[0]
+ else:
+ raise ValueError("Passed multiple values %s to a scalar "
+ "IndexTemplate %s" % (values, self))
+ else:
+ self._value = values
+
+ def lock(self, lock):
+ assert self._lock is None
+ self._lock = lock
+ return self._value
+
+ def unlock(self, lock):
+ assert self._lock is lock
+ self._lock = None
+
+
+def resolve_template(expr):
+ """Resolve a template into a concrete expression
+
+ This takes a template expression and returns the concrete equivalent
+ by substituting the current values of all IndexTemplate objects and
+ resolving (evaluating and removing) all GetItemExpression,
+ GetAttrExpression, and TemplateSumExpression expression nodes.
+
+ """
+ def beforeChild(node, child, child_idx):
+        # Efficiency: do not descend into leaf nodes.
+ if type(child) in native_types or not child.is_expression_type():
+ if hasattr(child, '_resolve_template'):
+ return False, child._resolve_template(())
+ return False, child
+ else:
+ return True, None
+
+ def exitNode(node, args):
+ if hasattr(node, '_resolve_template'):
+ return node._resolve_template(args)
+ if len(args) == node.nargs() and all(
+ a is b for a,b in zip(node.args, args)):
+ return node
+ return node.create_node_with_local_data(args)
+
+ return StreamBasedExpressionVisitor(
+ initializeWalker=lambda x: beforeChild(None, x, None),
+ beforeChild=beforeChild,
+ exitNode=exitNode,
+ ).walk_expression(expr)
+
+
+class ReplaceTemplateExpression(ExpressionReplacementVisitor):
+
+ def __init__(self, substituter, *args):
+ super(ReplaceTemplateExpression, self).__init__()
+ self.substituter = substituter
+ self.substituter_args = args
+
+ def visiting_potential_leaf(self, node):
+ if type(node) is GetItemExpression or type(node) is IndexTemplate:
+ return True, self.substituter(node, *self.substituter_args)
+
+ return super(
+ ReplaceTemplateExpression, self).visiting_potential_leaf(node)
+
+
+def substitute_template_expression(expr, substituter, *args):
+ """Substitute IndexTemplates in an expression tree.
+
+ This is a general utility function for walking the expression tree
+    and substituting all occurrences of IndexTemplate and
+ _GetItemExpression nodes.
+
+ Args:
+ substituter: method taking (expression, *args) and returning
+ the new object
+ *args: these are passed directly to the substituter
+
+ Returns:
+ a new expression tree with all substitutions done
+ """
+ visitor = ReplaceTemplateExpression(substituter, *args)
+ return visitor.dfs_postorder_stack(expr)
+
+
+class _GetItemIndexer(object):
+ # Note that this class makes the assumption that only one template
+ # ever appears in an expression for a single index
+
+ def __init__(self, expr):
+ self._base = expr.arg(0)
+ self._args = []
+ _hash = [ id(self._base) ]
+ for x in expr.args[1:]:
+ try:
+ logging.disable(logging.CRITICAL)
+ val = value(x)
+ self._args.append(val)
+ _hash.append(val)
+ except TemplateExpressionError as e:
+ if x is not e.template:
+ raise TypeError(
+ "Cannot use the param substituter with expression "
+ "templates\nwhere the component index has the "
+ "IndexTemplate in an expression.\n\tFound in %s"
+ % ( expr, ))
+ self._args.append(e.template)
+ _hash.append(id(e.template._set))
+ finally:
+ logging.disable(logging.NOTSET)
+
+ self._hash = tuple(_hash)
+
+ def nargs(self):
+ return len(self._args)
+
+ def arg(self, i):
+ return self._args[i]
+
+ @property
+ def base(self):
+ return self._base
+
+ @property
+ def args(self):
+ return self._args
+
+ def __hash__(self):
+ return hash(self._hash)
+
+ def __eq__(self, other):
+ if type(other) is _GetItemIndexer:
+ return self._hash == other._hash
+ else:
+ return False
+
+ def __str__(self):
+ return "%s[%s]" % (
+ self._base.name, ','.join(str(x) for x in self._args) )
+
+
+def substitute_getitem_with_param(expr, _map):
+ """A simple substituter to replace _GetItem nodes with mutable Params.
+
+ This substituter will replace all _GetItemExpression nodes with a
+ new Param. For example, this method will create expressions
+ suitable for passing to DAE integrators
+ """
+ import pyomo.core.base.param
+ if type(expr) is IndexTemplate:
+ return expr
+
+ _id = _GetItemIndexer(expr)
+ if _id not in _map:
+ _map[_id] = pyomo.core.base.param.Param(mutable=True)
+ _map[_id].construct()
+ _map[_id]._name = "%s[%s]" % (
+ _id.base.name, ','.join(str(x) for x in _id.args) )
+ return _map[_id]
+
+
+def substitute_template_with_value(expr):
+ """A simple substituter to expand expression for current template
+
+ This substituter will replace all _GetItemExpression / IndexTemplate
+ nodes with the actual _ComponentData based on the current value of
+ the IndexTemplate(s)
+
+ """
+
+ if type(expr) is IndexTemplate:
+ return as_numeric(expr())
+ else:
+ return resolve_template(expr)
+
+
+class _set_iterator_template_generator(object):
+ """Replacement iterator that returns IndexTemplates
+
+ In order to generate template expressions, we hijack the normal Set
+ iteration mechanisms so that this iterator is returned instead of
+ the usual iterator. This iterator will return IndexTemplate
+ object(s) instead of the actual Set items the first time next() is
+ called.
+ """
+ def __init__(self, _set, context):
+ self._set = _set
+ self.context = context
+
+ def __iter__(self):
+ return self
+
+ def __next__(self):
+ # Prevent context from ever being called more than once
+ if self.context is None:
+ raise StopIteration()
+ context, self.context = self.context, None
+
+ _set = self._set
+ d = _set.dimen
+ if d is None or type(d) is not int:
+ idx = (IndexTemplate(_set, None, context.next_id()),)
+ else:
+ idx = tuple(
+ IndexTemplate(_set, i, context.next_id()) for i in range(d)
+ )
+ context.cache.append(idx)
+ if len(idx) == 1:
+ return idx[0]
+ else:
+ return idx
+
+ next = __next__
+
+class _template_iter_context(object):
+ """Manage the iteration context when generating templatized rules
+
+ This class manages the context tracking when generating templatized
+ rules. It has two methods (`sum_template` and `get_iter`) that
+ replace standard functions / methods (`sum` and
+ :py:meth:`_FiniteSetMixin.__iter__`, respectively). It also tracks
+ unique identifiers for IndexTemplate objects and their groupings
+ within `sum()` generators.
+ """
+ def __init__(self):
+ self.cache = []
+ self._id = 0
+
+ def get_iter(self, _set):
+ return _set_iterator_template_generator(_set, self)
+
+ def npop_cache(self, n):
+ result = self.cache[-n:]
+ self.cache[-n:] = []
+ return result
+
+ def next_id(self):
+ self._id += 1
+ return self._id
+
+ def sum_template(self, generator):
+ init_cache = len(self.cache)
+ expr = next(generator)
+ final_cache = len(self.cache)
+ return TemplateSumExpression(
+ (expr,), self.npop_cache(final_cache-init_cache)
+ )
+
+
+def templatize_rule(block, rule, index_set):
+ import pyomo.core.base.set
+ context = _template_iter_context()
+ internal_error = None
+ _old_iters = (
+ pyomo.core.base.set._FiniteSetMixin.__iter__,
+ GetItemExpression.__iter__,
+ GetAttrExpression.__iter__,
+ )
+ _old_sum = builtins.sum
+ try:
+ # Override Set iteration to return IndexTemplates
+ pyomo.core.base.set._FiniteSetMixin.__iter__ \
+ = GetItemExpression.__iter__ \
+ = GetAttrExpression.__iter__ \
+ = lambda x: context.get_iter(x).__iter__()
+ # Override sum with our sum
+ builtins.sum = context.sum_template
+ # Get the index templates needed for calling the rule
+ if index_set is not None:
+ if not index_set.isfinite():
+ raise TemplateExpressionError(
+ None,
+ "Cannot templatize rule with non-finite indexing set")
+ indices = next(iter(index_set))
+ try:
+ context.cache.pop()
+ except IndexError:
+ assert indices is None
+ indices = ()
+ else:
+ indices = ()
+ if type(indices) is not tuple:
+ indices = (indices,)
+ # Call the rule, returning the template expression and the
+ # top-level IndexTemplate(s) generated when calling the rule.
+ #
+ # TBD: Should this just return a "FORALL()" expression node that
+ # behaves similarly to the GetItemExpression node?
+ return rule(block, *indices), indices
+ except:
+ internal_error = sys.exc_info()
+ raise
+ finally:
+ pyomo.core.base.set._FiniteSetMixin.__iter__, \
+ GetItemExpression.__iter__, \
+ GetAttrExpression.__iter__ = _old_iters
+ builtins.sum = _old_sum
+ if len(context.cache):
+ if internal_error is not None:
+ logger.error("The following exception was raised when "
+ "templatizing the rule '%s':\n\t%s"
+ % (rule.__name__, internal_error[1]))
+ raise TemplateExpressionError(
+ None,
+ "Explicit iteration (for loops) over Sets is not supported "
+ "by template expressions. Encountered loop over %s"
+ % (context.cache[-1][0]._set,))
+ return None, indices
+
+
+def templatize_constraint(con):
+ return templatize_rule(con.parent_block(), con.rule, con.index_set())
diff --git a/pyomo/core/expr/visitor.py b/pyomo/core/expr/visitor.py
index e939c506aa8..a1f0bc2b913 100644
--- a/pyomo/core/expr/visitor.py
+++ b/pyomo/core/expr/visitor.py
@@ -10,10 +10,19 @@
from __future__ import division
+import inspect
import logging
+import six
from copy import deepcopy
from collections import deque
+if six.PY2:
+ getargspec = inspect.getargspec
+else:
+ # For our needs, getfullargspec is a drop-in replacement for
+ # getargspec (which was removed in Python 3.x)
+ getargspec = inspect.getfullargspec
+
logger = logging.getLogger('pyomo.core')
from pyutilib.misc.visitor import SimpleVisitor, ValueVisitor
@@ -22,6 +31,7 @@
from .symbol_map import SymbolMap
from . import expr_common as common
from .expr_errors import TemplateExpressionError
+from pyomo.common.deprecation import deprecation_warning
from pyomo.core.expr.numvalue import (
nonpyomo_leaf_types,
native_numeric_types,
@@ -49,6 +59,7 @@ class StreamBasedExpressionVisitor(object):
through callback functions as the traversal enters and leaves nodes
in the tree:
+ initializeWalker(expr) -> walk, result
enterNode(N1) -> args, data
{for N2 in args:}
beforeChild(N1, N2) -> descend, child_result
@@ -58,10 +69,20 @@ class StreamBasedExpressionVisitor(object):
acceptChildResult(N1, data, child_result) -> data
afterChild(N1, N2) -> None
exitNode(N1, data) -> N1_result
+ finalizeWalker(result) -> result
Individual event callbacks match the following signatures:
- args, data = enterNode(self, node):
+ walk, result = initializeWalker(self, expr):
+
+ initializeWalker() is called to set the walker up and perform
+ any preliminary processing on the root node. The method returns
+ a flag indicating if the tree should be walked and a result. If
+ `walk` is True, then result is ignored. If `walk` is False,
+ then `result` is returned as the final result from the walker,
+ bypassing all other callbacks (including finalizeResult).
+
+ args, data = enterNode(self, node):
enterNode() is called when the walker first enters a node (from
above), and is passed the node being entered. It is expected to
@@ -83,10 +104,11 @@ class StreamBasedExpressionVisitor(object):
this node. If not specified, the default action is to return
the data object from enterNode().
- descend, child_result = beforeChild(self, node, child):
+ descend, child_result = beforeChild(self, node, child, child_idx):
beforeChild() is called by a node for every child before
- entering the child node. The node and child nodes are passed as
+ entering the child node. The node, child node, and child index
+ (position in the args list from enterNode()) are passed as
arguments. beforeChild should return a tuple (descend,
child_result). If descend is False, the child node will not be
entered and the value returned to child_result will be passed to
@@ -94,24 +116,25 @@ class StreamBasedExpressionVisitor(object):
equivalent to (True, None). The default behavior if not
specified is equivalent to (True, None).
- data = acceptChildResult(self, node, data, child_result):
+ data = acceptChildResult(self, node, data, child_result, child_idx):
acceptChildResult() is called for each child result being
returned to a node. This callback is responsible for recording
the result for later processing or passing up the tree. It is
- passed the node, the result data structure (see enterNode()),
- and the child result. The data structure (possibly modified or
- replaced) must be returned. If acceptChildResult is not
- specified, it does nothing if data is None, otherwise it calls
- data.append(result).
+ passed the node, result data structure (see enterNode()), child
+ result, and the child index (position in args from enterNode()).
+ The data structure (possibly modified or replaced) must be
+ returned. If acceptChildResult is not specified, it does
+ nothing if data is None, otherwise it calls data.append(result).
- afterChild(self, node, child):
+ afterChild(self, node, child, child_idx):
afterChild() is called by a node for every child node
immediately after processing the node is complete before control
- moves to the next child or up to the parent node. The node and
- child node are passed, and nothing is returned. If afterChild
- is not specified, no action takes place.
+ moves to the next child or up to the parent node. The node,
+      child node, and child index (position in args from enterNode())
+ are passed, and nothing is returned. If afterChild is not
+ specified, no action takes place.
finalizeResult(self, result):
@@ -132,7 +155,7 @@ class StreamBasedExpressionVisitor(object):
# derived classes or specified as callback functions to the class
# constructor:
client_methods = ('enterNode','exitNode','beforeChild','afterChild',
- 'acceptChildResult','finalizeResult')
+ 'acceptChildResult','initializeWalker','finalizeResult')
def __init__(self, **kwds):
# This is slightly tricky: We want derived classes to be able to
# override the "None" defaults here, and for keyword arguments
@@ -147,6 +170,26 @@ def __init__(self, **kwds):
if kwds:
raise RuntimeError("Unrecognized keyword arguments: %s" % (kwds,))
+ # Handle deprecated APIs
+ _fcns = (('beforeChild',2), ('acceptChildResult',3), ('afterChild',2))
+ for name, nargs in _fcns:
+ fcn = getattr(self, name)
+ if fcn is None:
+ continue
+ _args = getargspec(fcn)
+ _self_arg = 1 if inspect.ismethod(fcn) else 0
+ if len(_args.args) == nargs + _self_arg and _args.varargs is None:
+ deprecation_warning(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the %s() "
+ "method. Please update your walker callbacks." % (name,))
+ def wrap(fcn, nargs):
+ def wrapper(*args):
+ return fcn(*args[:nargs])
+ return wrapper
+ setattr(self, name, wrap(fcn, nargs))
+
+
def walk_expression(self, expr):
"""Walk an expression, calling registered callbacks.
"""
@@ -159,12 +202,16 @@ def walk_expression(self, expr):
# tuple/list of child nodes (arguments),
# number of child nodes (arguments),
# data object to aggregate results from child nodes,
- # current child node )
+ # current child node index )
#
# The walker only needs a single pointer to the end of the list
# (ptr). The beginning of the list is indicated by a None
# parent pointer.
#
+ if self.initializeWalker is not None:
+ walk, result = self.initializeWalker(expr)
+ if not walk:
+ return result
if self.enterNode is not None:
tmp = self.enterNode(expr)
if tmp is None:
@@ -180,115 +227,130 @@ def walk_expression(self, expr):
args = ()
else:
args = expr.args
+ if hasattr(args, '__enter__'):
+ args.__enter__()
node = expr
- child_idx = 0
- ptr = (None, node, args, len(args), data, child_idx)
-
- while 1:
- if child_idx < ptr[3]:
- # This node still has children to process
- child = ptr[2][child_idx]
- # Increment the child index pointer here for
- # consistency. Note that this means that for the bulk
- # of the time, 'child_idx' is actually the index of the
- # *next* child to be processed, and will not match the
- # value of ptr[5]. This provides a modest performance
- # improvement, as we only have to recreate the ptr tuple
- # just before we descend further into the tree (i.e., we
- # avoid recreating the tuples for the special case where
- # beforeChild indicates that we should not descend
- # further).
- child_idx += 1
-
- # Notify this node that we are about to descend into a
- # child.
- if self.beforeChild is not None:
- tmp = self.beforeChild(node, child)
- if tmp is None:
- descend = True
- child_result = None
- else:
- descend, child_result = tmp
- if not descend:
- # We are aborting processing of this child node.
- # Tell this node to accept the child result and
- # we will move along
- if self.acceptChildResult is not None:
- data = self.acceptChildResult(
- node, data, child_result)
- elif data is not None:
- data.append(child_result)
- # And let the node know that we are done with a
- # child node
- if self.afterChild is not None:
- self.afterChild(node, child)
- # Jump to the top to continue processing the
- # next child node
- continue
-
- # Update the child argument counter in the stack.
- # Because we are using tuples, we need to recreate the
- # "ptr" object (linked list node)
- ptr = ptr[:4] + (data, child_idx,)
-
- # We are now going to actually enter this node. The
- # node will tell us the list of its child nodes that we
- # need to process
- if self.enterNode is not None:
- tmp = self.enterNode(child)
- if tmp is None:
- args = data = None
+ # Note that because we increment child_idx just before fetching
+ # the child node, it must be initialized to -1, and ptr[3] must
+ # always be *one less than* the number of arguments
+ child_idx = -1
+ ptr = (None, node, args, len(args)-1, data, child_idx)
+
+ try:
+ while 1:
+ if child_idx < ptr[3]:
+ # Increment the child index pointer here for
+ # consistency. Note that this means that for the bulk
+ # of the time, 'child_idx' will not match the value of
+ # ptr[5]. This provides a modest performance
+ # improvement, as we only have to recreate the ptr tuple
+ # just before we descend further into the tree (i.e., we
+ # avoid recreating the tuples for the special case where
+ # beforeChild indicates that we should not descend
+ # further).
+ child_idx += 1
+ # This node still has children to process
+ child = ptr[2][child_idx]
+
+ # Notify this node that we are about to descend into a
+ # child.
+ if self.beforeChild is not None:
+ tmp = self.beforeChild(node, child, child_idx)
+ if tmp is None:
+ descend = True
+ child_result = None
+ else:
+ descend, child_result = tmp
+ if not descend:
+ # We are aborting processing of this child node.
+ # Tell this node to accept the child result and
+ # we will move along
+ if self.acceptChildResult is not None:
+ data = self.acceptChildResult(
+ node, data, child_result, child_idx)
+ elif data is not None:
+ data.append(child_result)
+ # And let the node know that we are done with a
+ # child node
+ if self.afterChild is not None:
+ self.afterChild(node, child, child_idx)
+ # Jump to the top to continue processing the
+ # next child node
+ continue
+
+ # Update the child argument counter in the stack.
+ # Because we are using tuples, we need to recreate the
+ # "ptr" object (linked list node)
+ ptr = ptr[:4] + (data, child_idx,)
+
+ # We are now going to actually enter this node. The
+ # node will tell us the list of its child nodes that we
+ # need to process
+ if self.enterNode is not None:
+ tmp = self.enterNode(child)
+ if tmp is None:
+ args = data = None
+ else:
+ args, data = tmp
else:
- args, data = tmp
- else:
- args = None
- data = []
- if args is None:
- if type(child) in nonpyomo_leaf_types \
- or not child.is_expression_type():
- # Leaves (either non-pyomo types or
- # non-Expressions) have no child arguments, so
- # are just put on the stack
- args = ()
+ args = None
+ data = []
+ if args is None:
+ if type(child) in nonpyomo_leaf_types \
+ or not child.is_expression_type():
+ # Leaves (either non-pyomo types or
+ # non-Expressions) have no child arguments, so
+ # are just put on the stack
+ args = ()
+ else:
+ args = child.args
+ if hasattr(args, '__enter__'):
+ args.__enter__()
+ node = child
+ child_idx = -1
+ ptr = (ptr, node, args, len(args)-1, data, child_idx)
+
+ else: # child_idx == ptr[3]:
+ # We are done with this node. Call exitNode to compute
+ # any result
+ if hasattr(ptr[2], '__exit__'):
+ ptr[2].__exit__(None, None, None)
+ if self.exitNode is not None:
+ node_result = self.exitNode(node, data)
else:
- args = child.args
- node = child
- child_idx = 0
- ptr = (ptr, node, args, len(args), data, child_idx)
-
- else:
- # We are done with this node. Call exitNode to compute
- # any result
- if self.exitNode is not None:
- node_result = self.exitNode(node, data)
- else:
- node_result = data
-
- # Pop the node off the linked list
+ node_result = data
+
+ # Pop the node off the linked list
+ ptr = ptr[0]
+ # If we have returned to the beginning, return the final
+ # answer
+ if ptr is None:
+ if self.finalizeResult is not None:
+ return self.finalizeResult(node_result)
+ else:
+ return node_result
+ # Not done yet, update node to point to the new active
+ # node
+ node, child = ptr[1], node
+ data = ptr[4]
+ child_idx = ptr[5]
+
+ # We need to alert the node to accept the child's result:
+ if self.acceptChildResult is not None:
+ data = self.acceptChildResult(
+ node, data, node_result, child_idx)
+ elif data is not None:
+ data.append(node_result)
+
+ # And let the node know that we are done with a child node
+ if self.afterChild is not None:
+ self.afterChild(node, child, child_idx)
+
+ finally:
+ while ptr is not None:
+ if hasattr(ptr[2], '__exit__'):
+ ptr[2].__exit__(None, None, None)
ptr = ptr[0]
- # If we have returned to the beginning, return the final
- # answer
- if ptr is None:
- if self.finalizeResult is not None:
- return self.finalizeResult(node_result)
- else:
- return node_result
- # Not done yet, update node to point to the new active
- # node
- node, child = ptr[1], node
- data = ptr[4]
- child_idx = ptr[5]
-
- # We need to alert the node to accept the child's result:
- if self.acceptChildResult is not None:
- data = self.acceptChildResult(node, data, node_result)
- elif data is not None:
- data.append(node_result)
-
- # And let the node know that we are done with a child node
- if self.afterChild is not None:
- self.afterChild(node, child)
-
class SimpleExpressionVisitor(object):
@@ -864,7 +926,7 @@ def sizeof_expression(expr):
"""
def enter(node):
return None, 1
- def accept(node, data, child_result):
+ def accept(node, data, child_result, child_idx):
return data + child_result
return StreamBasedExpressionVisitor(
enterNode=enter,
@@ -890,13 +952,15 @@ def visiting_potential_leaf(self, node):
if node.__class__ in nonpyomo_leaf_types:
return True, node
- if node.is_variable_type():
- return True, value(node)
+ if node.is_expression_type():
+ return False, None
- if not node.is_expression_type():
+ if node.is_numeric_type():
return True, value(node)
+ else:
+ return True, node
+
- return False, None
class FixedExpressionError(Exception):
@@ -926,22 +990,33 @@ def visiting_potential_leaf(self, node):
if node.__class__ in nonpyomo_leaf_types:
return True, node
- if node.is_parameter_type():
- if node._component()._mutable:
- raise FixedExpressionError()
- return True, value(node)
-
+ if node.is_expression_type():
+ return False, None
- if node.is_variable_type():
- if node.fixed:
- raise FixedExpressionError()
- else:
+ if node.is_numeric_type():
+ # Get the object value. This will also cause templates to
+ # raise TemplateExpressionErrors
+ try:
+ val = value(node)
+ except TemplateExpressionError:
+ raise
+ except:
+ # Uninitialized Var/Param objects should be given the
+ # opportunity to map the error to a NonConstant / Fixed
+ # expression error
+ if not node.is_fixed():
+ raise NonConstantExpressionError()
+ if not node.is_constant():
+ raise FixedExpressionError()
+ raise
+
+ if not node.is_fixed():
raise NonConstantExpressionError()
+ if not node.is_constant():
+ raise FixedExpressionError()
+ return True, val
- if not node.is_expression_type():
- return True, value(node)
-
- return False, None
+ return True, node
def evaluate_expression(exp, exception=True, constant=False):
@@ -973,29 +1048,18 @@ def evaluate_expression(exp, exception=True, constant=False):
try:
return visitor.dfs_postorder_stack(exp)
- except NonConstantExpressionError: #pragma: no cover
- if exception:
- raise
- return None
-
- except FixedExpressionError: #pragma: no cover
- if exception:
- raise
- return None
-
- except TemplateExpressionError: #pragma: no cover
- if exception:
- raise
- return None
-
- except ValueError:
- if exception:
- raise
- return None
-
- except TypeError:
- # This can be raised in Python3 when evaluating a operation
- # returns a complex number (e.g., sqrt(-1))
+ except ( TemplateExpressionError, ValueError, TypeError,
+ NonConstantExpressionError, FixedExpressionError ):
+ # Errors that we want to be able to suppress:
+ #
+ # TemplateExpressionError: raised when generating expression
+ # templates
+ # FixedExpressionError, NonConstantExpressionError: raised
+ # when processing expressions that are expected to be fixed
+ # (e.g., indices)
+ # ValueError: "standard" expression value errors
+ # TypeError: This can be raised in Python3 when evaluating a
+ # operation returns a complex number (e.g., sqrt(-1))
if exception:
raise
return None
@@ -1164,13 +1228,16 @@ def visiting_potential_leaf(self, node):
Return True if the node is not expanded.
"""
- if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable():
+ if node.__class__ in nonpyomo_leaf_types:
return True, 0
- if not node.is_expression_type():
- return True, 0 if node.is_fixed() else 1
+ if node.is_expression_type():
+ return False, None
- return False, None
+ if node.is_numeric_type():
+ return True, 0 if node.is_fixed() else 1
+ else:
+ return True, node
def polynomial_degree(node):
@@ -1209,13 +1276,16 @@ def visiting_potential_leaf(self, node):
Return True if the node is not expanded.
"""
- if node.__class__ in nonpyomo_leaf_types or not node.is_potentially_variable():
+ if node.__class__ in nonpyomo_leaf_types:
return True, True
- elif not node.is_expression_type():
+ elif node.is_expression_type():
+ return False, None
+
+ elif node.is_numeric_type():
return True, node.is_fixed()
- return False, None
+ return True, node
def _expression_is_fixed(node):
@@ -1288,15 +1358,18 @@ def visiting_potential_leaf(self, node):
if node.__class__ in nonpyomo_leaf_types:
return True, str(node)
+ if node.is_expression_type():
+ return False, None
+
if node.is_variable_type():
if not node.fixed:
return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=False)
return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values)
- if not node.is_expression_type():
+ if hasattr(node, 'to_string'):
return True, node.to_string(verbose=self.verbose, smap=self.smap, compute_values=self.compute_values)
-
- return False, None
+ else:
+ return True, str(node)
def expression_to_string(expr, verbose=None, labeler=None, smap=None, compute_values=False):
diff --git a/pyomo/core/kernel/matrix_constraint.py b/pyomo/core/kernel/matrix_constraint.py
index 77b9efb85f7..044631cf4a4 100644
--- a/pyomo/core/kernel/matrix_constraint.py
+++ b/pyomo/core/kernel/matrix_constraint.py
@@ -8,6 +8,10 @@
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
+from pyomo.common.dependencies import (
+ numpy, numpy_available as has_numpy,
+ scipy, scipy_available as has_scipy,
+)
import pyomo.core.expr
from pyomo.core.expr.numvalue import NumericValue
from pyomo.core.kernel.constraint import \
@@ -17,18 +21,6 @@
import six
from six.moves import zip, xrange
-try:
- import numpy
- has_numpy = True
-except: #pragma:nocover
- has_numpy = False
-
-try:
- import scipy
- has_scipy = True
-except: #pragma:nocover
- has_scipy = False
-
_noarg = object()
#
diff --git a/pyomo/core/kernel/piecewise_library/transforms.py b/pyomo/core/kernel/piecewise_library/transforms.py
index 8fb73249e21..db5f1df8928 100644
--- a/pyomo/core/kernel/piecewise_library/transforms.py
+++ b/pyomo/core/kernel/piecewise_library/transforms.py
@@ -30,7 +30,7 @@
# handle the between sizes.
from pyomo.core.expr.numvalue import value as _value
-from pyomo.core.kernel.set_types import Binary
+from pyomo.core.kernel.set_types import IntegerSet
from pyomo.core.kernel.block import block
from pyomo.core.kernel.expression import (expression,
expression_tuple)
@@ -694,7 +694,7 @@ def polytope_verts(p):
for p in polytopes
for v in vertices)
y = self.v['y'] = variable_tuple(
- variable(domain=Binary)
+ variable(domain_type=IntegerSet, lb=0, ub=1)
for p in polytopes)
# create piecewise constraints
@@ -782,7 +782,7 @@ def vertex_polys(v):
lmbda = self.v['lambda'] = variable_tuple(
variable(lb=0) for v in vertices)
y = self.v['y'] = variable_tuple(
- variable(domain=Binary)
+ variable(domain_type=IntegerSet, lb=0, ub=1)
for p in polytopes)
lmbda_tuple = tuple(lmbda)
@@ -868,7 +868,7 @@ def __init__(self, *args, **kwds):
variable() for p in polytopes)
lmbda_tuple = tuple(lmbda)
y = self.v['y'] = variable_tuple(
- variable(domain=Binary) for p in polytopes)
+ variable(domain_type=IntegerSet, lb=0, ub=1) for p in polytopes)
y_tuple = tuple(y)
# create piecewise constraints
@@ -950,7 +950,8 @@ def __init__(self, *args, **kwds):
delta[-1].lb = 0
delta_tuple = tuple(delta)
y = self.v['y'] = variable_tuple(
- variable(domain=Binary) for p in polytopes[:-1])
+ variable(domain_type=IntegerSet, lb=0, ub=1)
+ for p in polytopes[:-1])
# create piecewise constraints
self.c = constraint_list()
@@ -1041,7 +1042,7 @@ def polytope_verts(p):
for p in polytopes
for v in polytope_verts(p))
y = self.v['y'] = variable_tuple(
- variable(domain=Binary) for i in range(L))
+ variable(domain_type=IntegerSet, lb=0, ub=1) for i in range(L))
# create piecewise constraints
self.c = constraint_list()
@@ -1168,7 +1169,7 @@ def __init__(self, *args, **kwds):
lmbda = self.v['lambda'] = variable_tuple(
variable(lb=0) for v in vertices)
y = self.v['y'] = variable_list(
- variable(domain=Binary) for s in S)
+ variable(domain_type=IntegerSet, lb=0, ub=1) for s in S)
# create piecewise constraints
self.c = constraint_list()
diff --git a/pyomo/core/kernel/piecewise_library/transforms_nd.py b/pyomo/core/kernel/piecewise_library/transforms_nd.py
index cba8cc20c5d..c930d3f7b67 100644
--- a/pyomo/core/kernel/piecewise_library/transforms_nd.py
+++ b/pyomo/core/kernel/piecewise_library/transforms_nd.py
@@ -22,7 +22,7 @@
import collections
from pyomo.core.kernel.block import block
-from pyomo.core.kernel.set_types import Binary
+from pyomo.core.kernel.set_types import IntegerSet
from pyomo.core.kernel.variable import (variable,
variable_dict,
variable_tuple)
@@ -348,7 +348,7 @@ def __init__(self, *args, **kwds):
lmbda = self.v['lambda'] = variable_tuple(
variable(lb=0) for v in vertices)
y = self.v['y'] = variable_tuple(
- variable(domain=Binary) for s in simplices)
+ variable(domain_type=IntegerSet, lb=0, ub=1) for s in simplices)
lmbda_tuple = tuple(lmbda)
# create constraints
diff --git a/pyomo/core/kernel/piecewise_library/util.py b/pyomo/core/kernel/piecewise_library/util.py
index 1ab15cd7cc0..14072d10323 100644
--- a/pyomo/core/kernel/piecewise_library/util.py
+++ b/pyomo/core/kernel/piecewise_library/util.py
@@ -15,20 +15,9 @@
from six.moves import xrange
from six import advance_iterator
-numpy_available = False
-try:
- import numpy
- numpy_available = True
-except: #pragma:nocover
- pass
-
-scipy_available = False
-try:
- import scipy
- import scipy.spatial
- scipy_available = True
-except: #pragma:nocover
- pass
+from pyomo.common.dependencies import (
+ numpy, numpy_available, scipy, scipy_available
+)
class PiecewiseValidationError(Exception):
"""An exception raised when validation of piecewise
@@ -179,9 +168,6 @@ def generate_delaunay(variables, num=10, **kwds):
Returns:
A scipy.spatial.Delaunay object.
"""
- if not (numpy_available and scipy_available): #pragma:nocover
- raise ImportError(
- "numpy and scipy are required")
linegrids = []
for v in variables:
if v.has_lb() and v.has_ub():
diff --git a/pyomo/core/kernel/set_types.py b/pyomo/core/kernel/set_types.py
index d0995078ce8..c77cb970d52 100644
--- a/pyomo/core/kernel/set_types.py
+++ b/pyomo/core/kernel/set_types.py
@@ -20,232 +20,62 @@
_virtual_sets = []
-class _VirtualSet(object):
- """
- A set that does not contain elements, but instead overrides the
- __contains__ method to define set membership.
- """
-
- def __init__(self, name=None, doc=None, bounds=None, validate=None):
- self.name = name
- self.doc = doc
- self._bounds = bounds
- if self._bounds is None:
- self._bounds = (None, None)
- self.validate = validate
-
- global _virtual_sets
- _virtual_sets.append(self)
-
- def __lt__(self, other):
- raise TypeError("'<' not supported")
-
- def __le__(self, other):
- raise TypeError("<=' not supported")
-
- def __gt__(self, other):
- raise TypeError("'>' not supported")
-
- def __ge__(self, other):
- raise TypeError("'>=' not supported")
-
- def __str__(self):
- if self.name is None:
- return super(_VirtualSet, self).__str__()
- else:
- return str(self.name)
-
- def bounds(self):
- return self._bounds
-
- def __contains__(self, other):
- valid = True
- if self.validate is not None:
- valid = self.validate(other)
- if valid:
- if (self._bounds is not None):
- if self._bounds[0] is not None:
- valid &= (other >= self._bounds[0])
- if self._bounds[1] is not None:
- valid &= (other <= self._bounds[1])
- return valid
-
-class RealSet(_VirtualSet):
- """A virtual set that represents real values"""
-
- def __init__(self, *args, **kwds):
- """Constructor"""
- _VirtualSet.__init__(self, *args, **kwds)
-
- def __contains__(self, element):
- """Report whether an element is an 'int', 'long' or 'float' value.
-
- (Called in response to the expression 'element in self'.)
- """
- return element.__class__ in native_numeric_types and \
- _VirtualSet.__contains__(self, element)
-
-class IntegerSet(_VirtualSet):
- """A virtual set that represents integer values"""
-
- def __init__(self, *args, **kwds):
- """Constructor"""
- _VirtualSet.__init__(self, *args, **kwds)
-
- def __contains__(self, element):
- """Report whether an element is an 'int'.
-
- (Called in response to the expression 'element in self'.)
- """
- return element.__class__ in native_integer_types and \
- _VirtualSet.__contains__(self, element)
-
-class BooleanSet(_VirtualSet):
- """A virtual set that represents boolean values"""
-
- def __init__(self, *args, **kwds):
- """Construct the set of booleans, which contains no explicit values"""
- assert 'bounds' not in kwds
- kwds['bounds'] = (0,1)
- _VirtualSet.__init__(self, *args, **kwds)
-
- def __contains__(self, element):
- """Report whether an element is a boolean.
-
- (Called in response to the expression 'element in self'.)
- """
- return ((element.__class__ in native_boolean_types) or \
- (element.__class__ in native_numeric_types)) and \
- (element in (0, 1, True, False)) and \
- _VirtualSet.__contains__(self, element)
- # where does it end? (i.e., why not 'true', 'TRUE, etc.?)
- #and ( element in (0, 1, True, False, 'True', 'False', 'T', 'F') )
-
-# GH 2/2016: I'm doing this to make instances of
-# RealInterval and IntegerInterval pickle-able
-# objects. However, these two classes seem like
-# they could be real memory hogs when used as
-# variable domains (for instance via the
-# relax_integrality transformation). Should we
-# consider reimplementing them as more
-# lightweight objects?
-class _validate_interval(object):
- __slots__ = ("_obj",)
- def __init__(self, obj): self._obj = weakref_ref(obj)
- def __getstate__(self): return (self._obj(),)
- def __setstate__(self, state): self._obj = weakref_ref(state[0])
- def __call__(self, x):
- assert x is not None
- obj = self._obj()
- return (((obj._bounds[0] is None) or \
- (x >= obj._bounds[0])) and \
- ((obj._bounds[1] is None) or \
- (x <= obj._bounds[1])))
-
-class RealInterval(RealSet):
- """A virtual set that represents an interval of real values"""
-
- def __init__(self, name=None, **kwds):
- """Constructor"""
- if 'bounds' not in kwds:
- kwds['bounds'] = (None,None)
- kwds['validate'] = _validate_interval(self)
- # GH: Assigning a name here so that var.pprint() does not
- # output _unknown_ in the book examples
- if name is None:
- kwds['name'] = "RealInterval"+str(kwds['bounds'])
- else:
- kwds['name'] = name
- RealSet.__init__(self, **kwds)
-
-class IntegerInterval(IntegerSet):
- """A virtual set that represents an interval of integer values"""
-
- def __init__(self, name=None, **kwds):
- """Constructor"""
- if 'bounds' not in kwds:
- kwds['bounds'] = (None,None)
- kwds['validate'] = _validate_interval(self)
- # GH: Assigning a name here so that var.pprint() does not
- # output _unknown_ in the book examples
- if name is None:
- kwds['name'] = "IntegerInterval"+str(kwds['bounds'])
- else:
- kwds['name'] = name
- IntegerSet.__init__(self, **kwds)
-
-Reals=RealSet(name="Reals", doc="A set of real values")
-def validate_PositiveValues(x): return x > 0
-def validate_NonPositiveValues(x): return x <= 0
-def validate_NegativeValues(x): return x < 0
-def validate_NonNegativeValues(x): return x >= 0
-def validate_PercentFraction(x): return x >= 0 and x <= 1.0
-
-PositiveReals = RealSet(
- name="PositiveReals",
- doc="A set of positive real values",
- validate=validate_PositiveValues,
- bounds=(0, None)
-)
-NonPositiveReals = RealSet(
- name="NonPositiveReals",
- doc="A set of non-positive real values",
- validate=validate_NonPositiveValues,
- bounds=(None, 0)
-)
-NegativeReals = RealSet(
- name="NegativeReals",
- doc="A set of negative real values",
- validate=validate_NegativeValues,
- bounds=(None, 0)
-)
-NonNegativeReals = RealSet(
- name="NonNegativeReals",
- doc="A set of non-negative real values",
- validate=validate_NonNegativeValues,
- bounds=(0, None)
-)
-PercentFraction = RealSet(
- name="PercentFraction",
- doc="A set of real values in the interval [0,1]",
- validate=validate_PercentFraction,
- bounds=(0.0,1.0)
-)
-UnitInterval = RealSet(
- name="UnitInterval",
- doc="A set of real values in the interval [0,1]",
- validate=validate_PercentFraction,
- bounds=(0.0,1.0)
-)
-
-Integers = IntegerSet(
- name="Integers",
- doc="A set of integer values"
-)
-PositiveIntegers = IntegerSet(
- name="PositiveIntegers",
- doc="A set of positive integer values",
- validate=validate_PositiveValues,
- bounds=(1, None)
-)
-NonPositiveIntegers = IntegerSet(
- name="NonPositiveIntegers",
- doc="A set of non-positive integer values",
- validate=validate_NonPositiveValues,
- bounds=(None, 0)
-)
-NegativeIntegers = IntegerSet(
- name="NegativeIntegers",
- doc="A set of negative integer values",
- validate=validate_NegativeValues,
- bounds=(None, -1)
-)
-NonNegativeIntegers = IntegerSet(
- name="NonNegativeIntegers",
- doc="A set of non-negative integer values",
- validate=validate_NonNegativeValues,
- bounds=(0, None)
-)
-
-Boolean = BooleanSet(name="Boolean", doc="A set of boolean values")
-Binary = BooleanSet(name="Binary", doc="A set of boolean values")
+#
+# Dummy types used by Kernel as domain flags
+#
+class RealSet(object):
+ @staticmethod
+ def get_interval():
+ return (None, None, 0)
+
+ @staticmethod
+ def is_continuous():
+ return True
+
+ @staticmethod
+ def is_integer():
+ return False
+
+ @staticmethod
+ def is_binary():
+ return False
+
+
+class IntegerSet(object):
+ @staticmethod
+ def get_interval():
+ return (None, None, 1)
+
+ @staticmethod
+ def is_continuous():
+ return False
+
+ @staticmethod
+ def is_integer():
+ return True
+
+ @staticmethod
+ def is_binary():
+ return False
+
+
+class BinarySet(object):
+ @staticmethod
+ def get_interval():
+ return (0, 1, 1)
+
+ @staticmethod
+ def is_continuous():
+ return False
+
+ @staticmethod
+ def is_integer():
+ return True
+
+ @staticmethod
+ def is_binary():
+ return True
+
+#TODO: Deprecate BooleanSet (that will soon be replaced by a true BooleanSet
+# admitting {True, False})
+BooleanSet = BinarySet
diff --git a/pyomo/core/kernel/variable.py b/pyomo/core/kernel/variable.py
index e953a2ea34a..bd22bdcdd12 100644
--- a/pyomo/core/kernel/variable.py
+++ b/pyomo/core/kernel/variable.py
@@ -18,10 +18,7 @@
from pyomo.core.kernel.container_utils import \
define_simple_containers
from pyomo.core.kernel.set_types import (RealSet,
- IntegerSet,
- BooleanSet,
- RealInterval,
- IntegerInterval)
+ IntegerSet)
_pos_inf = float('inf')
_neg_inf = float('-inf')
@@ -36,15 +33,12 @@ def _extract_domain_type_and_bounds(domain_type,
"'domain_type' keywords can be changed "
"from their default value when "
"initializing a variable.")
- domain_type = type(domain)
- # handle some edge cases
- if domain_type is BooleanSet:
- domain_type = IntegerSet
- elif domain_type is RealInterval:
+ domain_lb, domain_ub, domain_step = domain.get_interval()
+ if domain_step == 0:
domain_type = RealSet
- elif domain_type is IntegerInterval:
+ elif domain_step == 1:
domain_type = IntegerSet
- domain_lb, domain_ub = domain.bounds()
+ # else: domain_type will remain None and generate an exception below
if domain_lb is not None:
if lb is not None:
raise ValueError(
@@ -188,30 +182,26 @@ def slack(self):
def is_continuous(self):
"""Returns :const:`True` when the domain type is
:class:`RealSet`."""
- return issubclass(self.domain_type, RealSet)
+ return self.domain_type.get_interval()[2] == 0
# this could be expanded to include semi-continuous
# where as is_integer would not
def is_discrete(self):
"""Returns :const:`True` when the domain type is
:class:`IntegerSet`."""
- return issubclass(self.domain_type, IntegerSet)
+ return self.domain_type.get_interval()[2] not in (0, None)
def is_integer(self):
"""Returns :const:`True` when the domain type is
:class:`IntegerSet`."""
- return issubclass(self.domain_type, IntegerSet)
+ return self.domain_type.get_interval()[2] == 1
def is_binary(self):
"""Returns :const:`True` when the domain type is
:class:`IntegerSet` and the bounds are within
[0,1]."""
- lb, ub = self.bounds
- return self.is_integer() and \
- (lb is not None) and \
- (ub is not None) and \
- (value(lb) >= 0) and \
- (value(ub) <= 1)
+ return self.domain_type.get_interval()[2] == 1 \
+ and (value(self.lb), value(self.ub)) in {(0,1), (0,0), (1,1)}
# TODO?
# def is_semicontinuous(self):
diff --git a/pyomo/core/plugins/transform/add_slack_vars.py b/pyomo/core/plugins/transform/add_slack_vars.py
index 5a85c5d6965..db0840303f9 100644
--- a/pyomo/core/plugins/transform/add_slack_vars.py
+++ b/pyomo/core/plugins/transform/add_slack_vars.py
@@ -11,9 +11,6 @@
import logging
logger = logging.getLogger('pyomo.core')
-# DEBUG
-from nose.tools import set_trace
-
@TransformationFactory.register('core.add_slack_variables', \
doc="Create a model where we add slack variables to every constraint "
diff --git a/pyomo/core/plugins/transform/discrete_vars.py b/pyomo/core/plugins/transform/discrete_vars.py
index a952bbb9470..65e480a7674 100644
--- a/pyomo/core/plugins/transform/discrete_vars.py
+++ b/pyomo/core/plugins/transform/discrete_vars.py
@@ -2,8 +2,8 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
@@ -13,112 +13,128 @@
from six import itervalues
-from pyomo.core.base import (
+from pyomo.common import deprecated
+from pyomo.core.base import (
Transformation,
TransformationFactory,
- Binary,
- Boolean,
- Integers,
- Reals,
- PositiveIntegers,
- PositiveReals,
- NonPositiveIntegers,
- NonPositiveReals,
- NegativeIntegers,
- NegativeReals,
- NonNegativeIntegers,
- NonNegativeReals,
- IntegerInterval,
- RealInterval,
Var,
Suffix,
+ Reals,
)
-_discrete_relaxation_map = {
- Binary : NonNegativeReals,
- Boolean : NonNegativeReals,
- Integers : Reals,
- PositiveIntegers : PositiveReals,
- NonPositiveIntegers : NonPositiveReals,
- NegativeIntegers : NegativeReals,
- NonNegativeIntegers : NonNegativeReals,
- IntegerInterval : RealInterval,
-}
-
-
#
-# This transformation relaxes known discrete domains to their continuous
+# This transformation relaxes integer ranges to their continuous
# counterparts
#
-@TransformationFactory.register( 'core.relax_discrete',
- doc="Relax known discrete domains to continuous counterparts" )
-class RelaxDiscreteVars(Transformation):
+@TransformationFactory.register(
+ 'core.relax_integer_vars',
+ doc="Relax integer variables to continuous counterparts" )
+class RelaxIntegerVars(Transformation):
def __init__(self):
- super(RelaxDiscreteVars, self).__init__()
+ super(RelaxIntegerVars, self).__init__()
- def _apply_to(self, model, **kwds):
+ def _apply_to(self, model, **kwds):
options = kwds.pop('options', {})
if kwds.get('undo', options.get('undo', False)):
- for v, d in itervalues(model._relaxed_discrete_vars[None]):
+ for v, d in itervalues(model._relaxed_integer_vars[None]):
+ bounds = v.bounds
v.domain = d
- model.del_component("_relaxed_discrete_vars")
+ v.setlb(bounds[0])
+ v.setub(bounds[1])
+ model.del_component("_relaxed_integer_vars")
return
-
+ # True by default, you can specify False if you want
+ descend = kwds.get('transform_deactivated_blocks',
+ options.get('transform_deactivated_blocks', True))
+ active = None if descend else True
+
# Relax the model
relaxed_vars = {}
_base_model_vars = model.component_data_objects(
- Var, active=True, descend_into=True )
+ Var, active=active, descend_into=True )
for var in _base_model_vars:
- if var.domain in _discrete_relaxation_map:
- if var.domain is Binary or var.domain is Boolean:
- var.setlb(0)
- var.setub(1)
- # Note: some indexed components can only have their
- # domain set on the parent component (the individual
- # indices cannot be set independently)
- _c = var.parent_component()
- if id(_c) in _discrete_relaxation_map:
+ if not var.is_integer():
+ continue
+ # Note: some indexed components can only have their
+ # domain set on the parent component (the individual
+ # indices cannot be set independently)
+ _c = var.parent_component()
+ try:
+ lb, ub = var.bounds
+ _domain = var.domain
+ var.domain = Reals
+ var.setlb(lb)
+ var.setub(ub)
+ relaxed_vars[id(var)] = (var, _domain)
+ except:
+ if id(_c) in relaxed_vars:
continue
- try:
- _domain = var.domain
- var.domain = _discrete_relaxation_map[_domain]
- relaxed_vars[id(var)] = (var, _domain)
- except:
- _domain = _c.domain
- _c.domain = _discrete_relaxation_map[_domain]
- relaxed_vars[id(_c)] = (_c, _domain)
- model._relaxed_discrete_vars = Suffix(direction=Suffix.LOCAL)
- model._relaxed_discrete_vars[None] = relaxed_vars
+ _domain = _c.domain
+ lb, ub = _c.bounds
+ _c.domain = Reals
+ _c.setlb(lb)
+ _c.setub(ub)
+ relaxed_vars[id(_c)] = (_c, _domain)
+ model._relaxed_integer_vars = Suffix(direction=Suffix.LOCAL)
+ model._relaxed_integer_vars[None] = relaxed_vars
+
+
+@TransformationFactory.register(
+ 'core.relax_discrete',
+ doc="[DEPRECATED] Relax integer variables to continuous counterparts" )
+class RelaxDiscreteVars(RelaxIntegerVars):
+ """
+ This plugin relaxes integrality in a Pyomo model.
+ """
+
+ @deprecated(
+ "core.relax_discrete is deprecated. Use core.relax_integer_vars",
+ version='TBD')
+ def __init__(self, **kwds):
+ super(RelaxDiscreteVars, self).__init__(**kwds)
#
# This transformation fixes known discrete domains to their current values
#
-@TransformationFactory.register('core.fix_discrete',
- doc="Fix known discrete domains to continuous counterparts")
-class FixDiscreteVars(Transformation):
+@TransformationFactory.register(
+ 'core.fix_integer_vars',
+ doc="Fix all integer variables to their current values")
+class FixIntegerVars(Transformation):
def __init__(self):
- super(FixDiscreteVars, self).__init__()
+ super(FixIntegerVars, self).__init__()
def _apply_to(self, model, **kwds):
options = kwds.pop('options', {})
if kwds.get('undo', options.get('undo', False)):
- for v in model._fixed_discrete_vars[None]:
+ for v in model._fixed_integer_vars[None]:
v.unfix()
- model.del_component("_fixed_discrete_vars")
+ model.del_component("_fixed_integer_vars")
return
fixed_vars = []
_base_model_vars = model.component_data_objects(
Var, active=True, descend_into=True)
for var in _base_model_vars:
- # Instead of checking against `_discrete_relaxation_map.keys()`
- # we just check the item properties to fix #995
- # When #326 has been resolved, we can check against the dict-keys again
- if not var.is_continuous() and not var.is_fixed():
+ # Instead of checking against
+ # `_integer_relaxation_map.keys()` we just check the item
+ # properties to fix #995 When #326 has been resolved, we can
+ # check against the dict-keys again
+ if var.is_integer() and not var.is_fixed():
fixed_vars.append(var)
var.fix()
- model._fixed_discrete_vars = Suffix(direction=Suffix.LOCAL)
- model._fixed_discrete_vars[None] = fixed_vars
+ model._fixed_integer_vars = Suffix(direction=Suffix.LOCAL)
+ model._fixed_integer_vars[None] = fixed_vars
+
+
+@TransformationFactory.register(
+ 'core.fix_discrete',
+ doc="[DEPRECATED] Fix all integer variables to their current values")
+class FixDiscreteVars(FixIntegerVars):
+ @deprecated(
+ "core.fix_discrete is deprecated. Use core.fix_integer_vars",
+ version='TBD')
+ def __init__(self, **kwds):
+ super(FixDiscreteVars, self).__init__(**kwds)
diff --git a/pyomo/core/plugins/transform/nonnegative_transform.py b/pyomo/core/plugins/transform/nonnegative_transform.py
index 8605718c762..f50dd4490fd 100644
--- a/pyomo/core/plugins/transform/nonnegative_transform.py
+++ b/pyomo/core/plugins/transform/nonnegative_transform.py
@@ -21,6 +21,9 @@
from pyomo.core.plugins.transform.util import collectAbstractComponents
+import logging
+logger = logging.getLogger('pyomo.core')
+
class VarmapVisitor(EXPR.ExpressionReplacementVisitor):
def __init__(self, varmap):
@@ -157,12 +160,8 @@ def _create_using(self, model, **kwds):
v_ndx = str(ndx)
# Get the variable bounds
- lb = var[ndx].lb
- ub = var[ndx].ub
- if lb is not None:
- lb = value(lb)
- if ub is not None:
- ub = value(ub)
+ lb = value(var[ndx].lb)
+ ub = value(var[ndx].ub)
orig_bounds[ndx] = (lb, ub)
# Get the variable domain
@@ -247,20 +246,21 @@ def _create_using(self, model, **kwds):
# Domain will either be NonNegativeReals, NonNegativeIntegers,
# or Binary. We consider Binary because some solvers may
# optimize over binary variables.
- if isinstance(orig_domain[ndx], RealSet):
+ if var[ndx].is_continuous():
for x in new_indices:
domains[x] = NonNegativeReals
- elif isinstance(orig_domain[ndx], IntegerSet):
- for x in new_indices:
- domains[x] = NonNegativeIntegers
- elif isinstance(orig_domain[ndx], BooleanSet):
+ elif var[ndx].is_binary():
for x in new_indices:
domains[x] = Binary
+ elif var[ndx].is_integer():
+ for x in new_indices:
+ domains[x] = NonNegativeIntegers
else:
- print ("Warning: domain '%s' not recognized, " + \
- "defaulting to 'Reals'") % (str(var.domain))
+ logger.warning(
+ "Warning: domain '%s' not recognized, "
+ "defaulting to 'NonNegativeReals'" % (var.domain,))
for x in new_indices:
- domains[x] = Reals
+ domains[x] = NonNegativeReals
constraint_rules[var_name] = constraints
domain_rules[var_name] = partial(self.exprMapRule, domains)
diff --git a/pyomo/core/plugins/transform/relax_integrality.py b/pyomo/core/plugins/transform/relax_integrality.py
index 971f62a47e1..5e0776182da 100644
--- a/pyomo/core/plugins/transform/relax_integrality.py
+++ b/pyomo/core/plugins/transform/relax_integrality.py
@@ -2,39 +2,28 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-from pyomo.core.base import Var
-from pyomo.core.base.set_types import BooleanSet, IntegerSet, Reals, RealInterval
-import pyomo.core.base
+from pyomo.common import deprecated
from pyomo.core.base import TransformationFactory
-from pyomo.core.plugins.transform.hierarchy import NonIsomorphicTransformation
+from pyomo.core.plugins.transform.discrete_vars import RelaxIntegerVars
-@TransformationFactory.register('core.relax_integrality',\
- doc="Create a model where integer variables are replaced with real variables.")
-class RelaxIntegrality(NonIsomorphicTransformation):
+@TransformationFactory.register(
+ 'core.relax_integrality',
+ doc="[DEPRECATED] Create a model where integer variables are replaced with "
+ "real variables.")
+class RelaxIntegrality(RelaxIntegerVars):
"""
This plugin relaxes integrality in a Pyomo model.
"""
+ @deprecated(
+ "core.relax_integrality is deprecated. Use core.relax_integer_vars",
+ version='TBD')
def __init__(self, **kwds):
- kwds['name'] = "relax_integrality"
super(RelaxIntegrality, self).__init__(**kwds)
-
- def _apply_to(self, model, **kwds):
- #
- # Iterate over all variables, replacing the domain with a real-valued domain
- # and setting appropriate bounds.
- #
- for var in model.component_data_objects(Var):
- # var.bounds returns the tightest of the domain
- # vs user-supplied lower and upper bounds
- lb, ub = var.bounds
- var.domain = Reals
- var.setlb(lb)
- var.setub(ub)
diff --git a/pyomo/core/plugins/transform/util.py b/pyomo/core/plugins/transform/util.py
index 345563819b4..e7f997ff32c 100644
--- a/pyomo/core/plugins/transform/util.py
+++ b/pyomo/core/plugins/transform/util.py
@@ -143,9 +143,6 @@ def collectAbstractComponents(model):
# Get the domain
data[domain] = _getAbstractDomain(obj)
- # Get the initialization rule
- data[rule] = _getAbstractInitialize(obj)
-
# Add this constraint
sets[name] = data
diff --git a/pyomo/core/pyomoobject.py b/pyomo/core/pyomoobject.py
new file mode 100644
index 00000000000..40854d7aa7a
--- /dev/null
+++ b/pyomo/core/pyomoobject.py
@@ -0,0 +1,37 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+
+class PyomoObject(object):
+ __slots__ = ()
+
+ def is_component_type(self):
+ """Return True if this class is a Pyomo component"""
+ return False
+
+ def is_numeric_type(self):
+ """Return True if this class is a Pyomo numeric object"""
+ return False
+
+ def is_parameter_type(self):
+ """Return False unless this class is a parameter object"""
+ return False
+
+ def is_variable_type(self):
+ """Return False unless this class is a variable object"""
+ return False
+
+ def is_expression_type(self):
+ """Return True if this numeric value is an expression"""
+ return False
+
+ def is_named_expression_type(self):
+ """Return True if this numeric value is a named expression"""
+ return False
diff --git a/pyomo/core/tests/diet/test_diet.py b/pyomo/core/tests/diet/test_diet.py
index c22c1893658..f53650d13de 100644
--- a/pyomo/core/tests/diet/test_diet.py
+++ b/pyomo/core/tests/diet/test_diet.py
@@ -12,8 +12,6 @@
from nose.tools import nottest
import pyutilib.th as unittest
-from pyutilib.misc.pyyaml_util import *
-import pyutilib.common
import pyomo.scripting.pyomo_main as main
from pyomo.opt import check_available_solvers
diff --git a/pyomo/core/tests/examples/test_kernel_examples.py b/pyomo/core/tests/examples/test_kernel_examples.py
index fb08fde960c..54f44001f66 100644
--- a/pyomo/core/tests/examples/test_kernel_examples.py
+++ b/pyomo/core/tests/examples/test_kernel_examples.py
@@ -14,11 +14,19 @@
import os
import glob
+import sys
from os.path import basename, dirname, abspath, join
import pyutilib.subprocess
import pyutilib.th as unittest
+from pyomo.common.dependencies import numpy_available, scipy_available
+
+import platform
+if platform.python_implementation() == "PyPy":
+ # The scipy is importable into PyPy, but ODE integrators don't work. (2/ 18)
+ scipy_available = False
+
currdir = dirname(abspath(__file__))
topdir = dirname(dirname(dirname(dirname(dirname(abspath(__file__))))))
examplesdir = join(topdir, "examples", "kernel")
@@ -26,24 +34,6 @@
examples = glob.glob(join(examplesdir,"*.py"))
examples.extend(glob.glob(join(examplesdir,"mosek","*.py")))
-numpy_available = False
-try:
- import numpy
- numpy_available = True
-except:
- pass
-
-scipy_available = False
-try:
- import platform
- if platform.python_implementation() == "PyPy":
- # The scipy is importable into PyPy, but ODE integrators don't work. (2/ 18)
- raise ImportError
- import scipy
- scipy_available = True
-except:
- pass
-
testing_solvers = {}
testing_solvers['ipopt','nl'] = False
testing_solvers['glpk','lp'] = False
@@ -75,7 +65,7 @@ def testmethod(self):
if (not testing_solvers['ipopt','nl']) or \
(not testing_solvers['mosek','python']):
self.skipTest("Ipopt or Mosek is not available")
- rc, log = pyutilib.subprocess.run(['python',example])
+ rc, log = pyutilib.subprocess.run([sys.executable,example])
self.assertEqual(rc, 0, msg=log)
return testmethod
diff --git a/pyomo/core/tests/examples/test_pyomo.py b/pyomo/core/tests/examples/test_pyomo.py
index acdf021b470..850ba57107c 100644
--- a/pyomo/core/tests/examples/test_pyomo.py
+++ b/pyomo/core/tests/examples/test_pyomo.py
@@ -21,18 +21,13 @@
import pyutilib.th as unittest
from pyutilib.misc import setup_redirect, reset_redirect
+from pyomo.common.dependencies import yaml_available
import pyomo.core
import pyomo.scripting.pyomo_main as main
from pyomo.opt import check_available_solvers
from six import StringIO
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
if os.path.exists(sys.exec_prefix+os.sep+'bin'+os.sep+'coverage'):
executable=sys.exec_prefix+os.sep+'bin'+os.sep+'coverage -x '
else:
@@ -91,7 +86,9 @@ def tearDown(self):
os.remove(currdir+'results.jsn')
def run_pyomo(self, cmd, root=None):
- return pyutilib.subprocess.run('pyomo solve --solver=glpk --results-format=json --save-results=%s.jsn ' % (root) +cmd, outfile=root+'.out')
+ cmd = 'pyomo solve --solver=glpk --results-format=json ' \
+ '--save-results=%s.jsn %s' % (root, cmd)
+ return pyutilib.subprocess.run(cmd, outfile=root+'.out')
class TestJson(BaseTester):
@@ -104,7 +101,8 @@ def test1_simple_pyomo_execution(self):
def test1a_simple_pyomo_execution(self):
# Simple execution of 'pyomo' in a subprocess
- self.run_pyomo(currdir+'pmedian.py pmedian.dat', root=currdir+'test1a')
+ self.run_pyomo('%s/pmedian.py %s/pmedian.dat' % (currdir,currdir),
+ root=currdir+'test1a')
self.assertMatchesJsonBaseline(currdir+"test1a.jsn", currdir+"test1.txt",tolerance=_diff_tol)
os.remove(currdir+'test1a.out')
diff --git a/pyomo/core/tests/transform/test_transform.py b/pyomo/core/tests/transform/test_transform.py
index a55452af859..15eb6ff97ec 100644
--- a/pyomo/core/tests/transform/test_transform.py
+++ b/pyomo/core/tests/transform/test_transform.py
@@ -101,7 +101,7 @@ def test_relax_integrality1(self):
self.model.e = Var(within=Boolean)
self.model.f = Var(domain=Boolean)
instance=self.model.create_instance()
- xfrm = TransformationFactory('core.relax_integrality')
+ xfrm = TransformationFactory('core.relax_integer_vars')
rinst = xfrm.create_using(instance)
self.assertEqual(type(rinst.a.domain), RealSet)
self.assertEqual(type(rinst.b.domain), RealSet)
@@ -126,7 +126,7 @@ def test_relax_integrality2(self):
self.model.e = Var([1,2,3], within=Boolean, dense=True)
self.model.f = Var([1,2,3], domain=Boolean, dense=True)
instance=self.model.create_instance()
- xfrm = TransformationFactory('core.relax_integrality')
+ xfrm = TransformationFactory('core.relax_integer_vars')
rinst = xfrm.create_using(instance)
self.assertEqual(type(rinst.a[1].domain), RealSet)
self.assertEqual(type(rinst.b[1].domain), RealSet)
@@ -152,7 +152,7 @@ def test_relax_integrality_cloned(self):
self.model.f = Var(domain=Boolean)
instance=self.model.create_instance()
instance_cloned = instance.clone()
- xfrm = TransformationFactory('core.relax_integrality')
+ xfrm = TransformationFactory('core.relax_integer_vars')
rinst = xfrm.create_using(instance_cloned)
self.assertEqual(type(rinst.a.domain), RealSet)
self.assertEqual(type(rinst.b.domain), RealSet)
@@ -172,9 +172,12 @@ def test_relax_integrality(self):
self.model.d = Var(within=Integers, bounds=(-2,3))
instance=self.model.create_instance()
instance_cloned = instance.clone()
- xfrm = TransformationFactory('core.relax_integrality')
+ xfrm = TransformationFactory('core.relax_integer_vars')
rinst = xfrm.create_using(instance_cloned)
self.assertEqual(type(rinst.d.domain), RealSet)
+ self.assertEqual(rinst.d.bounds, (-2,3))
+ self.assertIs(instance.d.domain, Integers)
+ self.assertIs(instance_cloned.d.domain, Integers)
def test_relax_integrality_simple_cloned(self):
self.model.x = Var(within=Integers, bounds=(-2,3))
@@ -182,7 +185,44 @@ def test_relax_integrality_simple_cloned(self):
instance_cloned = instance.clone()
xfrm = TransformationFactory('core.relax_discrete')
rinst = xfrm.create_using(instance_cloned)
- self.assertNotEqual(type(rinst.x.domain), RealSet)
+ self.assertIs(rinst.x.domain, Reals)
+ self.assertEqual(rinst.x.bounds, (-2,3))
+ self.assertIs(instance.x.domain, Integers)
+ self.assertIs(instance_cloned.x.domain, Integers)
+
+ def test_relax_integrality_on_deactivated_blocks(self):
+ self.model.x = Var(domain=NonNegativeIntegers)
+ self.model.b = Block()
+ self.model.b.x = Var(domain=Binary)
+ self.model.b.y = Var(domain=Integers, bounds=(-3,2))
+ instance = self.model.create_instance()
+ instance.b.deactivate()
+ relax_integrality = TransformationFactory('core.relax_integer_vars')
+ relax_integrality.apply_to(instance)
+ self.assertIs(instance.b.x.domain, Reals)
+ self.assertEqual(instance.b.x.lb, 0)
+ self.assertEqual(instance.b.x.ub, 1)
+ self.assertIs(instance.b.y.domain, Reals)
+ self.assertEqual(instance.b.y.lb, -3)
+ self.assertEqual(instance.b.y.ub, 2)
+ self.assertIs(instance.x.domain, Reals)
+ self.assertEqual(instance.x.lb, 0)
+ self.assertIsNone(instance.x.ub)
+
+ def test_relax_integrality_only_active_blocks(self):
+ self.model.x = Var(domain=NonNegativeIntegers)
+ self.model.b = Block()
+ self.model.b.x = Var(domain=Binary)
+ self.model.b.y = Var(domain=Integers, bounds=(-3,2))
+ instance = self.model.create_instance()
+ instance.b.deactivate()
+ relax_integrality = TransformationFactory('core.relax_integer_vars')
+ relax_integrality.apply_to(instance, transform_deactivated_blocks=False)
+ self.assertIs(instance.b.x.domain, Binary)
+ self.assertIs(instance.b.y.domain, Integers)
+ self.assertIs(instance.x.domain, Reals)
+ self.assertEqual(instance.x.lb, 0)
+ self.assertIsNone(instance.x.ub)
def test_nonnegativity_transformation_1(self):
self.model.a = Var()
@@ -204,15 +244,15 @@ def test_nonnegativity_transformation_1(self):
# Check that discrete variables are still discrete, and continuous
# continuous
for ndx in transformed.a:
- self.assertTrue(isinstance(transformed.a[ndx].domain, RealSet))
+ self.assertIs(transformed.a[ndx].domain, NonNegativeReals)
for ndx in transformed.b:
- self.assertTrue(isinstance(transformed.b[ndx].domain, IntegerSet))
+ self.assertIs(transformed.b[ndx].domain, NonNegativeIntegers)
for ndx in transformed.c:
- self.assertTrue(isinstance(transformed.c[ndx].domain, IntegerSet))
+ self.assertIs(transformed.c[ndx].domain, NonNegativeIntegers)
for ndx in transformed.d:
- self.assertTrue(isinstance(transformed.d[ndx].domain, BooleanSet))
+ self.assertIs(transformed.d[ndx].domain, Binary)
for ndx in transformed.e:
- self.assertTrue(isinstance(transformed.e[ndx].domain, BooleanSet))
+ self.assertIs(transformed.e[ndx].domain, Binary)
def test_nonnegativity_transformation_2(self):
self.model.S = RangeSet(0,10)
diff --git a/pyomo/core/tests/unit/kernel/test_kernel.py b/pyomo/core/tests/unit/kernel/test_kernel.py
index 0126082994a..a10a831bc60 100644
--- a/pyomo/core/tests/unit/kernel/test_kernel.py
+++ b/pyomo/core/tests/unit/kernel/test_kernel.py
@@ -182,18 +182,6 @@ def test_block_data_objects_hack(self):
self.assertEqual(
[str(obj) for obj in model.block_data_objects()],
[str(model)]+[str(obj) for obj in model.components(ctype=IBlock)])
- def test_type_hack(self):
- for obj in [pmo.variable(),
- pmo.constraint(),
- pmo.objective(),
- pmo.expression(),
- pmo.parameter(),
- pmo.suffix(),
- pmo.sos([]),
- pmo.block()]:
- ctype = obj.ctype
- self.assertIs(obj.__class__._ctype, ctype)
- self.assertIs(obj.type(), ctype)
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/kernel/test_parameter.py b/pyomo/core/tests/unit/kernel/test_parameter.py
index 62701a5b08c..c8015479df8 100644
--- a/pyomo/core/tests/unit/kernel/test_parameter.py
+++ b/pyomo/core/tests/unit/kernel/test_parameter.py
@@ -1,11 +1,8 @@
import pickle
-try:
- import dill
- has_dill = True
-except:
- has_dill = False
import pyutilib.th as unittest
+
+from pyomo.common.dependencies import dill, dill_available as has_dill
from pyomo.core.expr.numvalue import (NumericValue,
is_fixed,
is_constant,
diff --git a/pyomo/core/tests/unit/kernel/test_variable.py b/pyomo/core/tests/unit/kernel/test_variable.py
index 591bea3a1ec..46a124da364 100644
--- a/pyomo/core/tests/unit/kernel/test_variable.py
+++ b/pyomo/core/tests/unit/kernel/test_variable.py
@@ -24,16 +24,16 @@
from pyomo.core.kernel.block import block
from pyomo.core.kernel.set_types import (RealSet,
IntegerSet,
- Binary,
- NonNegativeReals,
- NegativeReals,
- Reals,
- RealInterval,
- Integers,
- NonNegativeIntegers,
- NegativeIntegers,
- IntegerInterval,
BooleanSet)
+from pyomo.core.base.set import(Binary,
+ NonNegativeReals,
+ NegativeReals,
+ Reals,
+ Integers,
+ NonNegativeIntegers,
+ NegativeIntegers,
+ RealInterval,
+ IntegerInterval)
import six
from six import StringIO
diff --git a/pyomo/core/tests/unit/test_action.py b/pyomo/core/tests/unit/test_action.py
index 7adb63c16bf..ef2cc3df09d 100644
--- a/pyomo/core/tests/unit/test_action.py
+++ b/pyomo/core/tests/unit/test_action.py
@@ -124,8 +124,9 @@ def test_dense_param(self):
buf = StringIO()
instance.pprint(ostream=buf)
self.assertEqual(buf.getvalue(),"""1 Set Declarations
- Z : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 3]
+ Z : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {1, 3}
1 Param Declarations
A : Size=2, Index=Z, Domain=Any, Default=None, Mutable=True
diff --git a/pyomo/core/tests/unit/test_block.py b/pyomo/core/tests/unit/test_block.py
index 83bfa22bce5..a5223e59d82 100644
--- a/pyomo/core/tests/unit/test_block.py
+++ b/pyomo/core/tests/unit/test_block.py
@@ -14,6 +14,7 @@
import os
import sys
import six
+import types
from six import StringIO
@@ -30,10 +31,6 @@
from pyomo.core.base.block import SimpleBlock, SubclassOf, _BlockData, declare_custom_block
from pyomo.core.expr import current as EXPR
from pyomo.opt import *
-try:
- from StringIO import StringIO # python 2
-except ImportError:
- from io import StringIO # python 3
from pyomo.gdp import Disjunct
@@ -64,6 +61,7 @@ def generate_model(self):
model = ConcreteModel()
model.q = Set(initialize=[1,2])
model.Q = Set(model.q,initialize=[1,2])
+ model.qq = NonNegativeIntegers*model.q
model.x = Var(initialize=-1)
model.X = Var(model.q,initialize=-1)
model.e = Expression(initialize=-1)
@@ -155,8 +153,8 @@ def B_rule(block,i):
model.component_lists = {}
model.component_data_lists = {}
- model.component_lists[Set] = [model.q, model.Q]
- model.component_data_lists[Set] = [model.q, model.Q[1], model.Q[2]]
+ model.component_lists[Set] = [model.q, model.Q, model.qq]
+ model.component_data_lists[Set] = [model.q, model.Q[1], model.Q[2], model.qq]
model.component_lists[Var] = [model.x, model.X]
model.component_data_lists[Var] = [model.x, model.X[1], model.X[2]]
model.component_lists[Expression] = [model.e, model.E]
@@ -189,7 +187,8 @@ def generator_test(self, ctype):
generator = list(block.component_objects(ctype, active=True, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("component_objects(active=True) failed with ctype %s" % ctype)
+ print("component_objects(active=True) failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("component_objects(active=True) should have failed with ctype %s" % ctype)
@@ -208,7 +207,8 @@ def generator_test(self, ctype):
generator = list(block.component_objects(ctype, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("components failed with ctype %s" % ctype)
+ print("components failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("components should have failed with ctype %s" % ctype)
@@ -227,7 +227,8 @@ def generator_test(self, ctype):
generator = list(block.component_data_iterindex(ctype, active=True, sort=False, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" % ctype)
+ print("component_data_objects(active=True, sort_by_keys=False) failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("component_data_objects(active=True, sort_by_keys=False) should have failed with ctype %s" % ctype)
@@ -246,7 +247,8 @@ def generator_test(self, ctype):
generator = list(block.component_data_iterindex(ctype, active=True, sort=True, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("component_data_objects(active=True, sort=True) failed with ctype %s" % ctype)
+ print("component_data_objects(active=True, sort=True) failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("component_data_objects(active=True, sort=True) should have failed with ctype %s" % ctype)
@@ -265,7 +267,8 @@ def generator_test(self, ctype):
generator = list(block.component_data_iterindex(ctype, sort=False, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("components_data(sort_by_keys=True) failed with ctype %s" % ctype)
+ print("components_data(sort_by_keys=True) failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("components_data(sort_by_keys=True) should have failed with ctype %s" % ctype)
@@ -284,7 +287,8 @@ def generator_test(self, ctype):
generator = list(block.component_data_iterindex(ctype, sort=True, descend_into=False))
except:
if issubclass(ctype, Component):
- self.fail("components_data(sort_by_keys=False) failed with ctype %s" % ctype)
+ print("components_data(sort_by_keys=False) failed with ctype %s" % ctype)
+ raise
else:
if not issubclass(ctype, Component):
self.fail("components_data(sort_by_keys=False) should have failed with ctype %s" % ctype)
@@ -647,6 +651,154 @@ def test_set_attr(self):
self.block.x = None
self.assertEqual(self.block.x._value, None)
+ ### creation of a circular reference
+ b = Block(concrete=True)
+ b.c = Block()
+ with self.assertRaisesRegexp(
+ ValueError, "Cannot assign the top-level block as a subblock "
+ "of one of its children \(c\): creates a circular hierarchy"):
+ b.c.d = b
+
+ def test_set_value(self):
+ b = Block(concrete=True)
+ with self.assertRaisesRegexp(
+ RuntimeError, "Block components do not support assignment "
+ "or set_value"):
+ b.set_value(None)
+
+ b.b = Block()
+ with self.assertRaisesRegexp(
+ RuntimeError, "Block components do not support assignment "
+ "or set_value"):
+ b.b = 5
+
+ def test_clear(self):
+ class DerivedBlock(SimpleBlock):
+ _Block_reserved_words = None
+
+ DerivedBlock._Block_reserved_words \
+ = set(['a','b','c']) | _BlockData._Block_reserved_words
+
+ m = ConcreteModel()
+ m.clear()
+ self.assertEqual(m._ctypes, {})
+ self.assertEqual(m._decl, {})
+ self.assertEqual(m._decl_order, [])
+
+ m.w = 5
+ m.x = Var()
+ m.y = Param()
+ m.z = Var()
+ m.clear()
+ self.assertFalse(hasattr(m, 'w'))
+ self.assertEqual(m._ctypes, {})
+ self.assertEqual(m._decl, {})
+ self.assertEqual(m._decl_order, [])
+
+ m.b = DerivedBlock()
+ m.b.a = a = Param()
+ m.b.x = Var()
+ m.b.b = b = Var()
+ m.b.y = Var()
+ m.b.z = Param()
+ m.b.c = c = Param()
+ m.b.clear()
+ self.assertEqual(m.b._ctypes, {Var: [1, 1, 1], Param:[0,2,2]})
+ self.assertEqual(m.b._decl, {'a':0, 'b':1, 'c':2})
+ self.assertEqual(len(m.b._decl_order), 3)
+ self.assertIs(m.b._decl_order[0][0], a)
+ self.assertIs(m.b._decl_order[1][0], b)
+ self.assertIs(m.b._decl_order[2][0], c)
+ self.assertEqual(m.b._decl_order[0][1], 2)
+ self.assertEqual(m.b._decl_order[1][1], None)
+ self.assertEqual(m.b._decl_order[2][1], None)
+
+ def test_transfer_attributes_from(self):
+ b = Block(concrete=True)
+ b.x = Var()
+ b.y = Var()
+ c = Block(concrete=True)
+ c.z = Param(initialize=5)
+ c.x = c_x = Param(initialize=5)
+ c.y = c_y = 5
+
+ b.clear()
+ b.transfer_attributes_from(c)
+ self.assertEqual(list(b.component_map()), ['z','x'])
+ self.assertEqual(list(c.component_map()), [])
+ self.assertIs(b.x, c_x)
+ self.assertIs(b.y, c_y)
+
+ class DerivedBlock(SimpleBlock):
+ _Block_reserved_words = set()
+ def __init__(self, *args, **kwds):
+ super(DerivedBlock, self).__init__(*args, **kwds)
+ self.x = Var()
+ self.y = Var()
+ DerivedBlock._Block_reserved_words = set(dir(DerivedBlock()))
+
+ b = DerivedBlock(concrete=True)
+ b_x = b.x
+ b_y = b.y
+ c = Block(concrete=True)
+ c.z = Param(initialize=5)
+ c.x = c_x = Param(initialize=5)
+ c.y = c_y = 5
+
+ b.clear()
+ b.transfer_attributes_from(c)
+ self.assertEqual(list(b.component_map()), ['y','z','x'])
+ self.assertEqual(list(c.component_map()), [])
+ self.assertIs(b.x, c_x)
+ self.assertIsNot(b.y, c_y)
+ self.assertIs(b.y, b_y)
+ self.assertEqual(value(b.y), value(c_y))
+
+ ### assignment of dict
+ b = DerivedBlock(concrete=True)
+ b_x = b.x
+ b_y = b.y
+ c = { 'z': Param(initialize=5),
+ 'x': Param(initialize=5),
+ 'y': 5 }
+
+ b.clear()
+ b.transfer_attributes_from(c)
+ self.assertEqual(list(b.component_map()), ['y','x','z'])
+ self.assertEqual(sorted(list(iterkeys(c))), ['x','y','z'])
+ self.assertIs(b.x, c['x'])
+ self.assertIsNot(b.y, c['y'])
+ self.assertIs(b.y, b_y)
+ self.assertEqual(value(b.y), value(c_y))
+
+ ### assignment of self
+ b = Block(concrete=True)
+ b.x = b_x = Var()
+ b.y = b_y = Var()
+ b.transfer_attributes_from(b)
+
+ self.assertEqual(list(b.component_map()), ['x','y'])
+ self.assertIs(b.x, b_x)
+ self.assertIs(b.y, b_y)
+
+ ### creation of a circular reference
+ b = Block(concrete=True)
+ b.c = Block()
+ b.c.d = Block()
+ b.c.d.e = Block()
+ with self.assertRaisesRegexp(
+ ValueError, '_BlockData.transfer_attributes_from\(\): '
+ 'Cannot set a sub-block \(c.d.e\) to a parent block \(c\):'):
+ b.c.d.e.transfer_attributes_from(b.c)
+
+ ### bad data type
+ b = Block(concrete=True)
+ with self.assertRaisesRegexp(
+ ValueError,
+ '_BlockData.transfer_attributes_from\(\): expected a Block '
+ 'or dict; received str'):
+ b.transfer_attributes_from('foo')
+
def test_iterate_hierarchy_defaults(self):
self.assertIs( TraversalStrategy.BFS,
TraversalStrategy.BreadthFirstSearch )
@@ -2009,12 +2161,15 @@ def test_pprint(self):
buf = StringIO()
m.pprint(ostream=buf)
ref = """3 Set Declarations
- a1_IDX : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=Insertion, Bounds=(4, 5)
- [5, 4]
- a3_IDX : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=Insertion, Bounds=(6, 7)
- [6, 7]
- a_index : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
+ a1_IDX : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {5, 4}
+ a3_IDX : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {6, 7}
+ a_index : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
3 Block Declarations
a : Size=3, Index=a_index, Active=True
@@ -2213,16 +2368,18 @@ def scalar_constraint(m):
return m.x[1]**2 <= 0
self.assertTrue(hasattr(model, 'scalar_constraint'))
- self.assertIs(model.scalar_constraint._type, Constraint)
+ self.assertIs(model.scalar_constraint.ctype, Constraint)
self.assertEqual(len(model.scalar_constraint), 1)
+ self.assertIs(type(scalar_constraint), types.FunctionType)
@model.Constraint(model.I)
def vector_constraint(m, i):
return m.x[i]**2 <= 0
self.assertTrue(hasattr(model, 'vector_constraint'))
- self.assertIs(model.vector_constraint._type, Constraint)
+ self.assertIs(model.vector_constraint.ctype, Constraint)
self.assertEqual(len(model.vector_constraint), 3)
+ self.assertIs(type(vector_constraint), types.FunctionType)
def test_reserved_words(self):
m = ConcreteModel()
@@ -2277,7 +2434,80 @@ def pprint(self, ostream=None, verbose=False, prefix=""):
b.pprint(ostream=stream)
self.assertEqual(correct_s, stream.getvalue())
+ def test_block_rules(self):
+ m = ConcreteModel()
+ m.I = Set()
+ _rule_ = []
+ def _block_rule(b,i):
+ _rule_.append(i)
+ b.x = Var(range(i))
+ m.b = Block(m.I, rule=_block_rule)
+ # I is empty: no rules called
+ self.assertEqual(_rule_, [])
+ m.I.update([1,3,5])
+ # Fetching a new block will call the rule
+ _b = m.b[3]
+ self.assertEqual(len(m.b), 1)
+ self.assertEqual(_rule_, [3])
+ self.assertIn('x', _b.component_map())
+ self.assertIn('x', m.b[3].component_map())
+
+ # If you transfer the attributes directly, the rule will still
+ # be called.
+ _tmp = Block()
+ _tmp.y = Var(range(3))
+ m.b[5].transfer_attributes_from(_tmp)
+ self.assertEqual(len(m.b), 2)
+ self.assertEqual(_rule_, [3,5])
+ self.assertIn('x', m.b[5].component_map())
+ self.assertIn('y', m.b[5].component_map())
+
+ # We do not support block assignment (and the rule will NOT be
+ # called)
+ _tmp = Block()
+ _tmp.y = Var(range(3))
+ with self.assertRaisesRegex(
+ RuntimeError, "Block components do not support "
+ "assignment or set_value"):
+ m.b[1] = _tmp
+ self.assertEqual(len(m.b), 2)
+ self.assertEqual(_rule_, [3,5])
+
+ # Blocks with non-finite indexing sets cannot be automatically
+ # populated (even if they have a rule!)
+ def _bb_rule(b, i, j):
+ _rule_.append((i,j))
+ b.x = Var(RangeSet(i))
+ b.y = Var(RangeSet(j))
+ m.bb = Block(m.I, NonNegativeIntegers, rule=_bb_rule)
+ self.assertEqual(_rule_, [3,5])
+ _b = m.bb[3,5]
+ self.assertEqual(_rule_, [3,5,(3,5)])
+ self.assertEqual(len(m.bb), 1)
+ self.assertEqual(len(_b.x), 3)
+ self.assertEqual(len(_b.y), 5)
+
+ def test_derived_block_construction(self):
+ # This tests a case where a derived block doesn't follow the
+ # assumption that unconstructed scalar blocks initialize
+ # `_data[None] = self` (therefore doesn't fully support abstract
+ # models). At one point, that was causing the block rule to
+ # fire twice during construction.
+ class ConcreteBlock(Block):
+ pass
+
+ class ScalarConcreteBlock(_BlockData, ConcreteBlock):
+ def __init__(self, *args, **kwds):
+ _BlockData.__init__(self, component=self)
+ ConcreteBlock.__init__(self, *args, **kwds)
+ _buf = []
+ def _rule(b):
+ _buf.append(1)
+
+ m = ConcreteModel()
+ m.b = ScalarConcreteBlock(rule=_rule)
+ self.assertEqual(_buf, [1])
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_check.py b/pyomo/core/tests/unit/test_check.py
index e15325d299b..a67d77930c9 100644
--- a/pyomo/core/tests/unit/test_check.py
+++ b/pyomo/core/tests/unit/test_check.py
@@ -173,8 +173,9 @@ def test_io(self):
buf = StringIO()
instance.pprint(ostream=buf)
self.assertEqual(buf.getvalue(),"""1 Set Declarations
- A : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
+ A : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
2 BuildCheck Declarations
c1 :
diff --git a/pyomo/core/tests/unit/test_component.py b/pyomo/core/tests/unit/test_component.py
index 1eb5ef12eaa..f5639caf091 100644
--- a/pyomo/core/tests/unit/test_component.py
+++ b/pyomo/core/tests/unit/test_component.py
@@ -10,17 +10,13 @@
#
# Unit Tests for components
#
-
+from six import StringIO
import pyutilib.th as unittest
from pyomo.common import DeveloperError
import pyomo.core.base._pyomo
from pyomo.core.base.block import generate_cuid_names
from pyomo.environ import *
-try:
- from StringIO import StringIO # python 2
-except ImportError:
- from io import StringIO # python 3
class TestComponent(unittest.TestCase):
@@ -540,10 +536,16 @@ def test_generate_cuid_names(self):
class TestEnviron(unittest.TestCase):
def test_components(self):
- self.assertTrue(set(x[0] for x in pyomo.core.base._pyomo.model_components()) >= set(['Set', 'Param', 'Var', 'Objective', 'Constraint']))
+ self.assertGreaterEqual(
+ set(x[0] for x in pyomo.core.base._pyomo.model_components()),
+ set(['Set', 'Param', 'Var', 'Objective', 'Constraint'])
+ )
def test_sets(self):
- self.assertTrue(set(x[0] for x in pyomo.core.base._pyomo.predefined_sets()) >= set(['Reals', 'Integers', 'Boolean']))
+ self.assertGreaterEqual(
+ set(x[0] for x in pyomo.core.base._pyomo.predefined_sets()),
+ set(['Reals', 'Integers', 'Boolean'])
+ )
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_deprecation.py b/pyomo/core/tests/unit/test_deprecation.py
new file mode 100644
index 00000000000..b461abdf784
--- /dev/null
+++ b/pyomo/core/tests/unit/test_deprecation.py
@@ -0,0 +1,46 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+import os
+import pyutilib.th as unittest
+import sys
+
+from importlib import import_module
+from six import StringIO, PY3
+
+from pyomo.common.log import LoggingIntercept
+
+def force_load(module):
+ if module in sys.modules:
+ del sys.modules[module]
+ return import_module(module)
+
+class TestDeprecatedModules(unittest.TestCase):
+ def test_rangeset(self):
+ log = StringIO()
+ with LoggingIntercept(log):
+ from pyomo.core.base.set import RangeSet
+ self.assertEqual(log.getvalue(), "")
+
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo'):
+ rs = force_load('pyomo.core.base.rangeset')
+ self.assertIn("The pyomo.core.base.rangeset module is deprecated.",
+ log.getvalue().strip().replace('\n',' '))
+ self.assertIs(RangeSet, rs.RangeSet)
+
+ # Run this twice to implicitly test the force_load() implementation
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo'):
+ rs = force_load('pyomo.core.base.rangeset')
+ self.assertIn("The pyomo.core.base.rangeset module is deprecated.",
+ log.getvalue().strip().replace('\n',' '))
+ self.assertIs(RangeSet, rs.RangeSet)
+
diff --git a/pyomo/core/tests/unit/test_derivs.py b/pyomo/core/tests/unit/test_derivs.py
index 47c4ba998a4..812e50555eb 100644
--- a/pyomo/core/tests/unit/test_derivs.py
+++ b/pyomo/core/tests/unit/test_derivs.py
@@ -1,6 +1,7 @@
import pyutilib.th as unittest
import pyomo.environ as pe
from pyomo.core.expr.calculus.diff_with_pyomo import reverse_ad, reverse_sd
+from pyomo.common.getGSL import find_GSL
tol = 6
@@ -190,3 +191,32 @@ def e2(m, i):
derivs = reverse_ad(m.o.expr)
symbolic = reverse_sd(m.o.expr)
self.assertAlmostEqual(derivs[m.x], pe.value(symbolic[m.x]), tol)
+
+ def test_multiple_named_expressions(self):
+ m = pe.ConcreteModel()
+ m.x = pe.Var()
+ m.y = pe.Var()
+ m.x.value = 1
+ m.y.value = 1
+ m.E = pe.Expression(expr=m.x*m.y)
+ e = m.E - m.E
+ derivs = reverse_ad(e)
+ self.assertAlmostEqual(derivs[m.x], 0)
+ self.assertAlmostEqual(derivs[m.y], 0)
+ symbolic = reverse_sd(e)
+ self.assertAlmostEqual(pe.value(symbolic[m.x]), 0)
+ self.assertAlmostEqual(pe.value(symbolic[m.y]), 0)
+
+ def test_external(self):
+ DLL = find_GSL()
+ if not DLL:
+ self.skipTest('Could not find the amplgsl.dll library')
+
+ m = pe.ConcreteModel()
+ m.hypot = pe.ExternalFunction(library=DLL, function='gsl_hypot')
+ m.x = pe.Var(initialize=0.5)
+ m.y = pe.Var(initialize=1.5)
+ e = 2 * m.hypot(m.x, m.x*m.y)
+ derivs = reverse_ad(e)
+ self.assertAlmostEqual(derivs[m.x], approx_deriv(e, m.x), tol)
+ self.assertAlmostEqual(derivs[m.y], approx_deriv(e, m.y), tol)
diff --git a/pyomo/core/tests/unit/test_expr5.txt b/pyomo/core/tests/unit/test_expr5.txt
index 7fc88553264..a5fc934bd77 100644
--- a/pyomo/core/tests/unit/test_expr5.txt
+++ b/pyomo/core/tests/unit/test_expr5.txt
@@ -1,9 +1,11 @@
2 Set Declarations
A : set A
- Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- c3_index : Dim=0, Dimen=1, Size=1, Domain=None, Ordered=False, Bounds=None
- [1]
+ Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ c3_index : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 1 : {1,}
2 Param Declarations
B : param B
diff --git a/pyomo/core/tests/unit/test_external.py b/pyomo/core/tests/unit/test_external.py
index 6a120ddcf95..2adf354c343 100644
--- a/pyomo/core/tests/unit/test_external.py
+++ b/pyomo/core/tests/unit/test_external.py
@@ -12,7 +12,6 @@
import pyutilib.th as unittest
from pyomo.common.getGSL import find_GSL
-from pyomo.core.base import IntegerSet
from pyomo.environ import *
from pyomo.core.base.external import (PythonCallbackFunction,
AMPLExternalFunction)
@@ -57,6 +56,12 @@ def test_getname(self):
self.assertEqual(M.m.f.getname(), "f")
self.assertEqual(M.m.f.getname(True), "m.f")
+ def test_extra_kwargs(self):
+ m = ConcreteModel()
+ with self.assertRaises(ValueError):
+ m.f = ExternalFunction(_g, this_should_raise_error='foo')
+
+
class TestAMPLExternalFunction(unittest.TestCase):
def assertListsAlmostEqual(self, first, second, places=7, msg=None):
self.assertEqual(len(first), len(second))
diff --git a/pyomo/core/tests/unit/test_indexed_slice.py b/pyomo/core/tests/unit/test_indexed_slice.py
index a7c468f6e88..225092a7e44 100644
--- a/pyomo/core/tests/unit/test_indexed_slice.py
+++ b/pyomo/core/tests/unit/test_indexed_slice.py
@@ -18,7 +18,7 @@
from pyomo.environ import *
from pyomo.core.base.block import _BlockData
-from pyomo.core.base.indexed_component import _IndexedComponent_slice
+from pyomo.core.base.indexed_component_slice import IndexedComponent_slice
def _x_init(m, k):
return k
@@ -60,25 +60,25 @@ def test_simple_getitem(self):
def test_simple_getslice(self):
_slicer = self.m.b[:,4]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4]', 'b[2,4]', 'b[3,4]'] )
_slicer = self.m.b[1,4].c[:,4]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4].c[1,4]', 'b[1,4].c[2,4]', 'b[1,4].c[3,4]'] )
def test_wildcard_slice(self):
_slicer = self.m.b[:]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[...]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]',
@@ -87,14 +87,14 @@ def test_wildcard_slice(self):
] )
_slicer = self.m.b[1,...]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'b[1,4]', 'b[1,5]', 'b[1,6]',
] )
_slicer = self.m.b[...,5]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'b[1,5]',
@@ -103,14 +103,14 @@ def test_wildcard_slice(self):
] )
_slicer = self.m.bb[2,...,8]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'bb[2,4,8]', 'bb[2,5,8]', 'bb[2,6,8]',
] )
_slicer = self.m.bb[:,...,8]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]',
@@ -119,7 +119,7 @@ def test_wildcard_slice(self):
] )
_slicer = self.m.bb[:,:,...,8]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]',
@@ -128,7 +128,7 @@ def test_wildcard_slice(self):
] )
_slicer = self.m.bb[:,...,:,8]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'bb[1,4,8]', 'bb[1,5,8]', 'bb[1,6,8]',
@@ -137,19 +137,19 @@ def test_wildcard_slice(self):
] )
_slicer = self.m.b[1,4,...]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, [ 'b[1,4]',
] )
_slicer = self.m.b[1,2,3,...]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[1,:,2]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
@@ -160,20 +160,20 @@ def test_wildcard_slice(self):
def test_nonterminal_slice(self):
_slicer = self.m.b[:,4].x
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4].x', 'b[2,4].x', 'b[3,4].x'] )
_slicer = self.m.b[:,4].x[7]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4].x[7]', 'b[2,4].x[7]', 'b[3,4].x[7]'] )
def test_nested_slices(self):
_slicer = self.m.b[1,:].c[:,4].x
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x',
@@ -182,7 +182,7 @@ def test_nested_slices(self):
] )
_slicer = self.m.b[1,:].c[:,4].x[8]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans,
@@ -193,7 +193,7 @@ def test_nested_slices(self):
def test_component_function_slices(self):
_slicer = self.m.component('b')[1,:].component('c')[:,4].component('x')
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual(
ans, ['b[1,4].c[1,4].x', 'b[1,4].c[2,4].x', 'b[1,4].c[3,4].x',
@@ -233,7 +233,7 @@ def test_setattr_slices(self):
_slice = self.m.b[...].c[...].x[:]
with self.assertRaisesRegexp(
AttributeError, ".*VarData' object has no attribute 'bogus'"):
- _slice.duplicate().bogus = 0
+ _slice.bogus = 0
# but disabling the exception flag will run without error
_slice.attribute_errors_generate_exceptions = False
# This doesn't do anything ... simply not raising an exception
@@ -250,15 +250,15 @@ def test_delattr_slices(self):
_slice = self.m.b[1,:].c[:,4].x.foo
_slice._call_stack[-1] = (
- _IndexedComponent_slice.del_attribute,
+ IndexedComponent_slice.del_attribute,
_slice._call_stack[-1][1] )
# call the iterator to delete the attributes
- list(_slice.duplicate())
+ list(_slice)
self.assertEqual(sum(list(1 if hasattr(x,'foo') else 0
for x in self.m.b[:,:].c[:,:].x)), 0)
# calling the iterator again will raise an exception
with self.assertRaisesRegexp(AttributeError, 'foo'):
- list(_slice.duplicate())
+ list(_slice)
# but disabling the exception flag will run without error
_slice.attribute_errors_generate_exceptions = False
# This doesn't do anything ... simply not raising an exception
@@ -284,7 +284,7 @@ def test_setitem_slices(self):
with self.assertRaisesRegexp(
KeyError, "Index 'bogus' is not valid for indexed "
"component 'b\[1,4\]\.c\[1,4\]\.x'"):
- _slice.duplicate()['bogus'] = 0
+ _slice['bogus'] = 0
# but disabling the exception flag will run without error
_slice.key_errors_generate_exceptions = False
# This doesn't do anything ... simply not raising an exception
@@ -337,7 +337,7 @@ def test_delitem_slices(self):
with self.assertRaisesRegexp(
KeyError, "Index 'bogus' is not valid for indexed "
"component 'b\[2,4\]\.c\[1,4\]\.x'"):
- del _slice.duplicate()['bogus']
+ del _slice['bogus']
# but disabling the exception flag will run without error
_slice.key_errors_generate_exceptions = False
# This doesn't do anything ... simply not raising an exception
@@ -366,45 +366,45 @@ def test_delitem_component(self):
def test_empty_slices(self):
_slicer = self.m.b[1,:].c[:,1].x
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[1,:].c[:,4].x[1]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.key_errors_generate_exceptions = False
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[1,:].c[:,4].y
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.attribute_errors_generate_exceptions = False
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[1,:].c[:,4].component('y', False)
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.call_errors_generate_exceptions = False
ans = [ str(x) for x in _slicer ]
self.assertEqual( ans, [] )
_slicer = self.m.b[1,:].c[:,4].x[1]
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.key_errors_generate_exceptions = True
self.assertRaises( KeyError, _slicer.next )
_slicer = self.m.b[1,:].c[:,4].y
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.attribute_errors_generate_exceptions = True
self.assertRaises( AttributeError, _slicer.next )
_slicer = self.m.b[1,:].c[:,4].component('y', False)
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.call_errors_generate_exceptions = True
self.assertRaises( TypeError,_slicer.next )
_slicer = self.m.b[1,:].c[:,4].component()
- self.assertIsInstance(_slicer, _IndexedComponent_slice)
+ self.assertIsInstance(_slicer, IndexedComponent_slice)
_slicer.call_errors_generate_exceptions = True
self.assertRaises( TypeError, _slicer.next )
@@ -514,5 +514,39 @@ def test_clone_on_model(self):
self.assertIs(x.model(), m)
self.assertIs(y.model(), n)
+ def test_hash_eqality(self):
+ m = self.m
+ a = m.b[1,:].c[:,...,4].x
+ b = m.b[1,:].c[1,...,:].x
+ self.assertNotEqual(a, b)
+ self.assertNotEqual(a, m)
+
+ self.assertEqual(a, a)
+ self.assertEqual(a, m.b[1,:].c[:,...,4].x)
+
+ _set = set([a,b])
+ self.assertEqual(len(_set), 2)
+ _set.add(m.b[1,:].c[:,...,4].x)
+ self.assertEqual(len(_set), 2)
+ _set.add(m.b[1,:].c[:,4].x)
+ self.assertEqual(len(_set), 3)
+
+ def test_duplicate(self):
+ m = self.m
+ a = m.b[1,:].c[:,...,4]
+
+ b = a.x
+ self.assertIs(a._call_stack, b._call_stack)
+ self.assertEqual(a._len+1, b._len)
+
+ c = a.y
+ self.assertEqual(a._len+1, c._len)
+ self.assertIsNot(a._call_stack, c._call_stack)
+
+ b1 = b.duplicate()
+ self.assertIsNot(a._call_stack, b1._call_stack)
+ self.assertEqual(a._len+1, b1._len)
+ self.assertEqual(hash(b), hash(b1))
+
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_model.py b/pyomo/core/tests/unit/test_model.py
index 43fe0f2f3a5..30c44f9e1a5 100644
--- a/pyomo/core/tests/unit/test_model.py
+++ b/pyomo/core/tests/unit/test_model.py
@@ -18,22 +18,17 @@
from os.path import abspath, dirname, join
currdir = dirname(abspath(__file__))
import pickle
+
import pyutilib.th as unittest
import pyutilib.services
-import pyomo.opt
-from pyomo.opt import SolutionStatus
-from pyomo.opt.parallel.local import SolverManager_Serial
-from pyomo.environ import *
-from pyomo.core.expr import current as EXPR
-solvers = pyomo.opt.check_available_solvers('glpk')
-
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
+from pyomo.common.dependencies import yaml_available
+from pyomo.core.expr import current as EXPR
+from pyomo.environ import *
+from pyomo.opt import SolutionStatus, check_available_solvers
+from pyomo.opt.parallel.local import SolverManager_Serial
+solvers = check_available_solvers('glpk')
class Test(unittest.TestCase):
@@ -769,8 +764,10 @@ def c(b):
return sum(m.x[i] for i in m.I) >= 0
m.c = Constraint( rule=c )
- model = AbstractModel(rule=make_invalid)
- self.assertRaises(RuntimeError, model.create_instance)
+ with self.assertRaisesRegexp(
+ ValueError, 'x\[1\]: The component has not been constructed.'):
+ model = AbstractModel(rule=make_invalid)
+ instance = model.create_instance()
model = AbstractModel(rule=make)
instance = model.create_instance()
diff --git a/pyomo/core/tests/unit/test_numeric_expr.py b/pyomo/core/tests/unit/test_numeric_expr.py
index a36b95bfe14..7026515bf15 100644
--- a/pyomo/core/tests/unit/test_numeric_expr.py
+++ b/pyomo/core/tests/unit/test_numeric_expr.py
@@ -16,6 +16,8 @@
import math
import os
import re
+from collections import defaultdict
+
import six
import sys
from os.path import abspath, dirname
@@ -55,7 +57,7 @@
from pyomo.core.base.var import SimpleVar
from pyomo.core.base.param import _ParamData, SimpleParam
from pyomo.core.base.label import *
-from pyomo.core.base.template_expr import IndexTemplate
+from pyomo.core.expr.template_expr import IndexTemplate
from pyomo.core.expr.expr_errors import TemplateExpressionError
from pyomo.repn import generate_standard_repn
@@ -2089,7 +2091,7 @@ def test_getitem(self):
t = IndexTemplate(m.I)
e = m.x[t+m.P[t+1]] + 3
- self.assertEqual("sum(x(sum({I}, P(sum({I}, 1)))), 3)", str(e))
+ self.assertEqual("sum(getitem(x, sum({I}, getitem(P, sum({I}, 1)))), 3)", str(e))
def test_small_expression(self):
#
@@ -2326,7 +2328,7 @@ def test_getitem(self):
t = IndexTemplate(m.I)
e = m.x[t+m.P[t+1]] + 3
- self.assertEqual("x({I} + P({I} + 1)) + 3", str(e))
+ self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e))
def test_associativity_rules(self):
m = ConcreteModel()
@@ -3429,10 +3431,19 @@ def test_Expr_if(self):
expr = Expr_if(m.e,1,0)
self.assertEqual(expr.polynomial_degree(), 0)
#
+ # A nonconstant expression has degree if both arguments have the
+ # same degree, as long as the IF is fixed (even if it is not
+ # defined)
+ #
+ expr = Expr_if(m.e,m.a,0)
+ self.assertEqual(expr.polynomial_degree(), 0)
+ expr = Expr_if(m.e,5*m.b,1+m.b)
+ self.assertEqual(expr.polynomial_degree(), 1)
+ #
# A nonconstant expression has degree None because
# m.e is an uninitialized parameter
#
- expr = Expr_if(m.e,m.a,0)
+ expr = Expr_if(m.e,m.b,0)
self.assertEqual(expr.polynomial_degree(), None)
@@ -4002,7 +4013,7 @@ def test_getitem(self):
e = m.x[t+m.P[t+1]] + 3
e_ = e.clone()
- self.assertEqual("x({I} + P({I} + 1)) + 3", str(e_))
+ self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e_))
#
total = counter.count - start
self.assertEqual(total, 1)
@@ -5012,7 +5023,7 @@ def test_getitem(self):
e = m.x[t+m.P[t+1]] + 3
s = pickle.dumps(e)
e_ = pickle.loads(s)
- self.assertEqual("x({I} + P({I} + 1)) + 3", str(e))
+ self.assertEqual("x[{I} + P[{I} + 1]] + 3", str(e))
def test_abs(self):
M = ConcreteModel()
@@ -5212,5 +5223,43 @@ def test_LinearExpression_expression(self):
self.assertTrue(len(repn.linear_coefs) == N)
self.assertTrue(len(repn.linear_vars) == N)
+ def test_LinearExpression_polynomial_degree(self):
+ m = ConcreteModel()
+ m.S = RangeSet(2)
+ m.var_1 = Var(initialize=0)
+ m.var_2 = Var(initialize=0)
+ m.var_3 = Var(m.S, initialize=0)
+
+ def con_rule(model):
+ return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0
+
+ m.c1 = Constraint(rule=con_rule)
+
+ m.var_1.fix(1)
+ m.var_2.fix(1)
+ m.var_3.fix(1)
+
+ self.assertTrue(is_fixed(m.c1.body))
+ self.assertEqual(polynomial_degree(m.c1.body), 0)
+
+ def test_LinearExpression_is_fixed(self):
+ m = ConcreteModel()
+ m.S = RangeSet(2)
+ m.var_1 = Var(initialize=0)
+ m.var_2 = Var(initialize=0)
+ m.var_3 = Var(m.S, initialize=0)
+
+ def con_rule(model):
+ return model.var_1 - (model.var_2 + sum_product(defaultdict(lambda: 6), model.var_3)) <= 0
+
+ m.c1 = Constraint(rule=con_rule)
+
+ m.var_1.fix(1)
+ m.var_2.fix(1)
+
+ self.assertFalse(is_fixed(m.c1.body))
+ self.assertEqual(polynomial_degree(m.c1.body), 1)
+
+
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_param.py b/pyomo/core/tests/unit/test_param.py
index 56c8c333710..7c079694200 100644
--- a/pyomo/core/tests/unit/test_param.py
+++ b/pyomo/core/tests/unit/test_param.py
@@ -26,6 +26,7 @@
import pyutilib.th as unittest
from pyomo.environ import *
+from pyomo.common.log import LoggingIntercept
from pyomo.core.base.param import _NotValid
from six import iteritems, itervalues, StringIO
@@ -729,11 +730,18 @@ def B_init(model, i, ii, iii, j):
return 2+i
return -(2+i)
self.model.B = Param(B_index, [True,False], initialize=B_init)
- try:
- self.instance = self.model.create_instance()
- self.fail("Expected ValueError because B_index returns a tuple")
- except ValueError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance = self.model.create_instance()
+ # self.fail("Expected ValueError because B_index returns a tuple")
+ # except ValueError:
+ # pass
+ self.instance = self.model.create_instance()
+ self.assertEqual(set(self.instance.B.keys()),set([(0,0,0,True),(2,4,4,True),(0,0,0,False),(2,4,4,False)]))
+ self.assertEqual(self.instance.B[0,0,0,True],2)
+ self.assertEqual(self.instance.B[0,0,0,False],-2)
+ self.assertEqual(self.instance.B[2,4,4,True],4)
+ self.assertEqual(self.instance.B[2,4,4,False],-4)
def test_index4(self):
self.model.A = Set(initialize=range(0,4))
@@ -1043,7 +1051,7 @@ def test_io8(self):
self.model.A=Set()
self.model.B=Param(self.model.A)
self.instance = self.model.create_instance("param.dat")
- self.assertEqual( self.instance.A.data(), set(['A','B','C']) )
+ self.assertEqual( set(self.instance.A.data()), set(['A','B','C']) )
def test_io9(self):
OUTPUT=open("param.dat","w")
@@ -1169,6 +1177,58 @@ def rule(model, i):
return 0.0
model.p = Param(model.A, initialize=rule)
+ def test_param_validate(self):
+ """Test Param `validate` and `within` throw ValueError when not valid.
+
+ The `within` argument will catch the ValueError, log extra information
+ with of an "ERROR" message, and reraise the ValueError.
+
+ 1. Immutable Param (unindexed)
+ 2. Immutable Param (indexed)
+ 3. Immutable Param (arbitrary validation rule)
+ 4. Mutable Param (unindexed)
+ 5. Mutable Param (indexed)
+ 6. Mutable Param (arbitrary validation rule)
+ """
+ def validation_rule(model, value):
+ """Arbitrary validation rule that always returns False."""
+ return False
+
+ # 1. Immutable Param (unindexed)
+ with self.assertRaisesRegex(ValueError, "Value not in parameter domain"):
+ m = ConcreteModel()
+ m.p1 = Param(initialize=-3, within=NonNegativeReals)
+
+ # 2. Immutable Param (indexed)
+ with self.assertRaisesRegex(ValueError, "Value not in parameter domain"):
+ m = ConcreteModel()
+ m.A = RangeSet(1, 2)
+ m.p2 = Param(m.A, initialize=-3, within=NonNegativeReals)
+
+ # 3. Immutable Param (arbitrary validation rule)
+ with self.assertRaisesRegex(ValueError, "Invalid parameter value"):
+ m = ConcreteModel()
+ m.p5 = Param(initialize=1, validate=validation_rule)
+
+ # 4. Mutable Param (unindexed)
+ with self.assertRaisesRegex(ValueError, "Value not in parameter domain"):
+ m = ConcreteModel()
+ m.p3 = Param(within=NonNegativeReals, mutable=True)
+ m.p3 = -3
+
+ # 5. Mutable Param (indexed)
+ with self.assertRaisesRegex(ValueError, "Value not in parameter domain"):
+ m = ConcreteModel()
+ m.A = RangeSet(1, 2)
+ m.p4 = Param(m.A, within=NonNegativeReals, mutable=True)
+ m.p4[1] = -3
+
+ # 6. Mutable Param (arbitrary validation rule)
+ with self.assertRaisesRegex(ValueError, "Invalid parameter value"):
+ m = ConcreteModel()
+ m.p6 = Param(mutable=True, validate=validation_rule)
+ m.p6 = 1
+
def test_get_uninitialized(self):
model=AbstractModel()
model.a = Param()
@@ -1293,6 +1353,26 @@ def test_nonnumeric(self):
a : b
""".strip())
+ def test_domain_deprecation(self):
+ m = ConcreteModel()
+ log = StringIO()
+ with LoggingIntercept(log, 'pyomo.core'):
+ m.p = Param(mutable=True)
+ m.p = 10
+ self.assertEqual(log.getvalue(), "")
+ self.assertEqual(value(m.p), 10)
+
+ with LoggingIntercept(log, 'pyomo.core'):
+ m.p = 'a'
+ self.assertIn(
+ "DEPRECATED: The default domain for Param objects is 'Any'",
+ log.getvalue())
+ self.assertIn(
+ "domain of this Param (p) to be 'Any'",
+ log.getvalue())
+ self.assertEqual(value(m.p), 'a')
+
+
def createNonIndexedParamMethod(func, init_xy, new_xy, tol=1e-10):
def testMethod(self):
diff --git a/pyomo/core/tests/unit/test_pickle.py b/pyomo/core/tests/unit/test_pickle.py
index ab4005413a7..74a58a10ea8 100644
--- a/pyomo/core/tests/unit/test_pickle.py
+++ b/pyomo/core/tests/unit/test_pickle.py
@@ -290,7 +290,6 @@ def test_pickle2(self):
# verifies that the use of lambda expressions as rules yields model instances
# that are not pickle'able.
- @unittest.skipIf(sys.version_info[:2] < (2,6), "Skipping test because the sparse_dict repn is not supported")
def test_pickle3(self):
def rule1(model):
return (1,model.x+model.y[1],2)
diff --git a/pyomo/core/tests/unit/test_pickle4_baseline.txt b/pyomo/core/tests/unit/test_pickle4_baseline.txt
index 27717ceb9fe..a32fb17bb39 100644
--- a/pyomo/core/tests/unit/test_pickle4_baseline.txt
+++ b/pyomo/core/tests/unit/test_pickle4_baseline.txt
@@ -1,6 +1,7 @@
1 Set Declarations
- s : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=(1, 2)
- [1, 2]
+ s : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {1, 2}
2 Var Declarations
x : Size=1, Index=None
diff --git a/pyomo/core/tests/unit/test_range.py b/pyomo/core/tests/unit/test_range.py
index d63fbcd24ad..a342375e07f 100644
--- a/pyomo/core/tests/unit/test_range.py
+++ b/pyomo/core/tests/unit/test_range.py
@@ -14,7 +14,7 @@
from pyomo.core.base.range import (
NumericRange as NR, NonNumericRange as NNR, RangeProduct as RP,
- AnyRange,
+ AnyRange, RangeDifferenceError
)
from pyomo.core.base.set import (
Any
@@ -27,6 +27,11 @@ def test_init(self):
self.assertIsNone(a.end)
self.assertEqual(a.step, 0)
+ a = NR(-float('inf'), float('inf'), 0)
+ self.assertIsNone(a.start)
+ self.assertIsNone(a.end)
+ self.assertEqual(a.step, 0)
+
a = NR(0, None, 0)
self.assertEqual(a.start, 0)
self.assertIsNone(a.end)
@@ -408,6 +413,10 @@ def test_issubset(self):
self.assertFalse(NR(10, 0, -2).issubset(NR(10, 0, -4)))
self.assertTrue(NR(10, 0, -2).issubset(NR(10, 0, -1)))
+ # Scalar-discrete
+ self.assertTrue(NR(5, 5, 0).issubset(NR(0, 10, 1)))
+ self.assertFalse(NR(15, 15, 0).issubset(NR(0, 10, 1)))
+
def test_lcm(self):
self.assertEqual(
NR(None,None,0)._step_lcm((NR(0,1,0),)),
@@ -508,6 +517,10 @@ def test_range_difference(self):
NR(None,0,0).range_difference([NR(-5,0,0,'[)')]),
[NR(None,-5,0,'[)')],
)
+ self.assertEqual(
+ NR(0,10,0).range_difference([NR(None,5,0,'[)')]),
+ [NR(5,10,0,'[]')],
+ )
# Subtracting an open range from a closed range gives a closed
# range
self.assertEqual(
@@ -547,6 +560,12 @@ def test_range_difference(self):
a = NR(0.25, None, 1)
self.assertEqual(a.range_difference([NR(0.5, None, 1)]), [a])
+ # And the onee thing we don't support:
+ with self.assertRaisesRegex(
+ RangeDifferenceError, 'We do not support subtracting an '
+ 'infinite discrete range \[0:None\] from an infinite '
+ 'continuous range \[None..None\]'):
+ NR(None,None,0).range_difference([NR(0,None,1)])
def test_range_intersection(self):
self.assertEqual(
@@ -577,6 +596,10 @@ def test_range_intersection(self):
NR(5,10,0).range_intersection([NR(0,4,0)]),
[],
)
+ self.assertEqual(
+ NR(0,4,0).range_intersection([NNR('a')]),
+ [],
+ )
# test ranges running in the other direction
self.assertEqual(
@@ -652,6 +675,10 @@ def test_range_difference(self):
NR(0,None,1).range_difference([AnyRange()]),
[]
)
+ self.assertEqual(
+ AnyRange().range_difference([AnyRange()]),
+ []
+ )
def test_range_intersection(self):
self.assertEqual(
diff --git a/pyomo/core/tests/unit/test_reference.py b/pyomo/core/tests/unit/test_reference.py
index 39f9c69fbb9..883b2442f5a 100644
--- a/pyomo/core/tests/unit/test_reference.py
+++ b/pyomo/core/tests/unit/test_reference.py
@@ -20,12 +20,12 @@
from pyomo.environ import *
from pyomo.core.base.var import IndexedVar
-from pyomo.core.base.sets import _SetProduct, SetOf
+from pyomo.core.base.set import SetProduct, UnorderedSetOf
from pyomo.core.base.indexed_component import (
UnindexedComponent_set, IndexedComponent
)
from pyomo.core.base.reference import (
- _ReferenceDict, _ReferenceSet, Reference, _get_base_sets
+ _ReferenceDict, _ReferenceSet, Reference
)
@@ -384,10 +384,10 @@ def test_component_reference(self):
m.x = Var()
m.r = Reference(m.x)
- self.assertIs(m.r.type(), Var)
+ self.assertIs(m.r.ctype, Var)
self.assertIsNot(m.r.index_set(), m.x.index_set())
self.assertIs(m.x.index_set(), UnindexedComponent_set)
- self.assertIs(type(m.r.index_set()), SetOf)
+ self.assertIs(type(m.r.index_set()), UnorderedSetOf)
self.assertEqual(len(m.r), 1)
self.assertTrue(m.r.is_indexed())
self.assertIn(None, m.r)
@@ -398,10 +398,10 @@ def test_component_reference(self):
m.s = Reference(m.x[:])
- self.assertIs(m.s.type(), Var)
+ self.assertIs(m.s.ctype, Var)
self.assertIsNot(m.s.index_set(), m.x.index_set())
self.assertIs(m.x.index_set(), UnindexedComponent_set)
- self.assertIs(type(m.s.index_set()), SetOf)
+ self.assertIs(type(m.s.index_set()), UnorderedSetOf)
self.assertEqual(len(m.s), 1)
self.assertTrue(m.s.is_indexed())
self.assertIn(None, m.s)
@@ -413,7 +413,7 @@ def test_component_reference(self):
m.y = Var([1,2])
m.t = Reference(m.y)
- self.assertIs(m.t.type(), Var)
+ self.assertIs(m.t.ctype, Var)
self.assertIs(m.t.index_set(), m.y.index_set())
self.assertEqual(len(m.t), 2)
self.assertTrue(m.t.is_indexed())
@@ -444,7 +444,7 @@ def test_single_reference(self):
m.b[2].x = Var(bounds=(2,None))
m.r = Reference(m.b[:].x)
- self.assertIs(m.r.type(), Var)
+ self.assertIs(m.r.ctype, Var)
self.assertIs(m.r.index_set(), m.b.index_set())
self.assertEqual(len(m.r), 2)
self.assertEqual(m.r[1].lb, 1)
@@ -465,8 +465,8 @@ def b(b,i):
m.r = Reference(m.b[:].x[:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), _SetProduct)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIsInstance(m.r.index_set(), SetProduct)
self.assertIs(m.r.index_set().set_tuple[0], m.I)
self.assertIs(m.r.index_set().set_tuple[1], m.J)
self.assertEqual(len(m.r), 2*2)
@@ -490,8 +490,8 @@ def b(b,i):
m.r = Reference(m.b[:].x[:,:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), _SetProduct)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIsInstance(m.r.index_set(), SetProduct)
self.assertIs(m.r.index_set().set_tuple[0], m.I)
self.assertIs(m.r.index_set().set_tuple[1], m.J)
self.assertEqual(len(m.r), 2*2)
@@ -516,8 +516,8 @@ def b(b,i):
m.r = Reference(m.b[:].x[3,:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), SetOf)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIs(type(m.r.index_set()), UnorderedSetOf)
self.assertEqual(len(m.r), 2*1)
self.assertEqual(m.r[1,3].lb, 1)
self.assertEqual(m.r[2,3].lb, 2)
@@ -539,8 +539,8 @@ def b(b,i):
m.r = Reference(m.b[:].x[:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), SetOf)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIs(type(m.r.index_set()), UnorderedSetOf)
self.assertEqual(len(m.r), 2*2)
self.assertEqual(m.r[1,3].lb, 1)
self.assertEqual(m.r[2,4].lb, 2)
@@ -562,8 +562,8 @@ def b(b,i):
m.r = Reference(m.b[:].x[:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), SetOf)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIs(type(m.r.index_set()), UnorderedSetOf)
self.assertEqual(len(m.r), 2*2)
self.assertEqual(m.r[1,3].lb, 1)
self.assertEqual(m.r[2,4].lb, 2)
@@ -585,8 +585,8 @@ def test_nested_reference_nonuniform_index_size(self):
m.r = Reference(m.b[:].x[:,:])
- self.assertIs(m.r.type(), Var)
- self.assertIs(type(m.r.index_set()), SetOf)
+ self.assertIs(m.r.ctype, Var)
+ self.assertIs(type(m.r.index_set()), UnorderedSetOf)
self.assertEqual(len(m.r), 2*2*2)
self.assertEqual(m.r[1,3,3].lb, 1)
self.assertEqual(m.r[2,4,3].lb, 2)
@@ -606,10 +606,10 @@ def test_nested_scalars(self):
m.r = Reference(m.b[:].x[:])
self.assertEqual(len(m.r), 1)
self.assertEqual(m.r.index_set().dimen, 2)
- base_sets = list(_get_base_sets(m.r.index_set()))
+ base_sets = list(m.r.index_set().subsets())
self.assertEqual(len(base_sets), 2)
- self.assertIs(type(base_sets[0]), SetOf)
- self.assertIs(type(base_sets[1]), SetOf)
+ self.assertIs(type(base_sets[0]), UnorderedSetOf)
+ self.assertIs(type(base_sets[1]), UnorderedSetOf)
def test_ctype_detection(self):
m = ConcreteModel()
@@ -627,14 +627,14 @@ def test_ctype_detection(self):
m.y = Reference(m.b[:].y[...])
self.assertIs(type(m.y), IndexedVar)
- self.assertIs(m.y.type(), Var)
+ self.assertIs(m.y.ctype, Var)
m.y1 = Reference(m.b[:].y[...], ctype=None)
self.assertIs(type(m.y1), IndexedComponent)
- self.assertIs(m.y1.type(), IndexedComponent)
+ self.assertIs(m.y1.ctype, IndexedComponent)
m.z = Reference(m.b[:].z)
self.assertIs(type(m.z), IndexedComponent)
- self.assertIs(m.z.type(), IndexedComponent)
+ self.assertIs(m.z.ctype, IndexedComponent)
def test_reference_to_sparse(self):
m = ConcreteModel()
@@ -706,16 +706,52 @@ def b(b, i):
self.assertEqual(len(m.b), 1)
self.assertEqual(len(m.b[1].x), 3)
- # While (2,1) appears to be a valid member of the slice, because 2
- # was not in the Set when the Block rule fired, there is no
- # m.b[2] block data. Attempting to add m.xx[2,1] will correctly
- # instantiate the block and then promptly fail because we don't
- # automatically fire rules after construction.
- with self.assertRaisesRegexp(
- AttributeError, "'_BlockData' object has no attribute 'x'"):
- m.xx.add((2,1))
+ # While (2,2) appears to be a valid member of the slice, because
+ # 2 was not in the Set when the Block rule fired, there is no
+ # m.b[2] block data. Accessing m.xx[2,1] will construct the
+ # b[2] block data, fire the rule, and then add the new value to
+ # the Var x.
+ self.assertEqual(len(m.xx), 3)
+ m.xx[2,2] = 10
+ self.assertEqual(len(m.b), 2)
+ self.assertEqual(len(list(m.b[2].component_objects())), 1)
+ self.assertEqual(len(m.xx), 4)
+ self.assertIs(m.xx[2,2], m.b[2].x[2])
+ self.assertEqual(value(m.b[2].x[2]), 10)
+
+ def test_insert_var(self):
+ m = ConcreteModel()
+ m.T = Set(initialize=[1,5])
+ m.x = Var(m.T, initialize=lambda m,i: i)
+ @m.Block(m.T)
+ def b(b, i):
+ b.y = Var(initialize=lambda b: 10*b.index())
+ ref_x = Reference(m.x[:])
+ ref_y = Reference(m.b[:].y)
+
+ self.assertEqual(len(m.x), 2)
+ self.assertEqual(len(ref_x), 2)
self.assertEqual(len(m.b), 2)
- self.assertEqual(len(list(m.b[2].component_objects())), 0)
+ self.assertEqual(len(ref_y), 2)
+ self.assertEqual(value(ref_x[1]), 1)
+ self.assertEqual(value(ref_x[5]), 5)
+ self.assertEqual(value(ref_y[1]), 10)
+ self.assertEqual(value(ref_y[5]), 50)
+
+ m.T.add(2)
+ _x = ref_x[2]
+ self.assertEqual(len(m.x), 3)
+ self.assertIs(_x, m.x[2])
+ self.assertEqual(value(_x), 2)
+ self.assertEqual(value(m.x[2]), 2)
+ self.assertEqual(value(ref_x[2]), 2)
+
+ _y = ref_y[2]
+ self.assertEqual(len(m.b), 3)
+ self.assertIs(_y, m.b[2].y)
+ self.assertEqual(value(_y), 20)
+ self.assertEqual(value(ref_y[2]), 20)
+ self.assertEqual(value(m.b[2].y), 20)
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_set.py b/pyomo/core/tests/unit/test_set.py
index 330e2ea9362..32394da60d9 100644
--- a/pyomo/core/tests/unit/test_set.py
+++ b/pyomo/core/tests/unit/test_set.py
@@ -23,17 +23,18 @@
import pyutilib.th as unittest
-from pyomo.common.log import LoggingIntercept
from pyomo.common import DeveloperError
+from pyomo.common.dependencies import numpy as np, numpy_available
+from pyomo.common.log import LoggingIntercept
from pyomo.core.expr import native_numeric_types, native_types
import pyomo.core.base.set as SetModule
from pyomo.core.base.indexed_component import normalize_index
from pyomo.core.base.util import (
- ConstantInitializer, ItemInitializer,
+ ConstantInitializer, ItemInitializer, IndexedCallInitializer,
)
from pyomo.core.base.set import (
NumericRange as NR, NonNumericRange as NNR,
- AnyRange, _AnySet, Any, Binary,
+ AnyRange, _AnySet, Any, AnyWithNone, _EmptySet, EmptySet, Binary,
Reals, NonNegativeReals, PositiveReals, NonPositiveReals, NegativeReals,
Integers, PositiveIntegers, NegativeIntegers,
NonPositiveIntegers, NonNegativeIntegers,
@@ -53,8 +54,9 @@
SetProduct_OrderedSet,
_SetData, _FiniteSetData, _InsertionOrderSetData, _SortedSetData,
_FiniteSetMixin, _OrderedSetMixin,
- SetInitializer, SetIntersectInitializer, RangeSetInitializer,
- UnknownSetDimen,
+ SetInitializer, SetIntersectInitializer, BoundsInitializer,
+ UnknownSetDimen, UnindexedComponent_set,
+ DeclareGlobalSet, IntegerSet, RealSet,
simple_set_rule, set_options,
)
from pyomo.environ import (
@@ -62,11 +64,6 @@
Objective,
)
-try:
- import numpy as np
- numpy_available = True
-except ImportError:
- numpy_available = False
class Test_SetInitializer(unittest.TestCase):
def test_single_set(self):
@@ -111,9 +108,9 @@ def test_intersect(self):
self.assertIs(a(None,None), Reals)
a = SetInitializer(None)
- a.intersect(RangeSetInitializer(5))
+ a.intersect(BoundsInitializer(5, default_step=1))
self.assertIs(type(a), SetInitializer)
- self.assertIs(type(a._set), RangeSetInitializer)
+ self.assertIs(type(a._set), BoundsInitializer)
self.assertTrue(a.constant())
self.assertFalse(a.verified)
self.assertEqual(a(None,None), RangeSet(5))
@@ -144,11 +141,11 @@ def test_intersect(self):
a = SetInitializer(Reals)
a.intersect(SetInitializer(Integers))
- a.intersect(RangeSetInitializer(3))
+ a.intersect(BoundsInitializer(3, default_step=1))
self.assertIs(type(a), SetInitializer)
self.assertIs(type(a._set), SetIntersectInitializer)
self.assertIs(type(a._set._A), SetIntersectInitializer)
- self.assertIs(type(a._set._B), RangeSetInitializer)
+ self.assertIs(type(a._set._B), BoundsInitializer)
self.assertIs(a._set._A._A.val, Reals)
self.assertIs(a._set._A._B.val, Integers)
self.assertTrue(a.constant())
@@ -158,19 +155,21 @@ def test_intersect(self):
self.assertIs(type(s._sets[0]), SetIntersection_InfiniteSet)
self.assertIsInstance(s._sets[1], RangeSet)
+ p = Param(initialize=3)
a = SetInitializer(Reals)
a.intersect(SetInitializer(Integers))
- a.intersect(RangeSetInitializer(3, default_step=0))
+ a.intersect(BoundsInitializer(p, default_step=0))
self.assertIs(type(a), SetInitializer)
self.assertIs(type(a._set), SetIntersectInitializer)
self.assertIs(type(a._set._A), SetIntersectInitializer)
- self.assertIs(type(a._set._B), RangeSetInitializer)
+ self.assertIs(type(a._set._B), BoundsInitializer)
self.assertIs(a._set._A._A.val, Reals)
self.assertIs(a._set._A._B.val, Integers)
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertIs(type(s), SetIntersection_InfiniteSet)
+ p.construct()
s.construct()
self.assertIs(type(s), SetIntersection_OrderedSet)
self.assertIs(type(s._sets[0]), SetIntersection_InfiniteSet)
@@ -179,13 +178,14 @@ def test_intersect(self):
self.assertFalse(s._sets[1].isfinite())
self.assertTrue(s.isfinite())
+ p = Param(initialize=3)
a = SetInitializer(Reals)
a.intersect(SetInitializer({1:Integers}))
- a.intersect(RangeSetInitializer(3, default_step=0))
+ a.intersect(BoundsInitializer(p, default_step=0))
self.assertIs(type(a), SetInitializer)
self.assertIs(type(a._set), SetIntersectInitializer)
self.assertIs(type(a._set._A), SetIntersectInitializer)
- self.assertIs(type(a._set._B), RangeSetInitializer)
+ self.assertIs(type(a._set._B), BoundsInitializer)
self.assertIs(a._set._A._A.val, Reals)
self.assertIs(type(a._set._A._B), ItemInitializer)
self.assertFalse(a.constant())
@@ -194,6 +194,7 @@ def test_intersect(self):
a(None,None)
s = a(None,1)
self.assertIs(type(s), SetIntersection_InfiniteSet)
+ p.construct()
s.construct()
self.assertIs(type(s), SetIntersection_OrderedSet)
self.assertIs(type(s._sets[0]), SetIntersection_InfiniteSet)
@@ -202,50 +203,56 @@ def test_intersect(self):
self.assertFalse(s._sets[1].isfinite())
self.assertTrue(s.isfinite())
- def test_rangeset(self):
- a = RangeSetInitializer(5)
+ def test_boundsinit(self):
+ a = BoundsInitializer(5, default_step=1)
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(5))
- a = RangeSetInitializer((0,5))
+ a = BoundsInitializer((0,5), default_step=1)
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(0,5))
- a = RangeSetInitializer((0,5,2))
+ a = BoundsInitializer((0,5,2))
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(0,5,2))
- a = RangeSetInitializer(5, default_step=0)
+ a = BoundsInitializer(())
+ self.assertTrue(a.constant())
+ self.assertFalse(a.verified)
+ s = a(None,None)
+ self.assertEqual(s, RangeSet(None,None,0))
+
+ a = BoundsInitializer(5)
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(1,5,0))
- a = RangeSetInitializer((0,5), default_step=0)
+ a = BoundsInitializer((0,5))
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(0,5,0))
- a = RangeSetInitializer((0,5,2), default_step=0)
+ a = BoundsInitializer((0,5,2))
self.assertTrue(a.constant())
self.assertFalse(a.verified)
s = a(None,None)
self.assertEqual(s, RangeSet(0,5,2))
- a = RangeSetInitializer({1:5})
+ a = BoundsInitializer({1:5}, default_step=1)
self.assertFalse(a.constant())
self.assertFalse(a.verified)
s = a(None,1)
self.assertEqual(s, RangeSet(5))
- a = RangeSetInitializer({1:(0,5)})
+ a = BoundsInitializer({1:(0,5)}, default_step=1)
self.assertFalse(a.constant())
self.assertFalse(a.verified)
s = a(None,1)
@@ -262,7 +269,7 @@ def test_setdefault(self):
a.setdefault(Reals)
self.assertIs(a(None,None), Integers)
- a = RangeSetInitializer(5)
+ a = BoundsInitializer(5, default_step=1)
self.assertEqual(a(None,None), RangeSet(5))
a.setdefault(Reals)
self.assertEqual(a(None,None), RangeSet(5))
@@ -273,6 +280,48 @@ def test_setdefault(self):
a.setdefault(RangeSet(5))
self.assertIs(type(a(None,None)), SetIntersection_InfiniteSet)
+ def test_indices(self):
+ a = SetInitializer(None)
+ self.assertFalse(a.contains_indices())
+ with self.assertRaisesRegex(
+ RuntimeError, 'does not contain embedded indices'):
+ a.indices()
+
+ a = SetInitializer([1,2,3])
+ self.assertFalse(a.contains_indices())
+ with self.assertRaisesRegex(
+ RuntimeError, 'does not contain embedded indices'):
+ a.indices()
+
+ # intersection initializers
+ a = SetInitializer({1: [1,2,3], 2: [4]})
+ self.assertTrue(a.contains_indices())
+ self.assertEqual(list(a.indices()), [1,2])
+
+ a.intersect(SetInitializer({1: [4], 2: [1,2]}))
+ self.assertTrue(a.contains_indices())
+ self.assertEqual(list(a.indices()), [1,2])
+
+ # intersection initializer mismatch
+ a = SetInitializer({1: [1,2,3], 2: [4]})
+ self.assertTrue(a.contains_indices())
+ self.assertEqual(list(a.indices()), [1,2])
+
+ a.intersect(SetInitializer({1: [4], 3: [1,2]}))
+ self.assertTrue(a.contains_indices())
+ with self.assertRaisesRegex(
+ ValueError, 'contains two sub-initializers with inconsistent'):
+ a.indices()
+
+ # intersection initializer mismatch (unindexed)
+ a = SetInitializer([1,2])
+ self.assertFalse(a.contains_indices())
+ a.intersect(SetInitializer([1,2]))
+ self.assertFalse(a.contains_indices())
+ with self.assertRaisesRegex(
+ RuntimeError, 'does not contain embedded indices'):
+ a.indices()
+
class InfiniteSetTester(unittest.TestCase):
def test_Reals(self):
@@ -286,6 +335,35 @@ def test_Reals(self):
self.assertFalse(Reals.isdiscrete())
self.assertFalse(Reals.isfinite())
+ self.assertEqual(Reals.dim(), 0)
+ self.assertIs(Reals.index_set(), UnindexedComponent_set)
+ with self.assertRaisesRegex(
+ TypeError, ".*'GlobalSet' has no len"):
+ len(Reals)
+ with self.assertRaisesRegex(
+ TypeError, "'GlobalSet' object is not iterable "
+ "\(non-finite Set 'Reals' is not iterable\)"):
+ list(Reals)
+ self.assertEqual(list(Reals.ranges()), [NR(None,None,0)])
+ self.assertEqual(Reals.bounds(), (None,None))
+ self.assertEqual(Reals.dimen, 1)
+
+ tmp = RealSet()
+ self.assertFalse(tmp.isdiscrete())
+ self.assertFalse(tmp.isfinite())
+ self.assertEqual(Reals, tmp)
+ self.assertEqual(tmp, Reals)
+ tmp.clear()
+ self.assertEqual(EmptySet, tmp)
+ self.assertEqual(tmp, EmptySet)
+
+ self.assertEqual(tmp.domain, Reals)
+ self.assertEqual(str(Reals), 'Reals')
+ self.assertEqual(str(tmp), 'Reals')
+ b = ConcreteModel()
+ b.tmp = tmp
+ self.assertEqual(str(tmp), 'tmp')
+
def test_Integers(self):
self.assertIn(0, Integers)
self.assertNotIn(1.5, Integers)
@@ -297,6 +375,35 @@ def test_Integers(self):
self.assertTrue(Integers.isdiscrete())
self.assertFalse(Integers.isfinite())
+ self.assertEqual(Integers.dim(), 0)
+ self.assertIs(Integers.index_set(), UnindexedComponent_set)
+ with self.assertRaisesRegex(
+ TypeError, ".*'GlobalSet' has no len"):
+ len(Integers)
+ with self.assertRaisesRegex(
+ TypeError, "'GlobalSet' object is not iterable "
+ "\(non-finite Set 'Integers' is not iterable\)"):
+ list(Integers)
+ self.assertEqual(list(Integers.ranges()), [NR(0,None,1),NR(0,None,-1)])
+ self.assertEqual(Integers.bounds(), (None,None))
+ self.assertEqual(Integers.dimen, 1)
+
+ tmp = IntegerSet()
+ self.assertTrue(tmp.isdiscrete())
+ self.assertFalse(tmp.isfinite())
+ self.assertEqual(Integers, tmp)
+ self.assertEqual(tmp, Integers)
+ tmp.clear()
+ self.assertEqual(EmptySet, tmp)
+ self.assertEqual(tmp, EmptySet)
+
+ self.assertEqual(tmp.domain, Reals)
+ self.assertEqual(str(Integers), 'Integers')
+ self.assertEqual(str(tmp), 'Integers')
+ b = ConcreteModel()
+ b.tmp = tmp
+ self.assertEqual(str(tmp), 'tmp')
+
def test_Any(self):
self.assertIn(0, Any)
self.assertIn(1.5, Any)
@@ -308,6 +415,78 @@ def test_Any(self):
self.assertFalse(Any.isdiscrete())
self.assertFalse(Any.isfinite())
+ self.assertEqual(Any.dim(), 0)
+ self.assertIs(Any.index_set(), UnindexedComponent_set)
+ with self.assertRaisesRegex(
+ TypeError, ".*'Any' has no len"):
+ len(Any)
+ with self.assertRaisesRegex(
+ TypeError, "'GlobalSet' object is not iterable "
+ "\(non-finite Set 'Any' is not iterable\)"):
+ list(Any)
+ self.assertEqual(list(Any.ranges()), [AnyRange()])
+ self.assertEqual(Any.bounds(), (None,None))
+ self.assertEqual(Any.dimen, None)
+
+ tmp = _AnySet()
+ self.assertFalse(tmp.isdiscrete())
+ self.assertFalse(tmp.isfinite())
+ self.assertEqual(Any, tmp)
+ tmp.clear()
+ self.assertEqual(Any, tmp)
+
+ self.assertEqual(tmp.domain, Any)
+ self.assertEqual(str(Any), 'Any')
+ self.assertEqual(str(tmp), '_AnySet')
+ b = ConcreteModel()
+ b.tmp = tmp
+ self.assertEqual(str(tmp), 'tmp')
+
+ def test_AnyWithNone(self):
+ os = StringIO()
+ with LoggingIntercept(os, 'pyomo'):
+ self.assertIn(None, AnyWithNone)
+ self.assertIn(1, AnyWithNone)
+ self.assertRegexpMatches(
+ os.getvalue(),
+ "^DEPRECATED: The AnyWithNone set is deprecated")
+
+ self.assertEqual(Any, AnyWithNone)
+ self.assertEqual(AnyWithNone, Any)
+
+ def test_EmptySet(self):
+ self.assertNotIn(0, EmptySet)
+ self.assertNotIn(1.5, EmptySet)
+ self.assertNotIn(100, EmptySet),
+ self.assertNotIn(-100, EmptySet),
+ self.assertNotIn('A', EmptySet)
+ self.assertNotIn(None, EmptySet)
+
+ self.assertTrue(EmptySet.isdiscrete())
+ self.assertTrue(EmptySet.isfinite())
+
+ self.assertEqual(EmptySet.dim(), 0)
+ self.assertIs(EmptySet.index_set(), UnindexedComponent_set)
+ self.assertEqual(len(EmptySet), 0)
+ self.assertEqual(list(EmptySet), [])
+ self.assertEqual(list(EmptySet.ranges()), [])
+ self.assertEqual(EmptySet.bounds(), (None,None))
+ self.assertEqual(EmptySet.dimen, 0)
+
+ tmp = _EmptySet()
+ self.assertTrue(tmp.isdiscrete())
+ self.assertTrue(tmp.isfinite())
+ self.assertEqual(EmptySet, tmp)
+ tmp.clear()
+ self.assertEqual(EmptySet, tmp)
+
+ self.assertEqual(tmp.domain, EmptySet)
+ self.assertEqual(str(EmptySet), 'EmptySet')
+ self.assertEqual(str(tmp), '_EmptySet')
+ b = ConcreteModel()
+ b.tmp = tmp
+ self.assertEqual(str(tmp), 'tmp')
+
@unittest.skipIf(not numpy_available, "NumPy required for these tests")
def test_numpy_compatible(self):
self.assertIn(np.intc(1), Reals)
@@ -361,6 +540,17 @@ def test_relational_operators(self):
self.assertFalse(PositiveIntegers.issuperset(Integers))
self.assertFalse(PositiveIntegers.isdisjoint(Integers))
+ # Special case: cleared non-finite rangesets
+ tmp = IntegerSet()
+ tmp.clear()
+ self.assertTrue(tmp.issubset(EmptySet))
+ self.assertTrue(tmp.issuperset(EmptySet))
+ self.assertTrue(tmp.isdisjoint(EmptySet))
+
+ self.assertTrue(EmptySet.issubset(tmp))
+ self.assertTrue(EmptySet.issuperset(tmp))
+ self.assertTrue(EmptySet.isdisjoint(tmp))
+
def test_equality(self):
self.assertEqual(Any, Any)
@@ -645,25 +835,28 @@ def test_constructor(self):
self.assertEqual(i, j)
i = RangeSet(3)
+ self.assertTrue(i.is_constructed())
self.assertEqual(len(i), 3)
self.assertEqual(len(list(i.ranges())), 1)
i = RangeSet(1,3)
+ self.assertTrue(i.is_constructed())
self.assertEqual(len(i), 3)
self.assertEqual(len(list(i.ranges())), 1)
i = RangeSet(ranges=[NR(1,3,1)])
+ self.assertTrue(i.is_constructed())
self.assertEqual(len(i), 3)
self.assertEqual(list(i.ranges()), [NR(1,3,1)])
i = RangeSet(1,3,0)
with self.assertRaisesRegexp(
- TypeError, ".*'InfiniteSimpleRangeSet' has no len()"):
+ TypeError, ".*'InfiniteSimpleRangeSet' has no len"):
len(i)
self.assertEqual(len(list(i.ranges())), 1)
with self.assertRaisesRegexp(
- TypeError, ".*'GlobalSet' has no len()"):
+ TypeError, ".*'GlobalSet' has no len"):
len(Integers)
self.assertEqual(len(list(Integers.ranges())), 2)
@@ -677,17 +870,45 @@ def test_constructor(self):
"NumericRange objects"):
RangeSet(ranges=(NR(1,5,1), NNR('a')))
+ with self.assertRaisesRegexp(
+ ValueError, "Constructing a finite RangeSet over a "
+ "non-finite range "):
+ RangeSet(finite=True, ranges=(NR(1,5,0),))
+
+ with self.assertRaisesRegexp(
+ ValueError, "RangeSet does not support unbounded ranges "
+ "with a non-integer step"):
+ RangeSet(0,None,0.5)
+
+ class _AlmostNumeric(object):
+ def __init__(self, val):
+ self.val = val
+ def __float__(self):
+ return self.val
+ def __add__(self, other):
+ return self.val+other
+ def __sub__(self, other):
+ return self.val-other
+
+ i = RangeSet(_AlmostNumeric(1))
+ self.assertFalse(i.is_constructed())
+ i.construct()
+ self.assertEqual(list(i), [1])
+
output = StringIO()
p = Param(initialize=5)
+ i = RangeSet(p)
+ self.assertFalse(i.is_constructed())
+ self.assertIs(type(i), AbstractFiniteSimpleRangeSet)
p.construct()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
- i = RangeSet(p)
- self.assertIs(type(i), AbstractFiniteSimpleRangeSet)
self.assertEqual(output.getvalue(), "")
i.construct()
ref = 'Constructing RangeSet, '\
- 'name=AbstractFiniteSimpleRangeSet, from data=None\n'
+ 'name=FiniteSimpleRangeSet, from data=None\n'
self.assertEqual(output.getvalue(), ref)
+ self.assertTrue(i.is_constructed())
+ self.assertIs(type(i), FiniteSimpleRangeSet)
# Calling construct() twice bypasses construction the second
# time around
i.construct()
@@ -710,10 +931,57 @@ def test_constructor(self):
self.assertEqual(len(i), 0)
self.assertEqual(len(list(i.ranges())), 0)
+ # Special case: we do not error when the constructing a 0-length
+ # RangeSetwith bounds (i, i-1)
i = RangeSet(0,-1)
self.assertEqual(len(i), 0)
self.assertEqual(len(list(i.ranges())), 0)
+ # Test non-finite RangeSets
+ i = RangeSet(1,10)
+ self.assertIs(type(i), FiniteSimpleRangeSet)
+ i = RangeSet(1,10,0)
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+ i = RangeSet(1,1,0)
+ self.assertIs(type(i), FiniteSimpleRangeSet)
+ j = RangeSet(1, float('inf'))
+ self.assertIs(type(j), InfiniteSimpleRangeSet)
+ i = RangeSet(1,None)
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+ self.assertEqual(i,j)
+ self.assertIn(1, i)
+ self.assertIn(100, i)
+ self.assertNotIn(0, i)
+ self.assertNotIn(1.5, i)
+ i = RangeSet(None,1)
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+ self.assertIn(1, i)
+ self.assertNotIn(100, i)
+ self.assertIn(0, i)
+ self.assertNotIn(0.5, i)
+ i = RangeSet(None,None)
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+ self.assertIn(1, i)
+ self.assertIn(100, i)
+ self.assertIn(0, i)
+ self.assertNotIn(0.5, i)
+
+ i = RangeSet(None,None,bounds=(-5,10))
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+ self.assertIn(10, i)
+ self.assertNotIn(11, i)
+ self.assertIn(-5, i)
+ self.assertNotIn(-6, i)
+ self.assertNotIn(0.5, i)
+
+ p = Param(initialize=float('inf'))
+ i = RangeSet(1, p, 1)
+ self.assertIs(type(i), AbstractFiniteSimpleRangeSet)
+ p.construct()
+ i = RangeSet(1, p, 1)
+ self.assertIs(type(i), InfiniteSimpleRangeSet)
+
+
# Test abstract RangeSets
m = AbstractModel()
m.p = Param()
@@ -739,6 +1007,76 @@ def test_constructor(self):
data={None: {'p': {None: 1}, 'q': {None: 5}, 's': {None: 1},
'i': {None: [1,2,3]} }})
+ def test_filter(self):
+ def rFilter(m, i):
+ return i % 2
+ # Simple filter (beginning with the *first* element)
+ r = RangeSet(10, filter=rFilter)
+ self.assertEqual(r, [1,3,5,7,9])
+
+ # Nothing to remove
+ r = RangeSet(1, filter=rFilter)
+ self.assertEqual(r, [1])
+
+ # Remove the only element in the range
+ r = RangeSet(2,2, filter=rFilter)
+ self.assertEqual(r, [])
+
+ # remove the *second* element in the range
+ r = RangeSet(2,3, filter=rFilter)
+ self.assertEqual(r, [3])
+
+ # Test a filter that doesn't raise an exception for "None"
+ def rFilter(m, i):
+ return i is None or i % 2
+ r = RangeSet(10, filter=rFilter)
+ self.assertEqual(r, [1,3,5,7,9])
+
+ with self.assertRaisesRegexp(
+ ValueError, "The 'filter' keyword argument is not "
+ "valid for non-finite RangeSet component"):
+ r = RangeSet(1,10,0, filter=rFilter)
+
+ def test_validate(self):
+ def rFilter(m, i):
+ self.assertIs(m, None)
+ return i % 2
+ # Simple validation
+ r = RangeSet(1,10,2, validate=rFilter)
+ self.assertEqual(r, [1,3,5,7,9])
+
+ # Failed validation
+ with self.assertRaisesRegexp(
+ ValueError, "The value=2 violates the validation rule"):
+ r = RangeSet(10, validate=rFilter)
+
+ # Test a validation that doesn't raise an exception for "None"
+ def rFilter(m, i):
+ return i is None or i % 2
+ r = RangeSet(1,10,2, validate=rFilter)
+ self.assertEqual(r, [1,3,5,7,9])
+
+ with self.assertRaisesRegexp(
+ ValueError, "The 'validate' keyword argument is not "
+ "valid for non-finite RangeSet component"):
+ r = RangeSet(1,10,0, validate=rFilter)
+
+ def badRule(m, i):
+ raise RuntimeError("ERROR: %s" % i)
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ with self.assertRaisesRegexp(
+ RuntimeError, "ERROR: 1"):
+ r = RangeSet(10, validate=badRule)
+ self.assertEqual(
+ output.getvalue(),
+ "Exception raised while validating element "
+ "'1' for Set FiniteSimpleRangeSet\n")
+
+ def test_bounds(self):
+ r = RangeSet(100, bounds=(2.5, 5.5))
+ self.assertEqual(r, [3,4,5])
+
def test_contains(self):
r = RangeSet(5)
self.assertIn(1, r)
@@ -810,6 +1148,9 @@ def __len__(self):
return len(self.data)
self.assertEqual(SetOf({1,3,5}), _NonIterable())
+ # Test types that cannot be case to set
+ self.assertNotEqual(SetOf({3,}), 3)
+
def test_inequality(self):
self.assertTrue(SetOf([1,2,3]) <= SetOf({1,2,3}))
self.assertFalse(SetOf([1,2,3]) < SetOf({1,2,3}))
@@ -873,7 +1214,6 @@ def test_pprint(self):
m.J = SetOf([1,2,3])
buf = StringIO()
- m.pprint()
m.pprint(ostream=buf)
self.assertEqual(buf.getvalue().strip(), """
4 RangeSet Declarations
@@ -927,15 +1267,27 @@ def test_naming(self):
m.a = Param(initialize=3)
o = RangeSet(m.a)
- self.assertEqual(str(o), "AbstractFiniteSimpleRangeSet")
+ self.assertEqual(str(o), "[1:3]")
m.O = o
self.assertEqual(str(o), "O")
p = RangeSet(m.a, finite=False)
- self.assertEqual(str(p), "AbstractInfiniteSimpleRangeSet")
+ self.assertEqual(str(p), "[1:3]")
m.P = p
self.assertEqual(str(p), "P")
+ b = Param(initialize=3)
+ oo = RangeSet(b)
+ self.assertEqual(str(oo), "AbstractFiniteSimpleRangeSet")
+ pp = RangeSet(b, finite=False)
+ self.assertEqual(str(pp), "AbstractInfiniteSimpleRangeSet")
+
+ b.construct()
+ m.OO = oo
+ self.assertEqual(str(oo), "OO")
+ m.PP = pp
+ self.assertEqual(str(pp), "PP")
+
def test_isdisjoint(self):
i = SetOf({1,2,3})
self.assertTrue(i.isdisjoint({4,5,6}))
@@ -977,6 +1329,11 @@ def __len__(self):
self.assertTrue(SetOf({2,4}).isdisjoint(_NonIterable()))
self.assertFalse(SetOf({2,3,4}).isdisjoint(_NonIterable()))
+ # test bad type
+ with self.assertRaisesRegexp(
+ TypeError, "'int' object is not iterable"):
+ i.isdisjoint(1)
+
def test_issubset(self):
i = SetOf({1,2,3})
self.assertTrue(i.issubset({1,2,3,4}))
@@ -1019,6 +1376,11 @@ def __len__(self):
self.assertTrue(SetOf({1,5}).issubset(_NonIterable()))
self.assertFalse(SetOf({1,3,4}).issubset(_NonIterable()))
+ # test bad type
+ with self.assertRaisesRegexp(
+ TypeError, "'int' object is not iterable"):
+ i.issubset(1)
+
def test_issuperset(self):
i = SetOf({1,2,3})
self.assertTrue(i.issuperset({1,2}))
@@ -1062,6 +1424,11 @@ def __len__(self):
with self.assertRaisesRegexp(TypeError, 'not iterable'):
SetOf({1,3,4,5}).issuperset(_NonIterable())
+ # test bad type
+ with self.assertRaisesRegexp(
+ TypeError, "'int' object is not iterable"):
+ i.issuperset(1)
+
def test_unordered_setof(self):
i = SetOf({1,3,2,0})
@@ -1343,16 +1710,33 @@ def test_float_steps(self):
"step direction \(got \[0:4:-0.5\]\)"):
RangeSet(0,4,-.5)
+ def test_check_values(self):
+ m = ConcreteModel()
+ m.I = RangeSet(5)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
+ self.assertTrue(m.I.check_values())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: check_values\(\) is deprecated:")
+
+
class Test_SetOperator(unittest.TestCase):
def test_construct(self):
- a = RangeSet(3)
+ p = Param(initialize=3)
+ a = RangeSet(p)
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
i = a * a
self.assertEqual(output.getvalue(), "")
+ p.construct()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
i.construct()
ref = 'Constructing SetOperator, name=SetProduct_OrderedSet, '\
'from data=None\n' \
+ 'Constructing RangeSet, name=FiniteSimpleRangeSet, '\
+ 'from data=None\n'\
'Constructing Set, name=SetProduct_OrderedSet, '\
'from data=None\n'
self.assertEqual(output.getvalue(), ref)
@@ -1361,6 +1745,28 @@ def test_construct(self):
i.construct()
self.assertEqual(output.getvalue(), ref)
+ def test_deepcopy(self):
+ # This tests the example in Set.__deepcopy__()
+ # This also tests that returning Set.Skip from a rule works...
+ a = AbstractModel()
+ a.A = Set(initialize=[1,2])
+ a.B = Set(initialize=[3,4])
+ def x_init(m,i):
+ if i == 2:
+ return Set.Skip
+ else:
+ return []
+ a.x = Set( [1,2],
+ domain={1: a.A*a.B, 2: a.A*a.A},
+ initialize=x_init )
+
+ i = a.create_instance()
+ self.assertEqual(len(i.x), 1)
+ self.assertIn(1, i.x)
+ self.assertNotIn(2, i.x)
+ self.assertEqual(i.x[1].dimen, 2)
+ self.assertEqual(i.x[1].domain, i.A*i.B)
+ self.assertEqual(i.x[1], [])
class TestSetUnion(unittest.TestCase):
def test_pickle(self):
@@ -1418,8 +1824,8 @@ def test_domain_and_pprint(self):
m.A.pprint(ostream=output)
ref="""
A : Size=1, Index=None, Ordered=True
- Key : Dimen : Domain : Size : Members
- None : 1 : I | {3, 4} : 4 : {1, 2, 3, 4}
+ Key : Dimen : Domain : Size : Members
+ None : 1 : I | A_index_0 : 4 : {1, 2, 3, 4}
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -1672,8 +2078,8 @@ def test_domain_and_pprint(self):
m.A.pprint(ostream=output)
ref="""
A : Size=1, Index=None, Ordered=True
- Key : Dimen : Domain : Size : Members
- None : 1 : I & {3, 4} : 0 : {}
+ Key : Dimen : Domain : Size : Members
+ None : 1 : I & A_index_0 : 0 : {}
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -1847,35 +2253,68 @@ def test_infinite_setintersection(self):
def test_odd_intersections(self):
# Test the intersection of an infinite discrete range with a
# finite continuous one
- a = RangeSet(0, None, 2)
- b = RangeSet(5,10,0)
- m = ConcreteModel()
- x = a & b
- self.assertIs(type(x), SetIntersection_InfiniteSet)
- m.X = x
- self.assertIs(type(x), SetIntersection_OrderedSet)
- self.assertEqual(list(x), [6,8,10])
-
- self.assertEqual(x.ord(6), 1)
- self.assertEqual(x.ord(8), 2)
- self.assertEqual(x.ord(10), 3)
-
- self.assertEqual(x[1], 6)
- self.assertEqual(x[2], 8)
- self.assertEqual(x[3], 10)
+ m = AbstractModel()
+ m.p = Param(initialize=0)
+ m.a = RangeSet(0, None, 2)
+ m.b = RangeSet(5,10,m.p, finite=False)
+ m.x = m.a & m.b
+ self.assertTrue(m.a._constructed)
+ self.assertFalse(m.b._constructed)
+ self.assertFalse(m.x._constructed)
+ self.assertIs(type(m.x), SetIntersection_InfiniteSet)
+ i = m.create_instance()
+ self.assertIs(type(i.x), SetIntersection_OrderedSet)
+ self.assertEqual(list(i.x), [6,8,10])
+
+ self.assertEqual(i.x.ord(6), 1)
+ self.assertEqual(i.x.ord(8), 2)
+ self.assertEqual(i.x.ord(10), 3)
+
+ self.assertEqual(i.x[1], 6)
+ self.assertEqual(i.x[2], 8)
+ self.assertEqual(i.x[3], 10)
with self.assertRaisesRegexp(
IndexError,
- "X index out of range"):
- x[4]
+ "x index out of range"):
+ i.x[4]
- self.assertEqual(x[-3], 6)
- self.assertEqual(x[-2], 8)
- self.assertEqual(x[-1], 10)
+ self.assertEqual(i.x[-3], 6)
+ self.assertEqual(i.x[-2], 8)
+ self.assertEqual(i.x[-1], 10)
with self.assertRaisesRegexp(
IndexError,
- "X index out of range"):
- x[-4]
+ "x index out of range"):
+ i.x[-4]
+ def test_subsets(self):
+ a = SetOf([1])
+ b = SetOf([1])
+ c = SetOf([1])
+ d = SetOf([1])
+
+ x = a & b
+ self.assertEqual(len(x._sets), 2)
+ self.assertEqual(list(x.subsets()), [x])
+ self.assertEqual(list(x.subsets(False)), [x])
+ self.assertEqual(list(x.subsets(True)), [a,b])
+ x = a & b & c
+ self.assertEqual(len(x._sets), 2)
+ self.assertEqual(list(x.subsets()), [x])
+ self.assertEqual(list(x.subsets(False)), [x])
+ self.assertEqual(list(x.subsets(True)), [a,b,c])
+ x = (a & b) & (c & d)
+ self.assertEqual(len(x._sets), 2)
+ self.assertEqual(list(x.subsets()), [x])
+ self.assertEqual(list(x.subsets(False)), [x])
+ self.assertEqual(list(x.subsets(True)), [a,b,c,d])
+
+ x = (a & b) * (c & d)
+ self.assertEqual(len(x._sets), 2)
+ self.assertEqual(len(list(x.subsets())), 2)
+ self.assertEqual(list(x.subsets()), [a&b, c&d])
+ self.assertEqual(list(x.subsets(False)), [a&b, c&d])
+ self.assertEqual(len(list(x.subsets(True))), 4)
+ self.assertEqual(list(x.subsets(True)), [a,b,c,d])
class TestSetDifference(unittest.TestCase):
@@ -1922,8 +2361,8 @@ def test_domain_and_pprint(self):
m.A.pprint(ostream=output)
ref="""
A : Size=1, Index=None, Ordered=True
- Key : Dimen : Domain : Size : Members
- None : 1 : I - {3, 4} : 2 : {1, 2}
+ Key : Dimen : Domain : Size : Members
+ None : 1 : I - A_index_0 : 2 : {1, 2}
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -2124,8 +2563,8 @@ def test_domain_and_pprint(self):
m.A.pprint(ostream=output)
ref="""
A : Size=1, Index=None, Ordered=True
- Key : Dimen : Domain : Size : Members
- None : 1 : I ^ {3, 4} : 4 : {1, 2, 3, 4}
+ Key : Dimen : Domain : Size : Members
+ None : 1 : I ^ A_index_0 : 4 : {1, 2, 3, 4}
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -2379,8 +2818,8 @@ def test_domain_and_pprint(self):
m.A.pprint(ostream=output)
ref="""
A : Size=1, Index=None, Ordered=True
- Key : Dimen : Domain : Size : Members
- None : 2 : I*{3, 4} : 4 : {(1, 3), (1, 4), (2, 3), (2, 4)}
+ Key : Dimen : Domain : Size : Members
+ None : 2 : I*A_index_0 : 4 : {(1, 3), (1, 4), (2, 3), (2, 4)}
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -2442,7 +2881,7 @@ def test_cutPointGenerator(self):
]
self.assertEqual(test, ref)
- def test_flatten_cross_product(self):
+ def test_subsets(self):
a = SetOf([1])
b = SetOf([1])
c = SetOf([1])
@@ -2450,13 +2889,38 @@ def test_flatten_cross_product(self):
x = a * b
self.assertEqual(len(x._sets), 2)
- self.assertEqual(list(x.flatten_cross_product()), [a,b])
+ self.assertEqual(list(x.subsets()), [a,b])
+ self.assertEqual(list(x.subsets(True)), [a,b])
+ self.assertEqual(list(x.subsets(False)), [a,b])
x = a * b * c
self.assertEqual(len(x._sets), 2)
- self.assertEqual(list(x.flatten_cross_product()), [a,b,c])
+ self.assertEqual(list(x.subsets()), [a,b,c])
+ self.assertEqual(list(x.subsets(True)), [a,b,c])
+ self.assertEqual(list(x.subsets(False)), [a,b,c])
x = (a * b) * (c * d)
self.assertEqual(len(x._sets), 2)
- self.assertEqual(list(x.flatten_cross_product()), [a,b,c,d])
+ self.assertEqual(list(x.subsets()), [a,b,c,d])
+ self.assertEqual(list(x.subsets(True)), [a,b,c,d])
+ self.assertEqual(list(x.subsets(False)), [a,b,c,d])
+
+ x = (a - b) * (c * d)
+ self.assertEqual(len(x._sets), 2)
+ self.assertEqual(len(list(x.subsets())), 3)
+ self.assertEqual(len(list(x.subsets(False))), 3)
+ self.assertEqual(list(x.subsets()), [(a-b),c,d])
+ self.assertEqual(len(list(x.subsets(True))), 4)
+ self.assertEqual(list(x.subsets(True)), [a,b,c,d])
+
+ def test_set_tuple(self):
+ a = SetOf([1])
+ b = SetOf([1])
+ x = a * b
+ os = StringIO()
+ with LoggingIntercept(os, 'pyomo'):
+ self.assertEqual(x.set_tuple, [a,b])
+ self.assertRegexpMatches(
+ os.getvalue(),
+ '^DEPRECATED: SetProduct.set_tuple is deprecated.')
def test_no_normalize_index(self):
try:
@@ -2716,6 +3180,73 @@ def test_ordered_nondim_setproduct(self):
self.assertEqual(x.ord((1, 2, (3, 4), 0)), 3)
self.assertEqual(x.ord((1, 2, 3, 4, 0)), 3)
+ def test_setproduct_construct_data(self):
+ m = AbstractModel()
+ m.I = Set(initialize=[1,2])
+ m.J = m.I * m.I
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ m.create_instance(
+ data={None:{'J': {None: [(1,1),(1,2),(2,1),(2,2)]}}})
+ self.assertRegexpMatches(
+ output.getvalue().replace('\n',' '),
+ "^DEPRECATED: Providing construction data to SetOperator objects "
+ "is deprecated")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ with self.assertRaisesRegexp(
+ ValueError, "Constructing SetOperator J with "
+ "incompatible data \(data=\{None: \[\(1, 1\), \(1, 2\), "
+ "\(2, 1\)\]\}"):
+ m.create_instance(
+ data={None:{'J': {None: [(1,1),(1,2),(2,1)]}}})
+ self.assertRegexpMatches(
+ output.getvalue().replace('\n',' '),
+ "^DEPRECATED: Providing construction data to SetOperator objects "
+ "is deprecated")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ with self.assertRaisesRegexp(
+ ValueError, "Constructing SetOperator J with "
+ "incompatible data \(data=\{None: \[\(1, 3\), \(1, 2\), "
+ "\(2, 1\), \(2, 2\)\]\}"):
+ m.create_instance(
+ data={None:{'J': {None: [(1,3),(1,2),(2,1),(2,2)]}}})
+ self.assertRegexpMatches(
+ output.getvalue().replace('\n',' '),
+ "^DEPRECATED: Providing construction data to SetOperator objects "
+ "is deprecated")
+
+ def test_setproduct_nondim_set(self):
+ m = ConcreteModel()
+ m.I = Set(initialize=[1,2,3])
+ m.J = Set()
+ m.K = Set(initialize=[4,5,6])
+ m.Z = m.I * m.J * m.K
+ self.assertEqual(len(m.Z), 0)
+ self.assertNotIn((2,5), m.Z)
+
+ m.J.add(0)
+ self.assertEqual(len(m.Z), 9)
+ self.assertIn((2,0,5), m.Z)
+
+ def test_setproduct_toolong_val(self):
+ m = ConcreteModel()
+ m.I = Set(initialize=[1,2,3])
+ m.J = Set(initialize=[4,5,6])
+ m.Z = m.I * m.J
+ self.assertIn((2,5), m.Z)
+ self.assertNotIn((2,5,3), m.Z)
+
+ m = ConcreteModel()
+ m.I = Set(initialize=[1,2,3])
+ m.J = Set(initialize=[4,5,6], dimen=None)
+ m.Z = m.I * m.J
+ self.assertIn((2,5), m.Z)
+ self.assertNotIn((2,5,3), m.Z)
+
class TestGlobalSets(unittest.TestCase):
def test_globals(self):
@@ -2734,6 +3265,165 @@ def test_name(self):
self.assertEqual(str(Reals), 'Reals')
self.assertEqual(str(Integers), 'Integers')
+ def test_iteration(self):
+ with self.assertRaisesRegexp(
+ TypeError, "'GlobalSet' object is not iterable "
+ "\(non-finite Set 'Reals' is not iterable\)"):
+ iter(Reals)
+
+ with self.assertRaisesRegexp(
+ TypeError, "'GlobalSet' object is not iterable "
+ "\(non-finite Set 'Integers' is not iterable\)"):
+ iter(Integers)
+
+ self.assertEqual(list(iter(Binary)), [0,1])
+
+ def test_declare(self):
+ NS = {}
+ DeclareGlobalSet(RangeSet( name='TrinarySet',
+ ranges=(NR(0,2,1),) ),
+ NS)
+ self.assertEqual(list(NS['TrinarySet']), [0,1,2])
+ a = pickle.loads(pickle.dumps(NS['TrinarySet']))
+ self.assertIs(a, NS['TrinarySet'])
+ with self.assertRaisesRegex(
+ NameError, "name 'TrinarySet' is not defined"):
+ TrinarySet
+ del SetModule.GlobalSets['TrinarySet']
+ del NS['TrinarySet']
+
+ # Now test the automatic identification of the globals() scope
+ DeclareGlobalSet(RangeSet( name='TrinarySet',
+ ranges=(NR(0,2,1),) ))
+ self.assertEqual(list(TrinarySet), [0,1,2])
+ a = pickle.loads(pickle.dumps(TrinarySet))
+ self.assertIs(a, TrinarySet)
+ del SetModule.GlobalSets['TrinarySet']
+ del globals()['TrinarySet']
+ with self.assertRaisesRegex(
+ NameError, "name 'TrinarySet' is not defined"):
+ TrinarySet
+
+ def test_exceptions(self):
+ with self.assertRaisesRegex(
+ RuntimeError, "Duplicate Global Set declaration, Reals"):
+ DeclareGlobalSet(RangeSet( name='Reals', ranges=(NR(0,2,1),) ))
+
+ # But repeat declarations are OK
+ a = Reals
+ DeclareGlobalSet(Reals)
+ self.assertIs(a, Reals)
+ self.assertIs(a, globals()['Reals'])
+ self.assertIs(a, SetModule.GlobalSets['Reals'])
+
+ NS = {}
+ ts = DeclareGlobalSet(
+ RangeSet(name='TrinarySet', ranges=(NR(0,2,1),)), NS)
+ self.assertIs(NS['TrinarySet'], ts)
+
+ # Repeat declaration is OK
+ DeclareGlobalSet(ts, NS)
+ self.assertIs(NS['TrinarySet'], ts)
+
+ # but conflicting one raises exception
+ NS['foo'] = None
+ with self.assertRaisesRegex(
+ RuntimeError, "Refusing to overwrite global object, foo"):
+ DeclareGlobalSet(
+ RangeSet( name='foo', ranges=(NR(0,2,1),) ), NS)
+
+ def test_RealSet_IntegerSet(self):
+ a = SetModule.RealSet()
+ self.assertEqual(a, Reals)
+ self.assertIsNot(a, Reals)
+
+ a = SetModule.RealSet(bounds=(1,3))
+ self.assertEqual(a.bounds(), (1,3))
+
+ a = SetModule.IntegerSet()
+ self.assertEqual(a, Integers)
+ self.assertIsNot(a, Integers)
+
+ a = SetModule.IntegerSet(bounds=(1,3))
+ self.assertEqual(a.bounds(), (1,3))
+ self.assertEqual(list(a), [1,2,3])
+
+ with self.assertRaisesRegex(
+ RuntimeError, "Unexpected keyword arguments: \{'foo': 5\}"):
+ IntegerSet(foo=5)
+
+ def test_intervals(self):
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.RealInterval()
+ self.assertIn("RealInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, Reals)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.RealInterval(bounds=(0,None))
+ self.assertIn("RealInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, NonNegativeReals)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.RealInterval(bounds=5)
+ self.assertIn("RealInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, RangeSet(1,5,0))
+
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.RealInterval(bounds=(5,))
+ self.assertIn("RealInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, RangeSet(1,5,0))
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval()
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, Integers)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=(0,None))
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, NonNegativeIntegers)
+ self.assertFalse(a.isfinite())
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=(None,-1))
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, NegativeIntegers)
+ self.assertFalse(a.isfinite())
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=(-float('inf'),-1))
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(a, NegativeIntegers)
+ self.assertFalse(a.isfinite())
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=(0,3))
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(list(a), [0,1,2,3])
+ self.assertTrue(a.isfinite())
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=5)
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(list(a), [1,2,3,4,5])
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ a = SetModule.IntegerInterval(bounds=(5,))
+ self.assertIn("IntegerInterval has been deprecated.", output.getvalue())
+ self.assertEqual(list(a), [1,2,3,4,5])
+
def _init_set(m, *args):
n = 1
@@ -2749,9 +3439,9 @@ def test_deprecated_args(self):
with LoggingIntercept(output, 'pyomo.core'):
m.I = Set(virtual=True)
self.assertEqual(len(m.I), 0)
- self.assertEqual(
+ self.assertRegexpMatches(
output.getvalue(),
- "DEPRECATED: Pyomo Sets ignore the 'virtual' keyword argument\n")
+ "^DEPRECATED: Pyomo Sets ignore the 'virtual' keyword argument")
def test_scalar_set_initialize_and_iterate(self):
m = ConcreteModel()
@@ -2763,7 +3453,7 @@ def test_scalar_set_initialize_and_iterate(self):
m = ConcreteModel()
with self.assertRaisesRegexp(
- KeyError, "Cannot treat the scalar component 'I'"
+ KeyError, "Cannot treat the scalar component 'I' "
"as an indexed component"):
m.I = Set(initialize={1:(1,3,2,4)})
@@ -2797,7 +3487,7 @@ def I_init(m):
with LoggingIntercept(output, 'pyomo.core'):
m = ConcreteModel()
m.I = Set(initialize={1,3,2,4})
- ref = "Initializing an ordered Set with a " \
+ ref = "Initializing ordered Set I with a " \
"fundamentally unordered data source (type: set)."
self.assertIn(ref, output.getvalue())
self.assertEqual(m.I.sorted_data(), (1,2,3,4))
@@ -2842,6 +3532,16 @@ def I_init(m):
self.assertEqual(m.I.data(), (4,3,2,1))
self.assertEqual(m.I.dimen, 1)
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ with self.assertRaisesRegexp(
+ TypeError, "'int' object is not iterable"):
+ m = ConcreteModel()
+ m.I = Set(initialize=5)
+ ref = "Initializer for Set I returned non-iterable object " \
+ "of type int."
+ self.assertIn(ref, output.getvalue())
+
def test_insertion_deletion(self):
def _verify(_s, _l):
self.assertTrue(_s.isordered())
@@ -3112,14 +3812,33 @@ def _verify(_s, _l):
m.I = [0,-1,1]
_verify(m.I, [-1,0,1])
+ def test_multiple_insertion(self):
+ m = ConcreteModel()
+ m.I = Set(ordered=True, initialize=[1])
+
+ self.assertEqual(m.I.add(3,2,4), 3)
+ self.assertEqual(tuple(m.I.data()), (1,3,2,4))
+
+ self.assertEqual(m.I.add(1,5,4), 1)
+ self.assertEqual(tuple(m.I.data()), (1,3,2,4,5))
+
+
def test_indexed_set(self):
# Implicit construction
m = ConcreteModel()
m.I = Set([1,2,3], ordered=False)
- self.assertEqual(len(m.I), 3)
+ self.assertEqual(len(m.I), 0)
+ m.I[1]
+ self.assertEqual(len(m.I), 1)
+ self.assertEqual(m.I[1], [])
+
+ self.assertEqual(m.I[2], [])
+ self.assertEqual(len(m.I), 2)
+
m.I[1].add(1)
m.I[2].add(2)
m.I[3].add(4)
+ self.assertEqual(len(m.I), 3)
self.assertEqual(list(m.I[1]), [1])
self.assertEqual(list(m.I[2]), [2])
self.assertEqual(list(m.I[3]), [4])
@@ -3167,6 +3886,18 @@ def test_indexed_set(self):
self.assertIs(type(m.I[2]), _SortedSetData)
self.assertIs(type(m.I[3]), _SortedSetData)
+ # Explicit (procedural) construction
+ m = ConcreteModel()
+ m.I = Set([1,2,3], ordered=True)
+ self.assertEqual(len(m.I), 0)
+ m.I[1] = [1,2,3]
+ m.I[(2,)] = [4,5,6]
+ # test index mapping
+ self.assertEqual(sorted(m.I._data.keys()), [1,2])
+ self.assertEqual(list(m.I[1]), [1,2,3])
+ self.assertEqual(list(m.I[2]), [4,5,6])
+
+
def test_naming(self):
m = ConcreteModel()
@@ -3208,6 +3939,7 @@ def test_indexing(self):
def test_add_filter_validate(self):
m = ConcreteModel()
m.I = Set(domain=Integers)
+ self.assertIs(m.I.filter, None)
with self.assertRaisesRegexp(
ValueError,
"Cannot add value 1.5 to Set I.\n"
@@ -3250,9 +3982,12 @@ def test_add_filter_validate(self):
"Element 1 already exists in Set J; no action taken\n")
- def _l_tri(m, i, j):
+ def _l_tri(model, i, j):
+ self.assertIs(model, m)
return i >= j
m.K = Set(initialize=RangeSet(3)*RangeSet(3), filter=_l_tri)
+ self.assertIsInstance(m.K.filter, IndexedCallInitializer)
+ self.assertIs(m.K.filter._fcn, _l_tri)
self.assertEqual(
list(m.K), [(1,1), (2,1), (2,2), (3,1), (3,2), (3,3)])
@@ -3268,7 +4003,8 @@ def _l_tri(m, i, j):
# component. construct() needs to recognize that the filter is
# returning a constant in construct() and re-assign it to be the
# _filter for each _SetData
- def _lt_3(m, i):
+ def _lt_3(model, i):
+ self.assertIs(model, m)
return i < 3
m.L = Set([1,2,3,4,5], initialize=RangeSet(10), filter=_lt_3)
self.assertEqual(len(m.L), 5)
@@ -3283,13 +4019,14 @@ def _lt_3(m, i):
self.assertEqual(list(m.L[2]), [1,2,0])
- def _validate(m,i,j):
+ m = ConcreteModel()
+ def _validate(model,i,j):
+ self.assertIs(model, m)
if i + j < 2:
return True
if i - j > 2:
return False
raise RuntimeError("Bogus value")
- m = ConcreteModel()
m.I = Set(validate=_validate)
output = StringIO()
with LoggingIntercept(output, 'pyomo.core'):
@@ -3327,8 +4064,13 @@ def _validate(m,i,j):
"Set J[2,2]\n")
def test_domain(self):
+ m = ConcreteModel()
+ m.I = Set()
+ self.assertIs(m.I.domain, Any)
+
m = ConcreteModel()
m.I = Set(domain=Integers)
+ self.assertIs(m.I.domain, Integers)
m.I.add(1)
m.I.add(2.)
self.assertEqual(list(m.I), [1, 2.])
@@ -3338,6 +4080,7 @@ def test_domain(self):
m = ConcreteModel()
m.I = Set(within=Integers)
+ self.assertIs(m.I.domain, Integers)
m.I.add(1)
m.I.add(2.)
self.assertEqual(list(m.I), [1, 2.])
@@ -3347,6 +4090,7 @@ def test_domain(self):
m = ConcreteModel()
m.I = Set(bounds=(1,5))
+ self.assertEqual(m.I.domain, RangeSet(1,5,0))
m.I.add(1)
m.I.add(2.)
self.assertEqual(list(m.I), [1, 2.])
@@ -3356,19 +4100,20 @@ def test_domain(self):
m = ConcreteModel()
m.I = Set(domain=Integers, within=RangeSet(0, None, 2), bounds=(0,9))
+ self.assertEqual(m.I.domain, RangeSet(0,9,2))
m.I = [0,2.,4]
self.assertEqual(list(m.I), [0,2.,4])
with self.assertRaisesRegexp(
ValueError, 'The value is not in the domain '
- '\(Integers & \[0:None:2\]\) & \[0..9\]'):
+ '\(Integers & I_domain_index_0_index_1'):
m.I.add(1.5)
with self.assertRaisesRegexp(
ValueError, 'The value is not in the domain '
- '\(Integers & \[0:None:2\]\) & \[0..9\]'):
+ '\(Integers & I_domain_index_0_index_1'):
m.I.add(1)
with self.assertRaisesRegexp(
ValueError, 'The value is not in the domain '
- '\(Integers & \[0:None:2\]\) & \[0..9\]'):
+ '\(Integers & I_domain_index_0_index_1'):
m.I.add(10)
@@ -3387,14 +4132,8 @@ def myFcn(x):
m.N = Integers - Reals
buf = StringIO()
- m.pprint()
m.pprint(ostream=buf)
self.assertEqual(buf.getvalue().strip(), """
-1 RangeSet Declarations
- I_index : Dimen=1, Size=3, Bounds=(1, 3)
- Key : Finite : Members
- None : True : [1:3]
-
6 Set Declarations
I : Size=3, Index=I_index, Ordered=Insertion
Key : Dimen : Domain : Size : Members
@@ -3411,13 +4150,23 @@ def myFcn(x):
Key : Dimen : Domain : Size : Members
None : 2 : Any : 2 : {(3, 4), (1, 2)}
M : Size=1, Index=None, Ordered=False
- Key : Dimen : Domain : Size : Members
- None : 1 : Reals - [0] : Inf : ([None..0) | (0..None])
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Reals - M_index_1 : Inf : ([None..0) | (0..None])
N : Size=1, Index=None, Ordered=False
Key : Dimen : Domain : Size : Members
None : 1 : Integers - Reals : Inf : []
-7 Declarations: I_index I J K L M N""".strip())
+1 RangeSet Declarations
+ I_index : Dimen=1, Size=3, Bounds=(1, 3)
+ Key : Finite : Members
+ None : True : [1:3]
+
+1 SetOf Declarations
+ M_index_1 : Dimen=1, Size=1, Bounds=(0, 0)
+ Key : Ordered : Members
+ None : True : [0]
+
+8 Declarations: I_index I J K L M_index_1 M N""".strip())
def test_pickle(self):
m = ConcreteModel()
@@ -3475,15 +4224,25 @@ def test_dimen(self):
self.assertIsNone(m.L.dimen)
self.assertEqual(list(m.L), [1, (2,3)])
+ a = AbstractModel()
+ a.I = Set(initialize=[1,2,3])
+ self.assertEqual(a.I.dimen, UnknownSetDimen)
+ a.J = Set(initialize=[1,2,3], dimen=1)
+ self.assertEqual(a.J.dimen, 1)
+ m = a.create_instance(data={None:{'I': {None:[(1,2), (3,4)]}}})
+ self.assertEqual(m.I.dimen, 2)
+ self.assertEqual(m.J.dimen, 1)
+
def test_construction(self):
m = AbstractModel()
m.I = Set(initialize=[1,2,3])
m.J = Set(initialize=[4,5,6])
+ m.K = Set(initialize=[(1,4),(2,6),(3,5)], within=m.I*m.J)
m.II = Set([1,2,3], initialize={1:[0], 2:[1,2], 3: xrange(3)})
m.JJ = Set([1,2,3], initialize={1:[0], 2:[1,2], 3: xrange(3)})
+ m.KK = Set([1,2], initialize=[], dimen=lambda m,i: i)
output = StringIO()
- m.pprint()
m.I.pprint(ostream=output)
m.II.pprint(ostream=output)
m.J.pprint(ostream=output)
@@ -3500,20 +4259,50 @@ def test_construction(self):
self.assertEqual(output.getvalue().strip(), ref)
i = m.create_instance(data={
- None: {'I': [-1,0], 'II': {1: [10,11], 3:[30]}}
+ None: {'I': [-1,0], 'II': {1: [10,11], 3:[30]},
+ 'K': [-1, 4, -1, 6, 0, 5]}
})
self.assertEqual(list(i.I), [-1,0])
self.assertEqual(list(i.J), [4,5,6])
+ self.assertEqual(list(i.K), [(-1,4),(-1,6),(0,5)])
self.assertEqual(list(i.II[1]), [10,11])
self.assertEqual(list(i.II[3]), [30])
self.assertEqual(list(i.JJ[1]), [0])
self.assertEqual(list(i.JJ[2]), [1,2])
self.assertEqual(list(i.JJ[3]), [0,1,2])
+ self.assertEqual(list(i.KK[1]), [])
+ self.assertEqual(list(i.KK[2]), [])
# Implicitly-constructed set should fall back on initialize!
self.assertEqual(list(i.II[2]), [1,2])
+ # Additional tests for tuplize:
+ i = m.create_instance(data={
+ None: {'K': [(1,4),(2,6)],
+ 'KK': [1,4,2,6]}
+ })
+ self.assertEqual(list(i.K), [(1,4),(2,6)])
+ self.assertEqual(list(i.KK), [1,2])
+ self.assertEqual(list(i.KK[1]), [1,4,2,6])
+ self.assertEqual(list(i.KK[2]), [(1,4),(2,6)])
+ i = m.create_instance(data={
+ None: {'K': []}
+ })
+ self.assertEqual(list(i.K), [])
+ with self.assertRaisesRegexp(
+ ValueError, "Cannot tuplize list data for set K because "
+ "its length 3 is not a multiple of dimen=2"):
+ i = m.create_instance(data={
+ None: {'K': [1,2,3]}
+ })
+ with self.assertRaisesRegexp(
+ ValueError, "Cannot tuplize list data for set KK\[2\] because "
+ "its length 3 is not a multiple of dimen=2"):
+ i = m.create_instance(data={
+ None: {'KK': {2: [1,2,3]}}
+ })
+
ref = """
Constructing AbstractOrderedSimpleSet 'I' on [Model] from data=None
Constructing Set, name=I, from data=None
@@ -3655,6 +4444,7 @@ def _i_init(m, i):
with self.assertRaisesRegexp(
ValueError, "Set rule returned None instead of Set.End"):
m.I1 = Set(initialize=_i_init)
+
@simple_set_rule
def _j_init(m, i):
if i > 3:
@@ -3663,6 +4453,30 @@ def _j_init(m, i):
m.J = Set(initialize=_j_init)
self.assertEqual(list(m.J), [1,2,3])
+    # Backwards compatibility: Test rule for indexed component that
+ # does not take the index
+ @simple_set_rule
+ def _k_init(m):
+ return [1,2,3]
+ m.K = Set([1], initialize=_k_init)
+ self.assertEqual(list(m.K[1]), [1,2,3])
+
+
+ @simple_set_rule
+ def _l_init(m, l):
+ if l > 3:
+ return None
+ return tuple(range(l))
+ m.L = Set(initialize=_l_init, dimen=None)
+ self.assertEqual(list(m.L), [0, (0,1), (0,1,2)])
+
+ m.M = Set([1,2,3], initialize=_l_init)
+ self.assertEqual(list(m.M), [1,2,3])
+ self.assertEqual(list(m.M[1]), [0])
+ self.assertEqual(list(m.M[2]), [0,1])
+ self.assertEqual(list(m.M[3]), [0,1,2])
+
+
def test_set_skip(self):
# Test Set.Skip
m = ConcreteModel()
@@ -3906,6 +4720,66 @@ def test_sorted_operations(self):
self.assertEqual(I.ord(0), i+1)
self.assertTrue(I._is_sorted)
+ def test_process_setarg(self):
+ m = AbstractModel()
+ m.I = Set([1,2,3])
+ self.assertTrue(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ m = AbstractModel()
+ m.I = Set({1,2,3})
+ self.assertTrue(m.I.index_set().is_constructed())
+ self.assertFalse(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ m = AbstractModel()
+ m.I = Set(RangeSet(3))
+ self.assertTrue(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ m = AbstractModel()
+ m.p = Param(initialize=3)
+ m.I = Set(RangeSet(m.p))
+ self.assertFalse(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ m = AbstractModel()
+ m.I = Set(lambda m: [1,2,3])
+ self.assertFalse(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ def _i_idx(m):
+ return [1,2,3]
+ m = AbstractModel()
+ m.I = Set(_i_idx)
+ self.assertFalse(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = m.create_instance()
+ self.assertEqual(i.I.index_set(), [1,2,3])
+
+ # Note: generators are uncopyable, so we will mock up the same
+ # behavior as above using an unconstructed block
+ def _i_idx():
+ yield 1
+ yield 2
+ yield 3
+ m = Block()
+ m.I = Set(_i_idx())
+ self.assertFalse(m.I.index_set().is_constructed())
+ self.assertTrue(m.I.index_set().isordered())
+ i = ConcreteModel()
+ i.m = m
+ self.assertEqual(i.m.I.index_set(), [1,2,3])
+
def test_set_options(self):
output = StringIO()
with LoggingIntercept(output, 'pyomo.core'):
@@ -3913,7 +4787,7 @@ def test_set_options(self):
def Bindex(m):
return range(5)
self.assertIn(
- "DEPRECATED: The set_options decorator seems nonessential",
+ "The set_options decorator is deprecated",
output.getvalue())
m = ConcreteModel()
@@ -3921,11 +4795,9 @@ def Bindex(m):
m.J = m.I.cross(Bindex)
self.assertIs(m.J._sets[1]._domain, Integers)
- # TODO: Once this is merged into IndexedContainer, the following
- # should work
- #
- #m.K = Set(Bindex)
- #self.assertIs(m.K.index_set()._domain, Integers)
+ m.K = Set(Bindex)
+ self.assertIs(m.K.index_set()._domain, Integers)
+ self.assertEqual(m.K.index_set(), [0,1,2,3,4])
def test_no_normalize_index(self):
try:
@@ -3992,15 +4864,21 @@ def test_SetData(self):
# __contains__
None in s
- self.assertFalse(s == m.I)
- self.assertFalse(m.I == s)
- self.assertTrue(s != m.I)
- self.assertTrue(m.I != s)
+ with self.assertRaises(DeveloperError):
+ s == m.I
+ with self.assertRaises(DeveloperError):
+ m.I == s
+ with self.assertRaises(DeveloperError):
+ s != m.I
+ with self.assertRaises(DeveloperError):
+ m.I != s
with self.assertRaises(DeveloperError):
str(s)
with self.assertRaises(DeveloperError):
s.dimen
+ with self.assertRaises(DeveloperError):
+ s.domain
self.assertFalse(s.isfinite())
self.assertFalse(s.isordered())
@@ -4015,9 +4893,11 @@ def test_SetData(self):
with self.assertRaises(DeveloperError):
s.issuperset(m.I)
- self.assertFalse(m.I.issuperset(s))
+ with self.assertRaises(DeveloperError):
+ m.I.issuperset(s)
- self.assertFalse(s.issubset(m.I))
+ with self.assertRaises(DeveloperError):
+ s.issubset(m.I)
with self.assertRaises(DeveloperError):
m.I.issubset(s)
@@ -4053,13 +4933,15 @@ def test_SetData(self):
self.assertIs(type(s * m.I), SetProduct_InfiniteSet)
self.assertIs(type(m.I * s), SetProduct_InfiniteSet)
- self.assertFalse(s < m.I)
with self.assertRaises(DeveloperError):
- self.assertFalse(m.I < s)
+ s < m.I
+ with self.assertRaises(DeveloperError):
+ m.I < s
with self.assertRaises(DeveloperError):
- self.assertFalse(s > m.I)
- self.assertFalse(m.I > s)
+ s > m.I
+ with self.assertRaises(DeveloperError):
+ m.I > s
def test_FiniteMixin(self):
# This tests an anstract finite set API
@@ -4091,6 +4973,8 @@ class FiniteMixin(_FiniteSetMixin, _SetData):
str(s)
with self.assertRaises(DeveloperError):
s.dimen
+ with self.assertRaises(DeveloperError):
+ s.domain
self.assertTrue(s.isfinite())
self.assertFalse(s.isordered())
@@ -4213,6 +5097,8 @@ class OrderedMixin(_OrderedSetMixin, _FiniteSetMixin, _SetData):
str(s)
with self.assertRaises(DeveloperError):
s.dimen
+ with self.assertRaises(DeveloperError):
+ s.domain
self.assertTrue(s.isfinite())
self.assertTrue(s.isordered())
@@ -4444,6 +5330,33 @@ def test_get_discrete_interval(self):
class TestDeprecation(unittest.TestCase):
+ def test_filter(self):
+ m = ConcreteModel()
+ m.I = Set(initialize=[1,2,3])
+ m.J = m.I*m.I
+ m.K = Set(initialize=[1,2,3], filter=lambda m,i: i%2)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
+ self.assertIsNone(m.I.filter)
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: 'filter' is no longer a public attribute")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
+ self.assertIsNone(m.J.filter)
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: 'filter' is no longer a public attribute")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
+ self.assertIsInstance(m.K.filter, IndexedCallInitializer)
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: 'filter' is no longer a public attribute")
+
def test_virtual(self):
m = ConcreteModel()
m.I = Set(initialize=[1,2,3])
@@ -4452,16 +5365,28 @@ def test_virtual(self):
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
self.assertFalse(m.I.virtual)
- self.assertIn(
- "The 'virtual' flag is no longer supported",
- output.getvalue())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'virtual' attribute is no longer supported")
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
self.assertTrue(m.J.virtual)
- self.assertIn(
- "The 'virtual' flag is no longer supported",
- output.getvalue())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'virtual' attribute is no longer supported")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ m.J.virtual = True
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'virtual' attribute is no longer supported")
+ with self.assertRaisesRegexp(
+ ValueError,
+ "Attempting to set the \(deprecated\) 'virtual' attribute on J "
+ "to an invalid value \(False\)"):
+ m.J.virtual = False
def test_concrete(self):
m = ConcreteModel()
@@ -4471,23 +5396,106 @@ def test_concrete(self):
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
self.assertTrue(m.I.concrete)
- self.assertIn(
- "The 'concrete' flag is no longer supported",
- output.getvalue())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'concrete' attribute is no longer supported")
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
self.assertTrue(m.J.concrete)
- self.assertIn(
- "The 'concrete' flag is no longer supported",
- output.getvalue())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'concrete' attribute is no longer supported")
output = StringIO()
with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
self.assertFalse(Reals.concrete)
- self.assertIn(
- "The 'concrete' flag is no longer supported",
- output.getvalue())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'concrete' attribute is no longer supported")
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ m.J.concrete = True
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'concrete' attribute is no longer supported.")
+ with self.assertRaisesRegexp(
+ ValueError,
+ "Attempting to set the \(deprecated\) 'concrete' attribute on "
+ "J to an invalid value \(False\)"):
+ m.J.concrete = False
+
+ def test_ordered_attr(self):
+ m = ConcreteModel()
+ m.J = Set(ordered=True)
+ m.K = Set(ordered=False)
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ self.assertTrue(m.J.ordered)
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'ordered' attribute is no longer supported.")
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ self.assertFalse(m.K.ordered)
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'ordered' attribute is no longer supported.")
+
+ def test_value_attr(self):
+ m = ConcreteModel()
+ m.J = Set(ordered=True, initialize=[1,3,2])
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ tmp = m.J.value
+ self.assertIs(type(tmp), set)
+ self.assertEqual(tmp, set([1,3,2]))
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: The 'value' attribute is deprecated. Use .data\(\)")
+
+ def test_value_list_attr(self):
+ m = ConcreteModel()
+ m.J = Set(ordered=True, initialize=[1,3,2])
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ tmp = m.J.value_list
+ self.assertIs(type(tmp), list)
+ self.assertEqual(tmp, list([1,3,2]))
+ self.assertRegexpMatches(
+ output.getvalue().replace('\n',' '),
+ "^DEPRECATED: The 'value_list' attribute is deprecated. "
+ "Use .ordered_data\(\)")
+
+ def test_check_values(self):
+ m = ConcreteModel()
+ m.I = Set(ordered=True, initialize=[1,3,2])
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ self.assertTrue(m.I.check_values())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: check_values\(\) is deprecated: Sets only "
+ "contain valid")
+
+ m.J = m.I*m.I
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core', logging.DEBUG):
+ self.assertTrue(m.J.check_values())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: check_values\(\) is deprecated:")
+
+ # We historically supported check_values on indexed sets
+ m.K = Set([1,2], ordered=True, initialize=[1,3,2])
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ self.assertTrue(m.K.check_values())
+ self.assertRegexpMatches(
+ output.getvalue(),
+ "^DEPRECATED: check_values\(\) is deprecated: Sets only "
+ "contain valid")
class TestIssues(unittest.TestCase):
@@ -4515,8 +5523,15 @@ def test_issue_116(self):
m.s = Set(initialize=['one'])
m.t = Set([1], initialize=['one'])
m.x = Var(m.s)
+
+ output = StringIO()
+ with LoggingIntercept(output, 'pyomo.core'):
+ self.assertTrue(m.s in m.s)
+ self.assertIn(
+ "Testing for set subsets with 'a in b' is deprecated.",
+ output.getvalue()
+ )
if PY2:
- self.assertFalse(m.s in m.s)
self.assertFalse(m.s in m.t)
with self.assertRaisesRegexp(KeyError, "Index 's' is not valid"):
m.x[m.s].display()
@@ -4524,8 +5539,6 @@ def test_issue_116(self):
# Note that pypy raises a different exception from cpython
err = "((unhashable type: 'OrderedSimpleSet')" \
"|('OrderedSimpleSet' objects are unhashable))"
- with self.assertRaisesRegexp(TypeError, err):
- self.assertFalse(m.s in m.s)
with self.assertRaisesRegexp(TypeError, err):
self.assertFalse(m.s in m.t)
with self.assertRaisesRegexp(TypeError, err):
@@ -4786,6 +5799,19 @@ def objective_rule(model_arg):
output = StringIO()
m.pprint(ostream=output)
ref = """
+3 Set Declarations
+ arc_keys : Set of arcs
+ Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 2 : arc_keys_domain : 2 : {(0, 0), (0, 1)}
+ arc_keys_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : node_keys*node_keys : 4 : {(0, 0), (0, 1), (1, 0), (1, 1)}
+ node_keys : Set of nodes
+ Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {0, 1}
+
1 Var Declarations
arc_variables : Size=2, Index=arc_keys
Key : Lower : Value : Upper : Fixed : Stale : Domain
@@ -4797,17 +5823,7 @@ def objective_rule(model_arg):
Key : Active : Sense : Expression
None : True : minimize : arc_variables[0,0] + arc_variables[0,1]
-2 Set Declarations
- arc_keys : Set of arcs
- Size=1, Index=None, Ordered=Insertion
- Key : Dimen : Domain : Size : Members
- None : 2 : node_keys*node_keys : 2 : {(0, 0), (0, 1)}
- node_keys : Set of nodes
- Size=1, Index=None, Ordered=Insertion
- Key : Dimen : Domain : Size : Members
- None : 1 : Any : 2 : {0, 1}
-
-4 Declarations: node_keys arc_keys arc_variables obj
+5 Declarations: node_keys arc_keys_domain arc_keys arc_variables obj
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
@@ -4816,6 +5832,19 @@ def objective_rule(model_arg):
output = StringIO()
m.pprint(ostream=output)
ref = """
+3 Set Declarations
+ arc_keys : Set of arcs
+ Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : None : arc_keys_domain : 2 : {ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=0)), ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=1))}
+ arc_keys_domain : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : None : node_keys*node_keys : 4 : {(NodeKey(id=0), NodeKey(id=0)), (NodeKey(id=0), NodeKey(id=1)), (NodeKey(id=1), NodeKey(id=0)), (NodeKey(id=1), NodeKey(id=1))}
+ node_keys : Set of nodes
+ Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : None : Any : 2 : {NodeKey(id=0), NodeKey(id=1)}
+
1 Var Declarations
arc_variables : Size=2, Index=arc_keys
Key : Lower : Value : Upper : Fixed : Stale : Domain
@@ -4827,19 +5856,25 @@ def objective_rule(model_arg):
Key : Active : Sense : Expression
None : True : minimize : arc_variables[ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=0))] + arc_variables[ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=1))]
-2 Set Declarations
- arc_keys : Set of arcs
- Size=1, Index=None, Ordered=Insertion
- Key : Dimen : Domain : Size : Members
- None : None : node_keys*node_keys : 2 : {ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=0)), ArcKey(node_from=NodeKey(id=0), node_to=NodeKey(id=1))}
- node_keys : Set of nodes
- Size=1, Index=None, Ordered=Insertion
- Key : Dimen : Domain : Size : Members
- None : None : Any : 2 : {NodeKey(id=0), NodeKey(id=1)}
-
-4 Declarations: node_keys arc_keys arc_variables obj
+5 Declarations: node_keys arc_keys_domain arc_keys arc_variables obj
""".strip()
self.assertEqual(output.getvalue().strip(), ref)
finally:
normalize_index.flatten = _oldFlatten
+
+ def test_issue_1375(self):
+ def a_rule(m):
+ for i in range(0):
+ yield i
+
+ def b_rule(m):
+ for i in range(3):
+ for j in range(0):
+ yield i, j
+
+ m = ConcreteModel()
+ m.a = Set(initialize=a_rule, dimen=1)
+ self.assertEqual(len(m.a), 0)
+ m.b = Set(initialize=b_rule, dimen=2)
+ self.assertEqual(len(m.b), 0)
diff --git a/pyomo/core/tests/unit/test_sets.py b/pyomo/core/tests/unit/test_sets.py
index 61325c58706..33ebd1edb61 100644
--- a/pyomo/core/tests/unit/test_sets.py
+++ b/pyomo/core/tests/unit/test_sets.py
@@ -2,8 +2,8 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
@@ -38,9 +38,9 @@
import pyutilib.th as unittest
import pyomo.core.base
-from pyomo.core.base.set_types import _AnySet
from pyomo.environ import *
-from pyomo.core.kernel.set_types import _VirtualSet
+from pyomo.core.base.set import _AnySet, RangeDifferenceError
+from pyomo.core.base.component import CloneError
_has_numpy = False
try:
@@ -157,7 +157,7 @@ def test_addInvalid(self):
# This verifies that by default, all set elements are valid. That
# is, the default within is None
#
- self.assertEqual( self.instance.A.domain, None)
+ self.assertEqual( self.instance.A.domain, Any)
self.instance.A.add('2','3','4')
self.assertFalse( '2' not in self.instance.A, "Found invalid new element in A")
@@ -188,7 +188,11 @@ def test_iterator(self):
self.tmp = set()
for val in self.instance.A:
self.tmp.add(val)
- self.assertFalse( self.tmp != self.instance.A.data(), "Set values found by the iterator appear to be different from the underlying set (%s) (%s)" % (str(self.tmp), str(self.instance.A.data())))
+ self.assertTrue(
+ self.tmp == set(self.instance.A.data()),
+ "Set values found by the iterator appear to be different from "
+ "the underlying set (%s) (%s)" % (
+ str(self.tmp), str(self.instance.A.data())))
def test_eq1(self):
"""Various checks for set equality and inequality (1)"""
@@ -289,8 +293,8 @@ def evenFilter(model, el):
# would be immediately constructed and would never see the
# filter
m = AbstractModel()
- m.tmp = Set(initialize=range(0,10))
- m.tmp.filter = evenFilter
+ m.tmp = Set(initialize=range(0,10), filter=evenFilter)
+ #m.tmp.filter = evenFilter
m.tmp.construct()
self.assertEqual(sorted([x for x in m.tmp]), [0,2,4,6,8])
@@ -401,11 +405,14 @@ def setUp(self):
def test_clear(self):
"""Check the clear() method empties the set"""
- try:
- self.instance.A.clear()
- self.fail("Expected TypeError because a RangeSet is a virtual set")
- except TypeError:
- pass
+ # After the Set rewrite, RangeSet objects can be cleared
+ # try:
+ # self.instance.A.clear()
+ # self.fail("Expected TypeError because a RangeSet is a virtual set")
+ # except TypeError:
+ # pass
+ self.instance.A.clear()
+ self.assertEqual(len(self.instance.A), 0)
def test_virtual(self):
"""Check if this is a virtual set"""
@@ -433,7 +440,8 @@ def test_bounds(self):
def test_addValid(self):
"""Check that we can add valid set elements"""
- pass
+ with self.assertRaises(AttributeError):
+ self.instance.A.add(6)
def test_addInvalid(self):
"""Check that we get an error when adding invalid set elements"""
@@ -441,26 +449,22 @@ def test_addInvalid(self):
# This verifies that by default, all set elements are valid. That
# is, the default within is None
#
- try:
+ with self.assertRaises(AttributeError):
self.instance.A.add('2','3','4')
- self.fail("Expected to generate an error when we remove an element from a RangeSet")
- except TypeError:
- pass
- self.assertFalse( '2' in self.instance.A, "Value we attempted to add is not in A")
+ self.assertFalse( '2' in self.instance.A,
+ "Value we attempted to add is not in A")
def test_removeValid(self):
"""Check that we can remove a valid set element"""
- try:
+ with self.assertRaises(AttributeError):
self.instance.A.remove(self.e3)
- self.fail("Expected to generate an error when we remove an element from a RangeSet")
- except KeyError:
- pass
self.assertEqual( len(self.instance.A), 5)
self.assertTrue( self.e3 in self.instance.A, "Element is still in A")
def test_removeInvalid(self):
"""Check that we fail to remove an invalid set element"""
- self.assertRaises(KeyError, self.instance.A.remove, 6)
+ with self.assertRaises(AttributeError):
+ self.instance.A.remove(6)
self.assertEqual( len(self.instance.A), 5)
def test_remove(self):
@@ -469,13 +473,11 @@ def test_remove(self):
def test_discardValid(self):
"""Check that we can discard a valid set element"""
- try:
+ with self.assertRaises(AttributeError):
self.instance.A.discard(self.e3)
- self.fail("Expected to generate an error when we discare an element from a RangeSet")
- except KeyError:
- pass
self.assertEqual( len(self.instance.A), 5)
- self.assertTrue( self.e3 in self.instance.A, "Found element in A that attemped to discard")
+ self.assertTrue( self.e3 in self.instance.A,
+ "Found element in A that attemped to discard")
def test_discardInvalid(self):
"""Check that we fail to remove an invalid set element without an exception"""
@@ -507,8 +509,8 @@ def test_filter_attribute(self):
""" Check that RangeSets can filter out unwanted elements """
def evenFilter(model, el):
return el % 2 == 0
- self.instance.tmp = RangeSet(0,10)
- self.instance.tmp.filter = evenFilter
+ self.instance.tmp = RangeSet(0,10, filter=evenFilter)
+ #self.instance.tmp.filter = evenFilter
self.instance.tmp.construct()
self.assertEqual(sorted([x for x in self.instance.tmp]), [0,2,4,6,8,10])
@@ -523,10 +525,10 @@ def setUp(self):
#
# Create model instance
#
- def validate_fn(model, val):
+ def filter_fn(model, val):
return (val >= 1) and (val <= 5)
- self.model.A = RangeSet(1,10, validate=validate_fn)
+ self.model.A = RangeSet(1,10, filter=filter_fn)
#
# Misc datasets
#
@@ -603,22 +605,22 @@ def test_ImmutableParams(self):
model.lb = Param(initialize=1)
model.ub = Param(initialize=5)
model.A = RangeSet(model.lb, model.ub)
- self.assertEqual( model.A.data(), set([1,2,3,4,5]) )
+ self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) )
def test_MutableParams(self):
model = ConcreteModel()
model.lb = Param(initialize=1, mutable=True)
model.ub = Param(initialize=5, mutable=True)
model.A = RangeSet(model.lb, model.ub)
- self.assertEqual( model.A.data(), set([1,2,3,4,5]) )
+ self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) )
model.lb = 2
model.ub = 4
model.B = RangeSet(model.lb, model.ub)
# Note: rangesets are constant -- even if the mutable param
# under the hood changes
- self.assertEqual( model.A.data(), set([1,2,3,4,5]) )
- self.assertEqual( model.B.data(), set([2,3,4]) )
+ self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) )
+ self.assertEqual( set(model.B.data()), set([2,3,4]) )
def test_Expressions(self):
model = ConcreteModel()
@@ -626,14 +628,14 @@ def test_Expressions(self):
model.lb = Expression(expr=model.p*2-1)
model.ub = Expression(expr=model.p*5)
model.A = RangeSet(model.lb, model.ub)
- self.assertEqual( model.A.data(), set([1,2,3,4,5]) )
+ self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) )
model.p = 2
model.B = RangeSet(model.lb, model.ub)
# Note: rangesets are constant -- even if the mutable param
# under the hood changes
- self.assertEqual( model.A.data(), set([1,2,3,4,5]) )
- self.assertEqual( model.B.data(), set([3,4,5,6,7,8,9,10]) )
+ self.assertEqual( set(model.A.data()), set([1,2,3,4,5]) )
+ self.assertEqual( set(model.B.data()), set([3,4,5,6,7,8,9,10]) )
@@ -715,7 +717,7 @@ def setUp(self):
self.e6='A6'
def test_bounds(self):
- self.assertEqual( self.instance.A.bounds(), None)
+ self.assertEqual( self.instance.A.bounds(), ('A1','A7'))
class SimpleSetC(SimpleSetA):
@@ -768,7 +770,7 @@ def tearDown(self):
PyomoModel.tearDown(self)
def test_bounds(self):
- self.assertEqual( self.instance.A.bounds(), None)
+ self.assertEqual( self.instance.A.bounds(), (('A1',1), ('A7',1)))
def test_addInvalid(self):
"""Check that we get an error when adding invalid set elements"""
@@ -776,7 +778,7 @@ def test_addInvalid(self):
# This verifies that by default, all set elements are valid. That
# is, the default within is None
#
- self.assertEqual( self.instance.A.domain, None)
+ self.assertEqual( self.instance.A.domain, Any)
try:
self.instance.A.add('2','3','4')
except ValueError:
@@ -833,7 +835,7 @@ def setUp(self):
def test_numpy_bool(self):
model = ConcreteModel()
model.A = Set(initialize=[numpy.bool_(False), numpy.bool_(True)])
- self.assertEqual( model.A.bounds(), None)
+ self.assertEqual( model.A.bounds(), (0,1))
def test_numpy_int(self):
model = ConcreteModel()
@@ -904,15 +906,13 @@ def test_getitem(self):
def test_setitem(self):
"""Check the access to items"""
- try:
- self.model.Z = Set(initialize=['A','C'])
- self.model.A = Set(self.model.Z,initialize={'A':[1]})
- self.instance = self.model.create_instance()
- tmp=[1,6,9]
- self.instance.A['A'] = tmp
- self.instance.A['C'] = tmp
- except:
- self.fail("Problems setting a valid set into a set array")
+ self.model.Z = Set(initialize=['A','C'])
+ self.model.A = Set(self.model.Z,initialize={'A':[1]})
+ self.instance = self.model.create_instance()
+ tmp=[1,6,9]
+ self.instance.A['A'] = tmp
+ self.instance.A['C'] = tmp
+
try:
self.instance.A['D'] = tmp
except KeyError:
@@ -928,19 +928,24 @@ def test_keys(self):
def test_len(self):
"""Check that a simple set of numeric elements has the right size"""
- try:
- len(self.instance.A)
- except TypeError:
- self.fail("fail test_len")
- else:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # len(self.instance.A)
+ # except TypeError:
+ # self.fail("fail test_len")
+ # else:
+ # pass
+ self.assertEqual(len(self.instance.A), 2)
def test_data(self):
"""Check that we can access the underlying set data"""
- try:
+ # try:
+ # self.instance.A.data()
+ # except:
+ # self.fail("Expected data() method to pass")
+ with self.assertRaisesRegexp(
+ AttributeError, ".*no attribute 'data'"):
self.instance.A.data()
- except:
- self.fail("Expected data() method to pass")
def test_dim(self):
"""Check that a simple set has dimension zero for its indexing"""
@@ -954,19 +959,22 @@ def test_clear(self):
def test_virtual(self):
"""Check if this is not a virtual set"""
- try:
+ # try:
+ # self.instance.A.virtual
+ # except:
+ # pass
+ # else:
+ # self.fail("Set arrays do not have a virtual data element")
+ with self.assertRaisesRegexp(
+ AttributeError, ".*no attribute 'virtual'"):
self.instance.A.virtual
- except:
- pass
- else:
- self.fail("Set arrays do not have a virtual data element")
def test_check_values(self):
"""Check if the values added to this set are valid"""
#
# This should not throw an exception here
#
- self.instance.A.check_values()
+ self.assertTrue( self.instance.A.check_values() )
def test_first(self):
"""Check that we can get the 'first' value in the set"""
@@ -1050,48 +1058,64 @@ def test_contains(self):
def test_or(self):
"""Check that set union works"""
- try:
- self.instance.A | self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("fail test_or")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.A | self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("fail test_or")
+ self.assertEqual(self.instance.A | self.instance.tmpset3,
+ self.instance.A)
def test_and(self):
"""Check that set intersection works"""
- try:
- self.instance.tmp = self.instance.A & self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("fail test_and")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.tmp = self.instance.A & self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("fail test_and")
+ self.assertEqual(self.instance.A & self.instance.tmpset3,
+ EmptySet)
def test_xor(self):
"""Check that set exclusive or works"""
- try:
- self.instance.A ^ self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("fail test_xor")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.A ^ self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("fail test_xor")
+ self.assertEqual(self.instance.A ^ self.instance.tmpset3,
+ self.instance.A)
def test_diff(self):
"""Check that set difference works"""
- try:
- self.instance.A - self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("fail test_diff")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.A - self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("fail test_diff")
+ self.assertEqual(self.instance.A - self.instance.tmpset3,
+ self.instance.A)
def test_mul(self):
"""Check that set cross-product works"""
- try:
- self.instance.A * self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("fail test_mul")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.A * self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("fail test_mul")
+ # Note: cross product with an empty set is an empty set
+ self.assertEqual(self.instance.A * self.instance.tmpset3,
+ [])
def test_override_values(self):
m = ConcreteModel()
@@ -1140,7 +1164,7 @@ def setUp(self):
self.e1=('A1',1)
def test_bounds(self):
- self.assertEqual( self.instance.A['A',1].bounds(), None)
+ self.assertEqual( self.instance.A['A',1].bounds(), (1,7))
def test_getitem(self):
"""Check the access to items"""
@@ -1343,23 +1367,34 @@ def test_bounds(self):
def test_inequality_comparison_fails(self):
x = RealSet()
y = RealSet()
- with self.assertRaises(TypeError):
- x < y
- with self.assertRaises(TypeError):
- x <= y
- with self.assertRaises(TypeError):
- x > y
- with self.assertRaises(TypeError):
- x >= y
+ # In the set rewrite, the following now works!
+ # with self.assertRaises(TypeError):
+ # x < y
+ # with self.assertRaises(TypeError):
+ # x <= y
+ # with self.assertRaises(TypeError):
+ # x > y
+ # with self.assertRaises(TypeError):
+ # x >= y
+ self.assertFalse(x < y)
+ self.assertTrue(x <= y)
+ self.assertFalse(x > y)
+ self.assertTrue(x >= y)
def test_name(self):
x = RealSet()
- self.assertEqual(x.name, None)
- self.assertTrue('RealSet' in str(x))
+ # After the set rewrite, RealSet is implemented on top of the
+ # Reals global set
+ #
+ #self.assertEqual(x.name, None)
+ #self.assertTrue('RealSet' in str(x))
+ self.assertEqual(x.name, 'Reals')
+ self.assertEqual('Reals', str(x))
x = RealSet(name="x")
self.assertEqual(x.name, 'x')
self.assertEqual(str(x), 'x')
+ @unittest.skip("_VirtualSet was removed during the set rewrite")
def test_contains(self):
x = _VirtualSet()
self.assertTrue(None in x)
@@ -1475,8 +1510,7 @@ def test_UnitInterval(self):
def test_RealInterval(self):
x = RealInterval()
- self.assertEqual(x.name,
- "RealInterval(None, None)")
+ self.assertEqual(x.name, "RealInterval(None, None)")
self.assertFalse(None in x)
self.assertTrue(10 in x)
self.assertTrue(1.1 in x)
@@ -1489,8 +1523,7 @@ def test_RealInterval(self):
self.assertTrue(-10 in x)
x = RealInterval(bounds=(-1,1))
- self.assertEqual(x.name,
- "RealInterval(-1, 1)")
+ self.assertEqual(x.name, "RealInterval(-1, 1)")
self.assertFalse(10 in x)
self.assertFalse(1.1 in x)
self.assertTrue(1 in x)
@@ -1524,19 +1557,29 @@ def test_bounds(self):
def test_inequality_comparison_fails(self):
x = RealSet()
y = RealSet()
- with self.assertRaises(TypeError):
- x < y
- with self.assertRaises(TypeError):
- x <= y
- with self.assertRaises(TypeError):
- x > y
- with self.assertRaises(TypeError):
- x >= y
+ # In the set rewrite, the following now works!
+ # with self.assertRaises(TypeError):
+ # x < y
+ # with self.assertRaises(TypeError):
+ # x <= y
+ # with self.assertRaises(TypeError):
+ # x > y
+ # with self.assertRaises(TypeError):
+ # x >= y
+ self.assertFalse( x < y )
+ self.assertTrue( x <= y )
+ self.assertFalse( x > y )
+ self.assertTrue( x >= y )
def test_name(self):
x = IntegerSet()
- self.assertEqual(x.name, None)
- self.assertTrue('IntegerSet' in str(x))
+ # After the set rewrite, IntegerSet is implemented on top of the
+ # Integers global set
+ #
+ # self.assertEqual(x.name, None)
+ # self.assertTrue('IntegerSet' in str(x))
+ self.assertEqual(x.name, 'Integers')
+ self.assertEqual('Integers', str(x))
x = IntegerSet(name="x")
self.assertEqual(x.name, 'x')
self.assertEqual(str(x), 'x')
@@ -1620,8 +1663,7 @@ def test_NonNegativeIntegers(self):
def test_IntegerInterval(self):
x = IntegerInterval()
self.assertFalse(None in x)
- self.assertEqual(x.name,
- "IntegerInterval(None, None)")
+ self.assertEqual(x.name, "IntegerInterval(None, None)")
self.assertTrue(10 in x)
self.assertFalse(1.1 in x)
self.assertTrue(1 in x)
@@ -1634,8 +1676,7 @@ def test_IntegerInterval(self):
x = IntegerInterval(bounds=(-1,1))
self.assertFalse(None in x)
- self.assertEqual(x.name,
- "IntegerInterval(-1, 1)")
+ self.assertEqual(x.name, "IntegerInterval(-1, 1)")
self.assertFalse(10 in x)
self.assertFalse(1.1 in x)
self.assertTrue(1 in x)
@@ -1668,19 +1709,30 @@ def test_bounds(self):
def test_inequality_comparison_fails(self):
x = RealSet()
y = RealSet()
- with self.assertRaises(TypeError):
- x < y
- with self.assertRaises(TypeError):
- x <= y
- with self.assertRaises(TypeError):
- x > y
- with self.assertRaises(TypeError):
- x >= y
+ # In the set rewrite, the following now works!
+ # with self.assertRaises(TypeError):
+ # x < y
+ # with self.assertRaises(TypeError):
+ # x <= y
+ # with self.assertRaises(TypeError):
+ # x > y
+ # with self.assertRaises(TypeError):
+ # x >= y
+ self.assertFalse(x < y)
+ self.assertTrue(x <= y)
+ self.assertFalse(x > y)
+ self.assertTrue(x >= y)
def test_name(self):
x = BooleanSet()
- self.assertEqual(x.name, None)
- self.assertTrue('BooleanSet' in str(x))
+ # After the set rewrite, BooleanSet is implemented on top of the
+ # Boolean global set, and BooleanSet and BinarySet are no longer
+ # aliases for each other.
+ #
+ # self.assertEqual(x.name, None)
+ # self.assertTrue('BooleanSet' in str(x))
+ self.assertEqual(x.name, 'Boolean')
+ self.assertEqual('Boolean', str(x))
x = BooleanSet(name="x")
self.assertEqual(x.name, 'x')
self.assertEqual(str(x), 'x')
@@ -1747,7 +1799,7 @@ def setUp(self):
# Create model instance
#
x = _AnySet()
- x.concrete=True
+ #x.concrete=True
self.model.A = x
x.concrete=False
#
@@ -1758,7 +1810,7 @@ def setUp(self):
self.model.tmpset3 = Set(initialize=[2,'3',5,7,9])
y = _AnySet()
- y.concrete=True
+ #y.concrete=True
self.model.setunion = y
y.concrete=False
self.model.setintersection = Set(initialize=[1,'3',5,7])
@@ -1775,7 +1827,8 @@ def setUp(self):
self.e6=6
def test_bounds(self):
- self.assertEqual( self.instance.A.bounds(), None)
+ # In the set rewrite, bounds() always returns a tuple
+ self.assertEqual( self.instance.A.bounds(), (None, None))
def test_contains(self):
"""Various checks for contains() method"""
@@ -1788,25 +1841,26 @@ def test_None1(self):
def test_len(self):
"""Check that the set has the right size"""
- try:
+ # After the set rewrite, this still fails, but with a different
+ # exception:
+ # try:
+ # len(self.instance.A)
+ # except ValueError:
+ # pass
+ # else:
+ # self.fail("test_len failure")
+ with self.assertRaisesRegexp(
+ TypeError, "object of type 'Any' has no len()"):
len(self.instance.A)
- except ValueError:
- pass
- else:
- self.fail("test_len failure")
def test_data(self):
"""Check that we can access the underlying set data"""
- try:
+ with self.assertRaises(AttributeError):
self.instance.A.data()
- except TypeError:
- pass
- else:
- self.fail("test_data failure")
def test_clear(self):
"""Check that the clear() method generates an exception"""
- self.assertRaises(TypeError, self.instance.A.clear)
+ self.assertIsNone(self.instance.A.clear())
def test_virtual(self):
"""Check if this is not a virtual set"""
@@ -1814,15 +1868,18 @@ def test_virtual(self):
def test_discardValid(self):
"""Check that we fail to remove an invalid set element without an exception"""
- self.assertRaises(KeyError, self.instance.A.discard, self.e2)
+ with self.assertRaises(AttributeError):
+ self.instance.A.discard(self.e2)
def test_discardInvalid(self):
"""Check that we fail to remove an invalid set element without an exception"""
- pass
+ with self.assertRaises(AttributeError):
+ self.instance.A.data()
def test_removeValid(self):
"""Check that we can remove a valid set element"""
- self.assertRaises(KeyError, self.instance.A.remove, self.e3)
+ with self.assertRaises(AttributeError):
+ self.instance.A.remove(self.e3)
def test_removeInvalid(self):
pass
@@ -1833,18 +1890,15 @@ def test_addInvalid(self):
def test_addValid(self):
"""Check that we can add valid set elements"""
- self.assertEqual( self.instance.A.domain, None)
- self.assertRaises(TypeError,self.instance.A.add,2)
+ self.assertIs( self.instance.A.domain, Any)
+ with self.assertRaises(AttributeError):
+ self.instance.A.add(2)
def test_iterator(self):
"""Check that we can iterate through the set"""
- try:
+ with self.assertRaises(TypeError):
for val in self.instance.A:
- tmp=val
- except TypeError:
- pass
- else:
- self.fail("test_iterator failure")
+ pass
def test_eq1(self):
"""Various checks for set equality and inequality (1)"""
@@ -1863,96 +1917,95 @@ def test_eq2(self):
def test_le1(self):
"""Various checks for set subset (1)"""
- try:
- self.instance.A < self.instance.tmpset1
- self.instance.A <= self.instance.tmpset1
- self.instance.A > self.instance.tmpset1
- self.instance.A >= self.instance.tmpset1
- self.instance.tmpset1 < self.instance.A
- self.instance.tmpset1 <= self.instance.A
- self.instance.tmpset1 > self.instance.A
- self.instance.tmpset1 >= self.instance.A
- except TypeError:
- pass
- else:
- self.fail("test_le1 failure")
+ self.assertFalse(self.instance.A < self.instance.tmpset1)
+ self.assertFalse(self.instance.A <= self.instance.tmpset1)
+ self.assertTrue(self.instance.A > self.instance.tmpset1)
+ self.assertTrue(self.instance.A >= self.instance.tmpset1)
+ self.assertTrue(self.instance.tmpset1 < self.instance.A)
+ self.assertTrue(self.instance.tmpset1 <= self.instance.A)
+ self.assertFalse(self.instance.tmpset1 > self.instance.A)
+ self.assertFalse(self.instance.tmpset1 >= self.instance.A)
def test_le2(self):
"""Various checks for set subset (2)"""
- try:
- self.instance.A < self.instance.tmpset2
- self.instance.A <= self.instance.tmpset2
- self.instance.A > self.instance.tmpset2
- self.instance.A >= self.instance.tmpset2
- self.instance.tmpset2 < self.instance.A
- self.instance.tmpset2 <= self.instance.A
- self.instance.tmpset2 > self.instance.A
- self.instance.tmpset2 >= self.instance.A
- except TypeError:
- pass
- else:
- self.fail("test_le2 failure")
+ self.assertFalse(self.instance.A < self.instance.tmpset2)
+ self.assertFalse(self.instance.A <= self.instance.tmpset2)
+ self.assertTrue(self.instance.A > self.instance.tmpset2)
+ self.assertTrue(self.instance.A >= self.instance.tmpset2)
+ self.assertTrue(self.instance.tmpset2 < self.instance.A)
+ self.assertTrue(self.instance.tmpset2 <= self.instance.A)
+ self.assertFalse(self.instance.tmpset2 > self.instance.A)
+ self.assertFalse(self.instance.tmpset2 >= self.instance.A)
def test_le3(self):
"""Various checks for set subset (3)"""
- try:
- self.instance.A < self.instance.tmpset3
- self.instance.A <= self.instance.tmpset3
- self.instance.A > self.instance.tmpset3
- self.instance.A >= self.instance.tmpset3
- self.instance.tmpset3 < self.instance.A
- self.instance.tmpset3 <= self.instance.A
- self.instance.tmpset3 > self.instance.A
- self.instance.tmpset3 >= self.instance.A
- except TypeError:
- pass
- else:
- self.fail("test_le3 failure")
+ self.assertFalse(self.instance.A < self.instance.tmpset3)
+ self.assertFalse(self.instance.A <= self.instance.tmpset3)
+ self.assertTrue(self.instance.A > self.instance.tmpset3)
+ self.assertTrue(self.instance.A >= self.instance.tmpset3)
+ self.assertTrue(self.instance.tmpset3 < self.instance.A)
+ self.assertTrue(self.instance.tmpset3 <= self.instance.A)
+ self.assertFalse(self.instance.tmpset3 > self.instance.A)
+ self.assertFalse(self.instance.tmpset3 >= self.instance.A)
def test_or(self):
"""Check that set union works"""
- try:
- self.instance.tmp = self.instance.A | self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("Operator __or__ should have failed.")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.tmp = self.instance.A | self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("Operator __or__ should have failed.")
+ self.assertEqual(self.instance.A | self.instance.tmpset3, Any)
def test_and(self):
"""Check that set intersection works"""
- try:
- self.instance.tmp = self.instance.A & self.instance.tmpset3
- except TypeError:
- pass
- else:
- self.fail("Operator __and__ should have failed.")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.tmp = self.instance.A & self.instance.tmpset3
+ # except TypeError:
+ # pass
+ # else:
+ # self.fail("Operator __and__ should have failed.")
+ self.assertEqual(self.instance.A & self.instance.tmpset3,
+ self.instance.tmpset3)
def test_xor(self):
"""Check that set exclusive or works"""
- try:
- self.tmp = self.instance.A ^ self.instance.tmpset3
- except:
- pass
- else:
- self.fail("Operator __xor__ should have failed.")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.tmp = self.instance.A ^ self.instance.tmpset3
+ # except:
+ # pass
+ # else:
+ # self.fail("Operator __xor__ should have failed.")
+ self.assertEqual(self.instance.A ^ self.instance.tmpset3, Any)
def test_diff(self):
"""Check that set difference works"""
- try:
- self.tmp = self.instance.A - self.instance.tmpset3
- except:
- pass
- else:
- self.fail("Operator __diff__ should have failed.")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.tmp = self.instance.A - self.instance.tmpset3
+ # except:
+ # pass
+ # else:
+ # self.fail("Operator __diff__ should have failed.")
+ self.assertEqual(self.instance.A - self.instance.tmpset3, Any)
def test_mul(self):
"""Check that set cross-product works"""
- try:
- self.instance.tmp = self.instance.A * self.instance.tmpset3
- except:
- pass
- else:
- self.fail("Operator __mul__ should have failed.")
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance.tmp = self.instance.A * self.instance.tmpset3
+ # except:
+ # pass
+ # else:
+ # self.fail("Operator __mul__ should have failed.")
+ x = self.instance.A * self.instance.tmpset3
+ self.assertIsNone(x.dimen)
+ self.assertEqual(list(x.subsets()),
+ [self.instance.A, self.instance.tmpset3])
class TestSetArgs1(PyomoModel):
@@ -1971,21 +2024,27 @@ def tearDown(self):
os.remove(currdir+"setA.dat")
PyomoModel.tearDown(self)
- def test_initialize1(self):
+ def test_initialize1_list(self):
self.model.A = Set(initialize=[1,2,3,'A'])
self.instance = self.model.create_instance()
self.assertEqual(len(self.instance.A),4)
- def test_initialize2(self):
+ def test_initialize2_listcomp(self):
self.model.A = Set(initialize=[(i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0])
self.instance = self.model.create_instance()
self.assertEqual(len(self.instance.A),4)
- def test_initialize3(self):
- self.model.A = Set(initialize=((i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0))
+ def test_initialize3_generator(self):
+ self.model.A = Set(initialize=lambda m: (
+ (i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0))
self.instance = self.model.create_instance()
self.assertEqual(len(self.instance.A),4)
+ m = ConcreteModel()
+ m.A = Set(initialize=(
+ (i,j) for i in range(0,3) for j in range(1,4) if (i+j)%2 == 0))
+ self.assertEqual(len(m.A),4)
+
def test_initialize4(self):
self.model.A = Set(initialize=range(0,4))
def B_index(model):
@@ -2058,11 +2117,20 @@ def B_init(model, i, ii, iii, j):
return range(i,2+i)
return []
self.model.B = Set(B_index, [True,False], initialize=B_init)
- try:
- self.instance = self.model.create_instance()
- self.fail("Expected ValueError because B_index returns a tuple")
- except ValueError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # self.instance = self.model.create_instance()
+ # self.fail("Expected ValueError because B_index returns a tuple")
+ # except ValueError:
+ # pass
+ instance = self.model.create_instance()
+ self.assertEquals(len(instance.B), 6)
+ self.assertEquals(instance.B[0,1,0,False], [])
+ self.assertEquals(instance.B[0,1,0,True], [0,1])
+ self.assertEquals(instance.B[1,2,1,False], [])
+ self.assertEquals(instance.B[1,2,1,True], [1,2])
+ self.assertEquals(instance.B[2,3,4,False], [])
+ self.assertEquals(instance.B[2,3,4,True], [2,3])
def test_initialize9(self):
self.model.A = Set(initialize=range(0,3))
@@ -2334,7 +2402,7 @@ def test_initialize(self):
#
self.model.Z = Set()
self.model.A = Set(self.model.Z, initialize={'A':[1,2,3,'A']})
- self.instance = self.model.create_instance()
+ self.instance = self.model.create_instance(currdir+'setA.dat')
self.assertEqual(len(self.instance.A['A']),4)
def test_dimen(self):
@@ -2577,8 +2645,8 @@ def test_virtual_cross_set(self):
self.model.C.virtual = True
self.instance = self.model.create_instance()
self.assertEqual(len(self.instance.C),9)
- if not self.instance.C.value is None:
- self.assertEqual(len(self.instance.C.value),0)
+ if self.instance.C.value is not None:
+ self.assertEqual(len(self.instance.C.value),9)
tmp=[]
for item in self.instance.C:
tmp.append(item)
@@ -2594,15 +2662,17 @@ def test_pprint_mixed(self):
m.A = Set(m.Z, initialize={'A':[1,2,3,'A']})
buf = StringIO()
m.pprint(ostream=buf)
- self.assertEqual("""2 Set Declarations
- A : Dim=1, Dimen=1, Size=4, Domain=None, ArraySize=1, Ordered=False, Bounds=None
- Key : Members
- A : [1, 2, 3, 'A']
- Z : Dim=0, Dimen=1, Size=2, Domain=None, Ordered=False, Bounds=None
- ['A', 'C']
+ ref="""2 Set Declarations
+ A : Size=1, Index=Z, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ A : 1 : Any : 4 : {1, 2, 3, 'A'}
+ Z : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 2 : {'A', 'C'}
2 Declarations: Z A
-""", buf.getvalue())
+"""
+ self.assertEqual(ref, buf.getvalue())
def test_initialize_and_clone_from_dict_keys(self):
# In Python3, initializing a dictionary from keys() returns a
@@ -2613,8 +2683,9 @@ def test_initialize_and_clone_from_dict_keys(self):
# an easy way to ensure that this simple model is cleanly
# clonable.
ref = """1 Set Declarations
- INDEX : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 5)
- [1, 3, 5]
+ INDEX : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 3, 5}
1 Param Declarations
p : Size=3, Index=INDEX, Domain=Any, Default=None, Mutable=False
@@ -2636,11 +2707,16 @@ def test_initialize_and_clone_from_dict_keys(self):
m.pprint(ostream=buf)
self.assertEqual(ref, buf.getvalue())
#
- m2 = copy.deepcopy(m)
+ m2 = m.clone()
buf = StringIO()
m2.pprint(ostream=buf)
self.assertEqual(ref, buf.getvalue())
#
+ m3 = copy.deepcopy(m)
+ buf = StringIO()
+ m3.pprint(ostream=buf)
+ self.assertEqual(ref, buf.getvalue())
+ #
# six.iterkeys()
#
m = ConcreteModel()
@@ -2651,10 +2727,15 @@ def test_initialize_and_clone_from_dict_keys(self):
m.pprint(ostream=buf)
self.assertEqual(ref, buf.getvalue())
#
- m2 = copy.deepcopy(m)
+ m2 = m.clone()
buf = StringIO()
m2.pprint(ostream=buf)
self.assertEqual(ref, buf.getvalue())
+ #
+ m3 = copy.deepcopy(m)
+ buf = StringIO()
+ m3.pprint(ostream=buf)
+ self.assertEqual(ref, buf.getvalue())
class TestSetIO(PyomoModel):
@@ -2694,7 +2775,7 @@ def test_io3(self):
OUTPUT.write("data;\n")
OUTPUT.write("set A := A1 A2 A3;\n")
OUTPUT.write("set B := 1 2 3 4;\n")
- OUTPUT.write("set C := (A1,1) (A2,2) (A3,3);\n")
+ #OUTPUT.write("set C := (A1,1) (A2,2) (A3,3);\n")
OUTPUT.write("end;\n")
OUTPUT.close()
self.model.A = Set()
@@ -2703,6 +2784,21 @@ def test_io3(self):
self.instance = self.model.create_instance(currdir+"setA.dat")
self.assertEqual( len(self.instance.C), 12)
+ def test_io3a(self):
+ OUTPUT=open(currdir+"setA.dat","w")
+ OUTPUT.write("data;\n")
+ OUTPUT.write("set A := A1 A2 A3;\n")
+ OUTPUT.write("set B := 1 2 3 4;\n")
+ OUTPUT.write("set C := (A1,1) (A2,2) (A3,3);\n")
+ OUTPUT.write("end;\n")
+ OUTPUT.close()
+ self.model.A = Set()
+ self.model.B = Set()
+ self.model.C = self.model.A * self.model.B
+ with self.assertRaisesRegexp(
+ ValueError, "SetOperator C with incompatible data"):
+ self.instance = self.model.create_instance(currdir+"setA.dat")
+
def test_io4(self):
OUTPUT=open(currdir+"setA.dat","w")
OUTPUT.write("data;\n")
@@ -2813,15 +2909,6 @@ def test_io10(self):
self.assertEqual( len(self.instance.F['A1 x']), 3)
-def init_fn(model):
- return []
-
-def tmp_constructor(model, ctr, index):
- if ctr == 10:
- return None
- else:
- return ctr
-
class TestSetErrors(PyomoModel):
def test_membership(self):
@@ -2865,17 +2952,19 @@ def test_numpy_membership(self):
self.assertEqual( numpy.int_(0) in Integers, True)
self.assertEqual( numpy.int_(1) in Integers, True)
- # Numpy.bool_ is NOT a numeric type
- self.assertEqual( numpy.bool_(True) in Integers, False)
- self.assertEqual( numpy.bool_(False) in Integers, False)
+ # Numpy.bool_(True) is NOT a numeric type, but it behaves
+ # identically to 1
+ self.assertEqual( numpy.bool_(True) in Integers, True)
+ self.assertEqual( numpy.bool_(False) in Integers, True)
self.assertEqual( numpy.float_(1.1) in Integers, False)
self.assertEqual( numpy.int_(2) in Integers, True)
self.assertEqual( numpy.int_(0) in Reals, True)
self.assertEqual( numpy.int_(1) in Reals, True)
- # Numpy.bool_ is NOT a numeric type
- self.assertEqual( numpy.bool_(True) in Reals, False)
- self.assertEqual( numpy.bool_(False) in Reals, False)
+ # Numpy.bool_(True) is NOT a numeric type, but it behaves
+ # identically to 1
+ self.assertEqual( numpy.bool_(True) in Reals, True)
+ self.assertEqual( numpy.bool_(False) in Reals, True)
self.assertEqual( numpy.float_(1.1) in Reals, True)
self.assertEqual( numpy.int_(2) in Reals, True)
@@ -2895,25 +2984,37 @@ def test_setargs1(self):
pass
def test_setargs2(self):
- try:
- a=Set()
- b=Set(a)
- c=Set(within=b, dimen=2)
- self.fail("test_setargs1 - expected error because of bad argument")
- except ValueError:
- pass
+ # After the set rewrite, the following error doesn't manifest
+ # itself until construction time
+ # try:
+ # a=Set()
+ # b=Set(a)
+ # c=Set(within=b, dimen=2)
+ # self.fail("test_setargs1 - expected error because of bad argument")
+ # except ValueError:
+ # pass
a=Set()
b=Set(a)
+ with self.assertRaisesRegexp(
+ TypeError, "Cannot apply a Set operator to an indexed"):
+ c=Set(within=b, dimen=2)
+ c.construct()
+
+ a=Set()
+ b=Set()
c=Set(within=b, dimen=1)
+ c.construct()
self.assertEqual(c.domain,b)
- c.domain = a
- self.assertEqual(c.domain,a)
+ # After the set rewrite, we disallow setting the domain after
+ # declaration
+ #c.domain = a
+ #self.assertEqual(c.domain,a)
def test_setargs3(self):
model = ConcreteModel()
- model.a=Set(dimen=1, initialize=(1,2))
+ model.a=Set(dimen=1, initialize=(1,2,3))
try:
- model.b=Set(dimen=2, initialize=(1,2))
+ model.b=Set(dimen=2, initialize=(1,2,3))
self.fail("test_setargs3 - expected error because dimen does not match set values")
except ValueError:
pass
@@ -2938,6 +3039,7 @@ def test_setargs5(self):
model.Y = RangeSet(model.C)
model.X = Param(model.C, default=0.0)
+ @unittest.skip("_verify was removed during the set rewrite")
def test_verify(self):
a=Set(initialize=[1,2,3])
b=Set(within=a)
@@ -2962,22 +3064,40 @@ def test_verify(self):
pass
def test_construct(self):
- a = Set(initialize={})
- try:
+ a = Set(initialize={1:2,3:4})
+ # After the set rewrite, this still fails, but with a different
+ # exception:
+ # try:
+ # a.construct()
+ # self.fail("test_construct - expected failure constructing with a dictionary")
+ # except ValueError:
+ # pass
+ with self.assertRaisesRegexp(
+ KeyError, "Cannot treat the scalar component '[^']*' "
+ "as an indexed component"):
a.construct()
- self.fail("test_construct - expected failure constructing with a dictionary")
- except ValueError:
- pass
+
+ # After the set rewrite, empty dictionaries are acceptable
+ a = Set(initialize={})
+ a.construct()
+ self.assertEqual(a, EmptySet)
#
+ def init_fn(model):
+ return []
+ # After the set rewrite, model()==None is acceptable
a = Set(initialize=init_fn)
- try:
- a.construct()
- self.fail("test_construct - expected exception due to None model")
- except ValueError:
- pass
+ # try:
+ # a.construct()
+ # self.fail("test_construct - expected exception due to None model")
+ # except ValueError:
+ # pass
+ a.construct()
+ self.assertEqual(a, EmptySet)
+
def test_add(self):
a=Set()
+ a.construct()
a.add(1)
a.add("a")
try:
@@ -2987,19 +3107,30 @@ def test_add(self):
pass
def test_getitem(self):
- a=Set(initialize=[1,2])
- try:
+ a=Set(initialize=[2,3])
+ # With the set rewrite, sets are ordered by default
+ # try:
+ # a[0]
+ # self.fail("test_getitem - cannot index an unordered set")
+ # except ValueError:
+ # pass
+ # except IndexError:
+ # pass
+ with self.assertRaisesRegexp(
+ RuntimeError, ".*before it has been constructed"):
a[0]
- self.fail("test_getitem - cannot index an unordered set")
- except ValueError:
- pass
- except IndexError:
- pass
+ a.construct()
+ with self.assertRaisesRegexp(
+ IndexError, "Pyomo Sets are 1-indexed"):
+ a[0]
+ self.assertEqual(a[1], 2)
+
def test_eq(self):
a=Set(dimen=1,name="a",initialize=[1,2])
a.construct()
b=Set(dimen=2)
+ b.construct()
self.assertEqual(a==b,False)
self.assertTrue(not a.__eq__(Boolean))
self.assertTrue(not Boolean == a)
@@ -3008,6 +3139,7 @@ def test_neq(self):
a=Set(dimen=1,initialize=[1,2])
a.construct()
b=Set(dimen=2)
+ b.construct()
self.assertEqual(a!=b,True)
self.assertTrue(a.__ne__(Boolean))
self.assertTrue(Boolean != a)
@@ -3023,6 +3155,7 @@ def test_contains(self):
self.assertEqual(1 in NonNegativeIntegers, True)
def test_subset(self):
+ # In the set rewrite, the following now works!
#try:
# Integers in Reals
# self.fail("test_subset - expected TypeError")
@@ -3033,25 +3166,35 @@ def test_subset(self):
# self.fail("test_subset - expected TypeError")
#except TypeError:
# pass
- try:
- a=Set(dimen=1)
- b=Set(dimen=2)
- a in b
- self.fail("test_subset - expected ValueError")
- except ValueError:
- pass
+ self.assertTrue(Integers.issubset(Reals))
+ # Prior to the set rewrite, SetOperators (like issubset) between
+ # sets with differing dimensionality generated an error.
+ # Because of vagueness around the concept of the UnknownSetDimen
+ # and dimen=None, we no longer generate those errors. This
+ # means that two empty sets (a and b) with differing
+ # dimensionalities can be subsets of each other.
+ # try:
+ # a=Set(dimen=1)
+ # b=Set(dimen=2)
+ # a in b
+ # self.fail("test_subset - expected ValueError")
+ # except ValueError:
+ # pass
def test_superset(self):
- try:
- Reals >= Integers
- self.fail("test_subset - expected TypeError")
- except TypeError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # Reals >= Integers
+ # self.fail("test_subset - expected TypeError")
+ # except TypeError:
+ # pass
#try:
# Integers.issubset(Reals)
# self.fail("test_subset - expected TypeError")
#except TypeError:
# pass
+ self.assertTrue(Reals > Integers)
+ self.assertTrue(Integers.issubset(Reals))
a=Set(initialize=[1,3,5,7])
a.construct()
b=Set(initialize=[1,3])
@@ -3062,11 +3205,14 @@ def test_superset(self):
self.assertEqual(a >= b, True)
def test_lt(self):
- try:
- Integers < Reals
- self.fail("test_subset - expected TypeError")
- except TypeError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # Integers < Reals
+ # self.fail("test_subset - expected TypeError")
+ # except TypeError:
+ # pass
+ self.assertTrue(Integers < Reals)
+
a=Set(initialize=[1,3,5,7])
a.construct()
a < Reals
@@ -3076,117 +3222,162 @@ def test_lt(self):
self.assertEqual(bc
- self.fail("test_subset - expected ValueError")
- except ValueError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # a>c
+ # self.fail("test_subset - expected ValueError")
+ # except ValueError:
+ # pass
+ self.assertFalse(a > c)
def test_or(self):
a=Set(initialize=[1,2,3])
c=Set(initialize=[(1,2)])
+ a.construct()
c.construct()
- try:
- Reals | Integers
- self.fail("test_or - expected TypeError")
- except TypeError:
- pass
- try:
- a | Integers
- self.fail("test_or - expected TypeError")
- except TypeError:
- pass
- try:
- a | c
- self.fail("test_or - expected ValueError")
- except ValueError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # Reals | Integers
+ # self.fail("test_or - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a | Integers
+ # self.fail("test_or - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a | c
+ # self.fail("test_or - expected ValueError")
+ # except ValueError:
+ # pass
+ self.assertEqual(Reals | Integers, Reals)
+ self.assertEqual(a | Integers, Integers)
+ self.assertEqual(a | c, [1,2,3,(1,2)])
def test_and(self):
a=Set(initialize=[1,2,3])
c=Set(initialize=[(1,2)])
+ a.construct()
c.construct()
- try:
- Reals & Integers
- self.fail("test_and - expected TypeError")
- except TypeError:
- pass
- try:
- a & Integers
- self.fail("test_and - expected TypeError")
- except TypeError:
- pass
- try:
- a & c
- self.fail("test_and - expected ValueError")
- except ValueError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # Reals & Integers
+ # self.fail("test_and - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a & Integers
+ # self.fail("test_and - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a & c
+ # self.fail("test_and - expected ValueError")
+ # except ValueError:
+ # pass
+ self.assertEqual(Reals & Integers, Integers)
+ self.assertEqual(a & Integers, a)
+ self.assertEqual(a & c, EmptySet)
def test_xor(self):
a=Set(initialize=[1,2,3])
+ a.construct()
c=Set(initialize=[(1,2)])
c.construct()
- try:
- Reals ^ Integers
- self.fail("test_xor - expected TypeError")
- except TypeError:
- pass
- try:
- a ^ Integers
- self.fail("test_xor - expected TypeError")
- except TypeError:
- pass
- try:
- a ^ c
- self.fail("test_xor - expected ValueError")
- except ValueError:
- pass
+ # In the set rewrite, the following "mostly works"
+ # try:
+ # Reals ^ Integers
+ # self.fail("test_xor - expected TypeError")
+ # except TypeError:
+ # pass
+ X = Reals ^ Integers
+ self.assertIn(0.5, X)
+ self.assertNotIn(1, X)
+ with self.assertRaisesRegexp(
+ RangeDifferenceError, "We do not support subtracting an "
+ "infinite discrete range \[0:None\] from an infinite "
+ "continuous range \[None..None\]"):
+ X < Reals
+ # In the set rewrite, the following now works!
+ # try:
+ # a ^ Integers
+ # self.fail("test_xor - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a ^ c
+ # self.fail("test_xor - expected ValueError")
+ # except ValueError:
+ # pass
+ self.assertEqual(a ^ Integers, Integers - a)
+ self.assertEqual(a ^ c, SetOf([1,2,3,(1,2)]))
def test_sub(self):
a=Set(initialize=[1,2,3])
+ a.construct()
c=Set(initialize=[(1,2)])
c.construct()
- try:
- Reals - Integers
- self.fail("test_sub - expected TypeError")
- except TypeError:
- pass
- try:
- a - Integers
- self.fail("test_sub - expected TypeError")
- except TypeError:
- pass
- try:
- a - c
- self.fail("test_sub - expected ValueError")
- except ValueError:
- pass
+ # In the set rewrite, the following "mostly works"
+ # try:
+ # Reals - Integers
+ # self.fail("test_sub - expected TypeError")
+ # except TypeError:
+ # pass
+ X = Reals - Integers
+ self.assertIn(0.5, X)
+ self.assertNotIn(1, X)
+ with self.assertRaisesRegexp(
+ RangeDifferenceError, "We do not support subtracting an "
+ "infinite discrete range \[0:None\] from an infinite "
+ "continuous range \[None..None\]"):
+ X < Reals
+ # In the set rewrite, the following now works!
+ # try:
+ # a - Integers
+ # self.fail("test_sub - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a - c
+ # self.fail("test_sub - expected ValueError")
+ # except ValueError:
+ # pass
+ self.assertEqual(a - Integers, EmptySet)
+ self.assertEqual(a - c, a)
def test_mul(self):
a=Set(initialize=[1,2,3])
c=Set(initialize=[(1,2)])
+ a.construct()
c.construct()
- try:
- Reals * Integers
- self.fail("test_mul - expected TypeError")
- except TypeError:
- pass
- try:
- a * Integers
- self.fail("test_mul - expected TypeError")
- except TypeError:
- pass
+ # In the set rewrite, the following now works!
+ # try:
+ # Reals * Integers
+ # self.fail("test_mul - expected TypeError")
+ # except TypeError:
+ # pass
+ # try:
+ # a * Integers
+ # self.fail("test_mul - expected TypeError")
+ # except TypeError:
+ # pass
+ self.assertEqual((Reals * Integers).dimen, 2)
+ self.assertEqual((a * Integers).dimen, 2)
+
try:
a * 1
self.fail("test_mul - expected TypeError")
@@ -3195,6 +3386,12 @@ def test_mul(self):
b = a * c
def test_arrayset_construct(self):
+ def tmp_constructor(model, ctr, index):
+ if ctr == 10:
+ return Set.End
+ else:
+ return ctr
+
a=Set(initialize=[1,2,3])
a.construct()
b=Set(a, initialize=tmp_constructor)
@@ -3204,19 +3401,31 @@ def test_arrayset_construct(self):
except KeyError:
pass
b._constructed=False
- try:
- b.construct()
- self.fail("test_arrayset_construct - expected ValueError")
- except ValueError:
- pass
- b=Set(a,a, initialize=tmp_constructor)
+ # In the set rewrite, the following now works!
+ # try:
+ # b.construct()
+ # self.fail("test_arrayset_construct - expected ValueError")
+ # except ValueError:
+ # pass
+ b.construct()
+ self.assertEqual(len(b), 3)
for i in b:
self.assertEqual(i in a, True)
- try:
+ self.assertEqual(b[1], [1,2,3,4,5,6,7,8,9])
+ self.assertEqual(b[2], [1,2,3,4,5,6,7,8,9])
+ self.assertEqual(b[3], [1,2,3,4,5,6,7,8,9])
+
+ b=Set(a,a, initialize=tmp_constructor)
+ # In the set rewrite, the following still fails, but with a
+ # different exception:
+ # try:
+ # b.construct()
+ # self.fail("test_arrayset_construct - expected ValueError")
+ # except ValueError:
+ # pass
+ with self.assertRaisesRegexp(
+ TypeError, "'int' object is not iterable"):
b.construct()
- self.fail("test_arrayset_construct - expected ValueError")
- except ValueError:
- pass
def test_prodset(self):
a=Set(initialize=[1,2])
@@ -3226,17 +3435,21 @@ def test_prodset(self):
c=a*b
c.construct()
self.assertEqual((6,2) in c, False)
- c=pyomo.core.base.sets._SetProduct(a,b)
+ c=pyomo.core.base.set.SetProduct(a,b)
c.virtual=True
self.assertEqual((6,2) in c, False)
self.assertEqual((1,7) in c, True)
- #c=pyomo.core.base.sets._SetProduct()
+ #c=pyomo.core.base.set.SetProduct()
#c.virtual=True
#c.construct()
- c=pyomo.core.base.sets._SetProduct(a,b,initialize={(1,7):None,(2,6):None})
- c.construct()
- c=pyomo.core.base.sets._SetProduct(a,b,initialize=(1,7))
- c.construct()
+
+ # the set rewrite removed ALL support for 'initialize=' in
+ # SetOperators (without deprecation). This "feature" is vaguely
+ # defined and not documented.
+ # c=pyomo.core.base.set.SetProduct(a,b,initialize={(1,7):None,(2,6):None})
+ # c.construct()
+ # c=pyomo.core.base.set.SetProduct(a,b,initialize=(1,7))
+ # c.construct()
def virt_constructor(model, y):
@@ -3271,15 +3484,15 @@ def test_union(self):
union = s1 | s2 | s3 | s3 | s2
self.assertTrue(isinstance(inst.union1,
- pyomo.core.base.sets._SetUnion))
+ pyomo.core.base.set.SetUnion))
self.assertEqual(inst.union1,
(s1 | (s2 | (s3 | (s3 | s2)))))
self.assertTrue(isinstance(inst.union2,
- pyomo.core.base.sets._SetUnion))
+ pyomo.core.base.set.SetUnion))
self.assertEqual(inst.union2,
s1 | (s2 | (s3 | (s3 | s2))))
self.assertTrue(isinstance(inst.union3,
- pyomo.core.base.sets._SetUnion))
+ pyomo.core.base.set.SetUnion))
self.assertEqual(inst.union3,
((((s1 | s2) | s3) | s3) | s2))
@@ -3305,19 +3518,19 @@ def test_intersection(self):
inst = model.create_instance()
self.assertTrue(isinstance(inst.intersection1,
- pyomo.core.base.sets._SetIntersection))
+ pyomo.core.base.set.SetIntersection))
self.assertEqual(sorted(inst.intersection1),
sorted((s1 & (s2 & (s3 & (s3 & s2))))))
self.assertTrue(isinstance(inst.intersection2,
- pyomo.core.base.sets._SetIntersection))
+ pyomo.core.base.set.SetIntersection))
self.assertEqual(sorted(inst.intersection2),
sorted(s1 & (s2 & (s3 & (s3 & s2)))))
self.assertTrue(isinstance(inst.intersection3,
- pyomo.core.base.sets._SetIntersection))
+ pyomo.core.base.set.SetIntersection))
self.assertEqual(sorted(inst.intersection3),
sorted(((((s1 & s2) & s3) & s3) & s2)))
self.assertTrue(isinstance(inst.intersection4,
- pyomo.core.base.sets._SetIntersection))
+ pyomo.core.base.set.SetIntersection))
self.assertEqual(sorted(inst.intersection4),
sorted(s3 & s1 & s3))
@@ -3341,15 +3554,15 @@ def test_difference(self):
inst = model.create_instance()
self.assertTrue(isinstance(inst.difference1,
- pyomo.core.base.sets._SetDifference))
+ pyomo.core.base.set.SetDifference))
self.assertEqual(sorted(inst.difference1),
sorted((s1 - (s2 - (s3 - (s3 - s2))))))
self.assertTrue(isinstance(inst.difference2,
- pyomo.core.base.sets._SetDifference))
+ pyomo.core.base.set.SetDifference))
self.assertEqual(sorted(inst.difference2),
sorted(s1 - (s2 - (s3 - (s3 - s2)))))
self.assertTrue(isinstance(inst.difference3,
- pyomo.core.base.sets._SetDifference))
+ pyomo.core.base.set.SetDifference))
self.assertEqual(sorted(inst.difference3),
sorted(((((s1 - s2) - s3) - s3) - s2)))
@@ -3375,19 +3588,19 @@ def test_symmetric_difference(self):
inst = model.create_instance()
self.assertTrue(isinstance(inst.symdiff1,
- pyomo.core.base.sets._SetSymmetricDifference))
+ pyomo.core.base.set.SetSymmetricDifference))
self.assertEqual(sorted(inst.symdiff1),
sorted((s1 ^ (s2 ^ (s3 ^ (s3 ^ s2))))))
self.assertTrue(isinstance(inst.symdiff2,
- pyomo.core.base.sets._SetSymmetricDifference))
+ pyomo.core.base.set.SetSymmetricDifference))
self.assertEqual(sorted(inst.symdiff2),
sorted(s1 ^ (s2 ^ (s3 ^ (s3 ^ s2)))))
self.assertTrue(isinstance(inst.symdiff3,
- pyomo.core.base.sets._SetSymmetricDifference))
+ pyomo.core.base.set.SetSymmetricDifference))
self.assertEqual(sorted(inst.symdiff3),
sorted(((((s1 ^ s2) ^ s3) ^ s3) ^ s2)))
self.assertTrue(isinstance(inst.symdiff4,
- pyomo.core.base.sets._SetSymmetricDifference))
+ pyomo.core.base.set.SetSymmetricDifference))
self.assertEqual(sorted(inst.symdiff4),
sorted(s1 ^ s2 ^ s3))
@@ -3413,19 +3626,19 @@ def test_product(self):
p = itertools.product
self.assertTrue(isinstance(inst.product1,
- pyomo.core.base.sets._SetProduct))
+ pyomo.core.base.set.SetProduct))
prod1 = set([pyutilib_misc_flatten_tuple(i) \
for i in set( p(s1,p(s2,p(s3,p(s3,s2)))) )])
self.assertEqual(sorted(inst.product1),
sorted(prod1))
self.assertTrue(isinstance(inst.product2,
- pyomo.core.base.sets._SetProduct))
+ pyomo.core.base.set.SetProduct))
prod2 = set([pyutilib_misc_flatten_tuple(i) \
for i in set( p(s1,p(s2,p(s3,p(s3,s2)))) )])
self.assertEqual(sorted(inst.product2),
sorted(prod2))
self.assertTrue(isinstance(inst.product3,
- pyomo.core.base.sets._SetProduct))
+ pyomo.core.base.set.SetProduct))
prod3 = set([pyutilib_misc_flatten_tuple(i) \
for i in set( p(p(p(p(s1,s2),s3),s3),s2) )])
self.assertEqual(sorted(inst.product3),
diff --git a/pyomo/core/tests/unit/test_sos.py b/pyomo/core/tests/unit/test_sos.py
index 2e7e8b96347..3a1d88b2f9c 100644
--- a/pyomo/core/tests/unit/test_sos.py
+++ b/pyomo/core/tests/unit/test_sos.py
@@ -59,7 +59,7 @@ def test_negative_weights(self):
def test_ordered(self):
M = ConcreteModel()
- M.v = Var([1,2,3])
+ M.v = Var({1,2,3})
try:
M.c = SOSConstraint(var=M.v, sos=2)
self.fail("Expected ValueError")
diff --git a/pyomo/core/tests/unit/test_template_expr.py b/pyomo/core/tests/unit/test_template_expr.py
index b31939bf95e..9d6d818ec5e 100644
--- a/pyomo/core/tests/unit/test_template_expr.py
+++ b/pyomo/core/tests/unit/test_template_expr.py
@@ -2,8 +2,8 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
@@ -11,19 +11,24 @@
import pyutilib.th as unittest
-from pyomo.environ import ConcreteModel, RangeSet, Param, Var, Set, value
+from pyomo.environ import (
+ ConcreteModel, AbstractModel, RangeSet, Param, Var, Set, value,
+)
import pyomo.core.expr.current as EXPR
-from pyomo.core.base.template_expr import (
- IndexTemplate,
+from pyomo.core.expr.template_expr import (
+ IndexTemplate,
+ TemplateExpressionError,
_GetItemIndexer,
- substitute_template_expression,
+ resolve_template,
+ templatize_constraint,
+ substitute_template_expression,
substitute_getitem_with_param,
substitute_template_with_value,
)
import six
-class ExpressionObjectTester(object):
+class TestTemplateExpressions(unittest.TestCase):
def setUp(self):
self.m = m = ConcreteModel()
m.I = RangeSet(1,9)
@@ -33,150 +38,211 @@ def setUp(self):
m.p = Param(m.I, m.J, initialize=lambda m,i,j: 100*i+j)
m.s = Set(m.I, initialize=lambda m,i:range(i))
+ def test_nonTemplates(self):
+ m = self.m
+ self.assertIs(resolve_template(m.x[1]), m.x[1])
+ e = m.x[1] + m.x[2]
+ self.assertIs(resolve_template(e), e)
+
+ def test_IndexTemplate(self):
+ m = self.m
+ i = IndexTemplate(m.I)
+ with self.assertRaisesRegex(
+ TemplateExpressionError,
+ "Evaluating uninitialized IndexTemplate"):
+ value(i)
+
+ self.assertEqual(str(i), "{I}")
+
+ i.set_value(5)
+ self.assertEqual(value(i), 5)
+ self.assertIs(resolve_template(i), 5)
+
def test_template_scalar(self):
m = self.m
t = IndexTemplate(m.I)
e = m.x[t]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(tuple(e.args), (t,))
+ self.assertEqual(e.args, (m.x, t))
self.assertFalse(e.is_constant())
self.assertFalse(e.is_fixed())
self.assertEqual(e.polynomial_degree(), 1)
+ self.assertEqual(str(e), "x[{I}]")
t.set_value(5)
- self.assertEqual(e(), 6)
- self.assertIs(e.resolve_template(), m.x[5])
- t.set_value(None)
+ v = e()
+ self.assertIn(type(v), (int, float))
+ self.assertEqual(v, 6)
+ self.assertIs(resolve_template(e), m.x[5])
+ t.set_value()
e = m.p[t,10]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.p)
- self.assertEqual(tuple(e.args), (t,10))
+ self.assertEqual(e.args, (m.p,t,10))
self.assertFalse(e.is_constant())
self.assertTrue(e.is_fixed())
self.assertEqual(e.polynomial_degree(), 0)
+ self.assertEqual(str(e), "p[{I},10]")
t.set_value(5)
- self.assertEqual(e(), 510)
- self.assertIs(e.resolve_template(), m.p[5,10])
- t.set_value(None)
+ v = e()
+ self.assertIn(type(v), (int, float))
+ self.assertEqual(v, 510)
+ self.assertIs(resolve_template(e), m.p[5,10])
+ t.set_value()
e = m.p[5,t]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.p)
- self.assertEqual(tuple(e.args), (5,t))
+ self.assertEqual(e.args, (m.p,5,t))
self.assertFalse(e.is_constant())
self.assertTrue(e.is_fixed())
self.assertEqual(e.polynomial_degree(), 0)
+ self.assertEqual(str(e), "p[5,{I}]")
t.set_value(10)
- self.assertEqual(e(), 510)
- self.assertIs(e.resolve_template(), m.p[5,10])
- t.set_value(None)
+ v = e()
+ self.assertIn(type(v), (int, float))
+ self.assertEqual(v, 510)
+ self.assertIs(resolve_template(e), m.p[5,10])
+ t.set_value()
- # TODO: Fixing this test requires fixing Set
- def _test_template_scalar_with_set(self):
+ def test_template_scalar_with_set(self):
m = self.m
t = IndexTemplate(m.I)
e = m.s[t]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.s)
- self.assertEqual(tuple(e.args), (t,))
+ self.assertEqual(e.args, (m.s,t))
self.assertFalse(e.is_constant())
self.assertTrue(e.is_fixed())
self.assertEqual(e.polynomial_degree(), 0)
+ self.assertEqual(str(e), "s[{I}]")
t.set_value(5)
- self.assertRaises(TypeError, e)
- self.assertIs(e.resolve_template(), m.s[5])
- t.set_value(None)
+ v = e()
+ self.assertIs(v, m.s[5])
+ self.assertIs(resolve_template(e), m.s[5])
+ t.set_value()
def test_template_operation(self):
m = self.m
t = IndexTemplate(m.I)
e = m.x[t+m.P[5]]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(e.arg(0).arg(1), m.P[5])
-
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(e.arg(1).arg(1), m.P[5])
+ self.assertEqual(str(e), "x[{I} + P[5]]")
def test_nested_template_operation(self):
m = self.m
t = IndexTemplate(m.I)
e = m.x[t+m.P[t+1]]
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
-
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
+ self.assertEqual(str(e), "x[{I} + P[{I} + 1]]")
+
+ def test_block_templates(self):
+ m = ConcreteModel()
+ m.T = RangeSet(3)
+ @m.Block(m.T)
+ def b(b, i):
+ b.x = Var(initialize=i)
+
+ @b.Block(m.T)
+ def bb(bb, j):
+ bb.I =RangeSet(i*j)
+ bb.y = Var(bb.I, initialize=lambda m,i:i)
+ t = IndexTemplate(m.T)
+ e = m.b[t].x
+ self.assertIs(type(e), EXPR.GetAttrExpression)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(type(e.arg(0)), EXPR.GetItemExpression)
+ self.assertIs(e.arg(0).arg(0), m.b)
+ self.assertEqual(e.arg(0).nargs(), 2)
+ self.assertIs(e.arg(0).arg(1), t)
+ self.assertEqual(str(e), "b[{T}].x")
+ t.set_value(2)
+ v = e()
+ self.assertIn(type(v), (int, float))
+ self.assertEqual(v, 2)
+ self.assertIs(resolve_template(e), m.b[2].x)
+ t.set_value()
+
+ e = m.b[t].bb[t].y[1]
+ self.assertIs(type(e), EXPR.GetItemExpression)
+ self.assertEqual(e.nargs(), 2)
+ self.assertEqual(str(e), "b[{T}].bb[{T}].y[1]")
+ t.set_value(2)
+ v = e()
+ self.assertIn(type(v), (int, float))
+ self.assertEqual(v, 1)
+ self.assertIs(resolve_template(e), m.b[2].bb[2].y[1])
def test_template_name(self):
m = self.m
t = IndexTemplate(m.I)
E = m.x[t+m.P[1+t]] + m.P[1]
- self.assertEqual( str(E), "x({I} + P(1 + {I})) + P[1]")
+ self.assertEqual( str(E), "x[{I} + P[1 + {I}]] + P[1]")
E = m.x[t+m.P[1+t]**2.]**2. + m.P[1]
- self.assertEqual( str(E), "x({I} + P(1 + {I})**2.0)**2.0 + P[1]")
-
+ self.assertEqual( str(E), "x[{I} + P[1 + {I}]**2.0]**2.0 + P[1]")
def test_template_in_expression(self):
m = self.m
t = IndexTemplate(m.I)
E = m.x[t+m.P[t+1]] + m.P[1]
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(0)
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E = m.P[1] + m.x[t+m.P[t+1]]
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(1)
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E = m.x[t+m.P[t+1]] + 1
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(0)
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E = 1 + m.x[t+m.P[t+1]]
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(E.nargs()-1)
self.assertIs(type(e), EXPR.GetItemExpression)
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
-
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
def test_clone(self):
m = self.m
@@ -184,21 +250,21 @@ def test_clone(self):
E_base = m.x[t+m.P[t+1]] + m.P[1]
E = E_base.clone()
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(0)
self.assertIs(type(e), EXPR.GetItemExpression)
self.assertIsNot(e, E_base.arg(0))
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertIs(type(e.arg(0).arg(1)),
- type(E_base.arg(0).arg(0).arg(1)))
- self.assertIsNot(e.arg(0).arg(1),
- E_base.arg(0).arg(0).arg(1))
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIs(type(e.arg(1).arg(1)),
+ type(E_base.arg(0).arg(1).arg(1)))
+ self.assertIsNot(e.arg(1).arg(1),
+ E_base.arg(0).arg(1).arg(1))
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E_base = m.P[1] + m.x[t+m.P[t+1]]
E = E_base.clone()
@@ -206,65 +272,282 @@ def test_clone(self):
e = E.arg(1)
self.assertIs(type(e), EXPR.GetItemExpression)
self.assertIsNot(e, E_base.arg(0))
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertIs(type(e.arg(0).arg(1)),
- type(E_base.arg(1).arg(0).arg(1)))
- self.assertIsNot(e.arg(0).arg(1),
- E_base.arg(1).arg(0).arg(1))
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIs(type(e.arg(1).arg(1)),
+ type(E_base.arg(1).arg(1).arg(1)))
+ self.assertIsNot(e.arg(1).arg(1),
+ E_base.arg(1).arg(1).arg(1))
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E_base = m.x[t+m.P[t+1]] + 1
E = E_base.clone()
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(0)
self.assertIs(type(e), EXPR.GetItemExpression)
self.assertIsNot(e, E_base.arg(0))
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertIs(type(e.arg(0).arg(1)),
- type(E_base.arg(0).arg(0).arg(1)))
- self.assertIsNot(e.arg(0).arg(1),
- E_base.arg(0).arg(0).arg(1))
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIs(type(e.arg(1).arg(1)),
+ type(E_base.arg(0).arg(1).arg(1)))
+ self.assertIsNot(e.arg(1).arg(1),
+ E_base.arg(0).arg(1).arg(1))
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
E_base = 1 + m.x[t+m.P[t+1]]
E = E_base.clone()
- self.assertTrue(isinstance(E, EXPR.SumExpressionBase))
+ self.assertIsInstance(E, EXPR.SumExpressionBase)
e = E.arg(-1)
self.assertIs(type(e), EXPR.GetItemExpression)
self.assertIsNot(e, E_base.arg(0))
- self.assertIs(e._base, m.x)
- self.assertEqual(e.nargs(), 1)
- self.assertTrue(isinstance(e.arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(0), t)
- self.assertIs(type(e.arg(0).arg(1)), EXPR.GetItemExpression)
- self.assertIs(type(e.arg(0).arg(1)),
- type(E_base.arg(-1).arg(0).arg(1)))
- self.assertIsNot(e.arg(0).arg(1),
- E_base.arg(-1).arg(0).arg(1))
- self.assertTrue(isinstance(e.arg(0).arg(1).arg(0), EXPR.SumExpressionBase))
- self.assertIs(e.arg(0).arg(1).arg(0).arg(0), t)
-
-
-class TestTemplate_expressionObjects\
- ( ExpressionObjectTester, unittest.TestCase ):
-
- def setUp(self):
- # This class tests the Pyomo 4.x expression trees
- ExpressionObjectTester.setUp(self)
-
- @unittest.expectedFailure
- def test_template_scalar_with_set(self):
- self._test_template_scalar_with_set()
+ self.assertEqual(e.nargs(), 2)
+ self.assertIs(e.arg(0), m.x)
+ self.assertIsInstance(e.arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(0), t)
+ self.assertIs(type(e.arg(1).arg(1)), EXPR.GetItemExpression)
+ self.assertIs(type(e.arg(1).arg(1)),
+ type(E_base.arg(-1).arg(1).arg(1)))
+ self.assertIsNot(e.arg(1).arg(1),
+ E_base.arg(-1).arg(1).arg(1))
+ self.assertIsInstance(e.arg(1).arg(1).arg(1), EXPR.SumExpressionBase)
+ self.assertIs(e.arg(1).arg(1).arg(1).arg(0), t)
+
+
+class TestTemplatizeRule(unittest.TestCase):
+ def test_simple_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.x = Var(m.I)
+ @m.Constraint(m.I)
+ def c(m, i):
+ return m.x[i] <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 1)
+ self.assertIs(indices[0]._set, m.I)
+ self.assertEqual(str(template), "x[_1] <= 0.0")
+ # Test that the RangeSet iterator was put back
+ self.assertEqual(list(m.I), list(range(1,4)))
+ # Evaluate the template
+ indices[0].set_value(2)
+ self.assertEqual(str(resolve_template(template)), 'x[2] <= 0.0')
+
+ def test_simple_abstract_rule(self):
+ m = AbstractModel()
+ m.I = RangeSet(3)
+ m.x = Var(m.I)
+ @m.Constraint(m.I)
+ def c(m, i):
+ return m.x[i] <= 0
+
+ # Note: the constraint can be abstract, but the Set/Var must
+ # have been constructed (otherwise accessing the Set raises an
+ # exception)
+
+ with self.assertRaisesRegex(
+ ValueError, ".*has not been constructed"):
+ template, indices = templatize_constraint(m.c)
+
+ m.I.construct()
+ m.x.construct()
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 1)
+ self.assertIs(indices[0]._set, m.I)
+ self.assertEqual(str(template), "x[_1] <= 0.0")
+
+ def test_simple_sum_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.J = RangeSet(3)
+ m.x = Var(m.I,m.J)
+ @m.Constraint(m.I)
+ def c(m, i):
+ return sum(m.x[i,j] for j in m.J) <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 1)
+ self.assertIs(indices[0]._set, m.I)
+ self.assertEqual(
+ template.to_string(verbose=True),
+ "templatesum(getitem(x, _1, _2), iter(_2, J)) <= 0.0"
+ )
+ self.assertEqual(
+ str(template),
+ "SUM(x[_1,_2] for _2 in J) <= 0.0"
+ )
+ # Evaluate the template
+ indices[0].set_value(2)
+ self.assertEqual(
+ str(resolve_template(template)),
+ 'x[2,1] + x[2,2] + x[2,3] <= 0.0'
+ )
+
+ def test_nested_sum_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.J = RangeSet(3)
+ m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]})
+ m.x = Var(m.I,m.J,[10,20,30])
+ @m.Constraint()
+ def c(m):
+ return sum( sum(m.x[i,j,k] for k in m.K[i])
+ for j in m.J for i in m.I) <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 0)
+ self.assertEqual(
+ template.to_string(verbose=True),
+ "templatesum("
+ "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), "
+ "iter(_1, J), iter(_2, I)) <= 0.0"
+ )
+ self.assertEqual(
+ str(template),
+ "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) "
+ "for _1 in J for _2 in I) <= 0.0"
+ )
+ # Evaluate the template
+ self.assertEqual(
+ str(resolve_template(template)),
+ 'x[1,1,10] + '
+ '(x[2,1,10] + x[2,1,20]) + '
+ '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + '
+ '(x[1,2,10]) + '
+ '(x[2,2,10] + x[2,2,20]) + '
+ '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + '
+ '(x[1,3,10]) + '
+ '(x[2,3,10] + x[2,3,20]) + '
+ '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0'
+ )
+
+ def test_multidim_nested_sum_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.J = RangeSet(3)
+ m.JI = m.J*m.I
+ m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]})
+ m.x = Var(m.I,m.J,[10,20,30])
+ @m.Constraint()
+ def c(m):
+ return sum( sum(m.x[i,j,k] for k in m.K[i])
+ for j,i in m.JI) <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 0)
+ self.assertEqual(
+ template.to_string(verbose=True),
+ "templatesum("
+ "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), "
+ "iter(_1, _2, JI)) <= 0.0"
+ )
+ self.assertEqual(
+ str(template),
+ "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) "
+ "for _1, _2 in JI) <= 0.0"
+ )
+ # Evaluate the template
+ self.assertEqual(
+ str(resolve_template(template)),
+ 'x[1,1,10] + '
+ '(x[2,1,10] + x[2,1,20]) + '
+ '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + '
+ '(x[1,2,10]) + '
+ '(x[2,2,10] + x[2,2,20]) + '
+ '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + '
+ '(x[1,3,10]) + '
+ '(x[2,3,10] + x[2,3,20]) + '
+ '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0'
+ )
+
+ def test_multidim_nested_sum_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.J = RangeSet(3)
+ m.JI = m.J*m.I
+ m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]})
+ m.x = Var(m.I,m.J,[10,20,30])
+ @m.Constraint()
+ def c(m):
+ return sum( sum(m.x[i,j,k] for k in m.K[i])
+ for j,i in m.JI) <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 0)
+ self.assertEqual(
+ template.to_string(verbose=True),
+ "templatesum("
+ "templatesum(getitem(x, _2, _1, _3), iter(_3, getitem(K, _2))), "
+ "iter(_1, _2, JI)) <= 0.0"
+ )
+ self.assertEqual(
+ str(template),
+ "SUM(SUM(x[_2,_1,_3] for _3 in K[_2]) "
+ "for _1, _2 in JI) <= 0.0"
+ )
+ # Evaluate the template
+ self.assertEqual(
+ str(resolve_template(template)),
+ 'x[1,1,10] + '
+ '(x[2,1,10] + x[2,1,20]) + '
+ '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + '
+ '(x[1,2,10]) + '
+ '(x[2,2,10] + x[2,2,20]) + '
+ '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + '
+ '(x[1,3,10]) + '
+ '(x[2,3,10] + x[2,3,20]) + '
+ '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0'
+ )
+
+ def test_multidim_nested_getattr_sum_rule(self):
+ m = ConcreteModel()
+ m.I = RangeSet(3)
+ m.J = RangeSet(3)
+ m.JI = m.J*m.I
+ m.K = Set(m.I, initialize={1:[10], 2:[10,20], 3:[10,20,30]})
+ m.x = Var(m.I,m.J,[10,20,30])
+ @m.Block(m.I)
+ def b(b, i):
+ b.K = RangeSet(10, 10*i, 10)
+ @m.Constraint()
+ def c(m):
+ return sum( sum(m.x[i,j,k] for k in m.b[i].K)
+ for j,i in m.JI) <= 0
+
+ template, indices = templatize_constraint(m.c)
+ self.assertEqual(len(indices), 0)
+ self.assertEqual(
+ template.to_string(verbose=True),
+ "templatesum("
+ "templatesum(getitem(x, _2, _1, _3), "
+ "iter(_3, getattr(getitem(b, _2), 'K'))), "
+ "iter(_1, _2, JI)) <= 0.0"
+ )
+ self.assertEqual(
+ str(template),
+ "SUM(SUM(x[_2,_1,_3] for _3 in b[_2].K) "
+ "for _1, _2 in JI) <= 0.0"
+ )
+ # Evaluate the template
+ self.assertEqual(
+ str(resolve_template(template)),
+ 'x[1,1,10] + '
+ '(x[2,1,10] + x[2,1,20]) + '
+ '(x[3,1,10] + x[3,1,20] + x[3,1,30]) + '
+ '(x[1,2,10]) + '
+ '(x[2,2,10] + x[2,2,20]) + '
+ '(x[3,2,10] + x[3,2,20] + x[3,2,30]) + '
+ '(x[1,3,10]) + '
+ '(x[2,3,10] + x[2,3,20]) + '
+ '(x[3,3,10] + x[3,3,20] + x[3,3,30]) <= 0.0'
+ )
class TestTemplateSubstitution(unittest.TestCase):
@@ -296,22 +579,22 @@ def diffeq(m,t, i):
self.assertEqual( len(_map), 3 )
idx1 = _GetItemIndexer( m.x[t,1] )
- self.assertIs( idx1._base, m.x )
self.assertEqual( idx1.nargs(), 2 )
+ self.assertIs( idx1.base, m.x )
self.assertIs( idx1.arg(0), t )
self.assertEqual( idx1.arg(1), 1 )
self.assertIn( idx1, _map )
idx2 = _GetItemIndexer( m.dxdt[t,2] )
- self.assertIs( idx2._base, m.dxdt )
self.assertEqual( idx2.nargs(), 2 )
+ self.assertIs( idx2.base, m.dxdt )
self.assertIs( idx2.arg(0), t )
self.assertEqual( idx2.arg(1), 2 )
self.assertIn( idx2, _map )
idx3 = _GetItemIndexer( m.x[t,3] )
- self.assertIs( idx3._base, m.x )
self.assertEqual( idx3.nargs(), 2 )
+ self.assertIs( idx3.base, m.x )
self.assertIs( idx3.arg(0), t )
self.assertEqual( idx3.arg(1), 3 )
self.assertIn( idx3, _map )
diff --git a/pyomo/core/tests/unit/test_units.py b/pyomo/core/tests/unit/test_units.py
index 8d522750986..8a2e647b9f1 100644
--- a/pyomo/core/tests/unit/test_units.py
+++ b/pyomo/core/tests/unit/test_units.py
@@ -13,22 +13,19 @@
import pyutilib.th as unittest
from pyomo.environ import *
+from pyomo.util.check_units import assert_units_consistent, assert_units_equivalent
from pyomo.core.base.template_expr import IndexTemplate
from pyomo.core.expr import inequality
-import pyomo.core.expr.current as expr
-from pyomo.core.base.units_container import InconsistentUnitsError, UnitsError
+import pyomo.core.expr.current as EXPR
+from pyomo.core.base.units_container import (
+ pint_available, InconsistentUnitsError, UnitsError,
+)
from six import StringIO
-try:
- import pint
- pint_available = True
-except ImportError:
- pint_available = False
-
def python_callback_function(arg1, arg2):
return 42.0
-@unittest.skipIf(pint_available is False, 'Testing units requires pint')
+@unittest.skipIf(not pint_available, 'Testing units requires pint')
class TestPyomoUnit(unittest.TestCase):
def test_PyomoUnit_NumericValueMethods(self):
@@ -62,25 +59,25 @@ def test_PyomoUnit_NumericValueMethods(self):
with self.assertRaises(TypeError):
x = int(kg)
- self.assertTrue(uc.check_units_consistency(kg < m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg > m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg <= m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg >= m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg == m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg + m.kg, uc))
- self.assertTrue(uc.check_units_consistency(kg - m.kg, uc))
+ assert_units_consistent(kg < m.kg)
+ assert_units_consistent(kg > m.kg)
+ assert_units_consistent(kg <= m.kg)
+ assert_units_consistent(kg >= m.kg)
+ assert_units_consistent(kg == m.kg)
+ assert_units_consistent(kg + m.kg)
+ assert_units_consistent(kg - m.kg)
with self.assertRaises(InconsistentUnitsError):
- uc.check_units_consistency(kg + 3)
+ assert_units_consistent(kg + 3)
with self.assertRaises(InconsistentUnitsError):
- uc.check_units_consistency(kg - 3)
+ assert_units_consistent(kg - 3)
with self.assertRaises(InconsistentUnitsError):
- uc.check_units_consistency(3 + kg)
+ assert_units_consistent(3 + kg)
with self.assertRaises(InconsistentUnitsError):
- uc.check_units_consistency(3 - kg)
+ assert_units_consistent(3 - kg)
# should not assert
# check __mul__
@@ -96,9 +93,8 @@ def test_PyomoUnit_NumericValueMethods(self):
# check rpow
x = 2 ** kg # creation is allowed, only fails when units are "checked"
- self.assertFalse(uc.check_units_consistency(x, allow_exceptions=False))
with self.assertRaises(UnitsError):
- uc.check_units_consistency(x)
+ assert_units_consistent(x)
x = kg
x += kg
@@ -148,7 +144,7 @@ def _get_check_units_ok(self, x, pyomo_units_container, str_check=None, expected
if expected_type is not None:
self.assertEqual(expected_type, type(x))
- self.assertTrue(pyomo_units_container.check_units_consistency(x))
+ assert_units_consistent(x)
if str_check is not None:
self.assertEqual(str_check, str(pyomo_units_container.get_units(x)))
else:
@@ -159,13 +155,8 @@ def _get_check_units_fail(self, x, pyomo_units_container, expected_type=None, ex
if expected_type is not None:
self.assertEqual(expected_type, type(x))
- self.assertFalse(pyomo_units_container.check_units_consistency(x, allow_exceptions=False))
- with self.assertRaises(expected_error):
- pyomo_units_container.check_units_consistency(x, allow_exceptions=True)
-
with self.assertRaises(expected_error):
- # allow_exceptions=True should also be the default
- pyomo_units_container.check_units_consistency(x)
+ assert_units_consistent(x)
# we also expect get_units to fail
with self.assertRaises(expected_error):
@@ -186,199 +177,219 @@ def test_get_check_units_on_all_expressions(self):
model.y = Var()
model.z = Var()
model.p = Param(initialize=42.0, mutable=True)
+ model.xkg = Var(units=kg)
+ model.ym = Var(units=m)
# test equality
- self._get_check_units_ok(3.0*kg == 1.0*kg, uc, 'kg', expr.EqualityExpression)
- self._get_check_units_fail(3.0*kg == 2.0*m, uc, expr.EqualityExpression)
+ self._get_check_units_ok(3.0*kg == 1.0*kg, uc, 'kg', EXPR.EqualityExpression)
+ self._get_check_units_fail(3.0*kg == 2.0*m, uc, EXPR.EqualityExpression)
# test inequality
- self._get_check_units_ok(3.0*kg <= 1.0*kg, uc, 'kg', expr.InequalityExpression)
- self._get_check_units_fail(3.0*kg <= 2.0*m, uc, expr.InequalityExpression)
- self._get_check_units_ok(3.0*kg >= 1.0*kg, uc, 'kg', expr.InequalityExpression)
- self._get_check_units_fail(3.0*kg >= 2.0*m, uc, expr.InequalityExpression)
+ self._get_check_units_ok(3.0*kg <= 1.0*kg, uc, 'kg', EXPR.InequalityExpression)
+ self._get_check_units_fail(3.0*kg <= 2.0*m, uc, EXPR.InequalityExpression)
+ self._get_check_units_ok(3.0*kg >= 1.0*kg, uc, 'kg', EXPR.InequalityExpression)
+ self._get_check_units_fail(3.0*kg >= 2.0*m, uc, EXPR.InequalityExpression)
# test RangedExpression
- self._get_check_units_ok(inequality(3.0*kg, 4.0*kg, 5.0*kg), uc, 'kg', expr.RangedExpression)
- self._get_check_units_fail(inequality(3.0*m, 4.0*kg, 5.0*kg), uc, expr.RangedExpression)
- self._get_check_units_fail(inequality(3.0*kg, 4.0*m, 5.0*kg), uc, expr.RangedExpression)
- self._get_check_units_fail(inequality(3.0*kg, 4.0*kg, 5.0*m), uc, expr.RangedExpression)
+ self._get_check_units_ok(inequality(3.0*kg, 4.0*kg, 5.0*kg), uc, 'kg', EXPR.RangedExpression)
+ self._get_check_units_fail(inequality(3.0*m, 4.0*kg, 5.0*kg), uc, EXPR.RangedExpression)
+ self._get_check_units_fail(inequality(3.0*kg, 4.0*m, 5.0*kg), uc, EXPR.RangedExpression)
+ self._get_check_units_fail(inequality(3.0*kg, 4.0*kg, 5.0*m), uc, EXPR.RangedExpression)
# test SumExpression, NPV_SumExpression
- self._get_check_units_ok(3.0*model.x*kg + 1.0*model.y*kg + 3.65*model.z*kg, uc, 'kg', expr.SumExpression)
- self._get_check_units_fail(3.0*model.x*kg + 1.0*model.y*m + 3.65*model.z*kg, uc, expr.SumExpression)
+ self._get_check_units_ok(3.0*model.x*kg + 1.0*model.y*kg + 3.65*model.z*kg, uc, 'kg', EXPR.SumExpression)
+ self._get_check_units_fail(3.0*model.x*kg + 1.0*model.y*m + 3.65*model.z*kg, uc, EXPR.SumExpression)
- self._get_check_units_ok(3.0*kg + 1.0*kg + 2.0*kg, uc, 'kg', expr.NPV_SumExpression)
- self._get_check_units_fail(3.0*kg + 1.0*kg + 2.0*m, uc, expr.NPV_SumExpression)
+ self._get_check_units_ok(3.0*kg + 1.0*kg + 2.0*kg, uc, 'kg', EXPR.NPV_SumExpression)
+ self._get_check_units_fail(3.0*kg + 1.0*kg + 2.0*m, uc, EXPR.NPV_SumExpression)
# test ProductExpression, NPV_ProductExpression
- self._get_check_units_ok(model.x*kg * model.y*m, uc, 'kg * m', expr.ProductExpression)
- self._get_check_units_ok(3.0*kg * 1.0*m, uc, 'kg * m', expr.NPV_ProductExpression)
- self._get_check_units_ok(3.0*kg*m, uc, 'kg * m', expr.NPV_ProductExpression)
+ self._get_check_units_ok(model.x*kg * model.y*m, uc, 'kg * m', EXPR.ProductExpression)
+ self._get_check_units_ok(3.0*kg * 1.0*m, uc, 'kg * m', EXPR.NPV_ProductExpression)
+ self._get_check_units_ok(3.0*kg*m, uc, 'kg * m', EXPR.NPV_ProductExpression)
# I don't think that there are combinations that can "fail" for products
# test MonomialTermExpression
- self._get_check_units_ok(model.x*kg, uc, 'kg', expr.MonomialTermExpression)
+ self._get_check_units_ok(model.x*kg, uc, 'kg', EXPR.MonomialTermExpression)
# test DivisionExpression, NPV_DivisionExpression
- self._get_check_units_ok(1.0/(model.x*kg), uc, '1 / kg', expr.DivisionExpression)
- self._get_check_units_ok(2.0/kg, uc, '1 / kg', expr.NPV_DivisionExpression)
- self._get_check_units_ok((model.x*kg)/1.0, uc, 'kg', expr.MonomialTermExpression)
- self._get_check_units_ok(kg/2.0, uc, 'kg', expr.NPV_DivisionExpression)
- self._get_check_units_ok(model.y*m/(model.x*kg), uc, 'm / kg', expr.DivisionExpression)
- self._get_check_units_ok(m/kg, uc, 'm / kg', expr.NPV_DivisionExpression)
+ self._get_check_units_ok(1.0/(model.x*kg), uc, '1 / kg', EXPR.DivisionExpression)
+ self._get_check_units_ok(2.0/kg, uc, '1 / kg', EXPR.NPV_DivisionExpression)
+ self._get_check_units_ok((model.x*kg)/1.0, uc, 'kg', EXPR.MonomialTermExpression)
+ self._get_check_units_ok(kg/2.0, uc, 'kg', EXPR.NPV_DivisionExpression)
+ self._get_check_units_ok(model.y*m/(model.x*kg), uc, 'm / kg', EXPR.DivisionExpression)
+ self._get_check_units_ok(m/kg, uc, 'm / kg', EXPR.NPV_DivisionExpression)
# I don't think that there are combinations that can "fail" for products
# test PowExpression, NPV_PowExpression
# ToDo: fix the str representation to combine the powers or the expression system
- self._get_check_units_ok((model.x*kg**2)**3, uc, 'kg ** 6', expr.PowExpression) # would want this to be kg**6
- self._get_check_units_fail(kg**model.x, uc, expr.PowExpression, UnitsError)
- self._get_check_units_fail(model.x**kg, uc, expr.PowExpression, UnitsError)
- self._get_check_units_ok(kg**2, uc, 'kg ** 2', expr.NPV_PowExpression)
- self._get_check_units_fail(3.0**kg, uc, expr.NPV_PowExpression, UnitsError)
+ self._get_check_units_ok((model.x*kg**2)**3, uc, 'kg ** 6', EXPR.PowExpression) # would want this to be kg**6
+ self._get_check_units_fail(kg**model.x, uc, EXPR.PowExpression, UnitsError)
+ self._get_check_units_fail(model.x**kg, uc, EXPR.PowExpression, UnitsError)
+ self._get_check_units_ok(kg**2, uc, 'kg ** 2', EXPR.NPV_PowExpression)
+ self._get_check_units_fail(3.0**kg, uc, EXPR.NPV_PowExpression, UnitsError)
# test NegationExpression, NPV_NegationExpression
- self._get_check_units_ok(-(kg*model.x*model.y), uc, 'kg', expr.NegationExpression)
- self._get_check_units_ok(-kg, uc, 'kg', expr.NPV_NegationExpression)
+ self._get_check_units_ok(-(kg*model.x*model.y), uc, 'kg', EXPR.NegationExpression)
+ self._get_check_units_ok(-kg, uc, 'kg', EXPR.NPV_NegationExpression)
# don't think there are combinations that fan "fail" for negation
# test AbsExpression, NPV_AbsExpression
- self._get_check_units_ok(abs(kg*model.x), uc, 'kg', expr.AbsExpression)
- self._get_check_units_ok(abs(kg), uc, 'kg', expr.NPV_AbsExpression)
+ self._get_check_units_ok(abs(kg*model.x), uc, 'kg', EXPR.AbsExpression)
+ self._get_check_units_ok(abs(kg), uc, 'kg', EXPR.NPV_AbsExpression)
# don't think there are combinations that fan "fail" for abs
# test the different UnaryFunctionExpression / NPV_UnaryFunctionExpression types
# log
- self._get_check_units_ok(log(3.0*model.x), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(log(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(log(3.0*model.p), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(log(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(log(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(log(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(log(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(log(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# log10
- self._get_check_units_ok(log10(3.0*model.x), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(log10(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(log10(3.0*model.p), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(log10(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(log10(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(log10(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(log10(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(log10(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# sin
- self._get_check_units_ok(sin(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(sin(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(sin(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(sin(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(sin(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(sin(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(sin(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(sin(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(sin(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(sin(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# cos
- self._get_check_units_ok(cos(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(cos(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(cos(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(cos(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(cos(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(cos(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(cos(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(cos(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(cos(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(cos(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# tan
- self._get_check_units_ok(tan(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(tan(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(tan(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(tan(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(tan(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(tan(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(tan(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(tan(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(tan(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(tan(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# sin
- self._get_check_units_ok(sinh(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(sinh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(sinh(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(sinh(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(sinh(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(sinh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(sinh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(sinh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(sinh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(sinh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# cos
- self._get_check_units_ok(cosh(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(cosh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(cosh(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(cosh(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(cosh(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(cosh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(cosh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(cosh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(cosh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(cosh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# tan
- self._get_check_units_ok(tanh(3.0*model.x*uc.radians), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(tanh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_fail(tanh(3.0*kg*model.x*uc.kg), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(tanh(3.0*model.p*uc.radians), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(tanh(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(tanh(3.0*model.x*uc.radians), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(tanh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_fail(tanh(3.0*kg*model.x*uc.kg), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(tanh(3.0*model.p*uc.radians), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(tanh(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# asin
- self._get_check_units_ok(asin(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(asin(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(asin(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(asin(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(asin(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(asin(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(asin(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(asin(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# acos
- self._get_check_units_ok(acos(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(acos(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(acos(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(acos(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(acos(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(acos(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(acos(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(acos(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# atan
- self._get_check_units_ok(atan(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(atan(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(atan(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(atan(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(atan(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(atan(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(atan(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(atan(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# exp
- self._get_check_units_ok(exp(3.0*model.x), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_fail(exp(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(exp(3.0*model.p), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(exp(3.0*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(exp(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(exp(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(exp(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(exp(3.0*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# sqrt
- self._get_check_units_ok(sqrt(3.0*model.x), uc, None, expr.UnaryFunctionExpression)
- self._get_check_units_ok(sqrt(3.0*model.x*kg**2), uc, 'kg', expr.UnaryFunctionExpression)
- self._get_check_units_ok(sqrt(3.0*model.x*kg), uc, 'kg ** 0.5', expr.UnaryFunctionExpression)
- self._get_check_units_ok(sqrt(3.0*model.p), uc, None, expr.NPV_UnaryFunctionExpression)
- self._get_check_units_ok(sqrt(3.0*model.p*kg**2), uc, 'kg', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_ok(sqrt(3.0*model.p*kg), uc, 'kg ** 0.5', expr.NPV_UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.x), uc, None, EXPR.UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.x*kg**2), uc, 'kg', EXPR.UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.x*kg), uc, 'kg ** 0.5', EXPR.UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.p), uc, None, EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.p*kg**2), uc, 'kg', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_ok(sqrt(3.0*model.p*kg), uc, 'kg ** 0.5', EXPR.NPV_UnaryFunctionExpression)
# asinh
- self._get_check_units_ok(asinh(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(asinh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(asinh(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(asinh(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(asinh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(asinh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(asinh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(asinh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# acosh
- self._get_check_units_ok(acosh(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(acosh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(acosh(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(acosh(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(acosh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(acosh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(acosh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(acosh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# atanh
- self._get_check_units_ok(atanh(3.0*model.x), uc, 'rad', expr.UnaryFunctionExpression)
- self._get_check_units_fail(atanh(3.0*kg*model.x), uc, expr.UnaryFunctionExpression, UnitsError)
- self._get_check_units_ok(atanh(3.0*model.p), uc, 'rad', expr.NPV_UnaryFunctionExpression)
- self._get_check_units_fail(atanh(3.0*model.p*kg), uc, expr.NPV_UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(atanh(3.0*model.x), uc, 'rad', EXPR.UnaryFunctionExpression)
+ self._get_check_units_fail(atanh(3.0*kg*model.x), uc, EXPR.UnaryFunctionExpression, UnitsError)
+ self._get_check_units_ok(atanh(3.0*model.p), uc, 'rad', EXPR.NPV_UnaryFunctionExpression)
+ self._get_check_units_fail(atanh(3.0*model.p*kg), uc, EXPR.NPV_UnaryFunctionExpression, UnitsError)
# ceil
- self._get_check_units_ok(ceil(kg*model.x), uc, 'kg', expr.UnaryFunctionExpression)
- self._get_check_units_ok(ceil(kg), uc, 'kg', expr.NPV_UnaryFunctionExpression)
+ self._get_check_units_ok(ceil(kg*model.x), uc, 'kg', EXPR.UnaryFunctionExpression)
+ self._get_check_units_ok(ceil(kg), uc, 'kg', EXPR.NPV_UnaryFunctionExpression)
# don't think there are combinations that fan "fail" for ceil
# floor
- self._get_check_units_ok(floor(kg*model.x), uc, 'kg', expr.UnaryFunctionExpression)
- self._get_check_units_ok(floor(kg), uc, 'kg', expr.NPV_UnaryFunctionExpression)
+ self._get_check_units_ok(floor(kg*model.x), uc, 'kg', EXPR.UnaryFunctionExpression)
+ self._get_check_units_ok(floor(kg), uc, 'kg', EXPR.NPV_UnaryFunctionExpression)
# don't think there are combinations that fan "fail" for floor
# test Expr_ifExpression
# consistent if, consistent then/else
- self._get_check_units_ok(expr.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x*kg, ELSE=model.y*kg),
- uc, 'kg', expr.Expr_ifExpression)
+ self._get_check_units_ok(EXPR.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x*kg, ELSE=model.y*kg),
+ uc, 'kg', EXPR.Expr_ifExpression)
# unitless if, consistent then/else
- self._get_check_units_ok(expr.Expr_if(IF=model.x >= 2.0, THEN=model.x*kg, ELSE=model.y*kg),
- uc, 'kg', expr.Expr_ifExpression)
+ self._get_check_units_ok(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x*kg, ELSE=model.y*kg),
+ uc, 'kg', EXPR.Expr_ifExpression)
# consistent if, unitless then/else
- self._get_check_units_ok(expr.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x, ELSE=model.x),
- uc, None, expr.Expr_ifExpression)
+ self._get_check_units_ok(EXPR.Expr_if(IF=model.x*kg + kg >= 2.0*kg, THEN=model.x, ELSE=model.x),
+ uc, None, EXPR.Expr_ifExpression)
# inconsistent then/else
- self._get_check_units_fail(expr.Expr_if(IF=model.x >= 2.0, THEN=model.x*m, ELSE=model.y*kg),
- uc, expr.Expr_ifExpression)
+ self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.x*m, ELSE=model.y*kg),
+ uc, EXPR.Expr_ifExpression)
# inconsistent then/else NPV
- self._get_check_units_fail(expr.Expr_if(IF=model.x >= 2.0, THEN=model.p*m, ELSE=model.p*kg),
- uc, expr.Expr_ifExpression)
+ self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=model.p*m, ELSE=model.p*kg),
+ uc, EXPR.Expr_ifExpression)
# inconsistent then/else NPV units only
- self._get_check_units_fail(expr.Expr_if(IF=model.x >= 2.0, THEN=m, ELSE=kg),
- uc, expr.Expr_ifExpression)
+ self._get_check_units_fail(EXPR.Expr_if(IF=model.x >= 2.0, THEN=m, ELSE=kg),
+ uc, EXPR.Expr_ifExpression)
- # test IndexTemplate and GetItemExpression
+ # test EXPR.IndexTemplate and GetItemExpression
model.S = Set()
- i = IndexTemplate(model.S)
- j = IndexTemplate(model.S)
- self._get_check_units_ok(i, uc, None, IndexTemplate)
+ i = EXPR.IndexTemplate(model.S)
+ j = EXPR.IndexTemplate(model.S)
+ self._get_check_units_ok(i, uc, None, EXPR.IndexTemplate)
model.mat = Var(model.S, model.S)
- self._get_check_units_ok(model.mat[i,j+1], uc, None, expr.GetItemExpression)
+ self._get_check_units_ok(model.mat[i,j+1], uc, None, EXPR.GetItemExpression)
# test ExternalFunctionExpression, NPV_ExternalFunctionExpression
model.ef = ExternalFunction(python_callback_function)
- self._get_check_units_ok(model.ef(model.x, model.y), uc, None, expr.ExternalFunctionExpression)
- self._get_check_units_ok(model.ef(1.0, 2.0), uc, None, expr.NPV_ExternalFunctionExpression)
- self._get_check_units_fail(model.ef(model.x*kg, model.y), uc, expr.ExternalFunctionExpression, UnitsError)
- self._get_check_units_fail(model.ef(2.0*kg, 1.0), uc, expr.NPV_ExternalFunctionExpression, UnitsError)
+ self._get_check_units_ok(model.ef(model.x, model.y), uc, None, EXPR.ExternalFunctionExpression)
+ self._get_check_units_ok(model.ef(1.0, 2.0), uc, None, EXPR.NPV_ExternalFunctionExpression)
+ self._get_check_units_fail(model.ef(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError)
+ self._get_check_units_fail(model.ef(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError)
+
+ # test ExternalFunctionExpression, NPV_ExternalFunctionExpression
+ model.ef2 = ExternalFunction(python_callback_function, units=uc.kg)
+ self._get_check_units_ok(model.ef2(model.x, model.y), uc, 'kg', EXPR.ExternalFunctionExpression)
+ self._get_check_units_ok(model.ef2(1.0, 2.0), uc, 'kg', EXPR.NPV_ExternalFunctionExpression)
+ self._get_check_units_fail(model.ef2(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError)
+ self._get_check_units_fail(model.ef2(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError)
+
+ # test ExternalFunctionExpression, NPV_ExternalFunctionExpression
+ model.ef3 = ExternalFunction(python_callback_function, units=uc.kg, arg_units=[uc.kg, uc.m])
+ self._get_check_units_fail(model.ef3(model.x, model.y), uc, EXPR.ExternalFunctionExpression)
+ self._get_check_units_fail(model.ef3(1.0, 2.0), uc, EXPR.NPV_ExternalFunctionExpression)
+ self._get_check_units_fail(model.ef3(model.x*kg, model.y), uc, EXPR.ExternalFunctionExpression, UnitsError)
+ self._get_check_units_fail(model.ef3(2.0*kg, 1.0), uc, EXPR.NPV_ExternalFunctionExpression, UnitsError)
+ self._get_check_units_ok(model.ef3(2.0*kg, 1.0*uc.m), uc, 'kg', EXPR.NPV_ExternalFunctionExpression)
+ self._get_check_units_ok(model.ef3(model.x*kg, model.y*m), uc, 'kg', EXPR.ExternalFunctionExpression)
+ self._get_check_units_ok(model.ef3(model.xkg, model.ym), uc, 'kg', EXPR.ExternalFunctionExpression)
+ self._get_check_units_fail(model.ef3(model.ym, model.xkg), uc, EXPR.ExternalFunctionExpression, InconsistentUnitsError)
# @unittest.skip('Skipped testing LinearExpression since StreamBasedExpressionVisitor does not handle LinearExpressions')
def test_linear_expression(self):
@@ -390,19 +401,27 @@ def test_linear_expression(self):
# test LinearExpression
# ToDo: Once this test is working correctly, this code should be moved to the test above
model.vv = Var(['A', 'B', 'C'])
- self._get_check_units_ok(sum_product(model.vv), uc, None, expr.LinearExpression)
+ self._get_check_units_ok(sum_product(model.vv), uc, None, EXPR.LinearExpression)
linex1 = sum_product(model.vv, {'A': kg, 'B': kg, 'C':kg}, index=['A', 'B', 'C'])
- self._get_check_units_ok(linex1, uc, 'kg', expr.LinearExpression)
+ self._get_check_units_ok(linex1, uc, 'kg', EXPR.LinearExpression)
linex2 = sum_product(model.vv, {'A': kg, 'B': m, 'C':kg}, index=['A', 'B', 'C'])
- self._get_check_units_fail(linex2, uc, expr.LinearExpression)
+ self._get_check_units_fail(linex2, uc, EXPR.LinearExpression)
+
+ def test_named_expression(self):
+ uc = units
+ m = ConcreteModel()
+ m.x = Var(units=uc.kg)
+ m.y = Var(units=uc.m)
+ m.e = Expression(expr=m.x/m.y)
+ self.assertEqual(str(uc.get_units(m.e)), 'kg / m')
def test_dimensionless(self):
uc = units
kg = uc.kg
dless = uc.dimensionless
- self._get_check_units_ok(2.0 == 2.0*dless, uc, None, expr.EqualityExpression)
+ self._get_check_units_ok(2.0 == 2.0*dless, uc, None, EXPR.EqualityExpression)
self.assertEqual(uc.get_units(2.0*dless), uc.get_units(2.0))
self.assertEqual(None, uc.get_units(2.0*dless))
self.assertEqual(None, uc.get_units(kg/kg))
@@ -429,27 +448,103 @@ def test_temperatures(self):
R_str = R.getname()
#self.assertIn(R_str, ['rankine', '°R'])
- self._get_check_units_ok(2.0*R + 3.0*R, uc, R_str, expr.NPV_SumExpression)
- self._get_check_units_ok(2.0*K + 3.0*K, uc, 'K', expr.NPV_SumExpression)
+ self._get_check_units_ok(2.0*R + 3.0*R, uc, R_str, EXPR.NPV_SumExpression)
+ self._get_check_units_ok(2.0*K + 3.0*K, uc, 'K', EXPR.NPV_SumExpression)
ex = 2.0*delta_degC + 3.0*delta_degC + 1.0*delta_degC
- self.assertEqual(type(ex), expr.NPV_SumExpression)
- self.assertTrue(uc.check_units_consistency(ex))
+ self.assertEqual(type(ex), EXPR.NPV_SumExpression)
+ assert_units_consistent(ex)
ex = 2.0*delta_degF + 3.0*delta_degF
- self.assertEqual(type(ex), expr.NPV_SumExpression)
- self.assertTrue(uc.check_units_consistency(ex))
+ self.assertEqual(type(ex), EXPR.NPV_SumExpression)
+ assert_units_consistent(ex)
- self._get_check_units_fail(2.0*K + 3.0*R, uc, expr.NPV_SumExpression)
- self._get_check_units_fail(2.0*delta_degC + 3.0*delta_degF, uc, expr.NPV_SumExpression)
+ self._get_check_units_fail(2.0*K + 3.0*R, uc, EXPR.NPV_SumExpression)
+ self._get_check_units_fail(2.0*delta_degC + 3.0*delta_degF, uc, EXPR.NPV_SumExpression)
+
+ self.assertAlmostEqual(uc.convert_temp_K_to_C(323.15), 50.0, places=5)
+ self.assertAlmostEqual(uc.convert_temp_C_to_K(50.0), 323.15, places=5)
+ self.assertAlmostEqual(uc.convert_temp_R_to_F(509.67), 50.0, places=5)
+ self.assertAlmostEqual(uc.convert_temp_F_to_R(50.0), 509.67, places=5)
+
+ with self.assertRaises(UnitsError):
+ uc.convert_temp_K_to_C(ex)
def test_module_example(self):
- from pyomo.environ import ConcreteModel, Var, Objective, units # import components and 'units' instance
+ from pyomo.environ import ConcreteModel, Var, Objective, units
model = ConcreteModel()
model.acc = Var()
model.obj = Objective(expr=(model.acc*units.m/units.s**2 - 9.81*units.m/units.s**2)**2)
self.assertEqual('m ** 2 / s ** 4', str(units.get_units(model.obj.expr)))
+ def test_convert_value(self):
+ u = units
+ x = 0.4535923
+ expected_lb_value = 1.0
+ actual_lb_value = u.convert_value(num_value=x, from_units=u.kg, to_units=u.lb)
+ self.assertAlmostEqual(expected_lb_value, actual_lb_value, places=5)
+ actual_lb_value = u.convert_value(num_value=value(x*u.kg), from_units=u.kg, to_units=u.lb)
+ self.assertAlmostEqual(expected_lb_value, actual_lb_value, places=5)
+
+ with self.assertRaises(UnitsError):
+ # cannot convert from meters to pounds
+ actual_lb_value = u.convert_value(num_value=x, from_units=u.meters, to_units=u.lb)
+
+ with self.assertRaises(UnitsError):
+ # num_value must be a native numerical type
+ actual_lb_value = u.convert_value(num_value=x*u.kg, from_units=u.kg, to_units=u.lb)
+
+ def test_convert(self):
+ u = units
+ m = ConcreteModel()
+ m.dx = Var(units=u.m, initialize=0.10188943773836046)
+ m.dy = Var(units=u.m, initialize=0.0)
+ m.vx = Var(units=u.m/u.s, initialize=0.7071067769802851)
+ m.vy = Var(units=u.m/u.s, initialize=0.7071067769802851)
+ m.t = Var(units=u.min, bounds=(1e-5,10.0), initialize=0.0024015570927624456)
+ m.theta = Var(bounds=(0, 0.49*3.14), initialize=0.7853981693583533, units=u.radians)
+ m.a = Param(initialize=-32.2, units=u.ft/u.s**2)
+
+ m.obj = Objective(expr = m.dx, sense=maximize)
+ m.vx_con = Constraint(expr = m.vx == 1.0*u.m/u.s*cos(m.theta))
+ m.vy_con = Constraint(expr = m.vy == 1.0*u.m/u.s*sin(m.theta))
+ m.dx_con = Constraint(expr = m.dx == m.vx*u.convert(m.t, to_units=u.s))
+ m.dy_con = Constraint(expr = m.dy == m.vy*u.convert(m.t, to_units=u.s)
+ + 0.5*(u.convert(m.a, to_units=u.m/u.s**2))*(u.convert(m.t, to_units=u.s))**2)
+ m.ground = Constraint(expr = m.dy == 0)
+
+ with self.assertRaises(UnitsError):
+ u.convert(m.a, to_units=u.kg)
+
+ self.assertAlmostEqual(value(m.obj), 0.10188943773836046, places=5)
+ self.assertAlmostEqual(value(m.vx_con.body), 0.0, places=5)
+ self.assertAlmostEqual(value(m.vy_con.body), 0.0, places=5)
+ self.assertAlmostEqual(value(m.dx_con.body), 0.0, places=5)
+ self.assertAlmostEqual(value(m.dy_con.body), 0.0, places=5)
+ self.assertAlmostEqual(value(m.ground.body), 0.0, places=5)
+
+ def test_convert_dimensionless(self):
+ u = units
+ m = ConcreteModel()
+ m.x = Var()
+ foo = u.convert(m.x, to_units=u.dimensionless)
+ foo = u.convert(m.x, to_units=None)
+ foo = u.convert(m.x, to_units=1.0)
+ with self.assertRaises(InconsistentUnitsError):
+ foo = u.convert(m.x, to_units=u.kg)
+ m.y = Var(units=u.kg)
+ with self.assertRaises(InconsistentUnitsError):
+ foo = u.convert(m.y, to_units=u.dimensionless)
+ with self.assertRaises(InconsistentUnitsError):
+ foo = u.convert(m.y, to_units=None)
+ with self.assertRaises(InconsistentUnitsError):
+ foo = u.convert(m.y, to_units=1.0)
+
+ def test_usd(self):
+ u = units
+ u.load_definitions_from_strings(["USD = [currency]"])
+ expr = 3.0*u.USD
+ self._get_check_units_ok(expr, u, 'USD')
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/core/tests/unit/test_visitor.py b/pyomo/core/tests/unit/test_visitor.py
index 734f1ede225..9c3227f88be 100644
--- a/pyomo/core/tests/unit/test_visitor.py
+++ b/pyomo/core/tests/unit/test_visitor.py
@@ -26,6 +26,7 @@
from pyomo.environ import *
import pyomo.kernel
+from pyomo.common.log import LoggingIntercept
from pyomo.core.expr.numvalue import (
native_types, nonpyomo_leaf_types, NumericConstant, as_numeric,
is_potentially_variable,
@@ -55,7 +56,7 @@
from pyomo.core.base.var import SimpleVar
from pyomo.core.base.param import _ParamData, SimpleParam
from pyomo.core.base.label import *
-from pyomo.core.base.template_expr import IndexTemplate
+from pyomo.core.expr.template_expr import IndexTemplate
from pyomo.core.expr.expr_errors import TemplateExpressionError
@@ -730,7 +731,7 @@ def test_default(self):
self.assertEqual(ans, ref)
def test_beforeChild(self):
- def before(node, child):
+ def before(node, child, child_idx):
if type(child) in nonpyomo_leaf_types \
or not child.is_expression_type():
return False, [child]
@@ -752,10 +753,40 @@ def before(node, child):
ref = []
self.assertEqual(str(ans), str(ref))
+ def test_old_beforeChild(self):
+ def before(node, child):
+ if type(child) in nonpyomo_leaf_types \
+ or not child.is_expression_type():
+ return False, [child]
+ os = six.StringIO()
+ with LoggingIntercept(os, 'pyomo'):
+ walker = StreamBasedExpressionVisitor(beforeChild=before)
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the beforeChild() "
+ "method", os.getvalue().replace('\n',' '))
+
+ ans = walker.walk_expression(self.e)
+ m = self.m
+ ref = [
+ [[m.x], [2]],
+ [m.y],
+ [[m.z], [[m.x], [m.y]]]
+ ]
+ self.assertEqual(str(ans), str(ref))
+
+ ans = walker.walk_expression(m.x)
+ ref = []
+ self.assertEqual(str(ans), str(ref))
+
+ ans = walker.walk_expression(2)
+ ref = []
+ self.assertEqual(str(ans), str(ref))
+
def test_reduce_in_accept(self):
def enter(node):
return None, 1
- def accept(node, data, child_result):
+ def accept(node, data, child_result, child_idx):
return data + child_result
walker = StreamBasedExpressionVisitor(
enterNode=enter, acceptChildResult=accept)
@@ -878,6 +909,24 @@ def exit(node, data):
self.assertEqual(str(ans), str(ref))
def test_beforeChild_acceptChildResult_afterChild(self):
+ counts = [0,0,0]
+ def before(node, child, child_idx):
+ counts[0] += 1
+ if type(child) in nonpyomo_leaf_types \
+ or not child.is_expression_type():
+ return False, None
+ def accept(node, data, child_result, child_idx):
+ counts[1] += 1
+ def after(node, child, child_idx):
+ counts[2] += 1
+ walker = StreamBasedExpressionVisitor(
+ beforeChild=before, acceptChildResult=accept, afterChild=after)
+ ans = walker.walk_expression(self.e)
+ m = self.m
+ self.assertEqual(ans, None)
+ self.assertEquals(counts, [9,9,9])
+
+ def test_OLD_beforeChild_acceptChildResult_afterChild(self):
counts = [0,0,0]
def before(node, child):
counts[0] += 1
@@ -888,8 +937,24 @@ def accept(node, data, child_result):
counts[1] += 1
def after(node, child):
counts[2] += 1
- walker = StreamBasedExpressionVisitor(
- beforeChild=before, acceptChildResult=accept, afterChild=after)
+
+ os = six.StringIO()
+ with LoggingIntercept(os, 'pyomo'):
+ walker = StreamBasedExpressionVisitor(
+ beforeChild=before, acceptChildResult=accept, afterChild=after)
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "beforeChild() method", os.getvalue().replace('\n',' '))
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "acceptChildResult() method", os.getvalue().replace('\n',' '))
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "afterChild() method", os.getvalue().replace('\n',' '))
+
ans = walker.walk_expression(self.e)
m = self.m
self.assertEqual(ans, None)
@@ -897,11 +962,11 @@ def after(node, child):
def test_enterNode_acceptChildResult_beforeChild(self):
ans = []
- def before(node, child):
+ def before(node, child, child_idx):
if type(child) in nonpyomo_leaf_types \
or not child.is_expression_type():
return False, child
- def accept(node, data, child_result):
+ def accept(node, data, child_result, child_idx):
if data is not child_result:
data.append(child_result)
return data
@@ -916,11 +981,11 @@ def enter(node):
def test_finalize(self):
ans = []
- def before(node, child):
+ def before(node, child, child_idx):
if type(child) in nonpyomo_leaf_types \
or not child.is_expression_type():
return False, child
- def accept(node, data, child_result):
+ def accept(node, data, child_result, child_idx):
if data is not child_result:
data.append(child_result)
return data
@@ -945,11 +1010,11 @@ def enter(node):
ans.append("Enter %s" % (name(node)))
def exit(node, data):
ans.append("Exit %s" % (name(node)))
- def before(node, child):
+ def before(node, child, child_idx):
ans.append("Before %s (from %s)" % (name(child), name(node)))
- def accept(node, data, child_result):
+ def accept(node, data, child_result, child_idx):
ans.append("Accept into %s" % (name(node)))
- def after(node, child):
+ def after(node, child, child_idx):
ans.append("After %s (from %s)" % (name(child), name(node)))
def finalize(result):
ans.append("Finalize")
@@ -1007,6 +1072,81 @@ def finalize(result):
Finalize""")
def test_all_derived_class(self):
+ def name(x):
+ if type(x) in nonpyomo_leaf_types:
+ return str(x)
+ else:
+ return x.name
+ class all_callbacks(StreamBasedExpressionVisitor):
+ def __init__(self):
+ self.ans = []
+ super(all_callbacks, self).__init__()
+ def enterNode(self, node):
+ self.ans.append("Enter %s" % (name(node)))
+ def exitNode(self, node, data):
+ self.ans.append("Exit %s" % (name(node)))
+ def beforeChild(self, node, child, child_idx):
+ self.ans.append("Before %s (from %s)"
+ % (name(child), name(node)))
+ def acceptChildResult(self, node, data, child_result, child_idx):
+ self.ans.append("Accept into %s" % (name(node)))
+ def afterChild(self, node, child, child_idx):
+ self.ans.append("After %s (from %s)"
+ % (name(child), name(node)))
+ def finalizeResult(self, result):
+ self.ans.append("Finalize")
+ walker = all_callbacks()
+ self.assertIsNone( walker.walk_expression(self.e) )
+ self.assertEqual("\n".join(walker.ans),"""Enter sum
+Before pow (from sum)
+Enter pow
+Before x (from pow)
+Enter x
+Exit x
+Accept into pow
+After x (from pow)
+Before 2 (from pow)
+Enter 2
+Exit 2
+Accept into pow
+After 2 (from pow)
+Exit pow
+Accept into sum
+After pow (from sum)
+Before y (from sum)
+Enter y
+Exit y
+Accept into sum
+After y (from sum)
+Before prod (from sum)
+Enter prod
+Before z (from prod)
+Enter z
+Exit z
+Accept into prod
+After z (from prod)
+Before sum (from prod)
+Enter sum
+Before x (from sum)
+Enter x
+Exit x
+Accept into sum
+After x (from sum)
+Before y (from sum)
+Enter y
+Exit y
+Accept into sum
+After y (from sum)
+Exit sum
+Accept into prod
+After sum (from prod)
+Exit prod
+Accept into sum
+After prod (from sum)
+Exit sum
+Finalize""")
+
+ def test_all_derived_class_oldAPI(self):
def name(x):
if type(x) in nonpyomo_leaf_types:
return str(x)
@@ -1030,7 +1170,22 @@ def afterChild(self, node, child):
% (name(child), name(node)))
def finalizeResult(self, result):
self.ans.append("Finalize")
- walker = all_callbacks()
+ os = six.StringIO()
+ with LoggingIntercept(os, 'pyomo'):
+ walker = all_callbacks()
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "beforeChild() method", os.getvalue().replace('\n',' '))
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "acceptChildResult() method", os.getvalue().replace('\n',' '))
+ self.assertIn(
+ "Note that the API for the StreamBasedExpressionVisitor "
+ "has changed to include the child index for the "
+ "afterChild() method", os.getvalue().replace('\n',' '))
+
self.assertIsNone( walker.walk_expression(self.e) )
self.assertEqual("\n".join(walker.ans),"""Enter sum
Before pow (from sum)
diff --git a/pyomo/core/tests/unit/test_xfrm_discrete_vars.py b/pyomo/core/tests/unit/test_xfrm_discrete_vars.py
index 466edecc68e..713a1778d10 100644
--- a/pyomo/core/tests/unit/test_xfrm_discrete_vars.py
+++ b/pyomo/core/tests/unit/test_xfrm_discrete_vars.py
@@ -39,7 +39,7 @@ def test_solve_relax_transform(self):
self.assertEqual(len(m.dual), 0)
TransformationFactory('core.relax_discrete').apply_to(m)
- self.assertIs(m.x.domain, NonNegativeReals)
+ self.assertIs(m.x.domain, Reals)
self.assertEqual(m.x.lb, 0)
self.assertEqual(m.x.ub, 1)
s.solve(m)
diff --git a/pyomo/core/tests/unit/varpprint.txt b/pyomo/core/tests/unit/varpprint.txt
index 07f7c6d7e18..bd49b881417 100644
--- a/pyomo/core/tests/unit/varpprint.txt
+++ b/pyomo/core/tests/unit/varpprint.txt
@@ -1,10 +1,13 @@
3 Set Declarations
- a : Dim=0, Dimen=1, Size=3, Domain=None, Ordered=False, Bounds=(1, 3)
- [1, 2, 3]
- cl_index : Dim=0, Dimen=1, Size=10, Domain=None, Ordered=False, Bounds=None
- [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
- o3_index : Dim=0, Dimen=2, Size=9, Domain=None, Ordered=False, Bounds=None
- Virtual
+ a : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 3 : {1, 2, 3}
+ cl_index : Size=1, Index=None, Ordered=Insertion
+ Key : Dimen : Domain : Size : Members
+ None : 1 : Any : 10 : {1, 2, 3, 4, 5, 6, 7, 8, 9, 10}
+ o3_index : Size=1, Index=None, Ordered=True
+ Key : Dimen : Domain : Size : Members
+ None : 2 : a*a : 9 : {(1, 1), (1, 2), (1, 3), (2, 1), (2, 2), (2, 3), (3, 1), (3, 2), (3, 3)}
2 Param Declarations
A : Size=1, Index=None, Domain=Any, Default=-1, Mutable=True
diff --git a/pyomo/dae/contset.py b/pyomo/dae/contset.py
index d17a84a7788..58d8c0a2773 100644
--- a/pyomo/dae/contset.py
+++ b/pyomo/dae/contset.py
@@ -12,7 +12,7 @@
from pyomo.common.timing import ConstructionTimer
from pyomo.core import *
from pyomo.core.base.plugin import ModelComponentFactory
-from pyomo.core.base.sets import OrderedSimpleSet
+from pyomo.core.base.set import SortedSimpleSet
from pyomo.core.base.numvalue import native_numeric_types
logger = logging.getLogger('pyomo.dae')
@@ -22,7 +22,7 @@
@ModelComponentFactory.register(
"A bounded continuous numerical range optionally containing"
" discrete points of interest.")
-class ContinuousSet(OrderedSimpleSet):
+class ContinuousSet(SortedSimpleSet):
""" Represents a bounded continuous domain
Minimally, this set must contain two numeric values defining the
@@ -38,7 +38,7 @@ class ContinuousSet(OrderedSimpleSet):
bounds : `tuple`
The bounding points for the continuous domain. The bounds will
be included as discrete points in the :py:class:`ContinuousSet`
- but will not be used to restrict points added to the
+ and will be used to bound the points added to the
:py:class:`ContinuousSet` through the 'initialize' argument,
a data file, or the add() method
@@ -72,7 +72,8 @@ def __init__(self, *args, **kwds):
# if kwds.pop("within", None) is not None:
# raise TypeError("'within' is not a valid keyword argument for "
# ContinuousSet")
- if kwds.pop("dimen", None) is not None:
+ kwds.setdefault('dimen', 1)
+ if kwds["dimen"] != 1:
raise TypeError("'dimen' is not a valid keyword argument for "
"ContinuousSet")
if kwds.pop("virtual", None) is not None:
@@ -85,14 +86,10 @@ def __init__(self, *args, **kwds):
raise TypeError("A ContinuousSet expects no arguments")
kwds.setdefault('ctype', ContinuousSet)
- kwds.setdefault('ordered', Set.SortedOrder)
- self._type = ContinuousSet
self._changed = False
- self.concrete = True
- self.virtual = False
self._fe = []
self._discretization_info = {}
- OrderedSimpleSet.__init__(self, **kwds)
+ super(ContinuousSet, self).__init__(**kwds)
def get_finite_elements(self):
""" Returns the finite element points
@@ -213,45 +210,37 @@ def construct(self, values=None):
""" Constructs a :py:class:`ContinuousSet` component
"""
+ if self._constructed:
+ return
timer = ConstructionTimer(self)
- OrderedSimpleSet.construct(self, values)
+ super(ContinuousSet, self).construct(values)
- for val in self.value:
+ for val in self:
if type(val) is tuple:
raise ValueError("ContinuousSet cannot contain tuples")
if val.__class__ not in native_numeric_types:
raise ValueError("ContinuousSet can only contain numeric "
"values")
- if self._bounds is None:
- raise ValueError("ContinuousSet '%s' must have at least two values"
- " indicating the range over which a differential "
- "equation is to be discretized" % self.name)
-
- # If bounds were set using pyomo parameters, get their values
- lb = value(self._bounds[0])
- ub = value(self._bounds[1])
- self._bounds = (lb, ub)
-
- if self._bounds[0].__class__ not in native_numeric_types:
- raise ValueError("Bounds on ContinuousSet must be numeric values")
- if self._bounds[1].__class__ not in native_numeric_types:
- raise ValueError("Bounds on ContinuousSet must be numeric values")
-
# TBD: If a user specifies bounds they will be added to the set
# unless the user specified bounds have been overwritten during
# OrderedSimpleSet construction. This can lead to some unintuitive
# behavior when the ContinuousSet is both initialized with values and
# bounds are specified. The current implementation is consistent
# with how 'Set' treats this situation.
- if self._bounds[0] not in self.value:
- self.add(self._bounds[0])
- self._sort()
- if self._bounds[1] not in self.value:
- self.add(self._bounds[1])
- self._sort()
+ for bnd in self.domain.bounds():
+ # Note: the base class constructor ensures that any declared
+ # set members are already within the bounds.
+ if bnd is not None and bnd not in self:
+ self.add(bnd)
+
+ if None in self.bounds():
+ raise ValueError("ContinuousSet '%s' must have at least two values"
+ " indicating the range over which a differential "
+ "equation is to be discretized" % self.name)
if len(self) < 2:
+ # (reachable if lb==ub)
raise ValueError("ContinuousSet '%s' must have at least two values"
" indicating the range over which a differential "
"equation is to be discretized" % self.name)
diff --git a/pyomo/dae/diffvar.py b/pyomo/dae/diffvar.py
index 67df74140e4..091b968bcb4 100644
--- a/pyomo/dae/diffvar.py
+++ b/pyomo/dae/diffvar.py
@@ -9,6 +9,8 @@
# ___________________________________________________________________________
import weakref
+from pyomo.core import ComponentMap
+from pyomo.core.base.set import UnknownSetDimen
from pyomo.core.base.var import Var, _VarData
from pyomo.core.base.plugin import ModelComponentFactory
from pyomo.dae.contset import ContinuousSet
@@ -90,20 +92,29 @@ def __init__(self, sVar, **kwds):
# This dictionary keeps track of where the ContinuousSet appears
# in the index. This implementation assumes that every element
# in an indexing set has the same dimension.
- sVar._contset = {}
+ sVar._contset = ComponentMap()
sVar._derivative = {}
if sVar.dim() == 0:
num_contset = 0
- elif sVar.dim() == 1:
- sidx_sets = sVar._index
- if sidx_sets.type() is ContinuousSet:
- sVar._contset[sidx_sets] = 0
else:
- sidx_sets = sVar.index_set().set_tuple
+ sidx_sets = list(sVar.index_set().subsets())
loc = 0
for i, s in enumerate(sidx_sets):
- if s.type() is ContinuousSet:
+ if s.ctype is ContinuousSet:
sVar._contset[s] = loc
+ _dim = s.dimen
+ if _dim is None:
+ raise DAE_Error(
+ "The variable %s is indexed by a Set (%s) with a "
+ "non-fixed dimension. A DerivativeVar may only be "
+ "indexed by Sets with constant dimension"
+ % (sVar, s.name))
+ elif _dim is UnknownSetDimen:
+ raise DAE_Error(
+ "The variable %s is indexed by a Set (%s) with an "
+ "unknown dimension. A DerivativeVar may only be "
+ "indexed by Sets with known constant dimension"
+ % (sVar, s.name))
loc += s.dimen
num_contset = len(sVar._contset)
diff --git a/pyomo/dae/flatten.py b/pyomo/dae/flatten.py
new file mode 100644
index 00000000000..ea6e392fe40
--- /dev/null
+++ b/pyomo/dae/flatten.py
@@ -0,0 +1,140 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+from pyomo.core.base import Block, Var, Reference
+from pyomo.core.base.block import SubclassOf
+from pyomo.core.base.indexed_component_slice import IndexedComponent_slice
+
+
+def generate_time_only_slices(obj, time):
+ o_sets = obj.index_set().subsets()
+ # Given a potentially complex set, determine the index of the TIME
+ # set, as well as all other "fixed" indices. We will even support a
+ # single Set with dimen==None (using ellipsis in the slice).
+ ellipsis_idx = None
+ time_idx = None
+ regular_idx = []
+ idx = 0
+ for s in o_sets:
+ if s is time:
+ time_idx = idx
+ idx += 1
+ elif s.dimen is not None:
+ for sub_idx in range(s.dimen):
+ regular_idx.append(idx+sub_idx)
+ idx += s.dimen
+ elif ellipsis_idx is None:
+ ellipsis_idx = idx
+ idx += 1
+ else:
+ raise RuntimeError(
+ "We can only handle a single Set with dimen=None")
+ # To support Sets with dimen==None (using ellipsis), we need to have
+ # all fixed/time indices be positive if they appear before the
+ # ellipsis and negative (counting from the end of the list) if they
+ # are after the ellipsis.
+ if ellipsis_idx:
+ if time_idx > ellipsis_idx:
+ time_idx = time_idx - idx
+ regular_idx = [ i - idx if i > ellipsis_idx else i
+ for i in fixed_idx ]
+ # We now form a temporary slice that slices over all the regular
+ # indices for a fixed value of the time index.
+ tmp_sliced = {i: slice(None) for i in regular_idx}
+ tmp_fixed = {time_idx: time.first()}
+ tmp_ellipsis = ellipsis_idx
+ _slice = IndexedComponent_slice(
+ obj, tmp_fixed, tmp_sliced, tmp_ellipsis
+ )
+ # For each combination of regular indices, we can generate a single
+ # slice over the time index
+ time_sliced = [time_idx]
+ for key in _slice.wildcard_keys():
+ if type(key) is not tuple:
+ key = (key,)
+ time_fixed = dict(
+ (i, val) if i tol or
+ value(condata.lower) - value(condata.body) > tol):
+ inconsistent.add(condata)
+
+ for blk in model.component_objects(Block, active=True):
+ # What if there are time-indexed blocks at multiple levels
+ # of a hierarchy?
+ # My preferred convention is to only check the first (highest-
+ # level) time index, but distinguishing between different-level
+ # time indices is an expensive operation.
+ if not is_explicitly_indexed_by(blk, time):
+ continue
+ if is_in_block_indexed_by(blk, time):
+ continue
+ info = get_index_set_except(blk, time)
+ non_time_set = info['set_except']
+ index_getter = info['index_getter']
+ for non_time_index in non_time_set:
+ index = index_getter(non_time_index, t0)
+ blkdata = blk[index]
+ for condata in blkdata.component_data_objects(Constraint,
+ active=True):
+ if (value(condata.body) - value(condata.upper) > tol or
+ value(condata.lower) - value(condata.body) > tol):
+ if condata in inconsistent:
+ raise ValueError(
+ '%s has already been visited. The only way this '
+ 'should happen is if the model has nested time-'
+ 'indexed blocks, which is not supported.')
+ inconsistent.add(condata)
+
+ return list(inconsistent)
+
+
+def solve_consistent_initial_conditions(model, time, solver):
+ """
+ Solves a model with all Constraints and Blocks deactivated except
+ at the initial value of the Set time. Reactivates Constraints and
+ Blocks that got deactivated.
+
+ Args:
+ model: Model that will be solved
+ time: Set whose initial conditions will remain active for solve
+ solver: Something that implements a solve method that accepts
+ a model as an argument
+
+ Returns:
+ The object returned by the solver's solve method
+ """
+ # Need to deactivate discretization equations, wrt time, at t == 0
+ # This is challenging as the only way (to my knowledge) to do this
+ # is to identify_variables in the expression, find the (assume only one?)
+ # DerivativeVar, and access its get_continuousset_list
+ # I would like a get_continuousset_list for discretization equations.
+ # Possibly as a ComponentMap, possibly as an attribute of some new
+ # DiscEquation subclass of Constraint
+ # Until I have this, this function will only work for backward
+ # discretization schemes
+
+ # Also, would like to be able to check for zero degrees of freedom here
+
+ scheme = time.get_discretization_info()['scheme']
+ if scheme != 'LAGRANGE-RADAU' and scheme != 'BACKWARD Difference':
+ raise NotImplementedError(
+ '%s discretization scheme is not supported' % scheme)
+
+ t0 = time.first()
+ timelist = list(time)[1:]
+ deactivated_dict = deactivate_model_at(model, time, timelist)
+
+ result = solver.solve(model)
+
+ for t in timelist:
+ for comp in deactivated_dict[t]:
+ comp.activate()
+
+ return result
+
diff --git a/pyomo/dae/integral.py b/pyomo/dae/integral.py
index 95f45076b3a..9e531e5ce0d 100644
--- a/pyomo/dae/integral.py
+++ b/pyomo/dae/integral.py
@@ -170,7 +170,7 @@ def is_fully_discretized(self):
setlist = self.index_set().set_tuple
for i in setlist:
- if i.type() is ContinuousSet:
+ if i.ctype is ContinuousSet:
if 'scheme' not in i.get_discretization_info():
return False
return True
diff --git a/pyomo/dae/misc.py b/pyomo/dae/misc.py
index 533ab5343c9..32a75073fb2 100644
--- a/pyomo/dae/misc.py
+++ b/pyomo/dae/misc.py
@@ -48,7 +48,6 @@ def generate_finite_elements(ds, nfe):
ds.add(round(tmp, 6))
tmp += step
ds.set_changed(True)
- ds._sort()
ds._fe = list(ds)
return
else:
@@ -67,7 +66,6 @@ def generate_finite_elements(ds, nfe):
_add_point(ds)
addpts -= 1
ds.set_changed(True)
- ds._sort()
ds._fe = list(ds)
return
@@ -100,7 +98,6 @@ def generate_colloc_points(ds, tau):
if pt not in ds:
ds.add(pt)
ds.set_changed(True)
- ds._sort()
def expand_components(block):
@@ -191,7 +188,7 @@ def update_contset_indexed_component(comp, expansion_map):
# you must initialize it with every index you would like to have
# access to!
- if comp.type() is Suffix:
+ if comp.ctype is Suffix:
return
# Params indexed by a ContinuousSet should include an initialize
@@ -199,13 +196,13 @@ def update_contset_indexed_component(comp, expansion_map):
# parameter value at a new point in the ContinuousSet is
# requested. Therefore, no special processing is required for
# Params.
- if comp.type() is Param:
+ if comp.ctype is Param:
return
# Integral components are handled after every ContinuousSet has been
# discretized. Import is deferred to here due to circular references.
from pyomo.dae import Integral
- if comp.type() is Integral:
+ if comp.ctype is Integral:
return
# Skip components that do not have a 'dim' attribute. This assumes that
@@ -228,22 +225,22 @@ def update_contset_indexed_component(comp, expansion_map):
indexset = [temp,]
for s in indexset:
- if s.type() == ContinuousSet and s.get_changed():
+ if s.ctype == ContinuousSet and s.get_changed():
if isinstance(comp, Var): # Don't use the type() method here
# because we want to catch DerivativeVar components as well
# as Var components
expansion_map[comp] = _update_var
_update_var(comp)
- elif comp.type() == Constraint:
+ elif comp.ctype == Constraint:
expansion_map[comp] = _update_constraint
_update_constraint(comp)
- elif comp.type() == Expression:
+ elif comp.ctype == Expression:
expansion_map[comp] = _update_expression
_update_expression(comp)
elif isinstance(comp, Piecewise):
expansion_map[comp] =_update_piecewise
_update_piecewise(comp)
- elif comp.type() == Block:
+ elif comp.ctype == Block:
expansion_map[comp] = _update_block
_update_block(comp)
else:
@@ -254,7 +251,7 @@ def update_contset_indexed_component(comp, expansion_map):
"discretization transformation in pyomo.dae. "
"Try adding the component to the model "
"after discretizing. Alert the pyomo developers "
- "for more assistance." % (str(comp), comp.type()))
+ "for more assistance." % (str(comp), comp.ctype))
def _update_var(v):
@@ -336,23 +333,10 @@ def _update_block(blk):
'function on Block-derived components that override '
'construct()' % blk.name)
- # Code taken from the construct() method of Block
missing_idx = getattr(blk, '_dae_missing_idx', set([]))
for idx in list(missing_idx):
- _block = blk[idx]
- obj = apply_indexed_rule(
- blk, blk._rule, _block, idx, blk._options)
-
- if isinstance(obj, _BlockData) and obj is not _block:
- # If the user returns a block, use their block instead
- # of the empty one we just created.
- for c in list(obj.component_objects(descend_into=False)):
- obj.del_component(c)
- _block.add_component(c.local_name, c)
- # transfer over any other attributes that are not components
- for name, val in iteritems(obj.__dict__):
- if not hasattr(_block, name) and not hasattr(blk, name):
- super(_BlockData, _block).__setattr__(name, val)
+ # Trigger block creation (including calling the Block's rule)
+ blk[idx]
# Remove book-keeping data after Block is discretized
if hasattr(blk, '_dae_missing_idx'):
diff --git a/pyomo/dae/plugins/colloc.py b/pyomo/dae/plugins/colloc.py
index 5de89c605e7..813376a3d8c 100644
--- a/pyomo/dae/plugins/colloc.py
+++ b/pyomo/dae/plugins/colloc.py
@@ -12,8 +12,15 @@
from six.moves import xrange
from six import next
+# If the user has numpy then the collocation points and the a matrix for
+# the Runge-Kutta basis formulation will be calculated as needed.
+# If the user does not have numpy then these values will be read from a
+# stored dictionary for up to 10 collocation points.
+from pyomo.common.dependencies import numpy, numpy_available
+
from pyomo.core.base import Transformation, TransformationFactory
from pyomo.core import Var, ConstraintList, Expression, Objective
+from pyomo.core.kernel.component_set import ComponentSet
from pyomo.dae import ContinuousSet, DerivativeVar, Integral
from pyomo.dae.misc import generate_finite_elements
@@ -28,16 +35,6 @@
from pyomo.common.config import ConfigBlock, ConfigValue, PositiveInt, In
-# If the user has numpy then the collocation points and the a matrix for
-# the Runge-Kutta basis formulation will be calculated as needed.
-# If the user does not have numpy then these values will be read from a
-# stored dictionary for up to 10 collocation points.
-try:
- import numpy
- numpy_available = True
-except ImportError: # pragma:nocover
- numpy_available = False
-
logger = logging.getLogger('pyomo.dae')
@@ -377,7 +374,7 @@ def _apply_to(self, instance, **kwds):
tmpds = config.wrt
if tmpds is not None:
- if tmpds.type() is not ContinuousSet:
+ if tmpds.ctype is not ContinuousSet:
raise TypeError("The component specified using the 'wrt' "
"keyword must be a continuous set")
elif 'scheme' in tmpds.get_discretization_info():
@@ -453,7 +450,7 @@ def _transformBlock(self, block, currentds):
for d in block.component_objects(DerivativeVar, descend_into=True):
dsets = d.get_continuousset_list()
- for i in set(dsets):
+ for i in ComponentSet(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
@@ -561,7 +558,7 @@ def reduce_collocation_points(self, instance, var=None, ncp=None,
if contset is None:
raise TypeError("A continuous set must be specified using the "
"keyword 'contset'")
- if contset.type() is not ContinuousSet:
+ if contset.ctype is not ContinuousSet:
raise TypeError("The component specified using the 'contset' "
"keyword must be a ContinuousSet")
ds = contset
@@ -581,7 +578,7 @@ def reduce_collocation_points(self, instance, var=None, ncp=None,
if var is None:
raise TypeError("A variable must be specified")
- if var.type() is not Var:
+ if var.ctype is not Var:
raise TypeError("The component specified using the 'var' keyword "
"must be a variable")
diff --git a/pyomo/dae/plugins/finitedifference.py b/pyomo/dae/plugins/finitedifference.py
index 577be629381..7acd069a117 100644
--- a/pyomo/dae/plugins/finitedifference.py
+++ b/pyomo/dae/plugins/finitedifference.py
@@ -12,6 +12,7 @@
from pyomo.core.base import Transformation, TransformationFactory
from pyomo.core import Var, Expression, Objective
+from pyomo.core.kernel.component_set import ComponentSet
from pyomo.dae import ContinuousSet, DerivativeVar, Integral
from pyomo.dae.misc import generate_finite_elements
@@ -171,7 +172,7 @@ def _apply_to(self, instance, **kwds):
tmpds = config.wrt
if tmpds is not None:
- if tmpds.type() is not ContinuousSet:
+ if tmpds.ctype is not ContinuousSet:
raise TypeError("The component specified using the 'wrt' "
"keyword must be a continuous set")
elif 'scheme' in tmpds.get_discretization_info():
@@ -236,7 +237,7 @@ def _transformBlock(self, block, currentds):
for d in block.component_objects(DerivativeVar, descend_into=True):
dsets = d.get_continuousset_list()
- for i in set(dsets):
+ for i in ComponentSet(dsets):
if currentds is None or i.name == currentds:
oldexpr = d.get_derivative_expression()
loc = d.get_state_var()._contset[i]
diff --git a/pyomo/dae/set_utils.py b/pyomo/dae/set_utils.py
new file mode 100644
index 00000000000..c1a979c9cf5
--- /dev/null
+++ b/pyomo/dae/set_utils.py
@@ -0,0 +1,284 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+from collections import Counter
+from pyomo.core.base import Constraint, Block
+from pyomo.core.kernel.component_set import ComponentSet
+from pyomo.core.base.set import SetProduct
+
+
+def index_warning(name, index):
+ return 'WARNING: %s has no index %s' % (name, index)
+
+
+def is_explicitly_indexed_by(comp, *sets, **kwargs):
+ """
+ Function for determining whether a pyomo component is indexed by a
+ set or group of sets.
+
+ Args:
+ comp : Some Pyomo component, possibly indexed
+ sets : Pyomo Sets to check indexing by
+ expand_all_set_operators : Whether or not to expand all set operators
+ in the subsets method
+
+ Returns:
+ A bool that is True if comp is directly indexed by every set in sets.
+ """
+ if not comp.is_indexed():
+ return False
+ for s in sets:
+ if isinstance(s, SetProduct):
+ msg = ('Checking for explicit indexing by a SetProduct '
+ 'is not supported')
+ raise TypeError(msg)
+
+ expand_all_set_operators = kwargs.pop('expand_all_set_operators', False)
+ if kwargs:
+ keys = kwargs.keys()
+ raise ValueError('Unrecognized keyword arguments: %s' % str(keys))
+
+ projected_subsets = comp.index_set().subsets(expand_all_set_operators=
+ expand_all_set_operators)
+ # Expanding all set operators here can be dangerous because it will not
+ # distinguish between operators that contain their operands (e.g. union,
+ # where you might consider the component to be considered indexed by
+ # the operands) and operators that don't.
+ # Ideally would like to check for containment by inclusion and containment
+ # by product in one search of the set operators.
+ subset_set = ComponentSet(projected_subsets)
+
+ return all([_ in subset_set for _ in sets])
+
+
+def is_in_block_indexed_by(comp, s, stop_at=None):
+ """
+ Function for determining whether a component is contained in a
+ block that is indexed by a particular set.
+
+ Args:
+ comp : Component whose parent blocks are checked
+ s : Set for which indices are checked
+ stop_at : Block at which to stop searching if reached, regardless
+ of whether or not it is indexed by s
+
+ Returns:
+ Bool that is true if comp is contained in a block indexed by s
+ """
+ parent = comp.parent_block()
+
+ # Stop when top-level block has been reached
+ while parent is not None:
+ # If we have reached our stopping point, quit.
+ if parent is stop_at:
+ return False
+
+ # Look at the potentially-indexed block containing our component
+ parent = parent.parent_component()
+ # Check again for the stopping point in case an IndexedBlock was used
+ if parent is stop_at:
+ return False
+
+ # Check potentially-indexed block for index s:
+ if is_explicitly_indexed_by(parent, s):
+ return True
+ # Continue up the tree, checking the parent block of our
+ # potentially-indexed block:
+ else:
+ parent = parent.parent_block()
+ # Return False if top-level block was reached
+ return False
+
+
+def get_index_set_except(comp, *sets):
+ """
+ Function for getting indices of a component over a product of its
+ indexing sets other than those specified. Indices for the specified
+ sets can be used to construct indices of the proper dimension for the
+ original component via the index_getter function.
+
+ Args:
+ comp : Component whose indexing sets are to be manipulated
+ sets : Sets to omit from the set_except product
+
+ Returns:
+ A dictionary. Maps 'set_except' to a Pyomo Set or SetProduct
+ of comp's index set, excluding those in sets. Maps
+ 'index_getter' to a function that returns an index of the
+ proper dimension for comp, given an element of set_except
+ and a value for each set excluded. These values must be provided
+ in the same order their Sets were provided in the sets argument.
+ """
+ n_set = len(sets)
+ s_set = ComponentSet(sets)
+ try:
+ total_s_dim = sum([s.dimen for s in sets])
+ except TypeError:
+ msg = ('get_index_set_except does not support sets with '
+ 'dimen == None, including those with inconsistent dimen')
+ raise TypeError(msg)
+
+ info = {}
+
+ if not is_explicitly_indexed_by(comp, *sets):
+ msg = (comp.name + ' is not indexed by at least one of ' +
+ str([s.name for s in sets]))
+ raise ValueError(msg)
+
+ index_set = comp.index_set()
+ if isinstance(index_set, SetProduct):
+ projection_sets = list(index_set.subsets())
+ counter = Counter([id(_) for _ in projection_sets])
+ for s in sets:
+ if counter[id(s)] != 1:
+ msg = 'Cannot omit sets that appear multiple times'
+ raise ValueError(msg)
+ # Need to know the location of each set within comp's index_set
+ # location will map:
+ # location in comp's subsets() -> location in input sets
+ location = {}
+ # location should be well defined even for higher dimension sets
+ # because this maps between lists of sets, not lists of indices
+ other_ind_sets = []
+ for ind_loc, ind_set in enumerate(projection_sets):
+ found_set = False
+ for s_loc, s_set in enumerate(sets):
+ if ind_set is s_set:
+ location[ind_loc] = s_loc
+ found_set = True
+ break
+ if not found_set:
+ other_ind_sets.append(ind_set)
+ else:
+ # If index_set is not a SetProduct, only one set must have been
+ # provided, so len(sets) == 1
+ # Location in sets and in comp's indexing set are the same.
+ location = {0: 0}
+ other_ind_sets = []
+
+ if comp.dim() == total_s_dim:
+ # comp indexed by all sets and having this dimension
+ # is sufficient to know that comp is only indexed by
+ # Sets in *sets
+
+ # In this case, return the trivial set_except and index_getter
+
+ # Problem: cannot construct location without a set tuple
+ # is that a problem with this syntax?
+ # Here len(newvals) should == 1
+ info['set_except'] = [None]
+ # index_getter returns an index corresponding to the values passed to
+ # it, re-ordered according to order of indexing sets in component.
+ info['index_getter'] = (lambda incomplete_index, *newvals:
+ newvals[0] if len(newvals) <= 1 else
+ tuple([newvals[location[i]] for i in location]))
+ return info
+
+ # Now may assume other_ind_sets is nonempty.
+ if len(other_ind_sets) == 1:
+ set_except = other_ind_sets[0]
+ elif len(other_ind_sets) >= 2:
+ set_except = other_ind_sets[0].cross(*other_ind_sets[1:])
+ else:
+ raise ValueError('Did not expect this to happen')
+
+ index_getter = (lambda incomplete_index, *newvals:
+ _complete_index(location, incomplete_index, *newvals))
+
+ info['set_except'] = set_except
+ info['index_getter'] = index_getter
+ return info
+
+
+def _complete_index(loc, index, *newvals):
+ """
+ Function for inserting new values into a partial index.
+ Used by get_index_set_except function to construct the
+ index_getter function for completing indices of a particular
+ component with particular sets excluded.
+
+ Args:
+ loc : Dictionary mapping location in the new index to
+ location in newvals
+ index : Partial index
+ newvals : New values to insert into index. Can be scalars
+ or tuples (for higher-dimension sets)
+
+ Returns:
+ An index (tuple) with values from newvals inserted in
+ locations specified by loc
+ """
+ if type(index) is not tuple:
+ index = (index,)
+ keys = sorted(loc.keys())
+ if len(keys) != len(newvals):
+ raise ValueError('Wrong number of values to complete index')
+ for i in sorted(loc.keys()):
+ newval = newvals[loc[i]]
+ if type(newval) is not tuple:
+ newval = (newval,)
+ index = index[0:i] + newval + index[i:]
+ return index
+
+
+def deactivate_model_at(b, cset, pts, allow_skip=True,
+ suppress_warnings=False):
+ """
+ Finds any block or constraint in block b, indexed explicitly (and not
+ implicitly) by cset, and deactivates it at points specified.
+ Implicitly indexed components are excluded because one of their parent
+ blocks will be deactivated, so deactivating them too would be redundant.
+
+ Args:
+ b : Block to search
+ cset : ContinuousSet of interest
+ pts : Value or list of values, in ContinuousSet, to deactivate at
+
+ Returns:
+ A dictionary mapping points in pts to lists of
+ component data that have been deactivated there
+ """
+ if type(pts) is not list:
+ pts = [pts]
+ for pt in pts:
+ if pt not in cset:
+ msg = str(pt) + ' is not in ContinuousSet ' + cset.name
+ raise ValueError(msg)
+ deactivated = {pt: [] for pt in pts}
+
+ visited = set()
+ for comp in b.component_objects([Block, Constraint], active=True):
+ # Record components that have been visited in case component_objects
+ # contains duplicates (due to references)
+ if id(comp) in visited:
+ continue
+ visited.add(id(comp))
+
+ if (is_explicitly_indexed_by(comp, cset) and
+ not is_in_block_indexed_by(comp, cset)):
+ info = get_index_set_except(comp, cset)
+ non_cset_set = info['set_except']
+ index_getter = info['index_getter']
+
+ for non_cset_index in non_cset_set:
+ for pt in pts:
+ index = index_getter(non_cset_index, pt)
+ try:
+ comp[index].deactivate()
+ deactivated[pt].append(comp[index])
+ except KeyError:
+ # except KeyError to allow Constraint/Block.Skip
+ if not suppress_warnings:
+ print(index_warning(comp.name, index))
+ if not allow_skip:
+ raise
+ continue
+
+ return deactivated
diff --git a/pyomo/dae/simulator.py b/pyomo/dae/simulator.py
index 27fce82d7af..b021ec13de3 100644
--- a/pyomo/dae/simulator.py
+++ b/pyomo/dae/simulator.py
@@ -6,14 +6,18 @@
# the U.S. Government retains certain rights in this software.
# This software is distributed under the BSD License.
# _________________________________________________________________________
-from pyomo.core.base import Constraint, Param, value, Suffix, Block
+from pyomo.core.base import Constraint, Param, Var, value, Suffix, Block
from pyomo.dae import ContinuousSet, DerivativeVar
from pyomo.dae.diffvar import DAE_Error
from pyomo.core.expr import current as EXPR
-from pyomo.core.expr.numvalue import NumericValue, native_numeric_types
-from pyomo.core.base.template_expr import IndexTemplate, _GetItemIndexer
+from pyomo.core.expr.numvalue import (
+ NumericValue, native_numeric_types, nonpyomo_leaf_types,
+)
+from pyomo.core.expr.template_expr import IndexTemplate, _GetItemIndexer
+from pyomo.core.base.indexed_component_slice import IndexedComponent_slice
+from pyomo.core.base.reference import Reference
from six import iterkeys, itervalues
@@ -22,28 +26,29 @@
__all__ = ('Simulator', )
logger = logging.getLogger('pyomo.core')
-# Check numpy availability
-numpy_available = True
-try:
- import numpy as np
-except ImportError:
- numpy_available = True
+from pyomo.common.dependencies import (
+ numpy as np, numpy_available, attempt_import,
+)
# Check integrator availability
-scipy_available = True
-try:
- import platform
- if platform.python_implementation() == "PyPy": # pragma:nocover
- # scipy is importable into PyPy, but ODE integrators don't work. (2/18)
- raise ImportError
- import scipy.integrate as scipy
-except ImportError:
- scipy_available = False
-
-casadi_available = True
-try:
- import casadi
- casadi_intrinsic = {
+# scipy_available = True
+# try:
+# import platform
+# if platform.python_implementation() == "PyPy": # pragma:nocover
+# # scipy is importable into PyPy, but ODE integrators don't work. (2/18)
+# raise ImportError
+# import scipy.integrate as scipy
+# except ImportError:
+# scipy_available = False
+import platform
+is_pypy = platform.python_implementation() == "PyPy"
+
+scipy, scipy_available = attempt_import('scipy.integrate', alt_names=['scipy'])
+
+casadi_intrinsic = {}
+def _finalize_casadi(casadi, available):
+ if available:
+ casadi_intrinsic.update({
'log': casadi.log,
'log10': casadi.log10,
'sin': casadi.sin,
@@ -61,9 +66,9 @@
'acosh': casadi.acosh,
'atanh': casadi.atanh,
'ceil': casadi.ceil,
- 'floor': casadi.floor}
-except ImportError:
- casadi_available = False
+ 'floor': casadi.floor,
+ })
+casadi, casadi_available = attempt_import('casadi', callback=_finalize_casadi)
def _check_getitemexpression(expr, i):
@@ -74,7 +79,7 @@ def _check_getitemexpression(expr, i):
GetItemExpression for the :py:class:`DerivativeVar` and
the RHS. If not, return None.
"""
- if type(expr.arg(i)._base) is DerivativeVar:
+ if type(expr.arg(i).arg(0)) is DerivativeVar:
return [expr.arg(i), expr.arg(1 - i)]
else:
return None
@@ -93,7 +98,7 @@ def _check_productexpression(expr, i):
stack = [(expr_, 1)]
pterms = []
dv = None
-
+
while stack:
curr, e_ = stack.pop()
if curr.__class__ is EXPR.ProductExpression:
@@ -105,7 +110,7 @@ def _check_productexpression(expr, i):
elif curr.__class__ is EXPR.ReciprocalExpression:
stack.append((curr.arg(0), - e_))
elif type(curr) is EXPR.GetItemExpression and \
- type(curr._base) is DerivativeVar:
+ type(curr.arg(0)) is DerivativeVar:
dv = (curr, e_)
else:
pterms.append((curr, e_))
@@ -117,9 +122,9 @@ def _check_productexpression(expr, i):
denom = 1
for term, e_ in pterms:
if e_ == 1:
- denom *= term
+ denom *= term
else:
- numer *= term
+ numer *= term
curr, e_ = dv
if e_ == 1:
return [curr, expr.arg(1 - i) * numer / denom]
@@ -139,7 +144,7 @@ def _check_negationexpression(expr, i):
arg = expr.arg(i).arg(0)
if type(arg) is EXPR.GetItemExpression and \
- type(arg._base) is DerivativeVar:
+ type(arg.arg(0)) is DerivativeVar:
return [arg, - expr.arg(1 - i)]
if type(arg) is EXPR.ProductExpression:
@@ -150,7 +155,7 @@ def _check_negationexpression(expr, i):
not lhs.is_potentially_variable()):
return None
if not (type(rhs) is EXPR.GetItemExpression and
- type(rhs._base) is DerivativeVar):
+ type(rhs.arg(0)) is DerivativeVar):
return None
return [rhs, - expr.arg(1 - i) / lhs]
@@ -177,7 +182,7 @@ def _check_viewsumexpression(expr, i):
if dv is not None:
items.append(item)
elif type(item) is EXPR.GetItemExpression and \
- type(item._base) is DerivativeVar:
+ type(item.arg(0)) is DerivativeVar:
dv = item
elif type(item) is EXPR.ProductExpression:
# This will contain the constant coefficient if there is one
@@ -187,7 +192,7 @@ def _check_viewsumexpression(expr, i):
if (type(lhs) in native_numeric_types or
not lhs.is_potentially_variable()) \
and (type(rhs) is EXPR.GetItemExpression and
- type(rhs._base) is DerivativeVar):
+ type(rhs.arg(0)) is DerivativeVar):
dv = rhs
dvcoef = lhs
else:
@@ -204,33 +209,31 @@ def _check_viewsumexpression(expr, i):
return None
-if scipy_available:
- class Pyomo2Scipy_Visitor(EXPR.ExpressionReplacementVisitor):
- """
- Expression walker that replaces _GetItemExpression
- instances with mutable parameters.
- """
+class Pyomo2Scipy_Visitor(EXPR.ExpressionReplacementVisitor):
+ """
+ Expression walker that replaces _GetItemExpression
+ instances with mutable parameters.
+ """
- def __init__(self, templatemap):
- super(Pyomo2Scipy_Visitor, self).__init__()
- self.templatemap = templatemap
+ def __init__(self, templatemap):
+ super(Pyomo2Scipy_Visitor, self).__init__()
+ self.templatemap = templatemap
- def visiting_potential_leaf(self, node):
- if type(node) is IndexTemplate:
- return True, node
+ def visiting_potential_leaf(self, node):
+ if type(node) is IndexTemplate:
+ return True, node
- if type(node) is EXPR.GetItemExpression:
- _id = _GetItemIndexer(node)
- if _id not in self.templatemap:
- self.templatemap[_id] = Param(mutable=True)
- self.templatemap[_id].construct()
- _args = []
- self.templatemap[_id]._name = "%s[%s]" % (
- node._base.name, ','.join(str(x) for x in _id._args))
- return True, self.templatemap[_id]
+ if type(node) is EXPR.GetItemExpression:
+ _id = _GetItemIndexer(node)
+ if _id not in self.templatemap:
+ self.templatemap[_id] = Param(mutable=True)
+ self.templatemap[_id].construct()
+ self.templatemap[_id]._name = "%s[%s]" % (
+ _id.base.name, ','.join(str(x) for x in _id.args))
+ return True, self.templatemap[_id]
- return super(
- Pyomo2Scipy_Visitor, self).visiting_potential_leaf(node)
+ return super(
+ Pyomo2Scipy_Visitor, self).visiting_potential_leaf(node)
def convert_pyomo2scipy(expr, templatemap):
@@ -253,84 +256,83 @@ def convert_pyomo2scipy(expr, templatemap):
return visitor.dfs_postorder_stack(expr)
-if casadi_available:
- class Substitute_Pyomo2Casadi_Visitor(EXPR.ExpressionReplacementVisitor):
- """
- Expression walker that replaces
-
- * _UnaryFunctionExpression instances with unary functions that
- point to casadi intrinsic functions.
-
- * _GetItemExpressions with _GetItemIndexer objects that references
- CasADi variables.
- """
+class Substitute_Pyomo2Casadi_Visitor(EXPR.ExpressionReplacementVisitor):
+ """
+ Expression walker that replaces
- def __init__(self, templatemap):
- super(Substitute_Pyomo2Casadi_Visitor, self).__init__()
- self.templatemap = templatemap
-
- def visit(self, node, values):
- """Replace a node if it's a unary function."""
- if type(node) is EXPR.UnaryFunctionExpression:
- return EXPR.UnaryFunctionExpression(
- values[0],
- node._name,
- casadi_intrinsic[node._name])
- return node
-
- def visiting_potential_leaf(self, node):
- """Replace a node if it's a _GetItemExpression."""
- if type(node) is EXPR.GetItemExpression:
- _id = _GetItemIndexer(node)
- if _id not in self.templatemap:
- name = "%s[%s]" % (
- node._base.name, ','.join(str(x) for x in _id._args))
- self.templatemap[_id] = casadi.SX.sym(name)
- return True, self.templatemap[_id]
+ * _UnaryFunctionExpression instances with unary functions that
+ point to casadi intrinsic functions.
- if type(node) in native_numeric_types or \
- not node.is_expression_type() or \
- type(node) is IndexTemplate:
- return True, node
+ * _GetItemExpressions with _GetItemIndexer objects that references
+ CasADi variables.
+ """
- return False, None
+ def __init__(self, templatemap):
+ super(Substitute_Pyomo2Casadi_Visitor, self).__init__()
+ self.templatemap = templatemap
+
+ def visit(self, node, values):
+ """Replace a node if it's a unary function."""
+ if type(node) is EXPR.UnaryFunctionExpression:
+ return EXPR.UnaryFunctionExpression(
+ values[0],
+ node._name,
+ casadi_intrinsic[node._name])
+ return node
+
+ def visiting_potential_leaf(self, node):
+ """Replace a node if it's a _GetItemExpression."""
+ if type(node) is EXPR.GetItemExpression:
+ _id = _GetItemIndexer(node)
+ if _id not in self.templatemap:
+ name = "%s[%s]" % (
+ _id.base.name, ','.join(str(x) for x in _id.args))
+ self.templatemap[_id] = casadi.SX.sym(name)
+ return True, self.templatemap[_id]
+
+ if type(node) in native_numeric_types or \
+ not node.is_expression_type() or \
+ type(node) is IndexTemplate:
+ return True, node
+
+ return False, None
+
+
+class Convert_Pyomo2Casadi_Visitor(EXPR.ExpressionValueVisitor):
+ """
+ Expression walker that evaluates an expression
+ generated by the Substitute_Pyomo2Casadi_Visitor walker.
+
+ In Coopr3 this walker was not necessary because the expression could
+ be simply evaluated. But in Pyomo5, the evaluation logic was
+ changed to be non-recursive, which involves checks on the types of
+ leaves in the expression tree. Hence, the evaluation logic fails if
+ leaves in the tree are not standard Pyomo5 variable types.
+ """
+ def visit(self, node, values):
+ """ Visit nodes that have been expanded """
+ return node._apply_operation(values)
- class Convert_Pyomo2Casadi_Visitor(EXPR.ExpressionValueVisitor):
- """
- Expression walker that evaluates an expression
- generated by the Substitute_Pyomo2Casadi_Visitor walker.
-
- In Coopr3 this walker was not necessary because the expression could
- be simply evaluated. But in Pyomo5, the evaluation logic was
- changed to be non-recursive, which involves checks on the types of
- leaves in the expression tree. Hence, the evaluation logic fails if
- leaves in the tree are not standard Pyomo5 variable types.
+ def visiting_potential_leaf(self, node):
"""
+ Visiting a potential leaf.
- def visit(self, node, values):
- """ Visit nodes that have been expanded """
- return node._apply_operation(values)
-
- def visiting_potential_leaf(self, node):
- """
- Visiting a potential leaf.
-
- Return True if the node is not expanded.
- """
- if node.__class__ in native_numeric_types:
- return True, node
+ Return True if the node is not expanded.
+ """
+ if node.__class__ in native_numeric_types:
+ return True, node
- if node.__class__ is casadi.SX:
- return True, node
+ if node.__class__ is casadi.SX:
+ return True, node
- if node.is_variable_type():
- return True, value(node)
+ if node.is_variable_type():
+ return True, value(node)
- if not node.is_expression_type():
- return True, value(node)
+ if not node.is_expression_type():
+ return True, value(node)
- return False, None
+ return False, None
def substitute_pyomo2casadi(expr, templatemap):
@@ -393,7 +395,7 @@ class Simulator:
"""
def __init__(self, m, package='scipy'):
-
+
self._intpackage = package
if self._intpackage not in ['scipy', 'casadi']:
raise DAE_Error(
@@ -404,14 +406,20 @@ def __init__(self, m, package='scipy'):
if not scipy_available:
# Converting this to a warning so that Simulator initialization
# can be tested even when scipy is unavailable
- logger.warning("The scipy module is not available. You may "
- "build the Simulator object but you will not "
- "be able to run the simulation.")
+ logger.warning(
+ "The scipy module is not available. "
+ "You may build the Simulator object but you will not "
+ "be able to run the simulation.")
+ elif is_pypy:
+ logger.warning(
+ "The scipy ODE integrators do not work in pypy. "
+ "You may build the Simulator object but you will not "
+ "be able to run the simulation.")
else:
if not casadi_available:
# Initializing the simulator for use with casadi requires
# access to casadi objects. Therefore, we must throw an error
- # here instead of a warning.
+ # here instead of a warning.
raise ValueError("The casadi module is not available. "
"Cannot simulate model.")
@@ -452,17 +460,17 @@ def __init__(self, m, package='scipy'):
# RHS. Must find a RHS for every derivative var otherwise ERROR. Build
# dictionary of DerivativeVar:RHS equation.
for con in m.component_objects(Constraint, active=True):
-
+
# Skip the discretization equations if model is discretized
if '_disc_eq' in con.name:
continue
-
+
# Check dimension of the Constraint. Check if the
# Constraint is indexed by the continuous set and
# determine its order in the indexing sets
if con.dim() == 0:
continue
-
+
conindex = con.index_set()
if not hasattr(conindex, 'set_tuple'):
# Check if the continuous set is the indexing set
@@ -497,7 +505,7 @@ def __init__(self, m, package='scipy'):
for i in noncsidx:
# Insert the index template and call the rule to
- # create a templated expression
+ # create a templated expression
if i is None:
tempexp = conrule(m, cstemplate)
else:
@@ -509,14 +517,14 @@ def __init__(self, m, package='scipy'):
# Check to make sure it's an EqualityExpression
if not type(tempexp) is EXPR.EqualityExpression:
continue
-
+
# Check to make sure it's a differential equation with
# separable RHS
args = None
- # Case 1: m.dxdt[t] = RHS
+ # Case 1: m.dxdt[t] = RHS
if type(tempexp.arg(0)) is EXPR.GetItemExpression:
args = _check_getitemexpression(tempexp, 0)
-
+
# Case 2: RHS = m.dxdt[t]
if args is None:
if type(tempexp.arg(1)) is EXPR.GetItemExpression:
@@ -581,7 +589,7 @@ def __init__(self, m, package='scipy'):
algexp = substitute_pyomo2casadi(tempexp, templatemap)
alglist.append(algexp)
continue
-
+
# Add the differential equation to rhsdict and derivlist
dv = args[0]
RHS = args[1]
@@ -590,7 +598,7 @@ def __init__(self, m, package='scipy'):
raise DAE_Error(
"Found multiple RHS expressions for the "
"DerivativeVar %s" % str(dvkey))
-
+
derivlist.append(dvkey)
if self._intpackage == 'casadi':
rhsdict[dvkey] = substitute_pyomo2casadi(RHS, templatemap)
@@ -610,7 +618,7 @@ def __init__(self, m, package='scipy'):
diffvars = []
for deriv in derivlist:
- sv = deriv._base.get_state_var()
+ sv = deriv.base.get_state_var()
diffvars.append(_GetItemIndexer(sv[deriv._args]))
# Create ordered list of algebraic variables and time-varying
@@ -618,7 +626,7 @@ def __init__(self, m, package='scipy'):
algvars = []
for item in iterkeys(templatemap):
- if item._base.name in derivs:
+ if item.base.name in derivs:
# Make sure there are no DerivativeVars in the
# template map
raise DAE_Error(
@@ -627,7 +635,7 @@ def __init__(self, m, package='scipy'):
if item not in diffvars:
# Finds time varying parameters and algebraic vars
algvars.append(item)
-
+
if self._intpackage == 'scipy':
# Function sent to scipy integrator
def _rhsfun(t, x):
@@ -641,14 +649,14 @@ def _rhsfun(t, x):
residual.append(rhsdict[d]())
return residual
- self._rhsfun = _rhsfun
-
+ self._rhsfun = _rhsfun
+
# Add any diffvars not added by expression walker to self._templatemap
if self._intpackage == 'casadi':
for _id in diffvars:
if _id not in templatemap:
name = "%s[%s]" % (
- _id._base.name, ','.join(str(x) for x in _id._args))
+ _id.base.name, ','.join(str(x) for x in _id.args))
templatemap[_id] = casadi.SX.sym(name)
self._contset = contset
@@ -693,7 +701,7 @@ def get_variable_order(self, vartype=None):
-------
`list`
- """
+ """
if vartype == 'time-varying':
return self._algvars
elif vartype == 'algebraic':
@@ -702,7 +710,7 @@ def get_variable_order(self, vartype=None):
return self._siminputvars
else:
return self._diffvars
-
+
def simulate(self, numpoints=None, tstep=None, integrator=None,
varying_inputs=None, initcon=None, integrator_options=None):
"""
@@ -765,7 +773,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
integrator = 'lsoda'
else:
# Specify the casadi integrator to use for simulation.
- # Only a subset of these integrators may be used for
+ # Only a subset of these integrators may be used for
# DAE simulation. We defer this check to CasADi.
valid_integrators = ['cvodes', 'idas', 'collocation', 'rk']
if integrator is None:
@@ -784,7 +792,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
raise ValueError(
"The step size %6.2f is larger than the span of the "
"ContinuousSet %s" % (tstep, self._contset.name()))
-
+
if tstep is not None and numpoints is not None:
raise ValueError(
"Cannot specify both the step size and the number of "
@@ -818,7 +826,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
for alg in self._algvars:
if alg._base in varying_inputs:
- # Find all the switching points
+ # Find all the switching points
switchpts += varying_inputs[alg._base].keys()
# Add to dictionary of siminputvars
self._siminputvars[alg._base] = alg
@@ -835,7 +843,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
"for more information.")
# Get the set of unique points
- switchpts = list(set(switchpts))
+ switchpts = list(set(switchpts))
switchpts.sort()
# Make sure all the switchpts are within the bounds of
@@ -881,7 +889,10 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
if self._intpackage == 'scipy':
if not scipy_available:
raise ValueError("The scipy module is not available. "
- "Cannot simulate the model.")
+ "Cannot simulate the model.")
+ if is_pypy:
+ raise ValueError("The scipy ODE integrators do not work "
+ "under pypy. Cannot simulate the model.")
tsim, profile = self._simulate_with_scipy(initcon, tsim, switchpts,
varying_inputs,
integrator,
@@ -902,7 +913,7 @@ def simulate(self, numpoints=None, tstep=None, integrator=None,
self._tsim = tsim
self._simsolution = profile
-
+
return [tsim, profile]
def _simulate_with_scipy(self, initcon, tsim, switchpts,
@@ -1041,11 +1052,11 @@ def initialize_model(self):
"Tried to initialize the model without simulating it first")
tvals = list(self._contset)
-
+
# Build list of state and algebraic variables
# that can be initialized
initvars = self._diffvars + self._simalgvars
-
+
for idx, v in enumerate(initvars):
for idx2, i in enumerate(v._args):
if type(i) is IndexTemplate:
diff --git a/pyomo/dae/tests/simulator_dae_example.casadi.txt b/pyomo/dae/tests/simulator_dae_example.casadi.txt
index 1f9e67c8b21..ca05a8e213c 100644
--- a/pyomo/dae/tests/simulator_dae_example.casadi.txt
+++ b/pyomo/dae/tests/simulator_dae_example.casadi.txt
@@ -1,3 +1,8 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 1)
+ Key : Finite : Members
+ None : False : [0.0..1]
+
4 Param Declarations
p1 : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
Key : Value
@@ -15,7 +20,7 @@
5 Var Declarations
dza : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.00571 : None : None : None : False : True : Reals
0.027684 : None : None : None : False : True : Reals
0.058359 : None : None : None : False : True : Reals
@@ -68,7 +73,7 @@
1 : None : None : None : False : True : Reals
dzb : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.00571 : None : None : None : False : True : Reals
0.027684 : None : None : None : False : True : Reals
0.058359 : None : None : None : False : True : Reals
@@ -120,169 +125,169 @@
0.986024 : None : None : None : False : True : Reals
1 : None : None : None : False : True : Reals
za : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 1.0 : None : False : False : Reals
- 0.00571 : None : 0.977812279974 : None : False : False : Reals
- 0.027684 : None : 0.897420371598 : None : False : False : Reals
- 0.058359 : None : 0.798743226376 : None : False : False : Reals
- 0.086024 : None : 0.721067811612 : None : False : False : Reals
- 0.1 : None : 0.685294140966 : None : False : False : Reals
- 0.10571 : None : 0.671448724858 : None : False : False : Reals
- 0.127684 : None : 0.621070519067 : None : False : False : Reals
- 0.158359 : None : 0.55867835577 : None : False : False : Reals
- 0.186024 : None : 0.509433843825 : None : False : False : Reals
- 0.2 : None : 0.486785158115 : None : False : False : Reals
- 0.20571 : None : 0.477975912233 : None : False : False : Reals
- 0.227684 : None : 0.446049259303 : None : False : False : Reals
- 0.258359 : None : 0.406494394239 : None : False : False : Reals
- 0.286024 : None : 0.375259797217 : None : False : False : Reals
- 0.3 : None : 0.36091878828 : None : False : False : Reals
- 0.30571 : None : 0.355313817067 : None : False : False : Reals
- 0.327684 : None : 0.335080228323 : None : False : False : Reals
- 0.358359 : None : 0.310003844561 : None : False : False : Reals
- 0.386024 : None : 0.290192955028 : None : False : False : Reals
- 0.4 : None : 0.281112407744 : None : False : False : Reals
- 0.40571 : None : 0.277546421538 : None : False : False : Reals
- 0.427684 : None : 0.264723559557 : None : False : False : Reals
- 0.458359 : None : 0.248826345617 : None : False : False : Reals
- 0.486024 : None : 0.236261383877 : None : False : False : Reals
- 0.5 : None : 0.230511758295 : None : False : False : Reals
- 0.50571 : None : 0.228243159375 : None : False : False : Reals
- 0.527684 : None : 0.220116926615 : None : False : False : Reals
- 0.558359 : None : 0.210039118925 : None : False : False : Reals
- 0.586024 : None : 0.202070111838 : None : False : False : Reals
- 0.6 : None : 0.198429643562 : None : False : False : Reals
- 0.60571 : None : 0.196990497732 : None : False : False : Reals
- 0.627684 : None : 0.191836834341 : None : False : False : Reals
- 0.658359 : None : 0.185448275941 : None : False : False : Reals
- 0.686024 : None : 0.180400845587 : None : False : False : Reals
- 0.7 : None : 0.178089178747 : None : False : False : Reals
- 0.70571 : None : 0.177180945029 : None : False : False : Reals
- 0.727684 : None : 0.173907783607 : None : False : False : Reals
- 0.758359 : None : 0.169857970679 : None : False : False : Reals
- 0.786024 : None : 0.166661860054 : None : False : False : Reals
- 0.8 : None : 0.165193212111 : None : False : False : Reals
- 0.80571 : None : 0.164620034483 : None : False : False : Reals
- 0.827684 : None : 0.16254375454 : None : False : False : Reals
- 0.858359 : None : 0.159974903173 : None : False : False : Reals
- 0.886024 : None : 0.157950280624 : None : False : False : Reals
- 0.9 : None : 0.157017256849 : None : False : False : Reals
- 0.90571 : None : 0.15665552715 : None : False : False : Reals
- 0.927684 : None : 0.155339576788 : None : False : False : Reals
- 0.958359 : None : 0.153710866922 : None : False : False : Reals
- 0.986024 : None : 0.152426586541 : None : False : False : Reals
- 1 : None : 0.151833863083 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 1.0 : None : False : False : Reals
+ 0.00571 : None : 0.9778122799736729 : None : False : False : Reals
+ 0.027684 : None : 0.8974203715977497 : None : False : False : Reals
+ 0.058359 : None : 0.7987432263757179 : None : False : False : Reals
+ 0.086024 : None : 0.7210678116123416 : None : False : False : Reals
+ 0.1 : None : 0.6852941409656236 : None : False : False : Reals
+ 0.10571 : None : 0.6714487248581024 : None : False : False : Reals
+ 0.127684 : None : 0.621070519066829 : None : False : False : Reals
+ 0.158359 : None : 0.5586783557695114 : None : False : False : Reals
+ 0.186024 : None : 0.5094338438254381 : None : False : False : Reals
+ 0.2 : None : 0.4867851581150437 : None : False : False : Reals
+ 0.20571 : None : 0.4779759122327108 : None : False : False : Reals
+ 0.227684 : None : 0.4460492593031096 : None : False : False : Reals
+ 0.258359 : None : 0.40649439423897077 : None : False : False : Reals
+ 0.286024 : None : 0.37525979721675473 : None : False : False : Reals
+ 0.3 : None : 0.3609187882803063 : None : False : False : Reals
+ 0.30571 : None : 0.35531381706670456 : None : False : False : Reals
+ 0.327684 : None : 0.335080228323394 : None : False : False : Reals
+ 0.358359 : None : 0.31000384456130714 : None : False : False : Reals
+ 0.386024 : None : 0.2901929550275578 : None : False : False : Reals
+ 0.4 : None : 0.2811124077440353 : None : False : False : Reals
+ 0.40571 : None : 0.2775464215381259 : None : False : False : Reals
+ 0.427684 : None : 0.26472355955677956 : None : False : False : Reals
+ 0.458359 : None : 0.24882634561734235 : None : False : False : Reals
+ 0.486024 : None : 0.23626138387744378 : None : False : False : Reals
+ 0.5 : None : 0.2305117582951921 : None : False : False : Reals
+ 0.50571 : None : 0.22824315937524348 : None : False : False : Reals
+ 0.527684 : None : 0.22011692661458637 : None : False : False : Reals
+ 0.558359 : None : 0.2100391189250308 : None : False : False : Reals
+ 0.586024 : None : 0.2020701118382127 : None : False : False : Reals
+ 0.6 : None : 0.1984296435622711 : None : False : False : Reals
+ 0.60571 : None : 0.1969904977320466 : None : False : False : Reals
+ 0.627684 : None : 0.19183683434108842 : None : False : False : Reals
+ 0.658359 : None : 0.1854482759405722 : None : False : False : Reals
+ 0.686024 : None : 0.18040084558731742 : None : False : False : Reals
+ 0.7 : None : 0.17808917874651323 : None : False : False : Reals
+ 0.70571 : None : 0.17718094502902584 : None : False : False : Reals
+ 0.727684 : None : 0.17390778360749842 : None : False : False : Reals
+ 0.758359 : None : 0.16985797067926942 : None : False : False : Reals
+ 0.786024 : None : 0.1666618600541457 : None : False : False : Reals
+ 0.8 : None : 0.1651932121106028 : None : False : False : Reals
+ 0.80571 : None : 0.16462003448308787 : None : False : False : Reals
+ 0.827684 : None : 0.1625437545396941 : None : False : False : Reals
+ 0.858359 : None : 0.15997490317271887 : None : False : False : Reals
+ 0.886024 : None : 0.15795028062400923 : None : False : False : Reals
+ 0.9 : None : 0.15701725684856155 : None : False : False : Reals
+ 0.90571 : None : 0.15665552714999134 : None : False : False : Reals
+ 0.927684 : None : 0.15533957678837768 : None : False : False : Reals
+ 0.958359 : None : 0.15371086692249716 : None : False : False : Reals
+ 0.986024 : None : 0.15242658654141075 : None : False : False : Reals
+ 1 : None : 0.15183386308321142 : None : False : False : Reals
zb : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.00571 : None : 0.0184570025544 : None : False : False : Reals
- 0.027684 : None : 0.0666150289684 : None : False : False : Reals
- 0.058359 : None : 0.101674636549 : None : False : False : Reals
- 0.086024 : None : 0.124411470858 : None : False : False : Reals
- 0.1 : None : 0.134522400341 : None : False : False : Reals
- 0.10571 : None : 0.138407400785 : None : False : False : Reals
- 0.127684 : None : 0.152494714234 : None : False : False : Reals
- 0.158359 : None : 0.16988956616 : None : False : False : Reals
- 0.186024 : None : 0.183608424397 : None : False : False : Reals
- 0.2 : None : 0.189917250002 : None : False : False : Reals
- 0.20571 : None : 0.192371022979 : None : False : False : Reals
- 0.227684 : None : 0.201263907497 : None : False : False : Reals
- 0.258359 : None : 0.212281436418 : None : False : False : Reals
- 0.286024 : None : 0.220981460928 : None : False : False : Reals
- 0.3 : None : 0.224975972595 : None : False : False : Reals
- 0.30571 : None : 0.226537166716 : None : False : False : Reals
- 0.327684 : None : 0.232172974599 : None : False : False : Reals
- 0.358359 : None : 0.239157687348 : None : False : False : Reals
- 0.386024 : None : 0.244675759814 : None : False : False : Reals
- 0.4 : None : 0.247205030488 : None : False : False : Reals
- 0.40571 : None : 0.248198290662 : None : False : False : Reals
- 0.427684 : None : 0.251769937364 : None : False : False : Reals
- 0.458359 : None : 0.256197905295 : None : False : False : Reals
- 0.486024 : None : 0.259697716041 : None : False : False : Reals
- 0.5 : None : 0.261299201697 : None : False : False : Reals
- 0.50571 : None : 0.261931091289 : None : False : False : Reals
- 0.527684 : None : 0.264194550578 : None : False : False : Reals
- 0.558359 : None : 0.267001596114 : None : False : False : Reals
- 0.586024 : None : 0.269221261995 : None : False : False : Reals
- 0.6 : None : 0.270235268477 : None : False : False : Reals
- 0.60571 : None : 0.270636124408 : None : False : False : Reals
- 0.627684 : None : 0.272071612481 : None : False : False : Reals
- 0.658359 : None : 0.273851064662 : None : False : False : Reals
- 0.686024 : None : 0.275256962333 : None : False : False : Reals
- 0.7 : None : 0.275900847832 : None : False : False : Reals
- 0.70571 : None : 0.276153824835 : None : False : False : Reals
- 0.727684 : None : 0.277065522557 : None : False : False : Reals
- 0.758359 : None : 0.278193546817 : None : False : False : Reals
- 0.786024 : None : 0.279083782998 : None : False : False : Reals
- 0.8 : None : 0.279492856263 : None : False : False : Reals
- 0.80571 : None : 0.279652507622 : None : False : False : Reals
- 0.827684 : None : 0.280230829098 : None : False : False : Reals
- 0.858359 : None : 0.280946350161 : None : False : False : Reals
- 0.886024 : None : 0.281510283184 : None : False : False : Reals
- 0.9 : None : 0.281770165157 : None : False : False : Reals
- 0.90571 : None : 0.281870920384 : None : False : False : Reals
- 0.927684 : None : 0.282237461689 : None : False : False : Reals
- 0.958359 : None : 0.282691118198 : None : False : False : Reals
- 0.986024 : None : 0.283048838258 : None : False : False : Reals
- 1 : None : 0.28321393388 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.00571 : None : 0.018457002554371972 : None : False : False : Reals
+ 0.027684 : None : 0.0666150289683904 : None : False : False : Reals
+ 0.058359 : None : 0.10167463654898541 : None : False : False : Reals
+ 0.086024 : None : 0.1244114708578883 : None : False : False : Reals
+ 0.1 : None : 0.13452240034093335 : None : False : False : Reals
+ 0.10571 : None : 0.13840740078485717 : None : False : False : Reals
+ 0.127684 : None : 0.1524947142344445 : None : False : False : Reals
+ 0.158359 : None : 0.16988956616012657 : None : False : False : Reals
+ 0.186024 : None : 0.1836084243974377 : None : False : False : Reals
+ 0.2 : None : 0.1899172500023451 : None : False : False : Reals
+ 0.20571 : None : 0.19237102297893427 : None : False : False : Reals
+ 0.227684 : None : 0.2012639074972283 : None : False : False : Reals
+ 0.258359 : None : 0.2122814364181978 : None : False : False : Reals
+ 0.286024 : None : 0.22098146092821289 : None : False : False : Reals
+ 0.3 : None : 0.22497597259469362 : None : False : False : Reals
+ 0.30571 : None : 0.2265371667157954 : None : False : False : Reals
+ 0.327684 : None : 0.23217297459915592 : None : False : False : Reals
+ 0.358359 : None : 0.23915768734848866 : None : False : False : Reals
+ 0.386024 : None : 0.24467575981408343 : None : False : False : Reals
+ 0.4 : None : 0.2472050304883578 : None : False : False : Reals
+ 0.40571 : None : 0.24819829066205695 : None : False : False : Reals
+ 0.427684 : None : 0.25176993736394293 : None : False : False : Reals
+ 0.458359 : None : 0.2561979052953936 : None : False : False : Reals
+ 0.486024 : None : 0.2596977160413475 : None : False : False : Reals
+ 0.5 : None : 0.2612992016967095 : None : False : False : Reals
+ 0.50571 : None : 0.26193109128880954 : None : False : False : Reals
+ 0.527684 : None : 0.2641945505775068 : None : False : False : Reals
+ 0.558359 : None : 0.2670015961143072 : None : False : False : Reals
+ 0.586024 : None : 0.2692212619948366 : None : False : False : Reals
+ 0.6 : None : 0.27023526847710955 : None : False : False : Reals
+ 0.60571 : None : 0.2706361244078388 : None : False : False : Reals
+ 0.627684 : None : 0.27207161248132894 : None : False : False : Reals
+ 0.658359 : None : 0.273851064661633 : None : False : False : Reals
+ 0.686024 : None : 0.27525696233275676 : None : False : False : Reals
+ 0.7 : None : 0.2759008478322489 : None : False : False : Reals
+ 0.70571 : None : 0.27615382483549455 : None : False : False : Reals
+ 0.727684 : None : 0.27706552255662475 : None : False : False : Reals
+ 0.758359 : None : 0.27819354681730285 : None : False : False : Reals
+ 0.786024 : None : 0.279083782997723 : None : False : False : Reals
+ 0.8 : None : 0.27949285626276915 : None : False : False : Reals
+ 0.80571 : None : 0.2796525076223737 : None : False : False : Reals
+ 0.827684 : None : 0.28023082909832486 : None : False : False : Reals
+ 0.858359 : None : 0.28094635016142827 : None : False : False : Reals
+ 0.886024 : None : 0.2815102831843716 : None : False : False : Reals
+ 0.9 : None : 0.2817701651569773 : None : False : False : Reals
+ 0.90571 : None : 0.2818709203841266 : None : False : False : Reals
+ 0.927684 : None : 0.2822374616892232 : None : False : False : Reals
+ 0.958359 : None : 0.2826911181983395 : None : False : False : Reals
+ 0.986024 : None : 0.2830488382578243 : None : False : False : Reals
+ 1 : None : 0.28321393387972915 : None : False : False : Reals
zc : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : -5.77096113154e-12 : None : False : False : Reals
- 0.00571 : None : 0.00373071746945 : None : False : False : Reals
- 0.027684 : None : 0.0359645994339 : None : False : False : Reals
- 0.058359 : None : 0.0995821370753 : None : False : False : Reals
- 0.086024 : None : 0.15452071753 : None : False : False : Reals
- 0.1 : None : 0.180183458693 : None : False : False : Reals
- 0.10571 : None : 0.190143874357 : None : False : False : Reals
- 0.127684 : None : 0.226434766699 : None : False : False : Reals
- 0.158359 : None : 0.27143207807 : None : False : False : Reals
- 0.186024 : None : 0.306957731777 : None : False : False : Reals
- 0.2 : None : 0.323297591883 : None : False : False : Reals
- 0.20571 : None : 0.329653064788 : None : False : False : Reals
- 0.227684 : None : 0.3526868332 : None : False : False : Reals
- 0.258359 : None : 0.381224169343 : None : False : False : Reals
- 0.286024 : None : 0.403758741855 : None : False : False : Reals
- 0.3 : None : 0.414105239125 : None : False : False : Reals
- 0.30571 : None : 0.418149016218 : None : False : False : Reals
- 0.327684 : None : 0.432746797077 : None : False : False : Reals
- 0.358359 : None : 0.45083846809 : None : False : False : Reals
- 0.386024 : None : 0.465131285158 : None : False : False : Reals
- 0.4 : None : 0.471682561768 : None : False : False : Reals
- 0.40571 : None : 0.4742552878 : None : False : False : Reals
- 0.427684 : None : 0.483506503079 : None : False : False : Reals
- 0.458359 : None : 0.494975749087 : None : False : False : Reals
- 0.486024 : None : 0.504040900081 : None : False : False : Reals
- 0.5 : None : 0.508189040008 : None : False : False : Reals
- 0.50571 : None : 0.509825749336 : None : False : False : Reals
- 0.527684 : None : 0.515688522808 : None : False : False : Reals
- 0.558359 : None : 0.522959284961 : None : False : False : Reals
- 0.586024 : None : 0.528708626167 : None : False : False : Reals
- 0.6 : None : 0.531335087961 : None : False : False : Reals
- 0.60571 : None : 0.53237337786 : None : False : False : Reals
- 0.627684 : None : 0.536091553178 : None : False : False : Reals
- 0.658359 : None : 0.540700659398 : None : False : False : Reals
- 0.686024 : None : 0.54434219208 : None : False : False : Reals
- 0.7 : None : 0.546009973421 : None : False : False : Reals
- 0.70571 : None : 0.546665230135 : None : False : False : Reals
- 0.727684 : None : 0.549026693836 : None : False : False : Reals
- 0.758359 : None : 0.551948482503 : None : False : False : Reals
- 0.786024 : None : 0.554254356948 : None : False : False : Reals
- 0.8 : None : 0.555313931627 : None : False : False : Reals
- 0.80571 : None : 0.555727457895 : None : False : False : Reals
- 0.827684 : None : 0.557225416362 : None : False : False : Reals
- 0.858359 : None : 0.559078746666 : None : False : False : Reals
- 0.886024 : None : 0.560539436192 : None : False : False : Reals
- 0.9 : None : 0.561212577994 : None : False : False : Reals
- 0.90571 : None : 0.561473552466 : None : False : False : Reals
- 0.927684 : None : 0.562422961522 : None : False : False : Reals
- 0.958359 : None : 0.563598014879 : None : False : False : Reals
- 0.986024 : None : 0.564524575201 : None : False : False : Reals
- 1 : None : 0.564952203037 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : -5.770962826643652e-12 : None : False : False : Reals
+ 0.00571 : None : 0.0037307174694463847 : None : False : False : Reals
+ 0.027684 : None : 0.035964599433859795 : None : False : False : Reals
+ 0.058359 : None : 0.09958213707529827 : None : False : False : Reals
+ 0.086024 : None : 0.15452071752977012 : None : False : False : Reals
+ 0.1 : None : 0.18018345869344304 : None : False : False : Reals
+ 0.10571 : None : 0.1901438743570405 : None : False : False : Reals
+ 0.127684 : None : 0.22643476669872656 : None : False : False : Reals
+ 0.158359 : None : 0.271432078070362 : None : False : False : Reals
+ 0.186024 : None : 0.3069577317771242 : None : False : False : Reals
+ 0.2 : None : 0.32329759188261126 : None : False : False : Reals
+ 0.20571 : None : 0.3296530647883549 : None : False : False : Reals
+ 0.227684 : None : 0.35268683319966193 : None : False : False : Reals
+ 0.258359 : None : 0.3812241693428313 : None : False : False : Reals
+ 0.286024 : None : 0.4037587418550322 : None : False : False : Reals
+ 0.3 : None : 0.4141052391250002 : None : False : False : Reals
+ 0.30571 : None : 0.4181490162175002 : None : False : False : Reals
+ 0.327684 : None : 0.43274679707745 : None : False : False : Reals
+ 0.358359 : None : 0.45083846809020434 : None : False : False : Reals
+ 0.386024 : None : 0.46513128515835883 : None : False : False : Reals
+ 0.4 : None : 0.47168256176760714 : None : False : False : Reals
+ 0.40571 : None : 0.47425528779981724 : None : False : False : Reals
+ 0.427684 : None : 0.48350650307927745 : None : False : False : Reals
+ 0.458359 : None : 0.49497574908726427 : None : False : False : Reals
+ 0.486024 : None : 0.5040409000812086 : None : False : False : Reals
+ 0.5 : None : 0.5081890400080985 : None : False : False : Reals
+ 0.50571 : None : 0.509825749335947 : None : False : False : Reals
+ 0.527684 : None : 0.5156885228079069 : None : False : False : Reals
+ 0.558359 : None : 0.5229592849606621 : None : False : False : Reals
+ 0.586024 : None : 0.5287086261669508 : None : False : False : Reals
+ 0.6 : None : 0.5313350879606192 : None : False : False : Reals
+ 0.60571 : None : 0.5323733778601143 : None : False : False : Reals
+ 0.627684 : None : 0.5360915531775823 : None : False : False : Reals
+ 0.658359 : None : 0.5407006593977952 : None : False : False : Reals
+ 0.686024 : None : 0.5443421920799257 : None : False : False : Reals
+ 0.7 : None : 0.5460099734212374 : None : False : False : Reals
+ 0.70571 : None : 0.5466652301354791 : None : False : False : Reals
+ 0.727684 : None : 0.5490266938358767 : None : False : False : Reals
+ 0.758359 : None : 0.5519484825034281 : None : False : False : Reals
+ 0.786024 : None : 0.5542543569481307 : None : False : False : Reals
+ 0.8 : None : 0.5553139316266272 : None : False : False : Reals
+ 0.80571 : None : 0.5557274578945376 : None : False : False : Reals
+ 0.827684 : None : 0.5572254163619818 : None : False : False : Reals
+ 0.858359 : None : 0.5590787466658532 : None : False : False : Reals
+ 0.886024 : None : 0.5605394361916178 : None : False : False : Reals
+ 0.9 : None : 0.5612125779944603 : None : False : False : Reals
+ 0.90571 : None : 0.5614735524658817 : None : False : False : Reals
+ 0.927684 : None : 0.5624229615224005 : None : False : False : Reals
+ 0.958359 : None : 0.5635980148791631 : None : False : False : Reals
+ 0.986024 : None : 0.5645245752007635 : None : False : False : Reals
+ 1 : None : 0.5649522030370595 : None : False : False : Reals
5 Constraint Declarations
algeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 1.0 : za[0.0] + zb[0.0] + zc[0.0] : 1.0 : True
+ 0 : 1.0 : za[0] + zb[0] + zc[0] : 1.0 : True
0.00571 : 1.0 : za[0.00571] + zb[0.00571] + zc[0.00571] : 1.0 : True
0.027684 : 1.0 : za[0.027684] + zb[0.027684] + zc[0.027684] : 1.0 : True
0.058359 : 1.0 : za[0.058359] + zb[0.058359] + zc[0.058359] : 1.0 : True
@@ -335,7 +340,7 @@
1 : 1.0 : za[1] + zb[1] + zc[1] : 1.0 : True
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dza[0.0] - (-4.0*za[0.0] + 2.0*zb[0.0]) : 0.0 : True
+ 0 : 0.0 : dza[0] - (-4.0*za[0] + 2.0*zb[0]) : 0.0 : True
0.00571 : 0.0 : dza[0.00571] - (-4.0*za[0.00571] + 2.0*zb[0.00571]) : 0.0 : True
0.027684 : 0.0 : dza[0.027684] - (-4.0*za[0.027684] + 2.0*zb[0.027684]) : 0.0 : True
0.058359 : 0.0 : dza[0.058359] - (-4.0*za[0.058359] + 2.0*zb[0.058359]) : 0.0 : True
@@ -388,7 +393,7 @@
1 : 0.0 : dza[1] - (-4.0*za[1] + 2.0*zb[1]) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dzb[0.0] - (4.0*za[0.0] - 42.0*zb[0.0] + 20.0*zc[0.0]) : 0.0 : True
+ 0 : 0.0 : dzb[0] - (4.0*za[0] - 42.0*zb[0] + 20.0*zc[0]) : 0.0 : True
0.00571 : 0.0 : dzb[0.00571] - (4.0*za[0.00571] - 42.0*zb[0.00571] + 20.0*zc[0.00571]) : 0.0 : True
0.027684 : 0.0 : dzb[0.027684] - (4.0*za[0.027684] - 42.0*zb[0.027684] + 20.0*zc[0.027684]) : 0.0 : True
0.058359 : 0.0 : dzb[0.058359] - (4.0*za[0.058359] - 42.0*zb[0.058359] + 20.0*zc[0.058359]) : 0.0 : True
@@ -441,11 +446,11 @@
1 : 0.0 : dzb[1] - (4.0*za[1] - 42.0*zb[1] + 20.0*zc[1]) : 0.0 : True
dza_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.00571 : 0.0 : dza[0.00571] - (-110.386792412*za[0.0] + 87.5592397794*za[0.00571] + 28.9194261538*za[0.027684] - 8.751863962*za[0.058359] + 3.9970520794*za[0.086024] - 1.33706163849*za[0.1]) : 0.0 : True
- 0.027684 : 0.0 : dza[0.027684] - (35.830685225*za[0.0] - 71.6138072015*za[0.00571] + 18.0607772408*za[0.027684] + 23.6379717607*za[0.058359] - 8.65900780283*za[0.086024] + 2.74338077775*za[0.1]) : 0.0 : True
- 0.058359 : 0.0 : dza[0.058359] - (-23.441715579*za[0.0] + 41.2216524624*za[0.00571] - 44.9601712581*za[0.027684] + 8.56765245397*za[0.058359] + 25.1832094921*za[0.086024] - 6.57062757134*za[0.1]) : 0.0 : True
- 0.086024 : 0.0 : dza[0.086024] - (22.8263550021*za[0.0] - 38.7866321972*za[0.00571] + 33.9315191806*za[0.027684] - 51.8834090641*za[0.058359] + 5.81233052581*za[0.086024] + 28.0998365528*za[0.1]) : 0.0 : True
- 0.1 : 0.0 : dza[0.1] - (-50.0*za[0.0] + 84.1242422359*za[0.00571] - 69.7025611666*za[0.027684] + 87.7711420415*za[0.058359] - 182.192823111*za[0.086024] + 130.0*za[0.1]) : 0.0 : True
+ 0.00571 : 0.0 : dza[0.00571] - (-110.386792412*za[0] + 87.5592397794*za[0.00571] + 28.9194261538*za[0.027684] - 8.751863962*za[0.058359] + 3.9970520794*za[0.086024] - 1.33706163849*za[0.1]) : 0.0 : True
+ 0.027684 : 0.0 : dza[0.027684] - (35.830685225*za[0] - 71.6138072015*za[0.00571] + 18.0607772408*za[0.027684] + 23.6379717607*za[0.058359] - 8.65900780283*za[0.086024] + 2.74338077775*za[0.1]) : 0.0 : True
+ 0.058359 : 0.0 : dza[0.058359] - (-23.441715579*za[0] + 41.2216524624*za[0.00571] - 44.9601712581*za[0.027684] + 8.56765245397*za[0.058359] + 25.1832094921*za[0.086024] - 6.57062757134*za[0.1]) : 0.0 : True
+ 0.086024 : 0.0 : dza[0.086024] - (22.8263550021*za[0] - 38.7866321972*za[0.00571] + 33.9315191806*za[0.027684] - 51.8834090641*za[0.058359] + 5.81233052581*za[0.086024] + 28.0998365528*za[0.1]) : 0.0 : True
+ 0.1 : 0.0 : dza[0.1] - (-50.0*za[0] + 84.1242422359*za[0.00571] - 69.7025611666*za[0.027684] + 87.7711420415*za[0.058359] - 182.192823111*za[0.086024] + 130.0*za[0.1]) : 0.0 : True
0.10571 : 0.0 : dza[0.10571] - (-110.386792412*za[0.1] + 87.5592397794*za[0.10571] + 28.9194261538*za[0.127684] - 8.751863962*za[0.158359] + 3.9970520794*za[0.186024] - 1.33706163849*za[0.2]) : 0.0 : True
0.127684 : 0.0 : dza[0.127684] - (35.830685225*za[0.1] - 71.6138072015*za[0.10571] + 18.0607772408*za[0.127684] + 23.6379717607*za[0.158359] - 8.65900780283*za[0.186024] + 2.74338077775*za[0.2]) : 0.0 : True
0.158359 : 0.0 : dza[0.158359] - (-23.441715579*za[0.1] + 41.2216524624*za[0.10571] - 44.9601712581*za[0.127684] + 8.56765245397*za[0.158359] + 25.1832094921*za[0.186024] - 6.57062757134*za[0.2]) : 0.0 : True
@@ -493,11 +498,11 @@
1 : 0.0 : dza[1] - (-50.0*za[0.9] + 84.1242422359*za[0.90571] - 69.7025611666*za[0.927684] + 87.7711420415*za[0.958359] - 182.192823111*za[0.986024] + 130.0*za[1]) : 0.0 : True
dzb_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.00571 : 0.0 : dzb[0.00571] - (-110.386792412*zb[0.0] + 87.5592397794*zb[0.00571] + 28.9194261538*zb[0.027684] - 8.751863962*zb[0.058359] + 3.9970520794*zb[0.086024] - 1.33706163849*zb[0.1]) : 0.0 : True
- 0.027684 : 0.0 : dzb[0.027684] - (35.830685225*zb[0.0] - 71.6138072015*zb[0.00571] + 18.0607772408*zb[0.027684] + 23.6379717607*zb[0.058359] - 8.65900780283*zb[0.086024] + 2.74338077775*zb[0.1]) : 0.0 : True
- 0.058359 : 0.0 : dzb[0.058359] - (-23.441715579*zb[0.0] + 41.2216524624*zb[0.00571] - 44.9601712581*zb[0.027684] + 8.56765245397*zb[0.058359] + 25.1832094921*zb[0.086024] - 6.57062757134*zb[0.1]) : 0.0 : True
- 0.086024 : 0.0 : dzb[0.086024] - (22.8263550021*zb[0.0] - 38.7866321972*zb[0.00571] + 33.9315191806*zb[0.027684] - 51.8834090641*zb[0.058359] + 5.81233052581*zb[0.086024] + 28.0998365528*zb[0.1]) : 0.0 : True
- 0.1 : 0.0 : dzb[0.1] - (-50.0*zb[0.0] + 84.1242422359*zb[0.00571] - 69.7025611666*zb[0.027684] + 87.7711420415*zb[0.058359] - 182.192823111*zb[0.086024] + 130.0*zb[0.1]) : 0.0 : True
+ 0.00571 : 0.0 : dzb[0.00571] - (-110.386792412*zb[0] + 87.5592397794*zb[0.00571] + 28.9194261538*zb[0.027684] - 8.751863962*zb[0.058359] + 3.9970520794*zb[0.086024] - 1.33706163849*zb[0.1]) : 0.0 : True
+ 0.027684 : 0.0 : dzb[0.027684] - (35.830685225*zb[0] - 71.6138072015*zb[0.00571] + 18.0607772408*zb[0.027684] + 23.6379717607*zb[0.058359] - 8.65900780283*zb[0.086024] + 2.74338077775*zb[0.1]) : 0.0 : True
+ 0.058359 : 0.0 : dzb[0.058359] - (-23.441715579*zb[0] + 41.2216524624*zb[0.00571] - 44.9601712581*zb[0.027684] + 8.56765245397*zb[0.058359] + 25.1832094921*zb[0.086024] - 6.57062757134*zb[0.1]) : 0.0 : True
+ 0.086024 : 0.0 : dzb[0.086024] - (22.8263550021*zb[0] - 38.7866321972*zb[0.00571] + 33.9315191806*zb[0.027684] - 51.8834090641*zb[0.058359] + 5.81233052581*zb[0.086024] + 28.0998365528*zb[0.1]) : 0.0 : True
+ 0.1 : 0.0 : dzb[0.1] - (-50.0*zb[0] + 84.1242422359*zb[0.00571] - 69.7025611666*zb[0.027684] + 87.7711420415*zb[0.058359] - 182.192823111*zb[0.086024] + 130.0*zb[0.1]) : 0.0 : True
0.10571 : 0.0 : dzb[0.10571] - (-110.386792412*zb[0.1] + 87.5592397794*zb[0.10571] + 28.9194261538*zb[0.127684] - 8.751863962*zb[0.158359] + 3.9970520794*zb[0.186024] - 1.33706163849*zb[0.2]) : 0.0 : True
0.127684 : 0.0 : dzb[0.127684] - (35.830685225*zb[0.1] - 71.6138072015*zb[0.10571] + 18.0607772408*zb[0.127684] + 23.6379717607*zb[0.158359] - 8.65900780283*zb[0.186024] + 2.74338077775*zb[0.2]) : 0.0 : True
0.158359 : 0.0 : dzb[0.158359] - (-23.441715579*zb[0.1] + 41.2216524624*zb[0.10571] - 44.9601712581*zb[0.127684] + 8.56765245397*zb[0.158359] + 25.1832094921*zb[0.186024] - 6.57062757134*zb[0.2]) : 0.0 : True
@@ -545,10 +550,11 @@
1 : 0.0 : dzb[1] - (-50.0*zb[0.9] + 84.1242422359*zb[0.90571] - 69.7025611666*zb[0.927684] + 87.7711420415*zb[0.958359] - 182.192823111*zb[0.986024] + 130.0*zb[1]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 1)
- [0.0, 0.00571, 0.027684, 0.058359, 0.086024, 0.1, 0.10571, 0.127684, 0.158359, 0.186024, 0.2, 0.20571, 0.227684, 0.258359, 0.286024, 0.3, 0.30571, 0.327684, 0.358359, 0.386024, 0.4, 0.40571, 0.427684, 0.458359, 0.486024, 0.5, 0.50571, 0.527684, 0.558359, 0.586024, 0.6, 0.60571, 0.627684, 0.658359, 0.686024, 0.7, 0.70571, 0.727684, 0.758359, 0.786024, 0.8, 0.80571, 0.827684, 0.858359, 0.886024, 0.9, 0.90571, 0.927684, 0.958359, 0.986024, 1]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..1] : 51 : {0, 0.00571, 0.027684, 0.058359, 0.086024, 0.1, 0.10571, 0.127684, 0.158359, 0.186024, 0.2, 0.20571, 0.227684, 0.258359, 0.286024, 0.3, 0.30571, 0.327684, 0.358359, 0.386024, 0.4, 0.40571, 0.427684, 0.458359, 0.486024, 0.5, 0.50571, 0.527684, 0.558359, 0.586024, 0.6, 0.60571, 0.627684, 0.658359, 0.686024, 0.7, 0.70571, 0.727684, 0.758359, 0.786024, 0.8, 0.80571, 0.827684, 0.858359, 0.886024, 0.9, 0.90571, 0.927684, 0.958359, 0.986024, 1}
-15 Declarations: t p1 p2 p3 p4 za zb zc dza dzb diffeq1 diffeq2 algeq1 dza_disc_eq dzb_disc_eq
+16 Declarations: t_domain t p1 p2 p3 p4 za zb zc dza dzb diffeq1 diffeq2 algeq1 dza_disc_eq dzb_disc_eq
[[ 1.0000 0.0000 -0.0000]
[ 0.9607 0.0327 0.0066]
[ 0.9236 0.0547 0.0217]
diff --git a/pyomo/dae/tests/simulator_dae_multindex_example.casadi.txt b/pyomo/dae/tests/simulator_dae_multindex_example.casadi.txt
index 7cc1b76e38b..742935790d2 100644
--- a/pyomo/dae/tests/simulator_dae_multindex_example.casadi.txt
+++ b/pyomo/dae/tests/simulator_dae_multindex_example.casadi.txt
@@ -1,7 +1,12 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 1)
+ Key : Finite : Members
+ None : False : [0.0..1]
+
4 Param Declarations
p1 : Size=51, Index=t, Domain=Any, Default=(function), Mutable=False
Key : Value
- 0.0 : 4.0
+ 0 : 4.0
0.5 : 4.0
1 : 4.0
p2 : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
@@ -17,7 +22,7 @@
5 Var Declarations
dza : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.003569 : None : None : None : False : True : Reals
0.017303 : None : None : None : False : True : Reals
0.036474 : None : None : None : False : True : Reals
@@ -70,7 +75,7 @@
1 : None : None : None : False : True : Reals
dzb : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.003569 : None : None : None : False : True : Reals
0.017303 : None : None : None : False : True : Reals
0.036474 : None : None : None : False : True : Reals
@@ -122,169 +127,169 @@
0.98253 : None : None : None : False : True : Reals
1 : None : None : None : False : True : Reals
za : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 1.0 : None : False : False : Reals
- 0.003569 : None : 0.986131703541 : None : False : False : Reals
- 0.017303 : None : 0.934250838694 : None : False : False : Reals
- 0.036474 : None : 0.867709611122 : None : False : False : Reals
- 0.053765 : None : 0.812664118094 : None : False : False : Reals
- 0.0625 : None : 0.786455874997 : None : False : False : Reals
- 0.066069 : None : 0.776132551908 : None : False : False : Reals
- 0.079803 : None : 0.737597532656 : None : False : False : Reals
- 0.098974 : None : 0.687877478472 : None : False : False : Reals
- 0.116265 : None : 0.646624432281 : None : False : False : Reals
- 0.125 : None : 0.626956139912 : None : False : False : Reals
- 0.128569 : None : 0.61912883896 : None : False : False : Reals
- 0.142303 : None : 0.59017652772 : None : False : False : Reals
- 0.161474 : None : 0.55272633432 : None : False : False : Reals
- 0.178765 : None : 0.521748559352 : None : False : False : Reals
- 0.1875 : None : 0.50697548736 : None : False : False : Reals
- 0.191069 : None : 0.501037186112 : None : False : False : Reals
- 0.204803 : None : 0.479349165515 : None : False : False : Reals
- 0.223974 : None : 0.45118038064 : None : False : False : Reals
- 0.241265 : None : 0.427805693207 : None : False : False : Reals
- 0.25 : None : 0.416710624775 : None : False : False : Reals
- 0.257138 : None : 0.407962333424 : None : False : False : Reals
- 0.284605 : None : 0.37674575132 : None : False : False : Reals
- 0.322949 : None : 0.339223523994 : None : False : False : Reals
- 0.35753 : None : 0.310630383394 : None : False : False : Reals
- 0.375 : None : 0.297769734643 : None : False : False : Reals
- 0.382138 : None : 0.292816904119 : None : False : False : Reals
- 0.409605 : None : 0.275188203946 : None : False : False : Reals
- 0.447949 : None : 0.253969349981 : None : False : False : Reals
- 0.48253 : None : 0.237763980628 : None : False : False : Reals
- 0.5 : None : 0.230484407303 : None : False : False : Reals
- 0.507138 : None : 0.232527501369 : None : False : False : Reals
- 0.534605 : None : 0.239996287605 : None : False : False : Reals
- 0.572949 : None : 0.249711906 : None : False : False : Reals
- 0.60753 : None : 0.257950282412 : None : False : False : Reals
- 0.625 : None : 0.261937428851 : None : False : False : Reals
- 0.632138 : None : 0.263531507346 : None : False : False : Reals
- 0.659605 : None : 0.26950531611 : None : False : False : Reals
- 0.697949 : None : 0.277411169023 : None : False : False : Reals
- 0.73253 : None : 0.284125274347 : None : False : False : Reals
- 0.75 : None : 0.287378287549 : None : False : False : Reals
- 0.757138 : None : 0.288682802708 : None : False : False : Reals
- 0.784605 : None : 0.293552545417 : None : False : False : Reals
- 0.822949 : None : 0.299999461607 : None : False : False : Reals
- 0.85753 : None : 0.305480483747 : None : False : False : Reals
- 0.875 : None : 0.308131445597 : None : False : False : Reals
- 0.882138 : None : 0.309193948971 : None : False : False : Reals
- 0.909605 : None : 0.313170517789 : None : False : False : Reals
- 0.947949 : None : 0.318428696723 : None : False : False : Reals
- 0.98253 : None : 0.322897292504 : None : False : False : Reals
- 1 : None : 0.32506319462 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 1.0 : None : False : False : Reals
+ 0.003569 : None : 0.986131703541118 : None : False : False : Reals
+ 0.017303 : None : 0.9342508386935109 : None : False : False : Reals
+ 0.036474 : None : 0.8677096111218232 : None : False : False : Reals
+ 0.053765 : None : 0.8126641180938677 : None : False : False : Reals
+ 0.0625 : None : 0.7864558749971251 : None : False : False : Reals
+ 0.066069 : None : 0.7761325519083723 : None : False : False : Reals
+ 0.079803 : None : 0.737597532655553 : None : False : False : Reals
+ 0.098974 : None : 0.6878774784720678 : None : False : False : Reals
+ 0.116265 : None : 0.6466244322809769 : None : False : False : Reals
+ 0.125 : None : 0.6269561399119233 : None : False : False : Reals
+ 0.128569 : None : 0.6191288389600765 : None : False : False : Reals
+ 0.142303 : None : 0.5901765277203566 : None : False : False : Reals
+ 0.161474 : None : 0.5527263343203653 : None : False : False : Reals
+ 0.178765 : None : 0.5217485593521063 : None : False : False : Reals
+ 0.1875 : None : 0.5069754873604765 : None : False : False : Reals
+ 0.191069 : None : 0.501037186112201 : None : False : False : Reals
+ 0.204803 : None : 0.4793491655149263 : None : False : False : Reals
+ 0.223974 : None : 0.4511803806395512 : None : False : False : Reals
+ 0.241265 : None : 0.4278056932067797 : None : False : False : Reals
+ 0.25 : None : 0.41671062477535553 : None : False : False : Reals
+ 0.257138 : None : 0.40796233342413146 : None : False : False : Reals
+ 0.284605 : None : 0.3767457513199262 : None : False : False : Reals
+ 0.322949 : None : 0.33922352399441796 : None : False : False : Reals
+ 0.35753 : None : 0.3106303833940118 : None : False : False : Reals
+ 0.375 : None : 0.29776973464278017 : None : False : False : Reals
+ 0.382138 : None : 0.2928169041191659 : None : False : False : Reals
+ 0.409605 : None : 0.27518820394616705 : None : False : False : Reals
+ 0.447949 : None : 0.25396934998147525 : None : False : False : Reals
+ 0.48253 : None : 0.23776398062754575 : None : False : False : Reals
+ 0.5 : None : 0.23048440730298844 : None : False : False : Reals
+ 0.507138 : None : 0.23252750136899458 : None : False : False : Reals
+ 0.534605 : None : 0.23999628760485606 : None : False : False : Reals
+ 0.572949 : None : 0.2497119059996273 : None : False : False : Reals
+ 0.60753 : None : 0.25795028241229795 : None : False : False : Reals
+ 0.625 : None : 0.26193742885066273 : None : False : False : Reals
+ 0.632138 : None : 0.263531507345661 : None : False : False : Reals
+ 0.659605 : None : 0.26950531611020345 : None : False : False : Reals
+ 0.697949 : None : 0.2774111690225376 : None : False : False : Reals
+ 0.73253 : None : 0.28412527434728513 : None : False : False : Reals
+ 0.75 : None : 0.2873782875487776 : None : False : False : Reals
+ 0.757138 : None : 0.28868280270753516 : None : False : False : Reals
+ 0.784605 : None : 0.2935525454169698 : None : False : False : Reals
+ 0.822949 : None : 0.29999946160710617 : None : False : False : Reals
+ 0.85753 : None : 0.30548048374729914 : None : False : False : Reals
+ 0.875 : None : 0.3081314455965223 : None : False : False : Reals
+ 0.882138 : None : 0.30919394897116637 : None : False : False : Reals
+ 0.909605 : None : 0.31317051778907457 : None : False : False : Reals
+ 0.947949 : None : 0.318428696723039 : None : False : False : Reals
+ 0.98253 : None : 0.32289729250413535 : None : False : False : Reals
+ 1 : None : 0.3250631946200124 : None : False : False : Reals
zb : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.003569 : None : 0.0115364347298 : None : False : False : Reals
- 0.017303 : None : 0.0483561001833 : None : False : False : Reals
- 0.036474 : None : 0.0785795698643 : None : False : False : Reals
- 0.053765 : None : 0.0973535498162 : None : False : False : Reals
- 0.0625 : None : 0.105412392254 : None : False : False : Reals
- 0.066069 : None : 0.108473109906 : None : False : False : Reals
- 0.079803 : None : 0.119701191676 : None : False : False : Reals
- 0.098974 : None : 0.133794724374 : None : False : False : Reals
- 0.116265 : None : 0.145357457404 : None : False : False : Reals
- 0.125 : None : 0.150852078114 : None : False : False : Reals
- 0.128569 : None : 0.153036740945 : None : False : False : Reals
- 0.142303 : None : 0.161111586991 : None : False : False : Reals
- 0.161474 : None : 0.171548118433 : None : False : False : Reals
- 0.178765 : None : 0.18017805536 : None : False : False : Reals
- 0.1875 : None : 0.184293247623 : None : False : False : Reals
- 0.191069 : None : 0.185947392979 : None : False : False : Reals
- 0.204803 : None : 0.191988527732 : None : False : False : Reals
- 0.223974 : None : 0.199834699859 : None : False : False : Reals
- 0.241265 : None : 0.206345455647 : None : False : False : Reals
- 0.25 : None : 0.209435853444 : None : False : False : Reals
- 0.257138 : None : 0.211872582834 : None : False : False : Reals
- 0.284605 : None : 0.220567572125 : None : False : False : Reals
- 0.322949 : None : 0.231018914829 : None : False : False : Reals
- 0.35753 : None : 0.238983172439 : None : False : False : Reals
- 0.375 : None : 0.242565343428 : None : False : False : Reals
- 0.382138 : None : 0.243944891702 : None : False : False : Reals
- 0.409605 : None : 0.248855143076 : None : False : False : Reals
- 0.447949 : None : 0.254765386359 : None : False : False : Reals
- 0.48253 : None : 0.259279186941 : None : False : False : Reals
- 0.5 : None : 0.261306819979 : None : False : False : Reals
- 0.507138 : None : 0.257985457951 : None : False : False : Reals
- 0.534605 : None : 0.25132310951 : None : False : False : Reals
- 0.572949 : None : 0.247392942517 : None : False : False : Reals
- 0.60753 : None : 0.244717148705 : None : False : False : Reals
- 0.625 : None : 0.243455155504 : None : False : False : Reals
- 0.632138 : None : 0.242952200422 : None : False : False : Reals
- 0.659605 : None : 0.241070113832 : None : False : False : Reals
- 0.697949 : None : 0.238581560802 : None : False : False : Reals
- 0.73253 : None : 0.236468442507 : None : False : False : Reals
- 0.75 : None : 0.235444641301 : None : False : False : Reals
- 0.757138 : None : 0.235034079817 : None : False : False : Reals
- 0.784605 : None : 0.233501459113 : None : False : False : Reals
- 0.822949 : None : 0.231472466338 : None : False : False : Reals
- 0.85753 : None : 0.229747462762 : None : False : False : Reals
- 0.875 : None : 0.228913144178 : None : False : False : Reals
- 0.882138 : None : 0.228578749961 : None : False : False : Reals
- 0.909605 : None : 0.227327232417 : None : False : False : Reals
- 0.947949 : None : 0.225672362733 : None : False : False : Reals
- 0.98253 : None : 0.224265993002 : None : False : False : Reals
- 1 : None : 0.223584333858 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.003569 : None : 0.011536434729799675 : None : False : False : Reals
+ 0.017303 : None : 0.048356100183263764 : None : False : False : Reals
+ 0.036474 : None : 0.07857956986428966 : None : False : False : Reals
+ 0.053765 : None : 0.09735354981621511 : None : False : False : Reals
+ 0.0625 : None : 0.10541239225355176 : None : False : False : Reals
+ 0.066069 : None : 0.10847310990581088 : None : False : False : Reals
+ 0.079803 : None : 0.1197011916761294 : None : False : False : Reals
+ 0.098974 : None : 0.13379472437418877 : None : False : False : Reals
+ 0.116265 : None : 0.14535745740425296 : None : False : False : Reals
+ 0.125 : None : 0.1508520781144476 : None : False : False : Reals
+ 0.128569 : None : 0.15303674094460523 : None : False : False : Reals
+ 0.142303 : None : 0.16111158699071282 : None : False : False : Reals
+ 0.161474 : None : 0.17154811843336243 : None : False : False : Reals
+ 0.178765 : None : 0.18017805535974285 : None : False : False : Reals
+ 0.1875 : None : 0.18429324762326083 : None : False : False : Reals
+ 0.191069 : None : 0.185947392979341 : None : False : False : Reals
+ 0.204803 : None : 0.1919885277322887 : None : False : False : Reals
+ 0.223974 : None : 0.19983469985874322 : None : False : False : Reals
+ 0.241265 : None : 0.20634545564695037 : None : False : False : Reals
+ 0.25 : None : 0.20943585344438775 : None : False : False : Reals
+ 0.257138 : None : 0.2118725828343882 : None : False : False : Reals
+ 0.284605 : None : 0.22056757212475445 : None : False : False : Reals
+ 0.322949 : None : 0.23101891482863982 : None : False : False : Reals
+ 0.35753 : None : 0.23898317243929718 : None : False : False : Reals
+ 0.375 : None : 0.24256534342782096 : None : False : False : Reals
+ 0.382138 : None : 0.24394489170191538 : None : False : False : Reals
+ 0.409605 : None : 0.24885514307571055 : None : False : False : Reals
+ 0.447949 : None : 0.2547653863589431 : None : False : False : Reals
+ 0.48253 : None : 0.25927918694067165 : None : False : False : Reals
+ 0.5 : None : 0.2613068199794028 : None : False : False : Reals
+ 0.507138 : None : 0.25798545795058325 : None : False : False : Reals
+ 0.534605 : None : 0.2513231095095534 : None : False : False : Reals
+ 0.572949 : None : 0.24739294251710064 : None : False : False : Reals
+ 0.60753 : None : 0.24471714870512382 : None : False : False : Reals
+ 0.625 : None : 0.2434551555041877 : None : False : False : Reals
+ 0.632138 : None : 0.24295220042235288 : None : False : False : Reals
+ 0.659605 : None : 0.24107011383158589 : None : False : False : Reals
+ 0.697949 : None : 0.23858156080235898 : None : False : False : Reals
+ 0.73253 : None : 0.23646844250714363 : None : False : False : Reals
+ 0.75 : None : 0.23544464130126255 : None : False : False : Reals
+ 0.757138 : None : 0.23503407981650193 : None : False : False : Reals
+ 0.784605 : None : 0.23350145911300113 : None : False : False : Reals
+ 0.822949 : None : 0.23147246633786336 : None : False : False : Reals
+ 0.85753 : None : 0.22974746276246172 : None : False : False : Reals
+ 0.875 : None : 0.22891314417789999 : None : False : False : Reals
+ 0.882138 : None : 0.22857874996126462 : None : False : False : Reals
+ 0.909605 : None : 0.22732723241706118 : None : False : False : Reals
+ 0.947949 : None : 0.22567236273264127 : None : False : False : Reals
+ 0.98253 : None : 0.22426599300223676 : None : False : False : Reals
+ 1 : None : 0.22358433385829213 : None : False : False : Reals
zc : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.003569 : None : 0.00233186172908 : None : False : False : Reals
- 0.017303 : None : 0.0173930611232 : None : False : False : Reals
- 0.036474 : None : 0.0537108190139 : None : False : False : Reals
- 0.053765 : None : 0.0899823320899 : None : False : False : Reals
- 0.0625 : None : 0.108131732749 : None : False : False : Reals
- 0.066069 : None : 0.115394338186 : None : False : False : Reals
- 0.079803 : None : 0.142701275668 : None : False : False : Reals
- 0.098974 : None : 0.178327797154 : None : False : False : Reals
- 0.116265 : None : 0.208018110315 : None : False : False : Reals
- 0.125 : None : 0.222191781974 : None : False : False : Reals
- 0.128569 : None : 0.227834420095 : None : False : False : Reals
- 0.142303 : None : 0.248711885289 : None : False : False : Reals
- 0.161474 : None : 0.275725547246 : None : False : False : Reals
- 0.178765 : None : 0.298073385288 : None : False : False : Reals
- 0.1875 : None : 0.308731265016 : None : False : False : Reals
- 0.191069 : None : 0.313015420908 : None : False : False : Reals
- 0.204803 : None : 0.328662306753 : None : False : False : Reals
- 0.223974 : None : 0.348984919502 : None : False : False : Reals
- 0.241265 : None : 0.365848851146 : None : False : False : Reals
- 0.25 : None : 0.37385352178 : None : False : False : Reals
- 0.257138 : None : 0.380165083741 : None : False : False : Reals
- 0.284605 : None : 0.402686676555 : None : False : False : Reals
- 0.322949 : None : 0.429757561177 : None : False : False : Reals
- 0.35753 : None : 0.450386444167 : None : False : False : Reals
- 0.375 : None : 0.459664921929 : None : False : False : Reals
- 0.382138 : None : 0.463238204179 : None : False : False : Reals
- 0.409605 : None : 0.475956652978 : None : False : False : Reals
- 0.447949 : None : 0.49126526366 : None : False : False : Reals
- 0.48253 : None : 0.502956832432 : None : False : False : Reals
- 0.5 : None : 0.508208772718 : None : False : False : Reals
- 0.507138 : None : 0.50948704068 : None : False : False : Reals
- 0.534605 : None : 0.508680602886 : None : False : False : Reals
- 0.572949 : None : 0.502895151483 : None : False : False : Reals
- 0.60753 : None : 0.497332568883 : None : False : False : Reals
- 0.625 : None : 0.494607415645 : None : False : False : Reals
- 0.632138 : None : 0.493516292232 : None : False : False : Reals
- 0.659605 : None : 0.489424570058 : None : False : False : Reals
- 0.697949 : None : 0.484007270175 : None : False : False : Reals
- 0.73253 : None : 0.479406283146 : None : False : False : Reals
- 0.75 : None : 0.47717707115 : None : False : False : Reals
- 0.757138 : None : 0.476283117476 : None : False : False : Reals
- 0.784605 : None : 0.47294599547 : None : False : False : Reals
- 0.822949 : None : 0.468528072055 : None : False : False : Reals
- 0.85753 : None : 0.46477205349 : None : False : False : Reals
- 0.875 : None : 0.462955410226 : None : False : False : Reals
- 0.882138 : None : 0.462227301068 : None : False : False : Reals
- 0.909605 : None : 0.459502249794 : None : False : False : Reals
- 0.947949 : None : 0.455898940544 : None : False : False : Reals
- 0.98253 : None : 0.452836714494 : None : False : False : Reals
- 1 : None : 0.451352471522 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.003569 : None : 0.0023318617290822813 : None : False : False : Reals
+ 0.017303 : None : 0.01739306112322537 : None : False : False : Reals
+ 0.036474 : None : 0.05371081901388766 : None : False : False : Reals
+ 0.053765 : None : 0.08998233208991725 : None : False : False : Reals
+ 0.0625 : None : 0.10813173274932315 : None : False : False : Reals
+ 0.066069 : None : 0.11539433818581682 : None : False : False : Reals
+ 0.079803 : None : 0.14270127566831767 : None : False : False : Reals
+ 0.098974 : None : 0.1783277971537434 : None : False : False : Reals
+ 0.116265 : None : 0.2080181103147703 : None : False : False : Reals
+ 0.125 : None : 0.22219178197362932 : None : False : False : Reals
+ 0.128569 : None : 0.22783442009531835 : None : False : False : Reals
+ 0.142303 : None : 0.24871188528893057 : None : False : False : Reals
+ 0.161474 : None : 0.27572554724627235 : None : False : False : Reals
+ 0.178765 : None : 0.2980733852881508 : None : False : False : Reals
+ 0.1875 : None : 0.3087312650162627 : None : False : False : Reals
+ 0.191069 : None : 0.31301542090845796 : None : False : False : Reals
+ 0.204803 : None : 0.328662306752785 : None : False : False : Reals
+ 0.223974 : None : 0.3489849195017057 : None : False : False : Reals
+ 0.241265 : None : 0.36584885114627 : None : False : False : Reals
+ 0.25 : None : 0.37385352178025677 : None : False : False : Reals
+ 0.257138 : None : 0.3801650837414804 : None : False : False : Reals
+ 0.284605 : None : 0.40268667655531926 : None : False : False : Reals
+ 0.322949 : None : 0.42975756117694225 : None : False : False : Reals
+ 0.35753 : None : 0.45038644416669105 : None : False : False : Reals
+ 0.375 : None : 0.45966492192939884 : None : False : False : Reals
+ 0.382138 : None : 0.46323820417891864 : None : False : False : Reals
+ 0.409605 : None : 0.4759566529781225 : None : False : False : Reals
+ 0.447949 : None : 0.49126526365958184 : None : False : False : Reals
+ 0.48253 : None : 0.5029568324317825 : None : False : False : Reals
+ 0.5 : None : 0.5082087727176089 : None : False : False : Reals
+ 0.507138 : None : 0.5094870406804222 : None : False : False : Reals
+ 0.534605 : None : 0.5086806028855905 : None : False : False : Reals
+ 0.572949 : None : 0.502895151483272 : None : False : False : Reals
+ 0.60753 : None : 0.49733256888257826 : None : False : False : Reals
+ 0.625 : None : 0.4946074156451496 : None : False : False : Reals
+ 0.632138 : None : 0.4935162922319862 : None : False : False : Reals
+ 0.659605 : None : 0.48942457005821055 : None : False : False : Reals
+ 0.697949 : None : 0.48400727017510337 : None : False : False : Reals
+ 0.73253 : None : 0.4794062831455712 : None : False : False : Reals
+ 0.75 : None : 0.47717707114995994 : None : False : False : Reals
+ 0.757138 : None : 0.47628311747596297 : None : False : False : Reals
+ 0.784605 : None : 0.4729459954700291 : None : False : False : Reals
+ 0.822949 : None : 0.4685280720550305 : None : False : False : Reals
+ 0.85753 : None : 0.4647720534902391 : None : False : False : Reals
+ 0.875 : None : 0.46295541022557773 : None : False : False : Reals
+ 0.882138 : None : 0.4622273010675691 : None : False : False : Reals
+ 0.909605 : None : 0.45950224979386434 : None : False : False : Reals
+ 0.947949 : None : 0.45589894054431973 : None : False : False : Reals
+ 0.98253 : None : 0.45283671449362783 : None : False : False : Reals
+ 1 : None : 0.45135247152169555 : None : False : False : Reals
5 Constraint Declarations
algeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 1.0 : za[0.0] + zb[0.0] + zc[0.0] : 1.0 : True
+ 0 : 1.0 : za[0] + zb[0] + zc[0] : 1.0 : True
0.003569 : 1.0 : za[0.003569] + zb[0.003569] + zc[0.003569] : 1.0 : True
0.017303 : 1.0 : za[0.017303] + zb[0.017303] + zc[0.017303] : 1.0 : True
0.036474 : 1.0 : za[0.036474] + zb[0.036474] + zc[0.036474] : 1.0 : True
@@ -337,7 +342,7 @@
1 : 1.0 : za[1] + zb[1] + zc[1] : 1.0 : True
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dza[0.0] - (-4.0*za[0.0] + 2.0*zb[0.0]) : 0.0 : True
+ 0 : 0.0 : dza[0] - (-4.0*za[0] + 2.0*zb[0]) : 0.0 : True
0.003569 : 0.0 : dza[0.003569] - (-4.0*za[0.003569] + 2.0*zb[0.003569]) : 0.0 : True
0.017303 : 0.0 : dza[0.017303] - (-4.0*za[0.017303] + 2.0*zb[0.017303]) : 0.0 : True
0.036474 : 0.0 : dza[0.036474] - (-4.0*za[0.036474] + 2.0*zb[0.036474]) : 0.0 : True
@@ -390,7 +395,7 @@
1 : 0.0 : dza[1] - (-4.0*za[1] + 2.0*zb[1]) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dzb[0.0] - (4.0*za[0.0] - 42.0*zb[0.0] + 20.0*zc[0.0]) : 0.0 : True
+ 0 : 0.0 : dzb[0] - (4.0*za[0] - 42.0*zb[0] + 20.0*zc[0]) : 0.0 : True
0.003569 : 0.0 : dzb[0.003569] - (4.0*za[0.003569] - 42.0*zb[0.003569] + 20.0*zc[0.003569]) : 0.0 : True
0.017303 : 0.0 : dzb[0.017303] - (4.0*za[0.017303] - 42.0*zb[0.017303] + 20.0*zc[0.017303]) : 0.0 : True
0.036474 : 0.0 : dzb[0.036474] - (4.0*za[0.036474] - 42.0*zb[0.036474] + 20.0*zc[0.036474]) : 0.0 : True
@@ -443,11 +448,11 @@
1 : 0.0 : dzb[1] - (4.0*za[1] - 42.0*zb[1] + 20.0*zc[1]) : 0.0 : True
dza_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.003569 : 0.0 : dza[0.003569] - (-176.618867859*za[0.0] + 140.094783647*za[0.003569] + 46.2710818461*za[0.017303] - 14.0029823392*za[0.036474] + 6.39528332704*za[0.053765] - 2.13929862159*za[0.0625]) : 0.0 : True
- 0.017303 : 0.0 : dza[0.017303] - (57.32909636*za[0.0] - 114.582091522*za[0.003569] + 28.8972435853*za[0.017303] + 37.8207548171*za[0.036474] - 13.8544124845*za[0.053765] + 4.3894092444*za[0.0625]) : 0.0 : True
- 0.036474 : 0.0 : dza[0.036474] - (-37.5067449265*za[0.0] + 65.9546439399*za[0.003569] - 71.936274013*za[0.017303] + 13.7082439264*za[0.036474] + 40.2931351874*za[0.053765] - 10.5130041141*za[0.0625]) : 0.0 : True
- 0.053765 : 0.0 : dza[0.053765] - (36.5221680033*za[0.0] - 62.0586115156*za[0.003569] + 54.290430689*za[0.017303] - 83.0134545025*za[0.036474] + 9.29972884129*za[0.053765] + 44.9597384845*za[0.0625]) : 0.0 : True
- 0.0625 : 0.0 : dza[0.0625] - (-80.0*za[0.0] + 134.598787578*za[0.003569] - 111.524097867*za[0.017303] + 140.433827266*za[0.036474] - 291.508516977*za[0.053765] + 208.0*za[0.0625]) : 0.0 : True
+ 0.003569 : 0.0 : dza[0.003569] - (-176.618867859*za[0] + 140.094783647*za[0.003569] + 46.2710818461*za[0.017303] - 14.0029823392*za[0.036474] + 6.39528332704*za[0.053765] - 2.13929862159*za[0.0625]) : 0.0 : True
+ 0.017303 : 0.0 : dza[0.017303] - (57.32909636*za[0] - 114.582091522*za[0.003569] + 28.8972435853*za[0.017303] + 37.8207548171*za[0.036474] - 13.8544124845*za[0.053765] + 4.3894092444*za[0.0625]) : 0.0 : True
+ 0.036474 : 0.0 : dza[0.036474] - (-37.5067449265*za[0] + 65.9546439399*za[0.003569] - 71.936274013*za[0.017303] + 13.7082439264*za[0.036474] + 40.2931351874*za[0.053765] - 10.5130041141*za[0.0625]) : 0.0 : True
+ 0.053765 : 0.0 : dza[0.053765] - (36.5221680033*za[0] - 62.0586115156*za[0.003569] + 54.290430689*za[0.017303] - 83.0134545025*za[0.036474] + 9.29972884129*za[0.053765] + 44.9597384845*za[0.0625]) : 0.0 : True
+ 0.0625 : 0.0 : dza[0.0625] - (-80.0*za[0] + 134.598787578*za[0.003569] - 111.524097867*za[0.017303] + 140.433827266*za[0.036474] - 291.508516977*za[0.053765] + 208.0*za[0.0625]) : 0.0 : True
0.066069 : 0.0 : dza[0.066069] - (-176.618867859*za[0.0625] + 140.094783647*za[0.066069] + 46.2710818461*za[0.079803] - 14.0029823392*za[0.098974] + 6.39528332704*za[0.116265] - 2.13929862159*za[0.125]) : 0.0 : True
0.079803 : 0.0 : dza[0.079803] - (57.32909636*za[0.0625] - 114.582091522*za[0.066069] + 28.8972435853*za[0.079803] + 37.8207548171*za[0.098974] - 13.8544124845*za[0.116265] + 4.3894092444*za[0.125]) : 0.0 : True
0.098974 : 0.0 : dza[0.098974] - (-37.5067449265*za[0.0625] + 65.9546439399*za[0.066069] - 71.936274013*za[0.079803] + 13.7082439264*za[0.098974] + 40.2931351874*za[0.116265] - 10.5130041141*za[0.125]) : 0.0 : True
@@ -464,42 +469,42 @@
0.241265 : 0.0 : dza[0.241265] - (36.5221680033*za[0.1875] - 62.0586115156*za[0.191069] + 54.290430689*za[0.204803] - 83.0134545025*za[0.223974] + 9.29972884129*za[0.241265] + 44.9597384845*za[0.25]) : 0.0 : True
0.25 : 0.0 : dza[0.25] - (-80.0*za[0.1875] + 134.598787578*za[0.191069] - 111.524097867*za[0.204803] + 140.433827266*za[0.223974] - 291.508516977*za[0.241265] + 208.0*za[0.25]) : 0.0 : True
0.257138 : 0.0 : dza[0.257138] - (-88.3094339297*za[0.25] + 70.0473918235*za[0.257138] + 23.135540923*za[0.284605] - 7.0014911696*za[0.322949] + 3.19764166352*za[0.35753] - 1.06964931079*za[0.375]) : 0.0 : True
- 0.284605 : 0.0 : dza[0.284605] - (28.66454818*za[0.25] - 57.2910457612*za[0.257138] + 14.4486217927*za[0.284605] + 18.9103774085*za[0.322949] - 6.92720624227*za[0.35753] + 2.1947046222*za[0.375]) : 0.0 : True
+ 0.284605 : 0.0 : dza[0.284605] - (28.66454818*za[0.25] - 57.2910457612*za[0.257138] + 14.4486217927*za[0.284605] + 18.9103774085*za[0.322949] - 6.92720624226*za[0.35753] + 2.1947046222*za[0.375]) : 0.0 : True
0.322949 : 0.0 : dza[0.322949] - (-18.7533724632*za[0.25] + 32.9773219699*za[0.257138] - 35.9681370065*za[0.284605] + 6.85412196318*za[0.322949] + 20.1465675937*za[0.35753] - 5.25650205707*za[0.375]) : 0.0 : True
0.35753 : 0.0 : dza[0.35753] - (18.2610840016*za[0.25] - 31.0293057578*za[0.257138] + 27.1452153445*za[0.284605] - 41.5067272513*za[0.322949] + 4.64986442065*za[0.35753] + 22.4798692422*za[0.375]) : 0.0 : True
0.375 : 0.0 : dza[0.375] - (-40.0*za[0.25] + 67.2993937888*za[0.257138] - 55.7620489333*za[0.284605] + 70.2169136332*za[0.322949] - 145.754258489*za[0.35753] + 104.0*za[0.375]) : 0.0 : True
0.382138 : 0.0 : dza[0.382138] - (-88.3094339297*za[0.375] + 70.0473918235*za[0.382138] + 23.135540923*za[0.409605] - 7.0014911696*za[0.447949] + 3.19764166352*za[0.48253] - 1.06964931079*za[0.5]) : 0.0 : True
- 0.409605 : 0.0 : dza[0.409605] - (28.66454818*za[0.375] - 57.2910457612*za[0.382138] + 14.4486217927*za[0.409605] + 18.9103774085*za[0.447949] - 6.92720624227*za[0.48253] + 2.1947046222*za[0.5]) : 0.0 : True
+ 0.409605 : 0.0 : dza[0.409605] - (28.66454818*za[0.375] - 57.2910457612*za[0.382138] + 14.4486217927*za[0.409605] + 18.9103774085*za[0.447949] - 6.92720624226*za[0.48253] + 2.1947046222*za[0.5]) : 0.0 : True
0.447949 : 0.0 : dza[0.447949] - (-18.7533724632*za[0.375] + 32.9773219699*za[0.382138] - 35.9681370065*za[0.409605] + 6.85412196318*za[0.447949] + 20.1465675937*za[0.48253] - 5.25650205707*za[0.5]) : 0.0 : True
0.48253 : 0.0 : dza[0.48253] - (18.2610840016*za[0.375] - 31.0293057578*za[0.382138] + 27.1452153445*za[0.409605] - 41.5067272513*za[0.447949] + 4.64986442065*za[0.48253] + 22.4798692422*za[0.5]) : 0.0 : True
0.5 : 0.0 : dza[0.5] - (-40.0*za[0.375] + 67.2993937888*za[0.382138] - 55.7620489333*za[0.409605] + 70.2169136332*za[0.447949] - 145.754258489*za[0.48253] + 104.0*za[0.5]) : 0.0 : True
0.507138 : 0.0 : dza[0.507138] - (-88.3094339297*za[0.5] + 70.0473918235*za[0.507138] + 23.135540923*za[0.534605] - 7.0014911696*za[0.572949] + 3.19764166352*za[0.60753] - 1.06964931079*za[0.625]) : 0.0 : True
- 0.534605 : 0.0 : dza[0.534605] - (28.66454818*za[0.5] - 57.2910457612*za[0.507138] + 14.4486217927*za[0.534605] + 18.9103774085*za[0.572949] - 6.92720624227*za[0.60753] + 2.1947046222*za[0.625]) : 0.0 : True
+ 0.534605 : 0.0 : dza[0.534605] - (28.66454818*za[0.5] - 57.2910457612*za[0.507138] + 14.4486217927*za[0.534605] + 18.9103774085*za[0.572949] - 6.92720624226*za[0.60753] + 2.1947046222*za[0.625]) : 0.0 : True
0.572949 : 0.0 : dza[0.572949] - (-18.7533724632*za[0.5] + 32.9773219699*za[0.507138] - 35.9681370065*za[0.534605] + 6.85412196318*za[0.572949] + 20.1465675937*za[0.60753] - 5.25650205707*za[0.625]) : 0.0 : True
0.60753 : 0.0 : dza[0.60753] - (18.2610840016*za[0.5] - 31.0293057578*za[0.507138] + 27.1452153445*za[0.534605] - 41.5067272513*za[0.572949] + 4.64986442065*za[0.60753] + 22.4798692422*za[0.625]) : 0.0 : True
0.625 : 0.0 : dza[0.625] - (-40.0*za[0.5] + 67.2993937888*za[0.507138] - 55.7620489333*za[0.534605] + 70.2169136332*za[0.572949] - 145.754258489*za[0.60753] + 104.0*za[0.625]) : 0.0 : True
0.632138 : 0.0 : dza[0.632138] - (-88.3094339297*za[0.625] + 70.0473918235*za[0.632138] + 23.135540923*za[0.659605] - 7.0014911696*za[0.697949] + 3.19764166352*za[0.73253] - 1.06964931079*za[0.75]) : 0.0 : True
- 0.659605 : 0.0 : dza[0.659605] - (28.66454818*za[0.625] - 57.2910457612*za[0.632138] + 14.4486217927*za[0.659605] + 18.9103774085*za[0.697949] - 6.92720624227*za[0.73253] + 2.1947046222*za[0.75]) : 0.0 : True
+ 0.659605 : 0.0 : dza[0.659605] - (28.66454818*za[0.625] - 57.2910457612*za[0.632138] + 14.4486217927*za[0.659605] + 18.9103774085*za[0.697949] - 6.92720624226*za[0.73253] + 2.1947046222*za[0.75]) : 0.0 : True
0.697949 : 0.0 : dza[0.697949] - (-18.7533724632*za[0.625] + 32.9773219699*za[0.632138] - 35.9681370065*za[0.659605] + 6.85412196318*za[0.697949] + 20.1465675937*za[0.73253] - 5.25650205707*za[0.75]) : 0.0 : True
0.73253 : 0.0 : dza[0.73253] - (18.2610840016*za[0.625] - 31.0293057578*za[0.632138] + 27.1452153445*za[0.659605] - 41.5067272513*za[0.697949] + 4.64986442065*za[0.73253] + 22.4798692422*za[0.75]) : 0.0 : True
0.75 : 0.0 : dza[0.75] - (-40.0*za[0.625] + 67.2993937888*za[0.632138] - 55.7620489333*za[0.659605] + 70.2169136332*za[0.697949] - 145.754258489*za[0.73253] + 104.0*za[0.75]) : 0.0 : True
0.757138 : 0.0 : dza[0.757138] - (-88.3094339297*za[0.75] + 70.0473918235*za[0.757138] + 23.135540923*za[0.784605] - 7.0014911696*za[0.822949] + 3.19764166352*za[0.85753] - 1.06964931079*za[0.875]) : 0.0 : True
- 0.784605 : 0.0 : dza[0.784605] - (28.66454818*za[0.75] - 57.2910457612*za[0.757138] + 14.4486217927*za[0.784605] + 18.9103774085*za[0.822949] - 6.92720624227*za[0.85753] + 2.1947046222*za[0.875]) : 0.0 : True
+ 0.784605 : 0.0 : dza[0.784605] - (28.66454818*za[0.75] - 57.2910457612*za[0.757138] + 14.4486217927*za[0.784605] + 18.9103774085*za[0.822949] - 6.92720624226*za[0.85753] + 2.1947046222*za[0.875]) : 0.0 : True
0.822949 : 0.0 : dza[0.822949] - (-18.7533724632*za[0.75] + 32.9773219699*za[0.757138] - 35.9681370065*za[0.784605] + 6.85412196318*za[0.822949] + 20.1465675937*za[0.85753] - 5.25650205707*za[0.875]) : 0.0 : True
0.85753 : 0.0 : dza[0.85753] - (18.2610840016*za[0.75] - 31.0293057578*za[0.757138] + 27.1452153445*za[0.784605] - 41.5067272513*za[0.822949] + 4.64986442065*za[0.85753] + 22.4798692422*za[0.875]) : 0.0 : True
0.875 : 0.0 : dza[0.875] - (-40.0*za[0.75] + 67.2993937888*za[0.757138] - 55.7620489333*za[0.784605] + 70.2169136332*za[0.822949] - 145.754258489*za[0.85753] + 104.0*za[0.875]) : 0.0 : True
0.882138 : 0.0 : dza[0.882138] - (-88.3094339297*za[0.875] + 70.0473918235*za[0.882138] + 23.135540923*za[0.909605] - 7.0014911696*za[0.947949] + 3.19764166352*za[0.98253] - 1.06964931079*za[1]) : 0.0 : True
- 0.909605 : 0.0 : dza[0.909605] - (28.66454818*za[0.875] - 57.2910457612*za[0.882138] + 14.4486217927*za[0.909605] + 18.9103774085*za[0.947949] - 6.92720624227*za[0.98253] + 2.1947046222*za[1]) : 0.0 : True
+ 0.909605 : 0.0 : dza[0.909605] - (28.66454818*za[0.875] - 57.2910457612*za[0.882138] + 14.4486217927*za[0.909605] + 18.9103774085*za[0.947949] - 6.92720624226*za[0.98253] + 2.1947046222*za[1]) : 0.0 : True
0.947949 : 0.0 : dza[0.947949] - (-18.7533724632*za[0.875] + 32.9773219699*za[0.882138] - 35.9681370065*za[0.909605] + 6.85412196318*za[0.947949] + 20.1465675937*za[0.98253] - 5.25650205707*za[1]) : 0.0 : True
0.98253 : 0.0 : dza[0.98253] - (18.2610840016*za[0.875] - 31.0293057578*za[0.882138] + 27.1452153445*za[0.909605] - 41.5067272513*za[0.947949] + 4.64986442065*za[0.98253] + 22.4798692422*za[1]) : 0.0 : True
1 : 0.0 : dza[1] - (-40.0*za[0.875] + 67.2993937888*za[0.882138] - 55.7620489333*za[0.909605] + 70.2169136332*za[0.947949] - 145.754258489*za[0.98253] + 104.0*za[1]) : 0.0 : True
dzb_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.003569 : 0.0 : dzb[0.003569] - (-176.618867859*zb[0.0] + 140.094783647*zb[0.003569] + 46.2710818461*zb[0.017303] - 14.0029823392*zb[0.036474] + 6.39528332704*zb[0.053765] - 2.13929862159*zb[0.0625]) : 0.0 : True
- 0.017303 : 0.0 : dzb[0.017303] - (57.32909636*zb[0.0] - 114.582091522*zb[0.003569] + 28.8972435853*zb[0.017303] + 37.8207548171*zb[0.036474] - 13.8544124845*zb[0.053765] + 4.3894092444*zb[0.0625]) : 0.0 : True
- 0.036474 : 0.0 : dzb[0.036474] - (-37.5067449265*zb[0.0] + 65.9546439399*zb[0.003569] - 71.936274013*zb[0.017303] + 13.7082439264*zb[0.036474] + 40.2931351874*zb[0.053765] - 10.5130041141*zb[0.0625]) : 0.0 : True
- 0.053765 : 0.0 : dzb[0.053765] - (36.5221680033*zb[0.0] - 62.0586115156*zb[0.003569] + 54.290430689*zb[0.017303] - 83.0134545025*zb[0.036474] + 9.29972884129*zb[0.053765] + 44.9597384845*zb[0.0625]) : 0.0 : True
- 0.0625 : 0.0 : dzb[0.0625] - (-80.0*zb[0.0] + 134.598787578*zb[0.003569] - 111.524097867*zb[0.017303] + 140.433827266*zb[0.036474] - 291.508516977*zb[0.053765] + 208.0*zb[0.0625]) : 0.0 : True
+ 0.003569 : 0.0 : dzb[0.003569] - (-176.618867859*zb[0] + 140.094783647*zb[0.003569] + 46.2710818461*zb[0.017303] - 14.0029823392*zb[0.036474] + 6.39528332704*zb[0.053765] - 2.13929862159*zb[0.0625]) : 0.0 : True
+ 0.017303 : 0.0 : dzb[0.017303] - (57.32909636*zb[0] - 114.582091522*zb[0.003569] + 28.8972435853*zb[0.017303] + 37.8207548171*zb[0.036474] - 13.8544124845*zb[0.053765] + 4.3894092444*zb[0.0625]) : 0.0 : True
+ 0.036474 : 0.0 : dzb[0.036474] - (-37.5067449265*zb[0] + 65.9546439399*zb[0.003569] - 71.936274013*zb[0.017303] + 13.7082439264*zb[0.036474] + 40.2931351874*zb[0.053765] - 10.5130041141*zb[0.0625]) : 0.0 : True
+ 0.053765 : 0.0 : dzb[0.053765] - (36.5221680033*zb[0] - 62.0586115156*zb[0.003569] + 54.290430689*zb[0.017303] - 83.0134545025*zb[0.036474] + 9.29972884129*zb[0.053765] + 44.9597384845*zb[0.0625]) : 0.0 : True
+ 0.0625 : 0.0 : dzb[0.0625] - (-80.0*zb[0] + 134.598787578*zb[0.003569] - 111.524097867*zb[0.017303] + 140.433827266*zb[0.036474] - 291.508516977*zb[0.053765] + 208.0*zb[0.0625]) : 0.0 : True
0.066069 : 0.0 : dzb[0.066069] - (-176.618867859*zb[0.0625] + 140.094783647*zb[0.066069] + 46.2710818461*zb[0.079803] - 14.0029823392*zb[0.098974] + 6.39528332704*zb[0.116265] - 2.13929862159*zb[0.125]) : 0.0 : True
0.079803 : 0.0 : dzb[0.079803] - (57.32909636*zb[0.0625] - 114.582091522*zb[0.066069] + 28.8972435853*zb[0.079803] + 37.8207548171*zb[0.098974] - 13.8544124845*zb[0.116265] + 4.3894092444*zb[0.125]) : 0.0 : True
0.098974 : 0.0 : dzb[0.098974] - (-37.5067449265*zb[0.0625] + 65.9546439399*zb[0.066069] - 71.936274013*zb[0.079803] + 13.7082439264*zb[0.098974] + 40.2931351874*zb[0.116265] - 10.5130041141*zb[0.125]) : 0.0 : True
@@ -516,46 +521,47 @@
0.241265 : 0.0 : dzb[0.241265] - (36.5221680033*zb[0.1875] - 62.0586115156*zb[0.191069] + 54.290430689*zb[0.204803] - 83.0134545025*zb[0.223974] + 9.29972884129*zb[0.241265] + 44.9597384845*zb[0.25]) : 0.0 : True
0.25 : 0.0 : dzb[0.25] - (-80.0*zb[0.1875] + 134.598787578*zb[0.191069] - 111.524097867*zb[0.204803] + 140.433827266*zb[0.223974] - 291.508516977*zb[0.241265] + 208.0*zb[0.25]) : 0.0 : True
0.257138 : 0.0 : dzb[0.257138] - (-88.3094339297*zb[0.25] + 70.0473918235*zb[0.257138] + 23.135540923*zb[0.284605] - 7.0014911696*zb[0.322949] + 3.19764166352*zb[0.35753] - 1.06964931079*zb[0.375]) : 0.0 : True
- 0.284605 : 0.0 : dzb[0.284605] - (28.66454818*zb[0.25] - 57.2910457612*zb[0.257138] + 14.4486217927*zb[0.284605] + 18.9103774085*zb[0.322949] - 6.92720624227*zb[0.35753] + 2.1947046222*zb[0.375]) : 0.0 : True
+ 0.284605 : 0.0 : dzb[0.284605] - (28.66454818*zb[0.25] - 57.2910457612*zb[0.257138] + 14.4486217927*zb[0.284605] + 18.9103774085*zb[0.322949] - 6.92720624226*zb[0.35753] + 2.1947046222*zb[0.375]) : 0.0 : True
0.322949 : 0.0 : dzb[0.322949] - (-18.7533724632*zb[0.25] + 32.9773219699*zb[0.257138] - 35.9681370065*zb[0.284605] + 6.85412196318*zb[0.322949] + 20.1465675937*zb[0.35753] - 5.25650205707*zb[0.375]) : 0.0 : True
0.35753 : 0.0 : dzb[0.35753] - (18.2610840016*zb[0.25] - 31.0293057578*zb[0.257138] + 27.1452153445*zb[0.284605] - 41.5067272513*zb[0.322949] + 4.64986442065*zb[0.35753] + 22.4798692422*zb[0.375]) : 0.0 : True
0.375 : 0.0 : dzb[0.375] - (-40.0*zb[0.25] + 67.2993937888*zb[0.257138] - 55.7620489333*zb[0.284605] + 70.2169136332*zb[0.322949] - 145.754258489*zb[0.35753] + 104.0*zb[0.375]) : 0.0 : True
0.382138 : 0.0 : dzb[0.382138] - (-88.3094339297*zb[0.375] + 70.0473918235*zb[0.382138] + 23.135540923*zb[0.409605] - 7.0014911696*zb[0.447949] + 3.19764166352*zb[0.48253] - 1.06964931079*zb[0.5]) : 0.0 : True
- 0.409605 : 0.0 : dzb[0.409605] - (28.66454818*zb[0.375] - 57.2910457612*zb[0.382138] + 14.4486217927*zb[0.409605] + 18.9103774085*zb[0.447949] - 6.92720624227*zb[0.48253] + 2.1947046222*zb[0.5]) : 0.0 : True
+ 0.409605 : 0.0 : dzb[0.409605] - (28.66454818*zb[0.375] - 57.2910457612*zb[0.382138] + 14.4486217927*zb[0.409605] + 18.9103774085*zb[0.447949] - 6.92720624226*zb[0.48253] + 2.1947046222*zb[0.5]) : 0.0 : True
0.447949 : 0.0 : dzb[0.447949] - (-18.7533724632*zb[0.375] + 32.9773219699*zb[0.382138] - 35.9681370065*zb[0.409605] + 6.85412196318*zb[0.447949] + 20.1465675937*zb[0.48253] - 5.25650205707*zb[0.5]) : 0.0 : True
0.48253 : 0.0 : dzb[0.48253] - (18.2610840016*zb[0.375] - 31.0293057578*zb[0.382138] + 27.1452153445*zb[0.409605] - 41.5067272513*zb[0.447949] + 4.64986442065*zb[0.48253] + 22.4798692422*zb[0.5]) : 0.0 : True
0.5 : 0.0 : dzb[0.5] - (-40.0*zb[0.375] + 67.2993937888*zb[0.382138] - 55.7620489333*zb[0.409605] + 70.2169136332*zb[0.447949] - 145.754258489*zb[0.48253] + 104.0*zb[0.5]) : 0.0 : True
0.507138 : 0.0 : dzb[0.507138] - (-88.3094339297*zb[0.5] + 70.0473918235*zb[0.507138] + 23.135540923*zb[0.534605] - 7.0014911696*zb[0.572949] + 3.19764166352*zb[0.60753] - 1.06964931079*zb[0.625]) : 0.0 : True
- 0.534605 : 0.0 : dzb[0.534605] - (28.66454818*zb[0.5] - 57.2910457612*zb[0.507138] + 14.4486217927*zb[0.534605] + 18.9103774085*zb[0.572949] - 6.92720624227*zb[0.60753] + 2.1947046222*zb[0.625]) : 0.0 : True
+ 0.534605 : 0.0 : dzb[0.534605] - (28.66454818*zb[0.5] - 57.2910457612*zb[0.507138] + 14.4486217927*zb[0.534605] + 18.9103774085*zb[0.572949] - 6.92720624226*zb[0.60753] + 2.1947046222*zb[0.625]) : 0.0 : True
0.572949 : 0.0 : dzb[0.572949] - (-18.7533724632*zb[0.5] + 32.9773219699*zb[0.507138] - 35.9681370065*zb[0.534605] + 6.85412196318*zb[0.572949] + 20.1465675937*zb[0.60753] - 5.25650205707*zb[0.625]) : 0.0 : True
0.60753 : 0.0 : dzb[0.60753] - (18.2610840016*zb[0.5] - 31.0293057578*zb[0.507138] + 27.1452153445*zb[0.534605] - 41.5067272513*zb[0.572949] + 4.64986442065*zb[0.60753] + 22.4798692422*zb[0.625]) : 0.0 : True
0.625 : 0.0 : dzb[0.625] - (-40.0*zb[0.5] + 67.2993937888*zb[0.507138] - 55.7620489333*zb[0.534605] + 70.2169136332*zb[0.572949] - 145.754258489*zb[0.60753] + 104.0*zb[0.625]) : 0.0 : True
0.632138 : 0.0 : dzb[0.632138] - (-88.3094339297*zb[0.625] + 70.0473918235*zb[0.632138] + 23.135540923*zb[0.659605] - 7.0014911696*zb[0.697949] + 3.19764166352*zb[0.73253] - 1.06964931079*zb[0.75]) : 0.0 : True
- 0.659605 : 0.0 : dzb[0.659605] - (28.66454818*zb[0.625] - 57.2910457612*zb[0.632138] + 14.4486217927*zb[0.659605] + 18.9103774085*zb[0.697949] - 6.92720624227*zb[0.73253] + 2.1947046222*zb[0.75]) : 0.0 : True
+ 0.659605 : 0.0 : dzb[0.659605] - (28.66454818*zb[0.625] - 57.2910457612*zb[0.632138] + 14.4486217927*zb[0.659605] + 18.9103774085*zb[0.697949] - 6.92720624226*zb[0.73253] + 2.1947046222*zb[0.75]) : 0.0 : True
0.697949 : 0.0 : dzb[0.697949] - (-18.7533724632*zb[0.625] + 32.9773219699*zb[0.632138] - 35.9681370065*zb[0.659605] + 6.85412196318*zb[0.697949] + 20.1465675937*zb[0.73253] - 5.25650205707*zb[0.75]) : 0.0 : True
0.73253 : 0.0 : dzb[0.73253] - (18.2610840016*zb[0.625] - 31.0293057578*zb[0.632138] + 27.1452153445*zb[0.659605] - 41.5067272513*zb[0.697949] + 4.64986442065*zb[0.73253] + 22.4798692422*zb[0.75]) : 0.0 : True
0.75 : 0.0 : dzb[0.75] - (-40.0*zb[0.625] + 67.2993937888*zb[0.632138] - 55.7620489333*zb[0.659605] + 70.2169136332*zb[0.697949] - 145.754258489*zb[0.73253] + 104.0*zb[0.75]) : 0.0 : True
0.757138 : 0.0 : dzb[0.757138] - (-88.3094339297*zb[0.75] + 70.0473918235*zb[0.757138] + 23.135540923*zb[0.784605] - 7.0014911696*zb[0.822949] + 3.19764166352*zb[0.85753] - 1.06964931079*zb[0.875]) : 0.0 : True
- 0.784605 : 0.0 : dzb[0.784605] - (28.66454818*zb[0.75] - 57.2910457612*zb[0.757138] + 14.4486217927*zb[0.784605] + 18.9103774085*zb[0.822949] - 6.92720624227*zb[0.85753] + 2.1947046222*zb[0.875]) : 0.0 : True
+ 0.784605 : 0.0 : dzb[0.784605] - (28.66454818*zb[0.75] - 57.2910457612*zb[0.757138] + 14.4486217927*zb[0.784605] + 18.9103774085*zb[0.822949] - 6.92720624226*zb[0.85753] + 2.1947046222*zb[0.875]) : 0.0 : True
0.822949 : 0.0 : dzb[0.822949] - (-18.7533724632*zb[0.75] + 32.9773219699*zb[0.757138] - 35.9681370065*zb[0.784605] + 6.85412196318*zb[0.822949] + 20.1465675937*zb[0.85753] - 5.25650205707*zb[0.875]) : 0.0 : True
0.85753 : 0.0 : dzb[0.85753] - (18.2610840016*zb[0.75] - 31.0293057578*zb[0.757138] + 27.1452153445*zb[0.784605] - 41.5067272513*zb[0.822949] + 4.64986442065*zb[0.85753] + 22.4798692422*zb[0.875]) : 0.0 : True
0.875 : 0.0 : dzb[0.875] - (-40.0*zb[0.75] + 67.2993937888*zb[0.757138] - 55.7620489333*zb[0.784605] + 70.2169136332*zb[0.822949] - 145.754258489*zb[0.85753] + 104.0*zb[0.875]) : 0.0 : True
0.882138 : 0.0 : dzb[0.882138] - (-88.3094339297*zb[0.875] + 70.0473918235*zb[0.882138] + 23.135540923*zb[0.909605] - 7.0014911696*zb[0.947949] + 3.19764166352*zb[0.98253] - 1.06964931079*zb[1]) : 0.0 : True
- 0.909605 : 0.0 : dzb[0.909605] - (28.66454818*zb[0.875] - 57.2910457612*zb[0.882138] + 14.4486217927*zb[0.909605] + 18.9103774085*zb[0.947949] - 6.92720624227*zb[0.98253] + 2.1947046222*zb[1]) : 0.0 : True
+ 0.909605 : 0.0 : dzb[0.909605] - (28.66454818*zb[0.875] - 57.2910457612*zb[0.882138] + 14.4486217927*zb[0.909605] + 18.9103774085*zb[0.947949] - 6.92720624226*zb[0.98253] + 2.1947046222*zb[1]) : 0.0 : True
0.947949 : 0.0 : dzb[0.947949] - (-18.7533724632*zb[0.875] + 32.9773219699*zb[0.882138] - 35.9681370065*zb[0.909605] + 6.85412196318*zb[0.947949] + 20.1465675937*zb[0.98253] - 5.25650205707*zb[1]) : 0.0 : True
0.98253 : 0.0 : dzb[0.98253] - (18.2610840016*zb[0.875] - 31.0293057578*zb[0.882138] + 27.1452153445*zb[0.909605] - 41.5067272513*zb[0.947949] + 4.64986442065*zb[0.98253] + 22.4798692422*zb[1]) : 0.0 : True
1 : 0.0 : dzb[1] - (-40.0*zb[0.875] + 67.2993937888*zb[0.882138] - 55.7620489333*zb[0.909605] + 70.2169136332*zb[0.947949] - 145.754258489*zb[0.98253] + 104.0*zb[1]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 1)
- [0.0, 0.003569, 0.017303, 0.036474, 0.053765, 0.0625, 0.066069, 0.079803, 0.098974, 0.116265, 0.125, 0.128569, 0.142303, 0.161474, 0.178765, 0.1875, 0.191069, 0.204803, 0.223974, 0.241265, 0.25, 0.257138, 0.284605, 0.322949, 0.35753, 0.375, 0.382138, 0.409605, 0.447949, 0.48253, 0.5, 0.507138, 0.534605, 0.572949, 0.60753, 0.625, 0.632138, 0.659605, 0.697949, 0.73253, 0.75, 0.757138, 0.784605, 0.822949, 0.85753, 0.875, 0.882138, 0.909605, 0.947949, 0.98253, 1]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..1] : 51 : {0, 0.003569, 0.017303, 0.036474, 0.053765, 0.0625, 0.066069, 0.079803, 0.098974, 0.116265, 0.125, 0.128569, 0.142303, 0.161474, 0.178765, 0.1875, 0.191069, 0.204803, 0.223974, 0.241265, 0.25, 0.257138, 0.284605, 0.322949, 0.35753, 0.375, 0.382138, 0.409605, 0.447949, 0.48253, 0.5, 0.507138, 0.534605, 0.572949, 0.60753, 0.625, 0.632138, 0.659605, 0.697949, 0.73253, 0.75, 0.757138, 0.784605, 0.822949, 0.85753, 0.875, 0.882138, 0.909605, 0.947949, 0.98253, 1}
1 Suffix Declarations
var_input : Direction=Suffix.LOCAL, Datatype=Suffix.FLOAT
Key : Value
p1 : {0: 4.0, 0.5: 1.0}
-16 Declarations: t p1 p2 p3 p4 za zb zc dza dzb var_input diffeq1 diffeq2 algeq1 dza_disc_eq dzb_disc_eq
+17 Declarations: t_domain t p1 p2 p3 p4 za zb zc dza dzb var_input diffeq1 diffeq2 algeq1 dza_disc_eq dzb_disc_eq
[[ 1.0000 0.0000 0.0000]
[ 0.9607 0.0327 0.0066]
[ 0.9236 0.0547 0.0217]
diff --git a/pyomo/dae/tests/simulator_ode_example.casadi.txt b/pyomo/dae/tests/simulator_ode_example.casadi.txt
index 97802c5d9ab..c1d13b1d902 100644
--- a/pyomo/dae/tests/simulator_ode_example.casadi.txt
+++ b/pyomo/dae/tests/simulator_ode_example.casadi.txt
@@ -1,3 +1,8 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 10)
+ Key : Finite : Members
+ None : False : [0.0..10.0]
+
2 Param Declarations
b : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
Key : Value
@@ -9,7 +14,7 @@
4 Var Declarations
domegadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.057104 : None : None : None : False : True : Reals
0.276843 : None : None : None : False : True : Reals
0.58359 : None : None : None : False : True : Reals
@@ -59,10 +64,10 @@
9.276843 : None : None : None : False : True : Reals
9.58359 : None : None : None : False : True : Reals
9.86024 : None : None : None : False : True : Reals
- 10.0 : None : None : None : False : True : Reals
+ 10 : None : None : None : False : True : Reals
dthetadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.057104 : None : None : None : False : True : Reals
0.276843 : None : None : None : False : True : Reals
0.58359 : None : None : None : False : True : Reals
@@ -112,118 +117,118 @@
9.276843 : None : None : None : False : True : Reals
9.58359 : None : None : None : False : True : Reals
9.86024 : None : None : None : False : True : Reals
- 10.0 : None : None : None : False : True : Reals
+ 10 : None : None : None : False : True : Reals
omega : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.057104 : None : -0.0288367718331 : None : False : False : Reals
- 0.276843 : None : -0.144935728318 : None : False : False : Reals
- 0.58359 : None : -0.360925796388 : None : False : False : Reals
- 0.86024 : None : -0.681754964798 : None : False : False : Reals
- 1.0 : None : -0.916580382476 : None : False : False : Reals
- 1.057104 : None : -1.03638358118 : None : False : False : Reals
- 1.276843 : None : -1.61446153636 : None : False : False : Reals
- 1.58359 : None : -2.77862301258 : None : False : False : Reals
- 1.86024 : None : -3.7964837794 : None : False : False : Reals
- 2.0 : None : -3.98880252355 : None : False : False : Reals
- 2.057104 : None : -3.95363041693 : None : False : False : Reals
- 2.276843 : None : -3.31259041389 : None : False : False : Reals
- 2.58359 : None : -1.71003706385 : None : False : False : Reals
- 2.86024 : None : -0.291528693069 : None : False : False : Reals
- 3.0 : None : 0.372853032229 : None : False : False : Reals
- 3.057104 : None : 0.639837432637 : None : False : False : Reals
- 3.276843 : None : 1.65642484544 : None : False : False : Reals
- 3.58359 : None : 2.8995551007 : None : False : False : Reals
- 3.86024 : None : 3.25695914382 : None : False : False : Reals
- 4.0 : None : 2.98882137087 : None : False : False : Reals
- 4.057104 : None : 2.81261373275 : None : False : False : Reals
- 4.276843 : None : 1.84451738792 : None : False : False : Reals
- 4.58359 : None : 0.283476577158 : None : False : False : Reals
- 4.86024 : None : -1.05794775672 : None : False : False : Reals
- 5.0 : None : -1.66929122385 : None : False : False : Reals
- 5.057104 : None : -1.90219757192 : None : False : False : Reals
- 5.276843 : None : -2.55893328798 : None : False : False : Reals
- 5.58359 : None : -2.56375165735 : None : False : False : Reals
- 5.86024 : None : -1.66642941983 : None : False : False : Reals
- 6.0 : None : -1.04123251979 : None : False : False : Reals
- 6.057104 : None : -0.776917363925 : None : False : False : Reals
- 6.276843 : None : 0.24319396474 : None : False : False : Reals
- 6.58359 : None : 1.50686448553 : None : False : False : Reals
- 6.86024 : None : 2.19686100862 : None : False : False : Reals
- 7.0 : None : 2.25390221046 : None : False : False : Reals
- 7.057104 : None : 2.22299642675 : None : False : False : Reals
- 7.276843 : None : 1.78220623588 : None : False : False : Reals
- 7.58359 : None : 0.650826699669 : None : False : False : Reals
- 7.86024 : None : -0.475937555457 : None : False : False : Reals
- 8.0 : None : -0.989405964761 : None : False : False : Reals
- 8.057104 : None : -1.17772174412 : None : False : False : Reals
- 8.276843 : None : -1.72857955401 : None : False : False : Reals
- 8.58359 : None : -1.82788389429 : None : False : False : Reals
- 8.86024 : None : -1.23259781241 : None : False : False : Reals
- 9.0 : None : -0.784341315869 : None : False : False : Reals
- 9.057104 : None : -0.584143291772 : None : False : False : Reals
- 9.276843 : None : 0.196857216649 : None : False : False : Reals
- 9.58359 : None : 1.13358175254 : None : False : False : Reals
- 9.86024 : None : 1.55494228383 : None : False : False : Reals
- 10.0 : None : 1.56385842926 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.057104 : None : -0.028836771833058644 : None : False : False : Reals
+ 0.276843 : None : -0.14493572831790572 : None : False : False : Reals
+ 0.58359 : None : -0.3609257963877488 : None : False : False : Reals
+ 0.86024 : None : -0.6817549647984206 : None : False : False : Reals
+ 1.0 : None : -0.9165803824755299 : None : False : False : Reals
+ 1.057104 : None : -1.0363835811753719 : None : False : False : Reals
+ 1.276843 : None : -1.6144615363649575 : None : False : False : Reals
+ 1.58359 : None : -2.7786230125782354 : None : False : False : Reals
+ 1.86024 : None : -3.796483779395265 : None : False : False : Reals
+ 2.0 : None : -3.988802523552644 : None : False : False : Reals
+ 2.057104 : None : -3.953630416930289 : None : False : False : Reals
+ 2.276843 : None : -3.312590413889458 : None : False : False : Reals
+ 2.58359 : None : -1.7100370638459894 : None : False : False : Reals
+ 2.86024 : None : -0.2915286930692428 : None : False : False : Reals
+ 3.0 : None : 0.372853032228693 : None : False : False : Reals
+ 3.057104 : None : 0.6398374326368862 : None : False : False : Reals
+ 3.276843 : None : 1.6564248454406703 : None : False : False : Reals
+ 3.58359 : None : 2.8995551007035965 : None : False : False : Reals
+ 3.86024 : None : 3.2569591438154406 : None : False : False : Reals
+ 4.0 : None : 2.9888213708723494 : None : False : False : Reals
+ 4.057104 : None : 2.8126137327462035 : None : False : False : Reals
+ 4.276843 : None : 1.8445173879163923 : None : False : False : Reals
+ 4.58359 : None : 0.28347657715882807 : None : False : False : Reals
+ 4.86024 : None : -1.0579477567165065 : None : False : False : Reals
+ 5.0 : None : -1.669291223845272 : None : False : False : Reals
+ 5.057104 : None : -1.9021975719208306 : None : False : False : Reals
+ 5.276843 : None : -2.558933287977951 : None : False : False : Reals
+ 5.58359 : None : -2.563751657354578 : None : False : False : Reals
+ 5.86024 : None : -1.6664294198290082 : None : False : False : Reals
+ 6.0 : None : -1.041232519793336 : None : False : False : Reals
+ 6.057104 : None : -0.7769173639254203 : None : False : False : Reals
+ 6.276843 : None : 0.24319396474008742 : None : False : False : Reals
+ 6.58359 : None : 1.5068644855267461 : None : False : False : Reals
+ 6.86024 : None : 2.196861008587628 : None : False : False : Reals
+ 7.0 : None : 2.253902210417937 : None : False : False : Reals
+ 7.057104 : None : 2.2229964267053735 : None : False : False : Reals
+ 7.276843 : None : 1.7822062358283022 : None : False : False : Reals
+ 7.58359 : None : 0.6508266996085692 : None : False : False : Reals
+ 7.86024 : None : -0.4759375554813527 : None : False : False : Reals
+ 8.0 : None : -0.9894059647637419 : None : False : False : Reals
+ 8.057104 : None : -1.1777217441210222 : None : False : False : Reals
+ 8.276843 : None : -1.728579553942413 : None : False : False : Reals
+ 8.58359 : None : -1.827883894164624 : None : False : False : Reals
+ 8.86024 : None : -1.232597812303022 : None : False : False : Reals
+ 9.0 : None : -0.784341315783437 : None : False : False : Reals
+ 9.057104 : None : -0.5841432916945888 : None : False : False : Reals
+ 9.276843 : None : 0.1968572167129064 : None : False : False : Reals
+ 9.58359 : None : 1.1335817525158958 : None : False : False : Reals
+ 9.86024 : None : 1.554942283741946 : None : False : False : Reals
+ 10 : None : 1.5638584292101536 : None : False : False : Reals
theta : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 3.04 : None : False : False : Reals
- 0.057104 : None : 3.03854359236 : None : False : False : Reals
- 0.276843 : None : 3.01985169904 : None : False : False : Reals
- 0.58359 : None : 2.94479849706 : None : False : False : Reals
- 0.86024 : None : 2.80389804558 : None : False : False : Reals
- 1.0 : None : 2.69434291633 : None : False : False : Reals
- 1.057104 : None : 2.63693502887 : None : False : False : Reals
- 1.276843 : None : 2.34999537636 : None : False : False : Reals
- 1.58359 : None : 1.6854468113 : None : False : False : Reals
- 1.86024 : None : 0.764350498932 : None : False : False : Reals
- 2.0 : None : 0.216583176695 : None : False : False : Reals
- 2.057104 : None : -0.0097719012592 : None : False : False : Reals
- 2.276843 : None : -0.821935874055 : None : False : False : Reals
- 2.58359 : None : -1.59939653373 : None : False : False : Reals
- 2.86024 : None : -1.87329513994 : None : False : False : Reals
- 3.0 : None : -1.86747657483 : None : False : False : Reals
- 3.057104 : None : -1.83892648779 : None : False : False : Reals
- 3.276843 : None : -1.58510628476 : None : False : False : Reals
- 3.58359 : None : -0.874879240519 : None : False : False : Reals
- 3.86024 : None : 0.00109629546615 : None : False : False : Reals
- 4.0 : None : 0.440559372007 : None : False : False : Reals
- 4.057104 : None : 0.607801260665 : None : False : False : Reals
- 4.276843 : None : 1.12310078819 : None : False : False : Reals
- 4.58359 : None : 1.44954772893 : None : False : False : Reals
- 4.86024 : None : 1.34301354783 : None : False : False : Reals
- 5.0 : None : 1.14829625752 : None : False : False : Reals
- 5.057104 : None : 1.05016975232 : None : False : False : Reals
- 5.276843 : None : 0.550508955937 : None : False : False : Reals
- 5.58359 : None : -0.266408463166 : None : False : False : Reals
- 5.86024 : None : -0.869540385218 : None : False : False : Reals
- 6.0 : None : -1.05419561307 : None : False : False : Reals
- 6.057104 : None : -1.11104485228 : None : False : False : Reals
- 6.276843 : None : -1.16704241034 : None : False : False : Reals
- 6.58359 : None : -0.890105814579 : None : False : False : Reals
- 6.86024 : None : -0.363923767771 : None : False : False : Reals
- 7.0 : None : -0.0503543436636 : None : False : False : Reals
- 7.057104 : None : 0.078268098944 : None : False : False : Reals
- 7.276843 : None : 0.527093474144 : None : False : False : Reals
- 7.58359 : None : 0.908477581807 : None : False : False : Reals
- 7.86024 : None : 0.929578843425 : None : False : False : Reals
- 8.0 : None : 0.826347007449 : None : False : False : Reals
- 8.057104 : None : 0.763915384299 : None : False : False : Reals
- 8.276843 : None : 0.440372783687 : None : False : False : Reals
- 8.58359 : None : -0.127136016061 : None : False : False : Reals
- 8.86024 : None : -0.560481227396 : None : False : False : Reals
- 9.0 : None : -0.70398319544 : None : False : False : Reals
- 9.057104 : None : -0.740757144228 : None : False : False : Reals
- 9.276843 : None : -0.784799079693 : None : False : False : Reals
- 9.58359 : None : -0.572881790888 : None : False : False : Reals
- 9.86024 : None : -0.187877594901 : None : False : False : Reals
- 10.0 : None : 0.0317769938428 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 3.04 : None : False : False : Reals
+ 0.057104 : None : 3.0385435923560458 : None : False : False : Reals
+ 0.276843 : None : 3.01985169903839 : None : False : False : Reals
+ 0.58359 : None : 2.9447984970578065 : None : False : False : Reals
+ 0.86024 : None : 2.8038980455758797 : None : False : False : Reals
+ 1.0 : None : 2.6943429163268737 : None : False : False : Reals
+ 1.057104 : None : 2.636935028867737 : None : False : False : Reals
+ 1.276843 : None : 2.34999537636079 : None : False : False : Reals
+ 1.58359 : None : 1.6854468113015664 : None : False : False : Reals
+ 1.86024 : None : 0.7643504989320303 : None : False : False : Reals
+ 2.0 : None : 0.21658317669479182 : None : False : False : Reals
+ 2.057104 : None : -0.009771901259197968 : None : False : False : Reals
+ 2.276843 : None : -0.8219358740553472 : None : False : False : Reals
+ 2.58359 : None : -1.5993965337328329 : None : False : False : Reals
+ 2.86024 : None : -1.8732951399446378 : None : False : False : Reals
+ 3.0 : None : -1.8674765748347781 : None : False : False : Reals
+ 3.057104 : None : -1.8389264877883587 : None : False : False : Reals
+ 3.276843 : None : -1.5851062847610995 : None : False : False : Reals
+ 3.58359 : None : -0.8748792405193453 : None : False : False : Reals
+ 3.86024 : None : 0.0010962954661344615 : None : False : False : Reals
+ 4.0 : None : 0.44055937200735007 : None : False : False : Reals
+ 4.057104 : None : 0.6078012606645682 : None : False : False : Reals
+ 4.276843 : None : 1.1231007881946113 : None : False : False : Reals
+ 4.58359 : None : 1.4495477289294216 : None : False : False : Reals
+ 4.86024 : None : 1.3430135478347687 : None : False : False : Reals
+ 5.0 : None : 1.1482962575205269 : None : False : False : Reals
+ 5.057104 : None : 1.0501697523179434 : None : False : False : Reals
+ 5.276843 : None : 0.5505089559367401 : None : False : False : Reals
+ 5.58359 : None : -0.2664084631664597 : None : False : False : Reals
+ 5.86024 : None : -0.8695403852180925 : None : False : False : Reals
+ 6.0 : None : -1.054195613068785 : None : False : False : Reals
+ 6.057104 : None : -1.1110448522785588 : None : False : False : Reals
+ 6.276843 : None : -1.1670424103366646 : None : False : False : Reals
+ 6.58359 : None : -0.8901058145796649 : None : False : False : Reals
+ 6.86024 : None : -0.3639237677569771 : None : False : False : Reals
+ 7.0 : None : -0.05035434365056468 : None : False : False : Reals
+ 7.057104 : None : 0.07826809895455494 : None : False : False : Reals
+ 7.276843 : None : 0.5270934741434193 : None : False : False : Reals
+ 7.58359 : None : 0.9084775817769184 : None : False : False : Reals
+ 7.86024 : None : 0.929578843387574 : None : False : False : Reals
+ 8.0 : None : 0.8263470073958926 : None : False : False : Reals
+ 8.057104 : None : 0.7639153842432056 : None : False : False : Reals
+ 8.276843 : None : 0.4403727836384161 : None : False : False : Reals
+ 8.58359 : None : -0.12713601607931915 : None : False : False : Reals
+ 8.86024 : None : -0.5604812273799488 : None : False : False : Reals
+ 9.0 : None : -0.7039831954109196 : None : False : False : Reals
+ 9.057104 : None : -0.7407571441944939 : None : False : False : Reals
+ 9.276843 : None : -0.7847990796409327 : None : False : False : Reals
+ 9.58359 : None : -0.5728817908280706 : None : False : False : Reals
+ 9.86024 : None : -0.18787759486339484 : None : False : False : Reals
+ 10 : None : 0.03177699387008462 : None : False : False : Reals
4 Constraint Declarations
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : domegadt[0.0] - (-0.25*omega[0.0] - 5.0*sin(theta[0.0])) : 0.0 : True
+ 0 : 0.0 : domegadt[0] - (-0.25*omega[0] - 5.0*sin(theta[0])) : 0.0 : True
0.057104 : 0.0 : domegadt[0.057104] - (-0.25*omega[0.057104] - 5.0*sin(theta[0.057104])) : 0.0 : True
0.276843 : 0.0 : domegadt[0.276843] - (-0.25*omega[0.276843] - 5.0*sin(theta[0.276843])) : 0.0 : True
0.58359 : 0.0 : domegadt[0.58359] - (-0.25*omega[0.58359] - 5.0*sin(theta[0.58359])) : 0.0 : True
@@ -273,10 +278,10 @@
9.276843 : 0.0 : domegadt[9.276843] - (-0.25*omega[9.276843] - 5.0*sin(theta[9.276843])) : 0.0 : True
9.58359 : 0.0 : domegadt[9.58359] - (-0.25*omega[9.58359] - 5.0*sin(theta[9.58359])) : 0.0 : True
9.86024 : 0.0 : domegadt[9.86024] - (-0.25*omega[9.86024] - 5.0*sin(theta[9.86024])) : 0.0 : True
- 10.0 : 0.0 : domegadt[10.0] - (-0.25*omega[10.0] - 5.0*sin(theta[10.0])) : 0.0 : True
+ 10 : 0.0 : domegadt[10] - (-0.25*omega[10] - 5.0*sin(theta[10])) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dthetadt[0.0] - omega[0.0] : 0.0 : True
+ 0 : 0.0 : dthetadt[0] - omega[0] : 0.0 : True
0.057104 : 0.0 : dthetadt[0.057104] - omega[0.057104] : 0.0 : True
0.276843 : 0.0 : dthetadt[0.276843] - omega[0.276843] : 0.0 : True
0.58359 : 0.0 : dthetadt[0.58359] - omega[0.58359] : 0.0 : True
@@ -326,117 +331,118 @@
9.276843 : 0.0 : dthetadt[9.276843] - omega[9.276843] : 0.0 : True
9.58359 : 0.0 : dthetadt[9.58359] - omega[9.58359] : 0.0 : True
9.86024 : 0.0 : dthetadt[9.86024] - omega[9.86024] : 0.0 : True
- 10.0 : 0.0 : dthetadt[10.0] - omega[10.0] : 0.0 : True
+ 10 : 0.0 : dthetadt[10] - omega[10] : 0.0 : True
domegadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.057104 : 0.0 : domegadt[0.057104] - (-11.0386792412*omega[0.0] + 8.75592397794*omega[0.057104] + 2.89194261538*omega[0.276843] - 0.8751863962*omega[0.58359] + 0.39970520794*omega[0.86024] - 0.133706163849*omega[1.0]) : 0.0 : True
- 0.276843 : 0.0 : domegadt[0.276843] - (3.5830685225*omega[0.0] - 7.16138072015*omega[0.057104] + 1.80607772408*omega[0.276843] + 2.36379717607*omega[0.58359] - 0.865900780283*omega[0.86024] + 0.274338077775*omega[1.0]) : 0.0 : True
- 0.58359 : 0.0 : domegadt[0.58359] - (-2.3441715579*omega[0.0] + 4.12216524624*omega[0.057104] - 4.49601712581*omega[0.276843] + 0.856765245397*omega[0.58359] + 2.51832094921*omega[0.86024] - 0.657062757134*omega[1.0]) : 0.0 : True
- 0.86024 : 0.0 : domegadt[0.86024] - (2.28263550021*omega[0.0] - 3.87866321972*omega[0.057104] + 3.39315191806*omega[0.276843] - 5.18834090641*omega[0.58359] + 0.581233052581*omega[0.86024] + 2.80998365528*omega[1.0]) : 0.0 : True
- 1.0 : 0.0 : domegadt[1.0] - (-5.0*omega[0.0] + 8.41242422359*omega[0.057104] - 6.97025611666*omega[0.276843] + 8.77711420415*omega[0.58359] - 18.2192823111*omega[0.86024] + 13.0*omega[1.0]) : 0.0 : True
- 1.057104 : 0.0 : domegadt[1.057104] - (-11.0386792412*omega[1.0] + 8.75592397794*omega[1.057104] + 2.89194261538*omega[1.276843] - 0.8751863962*omega[1.58359] + 0.39970520794*omega[1.86024] - 0.133706163849*omega[2.0]) : 0.0 : True
- 1.276843 : 0.0 : domegadt[1.276843] - (3.5830685225*omega[1.0] - 7.16138072015*omega[1.057104] + 1.80607772408*omega[1.276843] + 2.36379717607*omega[1.58359] - 0.865900780283*omega[1.86024] + 0.274338077775*omega[2.0]) : 0.0 : True
- 1.58359 : 0.0 : domegadt[1.58359] - (-2.3441715579*omega[1.0] + 4.12216524624*omega[1.057104] - 4.49601712581*omega[1.276843] + 0.856765245397*omega[1.58359] + 2.51832094921*omega[1.86024] - 0.657062757134*omega[2.0]) : 0.0 : True
- 1.86024 : 0.0 : domegadt[1.86024] - (2.28263550021*omega[1.0] - 3.87866321972*omega[1.057104] + 3.39315191806*omega[1.276843] - 5.18834090641*omega[1.58359] + 0.581233052581*omega[1.86024] + 2.80998365528*omega[2.0]) : 0.0 : True
- 2.0 : 0.0 : domegadt[2.0] - (-5.0*omega[1.0] + 8.41242422359*omega[1.057104] - 6.97025611666*omega[1.276843] + 8.77711420415*omega[1.58359] - 18.2192823111*omega[1.86024] + 13.0*omega[2.0]) : 0.0 : True
- 2.057104 : 0.0 : domegadt[2.057104] - (-11.0386792412*omega[2.0] + 8.75592397794*omega[2.057104] + 2.89194261538*omega[2.276843] - 0.8751863962*omega[2.58359] + 0.39970520794*omega[2.86024] - 0.133706163849*omega[3.0]) : 0.0 : True
- 2.276843 : 0.0 : domegadt[2.276843] - (3.5830685225*omega[2.0] - 7.16138072015*omega[2.057104] + 1.80607772408*omega[2.276843] + 2.36379717607*omega[2.58359] - 0.865900780283*omega[2.86024] + 0.274338077775*omega[3.0]) : 0.0 : True
- 2.58359 : 0.0 : domegadt[2.58359] - (-2.3441715579*omega[2.0] + 4.12216524624*omega[2.057104] - 4.49601712581*omega[2.276843] + 0.856765245397*omega[2.58359] + 2.51832094921*omega[2.86024] - 0.657062757134*omega[3.0]) : 0.0 : True
- 2.86024 : 0.0 : domegadt[2.86024] - (2.28263550021*omega[2.0] - 3.87866321972*omega[2.057104] + 3.39315191806*omega[2.276843] - 5.18834090641*omega[2.58359] + 0.581233052581*omega[2.86024] + 2.80998365528*omega[3.0]) : 0.0 : True
- 3.0 : 0.0 : domegadt[3.0] - (-5.0*omega[2.0] + 8.41242422359*omega[2.057104] - 6.97025611666*omega[2.276843] + 8.77711420415*omega[2.58359] - 18.2192823111*omega[2.86024] + 13.0*omega[3.0]) : 0.0 : True
- 3.057104 : 0.0 : domegadt[3.057104] - (-11.0386792412*omega[3.0] + 8.75592397794*omega[3.057104] + 2.89194261538*omega[3.276843] - 0.8751863962*omega[3.58359] + 0.39970520794*omega[3.86024] - 0.133706163849*omega[4.0]) : 0.0 : True
- 3.276843 : 0.0 : domegadt[3.276843] - (3.5830685225*omega[3.0] - 7.16138072015*omega[3.057104] + 1.80607772408*omega[3.276843] + 2.36379717607*omega[3.58359] - 0.865900780283*omega[3.86024] + 0.274338077775*omega[4.0]) : 0.0 : True
- 3.58359 : 0.0 : domegadt[3.58359] - (-2.3441715579*omega[3.0] + 4.12216524624*omega[3.057104] - 4.49601712581*omega[3.276843] + 0.856765245397*omega[3.58359] + 2.51832094921*omega[3.86024] - 0.657062757134*omega[4.0]) : 0.0 : True
- 3.86024 : 0.0 : domegadt[3.86024] - (2.28263550021*omega[3.0] - 3.87866321972*omega[3.057104] + 3.39315191806*omega[3.276843] - 5.18834090641*omega[3.58359] + 0.581233052581*omega[3.86024] + 2.80998365528*omega[4.0]) : 0.0 : True
- 4.0 : 0.0 : domegadt[4.0] - (-5.0*omega[3.0] + 8.41242422359*omega[3.057104] - 6.97025611666*omega[3.276843] + 8.77711420415*omega[3.58359] - 18.2192823111*omega[3.86024] + 13.0*omega[4.0]) : 0.0 : True
- 4.057104 : 0.0 : domegadt[4.057104] - (-11.0386792412*omega[4.0] + 8.75592397794*omega[4.057104] + 2.89194261538*omega[4.276843] - 0.8751863962*omega[4.58359] + 0.39970520794*omega[4.86024] - 0.133706163849*omega[5.0]) : 0.0 : True
- 4.276843 : 0.0 : domegadt[4.276843] - (3.5830685225*omega[4.0] - 7.16138072015*omega[4.057104] + 1.80607772408*omega[4.276843] + 2.36379717607*omega[4.58359] - 0.865900780283*omega[4.86024] + 0.274338077775*omega[5.0]) : 0.0 : True
- 4.58359 : 0.0 : domegadt[4.58359] - (-2.3441715579*omega[4.0] + 4.12216524624*omega[4.057104] - 4.49601712581*omega[4.276843] + 0.856765245397*omega[4.58359] + 2.51832094921*omega[4.86024] - 0.657062757134*omega[5.0]) : 0.0 : True
- 4.86024 : 0.0 : domegadt[4.86024] - (2.28263550021*omega[4.0] - 3.87866321972*omega[4.057104] + 3.39315191806*omega[4.276843] - 5.18834090641*omega[4.58359] + 0.581233052581*omega[4.86024] + 2.80998365528*omega[5.0]) : 0.0 : True
- 5.0 : 0.0 : domegadt[5.0] - (-5.0*omega[4.0] + 8.41242422359*omega[4.057104] - 6.97025611666*omega[4.276843] + 8.77711420415*omega[4.58359] - 18.2192823111*omega[4.86024] + 13.0*omega[5.0]) : 0.0 : True
- 5.057104 : 0.0 : domegadt[5.057104] - (-11.0386792412*omega[5.0] + 8.75592397794*omega[5.057104] + 2.89194261538*omega[5.276843] - 0.8751863962*omega[5.58359] + 0.39970520794*omega[5.86024] - 0.133706163849*omega[6.0]) : 0.0 : True
- 5.276843 : 0.0 : domegadt[5.276843] - (3.5830685225*omega[5.0] - 7.16138072015*omega[5.057104] + 1.80607772408*omega[5.276843] + 2.36379717607*omega[5.58359] - 0.865900780283*omega[5.86024] + 0.274338077775*omega[6.0]) : 0.0 : True
- 5.58359 : 0.0 : domegadt[5.58359] - (-2.3441715579*omega[5.0] + 4.12216524624*omega[5.057104] - 4.49601712581*omega[5.276843] + 0.856765245397*omega[5.58359] + 2.51832094921*omega[5.86024] - 0.657062757134*omega[6.0]) : 0.0 : True
- 5.86024 : 0.0 : domegadt[5.86024] - (2.28263550021*omega[5.0] - 3.87866321972*omega[5.057104] + 3.39315191806*omega[5.276843] - 5.18834090641*omega[5.58359] + 0.581233052581*omega[5.86024] + 2.80998365528*omega[6.0]) : 0.0 : True
- 6.0 : 0.0 : domegadt[6.0] - (-5.0*omega[5.0] + 8.41242422359*omega[5.057104] - 6.97025611666*omega[5.276843] + 8.77711420415*omega[5.58359] - 18.2192823111*omega[5.86024] + 13.0*omega[6.0]) : 0.0 : True
- 6.057104 : 0.0 : domegadt[6.057104] - (-11.0386792412*omega[6.0] + 8.75592397794*omega[6.057104] + 2.89194261538*omega[6.276843] - 0.8751863962*omega[6.58359] + 0.39970520794*omega[6.86024] - 0.133706163849*omega[7.0]) : 0.0 : True
- 6.276843 : 0.0 : domegadt[6.276843] - (3.5830685225*omega[6.0] - 7.16138072015*omega[6.057104] + 1.80607772408*omega[6.276843] + 2.36379717607*omega[6.58359] - 0.865900780283*omega[6.86024] + 0.274338077775*omega[7.0]) : 0.0 : True
- 6.58359 : 0.0 : domegadt[6.58359] - (-2.3441715579*omega[6.0] + 4.12216524624*omega[6.057104] - 4.49601712581*omega[6.276843] + 0.856765245397*omega[6.58359] + 2.51832094921*omega[6.86024] - 0.657062757134*omega[7.0]) : 0.0 : True
- 6.86024 : 0.0 : domegadt[6.86024] - (2.28263550021*omega[6.0] - 3.87866321972*omega[6.057104] + 3.39315191806*omega[6.276843] - 5.18834090641*omega[6.58359] + 0.581233052581*omega[6.86024] + 2.80998365528*omega[7.0]) : 0.0 : True
- 7.0 : 0.0 : domegadt[7.0] - (-5.0*omega[6.0] + 8.41242422359*omega[6.057104] - 6.97025611666*omega[6.276843] + 8.77711420415*omega[6.58359] - 18.2192823111*omega[6.86024] + 13.0*omega[7.0]) : 0.0 : True
- 7.057104 : 0.0 : domegadt[7.057104] - (-11.0386792412*omega[7.0] + 8.75592397794*omega[7.057104] + 2.89194261538*omega[7.276843] - 0.8751863962*omega[7.58359] + 0.39970520794*omega[7.86024] - 0.133706163849*omega[8.0]) : 0.0 : True
- 7.276843 : 0.0 : domegadt[7.276843] - (3.5830685225*omega[7.0] - 7.16138072015*omega[7.057104] + 1.80607772408*omega[7.276843] + 2.36379717607*omega[7.58359] - 0.865900780283*omega[7.86024] + 0.274338077775*omega[8.0]) : 0.0 : True
- 7.58359 : 0.0 : domegadt[7.58359] - (-2.3441715579*omega[7.0] + 4.12216524624*omega[7.057104] - 4.49601712581*omega[7.276843] + 0.856765245397*omega[7.58359] + 2.51832094921*omega[7.86024] - 0.657062757134*omega[8.0]) : 0.0 : True
- 7.86024 : 0.0 : domegadt[7.86024] - (2.28263550021*omega[7.0] - 3.87866321972*omega[7.057104] + 3.39315191806*omega[7.276843] - 5.18834090641*omega[7.58359] + 0.581233052581*omega[7.86024] + 2.80998365528*omega[8.0]) : 0.0 : True
- 8.0 : 0.0 : domegadt[8.0] - (-5.0*omega[7.0] + 8.41242422359*omega[7.057104] - 6.97025611666*omega[7.276843] + 8.77711420415*omega[7.58359] - 18.2192823111*omega[7.86024] + 13.0*omega[8.0]) : 0.0 : True
- 8.057104 : 0.0 : domegadt[8.057104] - (-11.0386792412*omega[8.0] + 8.75592397794*omega[8.057104] + 2.89194261538*omega[8.276843] - 0.8751863962*omega[8.58359] + 0.39970520794*omega[8.86024] - 0.133706163849*omega[9.0]) : 0.0 : True
- 8.276843 : 0.0 : domegadt[8.276843] - (3.5830685225*omega[8.0] - 7.16138072015*omega[8.057104] + 1.80607772408*omega[8.276843] + 2.36379717607*omega[8.58359] - 0.865900780283*omega[8.86024] + 0.274338077775*omega[9.0]) : 0.0 : True
- 8.58359 : 0.0 : domegadt[8.58359] - (-2.3441715579*omega[8.0] + 4.12216524624*omega[8.057104] - 4.49601712581*omega[8.276843] + 0.856765245397*omega[8.58359] + 2.51832094921*omega[8.86024] - 0.657062757134*omega[9.0]) : 0.0 : True
- 8.86024 : 0.0 : domegadt[8.86024] - (2.28263550021*omega[8.0] - 3.87866321972*omega[8.057104] + 3.39315191806*omega[8.276843] - 5.18834090641*omega[8.58359] + 0.581233052581*omega[8.86024] + 2.80998365528*omega[9.0]) : 0.0 : True
- 9.0 : 0.0 : domegadt[9.0] - (-5.0*omega[8.0] + 8.41242422359*omega[8.057104] - 6.97025611666*omega[8.276843] + 8.77711420415*omega[8.58359] - 18.2192823111*omega[8.86024] + 13.0*omega[9.0]) : 0.0 : True
- 9.057104 : 0.0 : domegadt[9.057104] - (-11.0386792412*omega[9.0] + 8.75592397794*omega[9.057104] + 2.89194261538*omega[9.276843] - 0.8751863962*omega[9.58359] + 0.39970520794*omega[9.86024] - 0.133706163849*omega[10.0]) : 0.0 : True
- 9.276843 : 0.0 : domegadt[9.276843] - (3.5830685225*omega[9.0] - 7.16138072015*omega[9.057104] + 1.80607772408*omega[9.276843] + 2.36379717607*omega[9.58359] - 0.865900780283*omega[9.86024] + 0.274338077775*omega[10.0]) : 0.0 : True
- 9.58359 : 0.0 : domegadt[9.58359] - (-2.3441715579*omega[9.0] + 4.12216524624*omega[9.057104] - 4.49601712581*omega[9.276843] + 0.856765245397*omega[9.58359] + 2.51832094921*omega[9.86024] - 0.657062757134*omega[10.0]) : 0.0 : True
- 9.86024 : 0.0 : domegadt[9.86024] - (2.28263550021*omega[9.0] - 3.87866321972*omega[9.057104] + 3.39315191806*omega[9.276843] - 5.18834090641*omega[9.58359] + 0.581233052581*omega[9.86024] + 2.80998365528*omega[10.0]) : 0.0 : True
- 10.0 : 0.0 : domegadt[10.0] - (-5.0*omega[9.0] + 8.41242422359*omega[9.057104] - 6.97025611666*omega[9.276843] + 8.77711420415*omega[9.58359] - 18.2192823111*omega[9.86024] + 13.0*omega[10.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.057104 : 0.0 : domegadt[0.057104] - (-11.0386792412*omega[0] + 8.75592397794*omega[0.057104] + 2.89194261538*omega[0.276843] - 0.8751863962*omega[0.58359] + 0.39970520794*omega[0.86024] - 0.133706163849*omega[1.0]) : 0.0 : True
+ 0.276843 : 0.0 : domegadt[0.276843] - (3.5830685225*omega[0] - 7.16138072015*omega[0.057104] + 1.80607772408*omega[0.276843] + 2.36379717607*omega[0.58359] - 0.865900780283*omega[0.86024] + 0.274338077775*omega[1.0]) : 0.0 : True
+ 0.58359 : 0.0 : domegadt[0.58359] - (-2.3441715579*omega[0] + 4.12216524624*omega[0.057104] - 4.49601712581*omega[0.276843] + 0.856765245397*omega[0.58359] + 2.51832094921*omega[0.86024] - 0.657062757134*omega[1.0]) : 0.0 : True
+ 0.86024 : 0.0 : domegadt[0.86024] - (2.28263550021*omega[0] - 3.87866321972*omega[0.057104] + 3.39315191806*omega[0.276843] - 5.18834090641*omega[0.58359] + 0.581233052581*omega[0.86024] + 2.80998365528*omega[1.0]) : 0.0 : True
+ 1.0 : 0.0 : domegadt[1.0] - (-5.0*omega[0] + 8.41242422359*omega[0.057104] - 6.97025611666*omega[0.276843] + 8.77711420415*omega[0.58359] - 18.2192823111*omega[0.86024] + 13.0*omega[1.0]) : 0.0 : True
+ 1.057104 : 0.0 : domegadt[1.057104] - (-11.0386792412*omega[1.0] + 8.75592397794*omega[1.057104] + 2.89194261538*omega[1.276843] - 0.8751863962*omega[1.58359] + 0.39970520794*omega[1.86024] - 0.133706163849*omega[2.0]) : 0.0 : True
+ 1.276843 : 0.0 : domegadt[1.276843] - (3.5830685225*omega[1.0] - 7.16138072015*omega[1.057104] + 1.80607772408*omega[1.276843] + 2.36379717607*omega[1.58359] - 0.865900780283*omega[1.86024] + 0.274338077775*omega[2.0]) : 0.0 : True
+ 1.58359 : 0.0 : domegadt[1.58359] - (-2.3441715579*omega[1.0] + 4.12216524624*omega[1.057104] - 4.49601712581*omega[1.276843] + 0.856765245397*omega[1.58359] + 2.51832094921*omega[1.86024] - 0.657062757134*omega[2.0]) : 0.0 : True
+ 1.86024 : 0.0 : domegadt[1.86024] - (2.28263550021*omega[1.0] - 3.87866321972*omega[1.057104] + 3.39315191806*omega[1.276843] - 5.18834090641*omega[1.58359] + 0.581233052581*omega[1.86024] + 2.80998365528*omega[2.0]) : 0.0 : True
+ 2.0 : 0.0 : domegadt[2.0] - (-5.0*omega[1.0] + 8.41242422359*omega[1.057104] - 6.97025611666*omega[1.276843] + 8.77711420415*omega[1.58359] - 18.2192823111*omega[1.86024] + 13.0*omega[2.0]) : 0.0 : True
+ 2.057104 : 0.0 : domegadt[2.057104] - (-11.0386792412*omega[2.0] + 8.75592397794*omega[2.057104] + 2.89194261538*omega[2.276843] - 0.8751863962*omega[2.58359] + 0.39970520794*omega[2.86024] - 0.133706163849*omega[3.0]) : 0.0 : True
+ 2.276843 : 0.0 : domegadt[2.276843] - (3.5830685225*omega[2.0] - 7.16138072015*omega[2.057104] + 1.80607772408*omega[2.276843] + 2.36379717607*omega[2.58359] - 0.865900780283*omega[2.86024] + 0.274338077775*omega[3.0]) : 0.0 : True
+ 2.58359 : 0.0 : domegadt[2.58359] - (-2.3441715579*omega[2.0] + 4.12216524624*omega[2.057104] - 4.49601712581*omega[2.276843] + 0.856765245397*omega[2.58359] + 2.51832094921*omega[2.86024] - 0.657062757134*omega[3.0]) : 0.0 : True
+ 2.86024 : 0.0 : domegadt[2.86024] - (2.28263550021*omega[2.0] - 3.87866321972*omega[2.057104] + 3.39315191806*omega[2.276843] - 5.18834090641*omega[2.58359] + 0.581233052581*omega[2.86024] + 2.80998365528*omega[3.0]) : 0.0 : True
+ 3.0 : 0.0 : domegadt[3.0] - (-5.0*omega[2.0] + 8.41242422359*omega[2.057104] - 6.97025611666*omega[2.276843] + 8.77711420415*omega[2.58359] - 18.2192823111*omega[2.86024] + 13.0*omega[3.0]) : 0.0 : True
+ 3.057104 : 0.0 : domegadt[3.057104] - (-11.0386792412*omega[3.0] + 8.75592397794*omega[3.057104] + 2.89194261538*omega[3.276843] - 0.8751863962*omega[3.58359] + 0.39970520794*omega[3.86024] - 0.133706163849*omega[4.0]) : 0.0 : True
+ 3.276843 : 0.0 : domegadt[3.276843] - (3.5830685225*omega[3.0] - 7.16138072015*omega[3.057104] + 1.80607772408*omega[3.276843] + 2.36379717607*omega[3.58359] - 0.865900780283*omega[3.86024] + 0.274338077775*omega[4.0]) : 0.0 : True
+ 3.58359 : 0.0 : domegadt[3.58359] - (-2.3441715579*omega[3.0] + 4.12216524624*omega[3.057104] - 4.49601712581*omega[3.276843] + 0.856765245397*omega[3.58359] + 2.51832094921*omega[3.86024] - 0.657062757134*omega[4.0]) : 0.0 : True
+ 3.86024 : 0.0 : domegadt[3.86024] - (2.28263550021*omega[3.0] - 3.87866321972*omega[3.057104] + 3.39315191806*omega[3.276843] - 5.18834090641*omega[3.58359] + 0.581233052581*omega[3.86024] + 2.80998365528*omega[4.0]) : 0.0 : True
+ 4.0 : 0.0 : domegadt[4.0] - (-5.0*omega[3.0] + 8.41242422359*omega[3.057104] - 6.97025611666*omega[3.276843] + 8.77711420415*omega[3.58359] - 18.2192823111*omega[3.86024] + 13.0*omega[4.0]) : 0.0 : True
+ 4.057104 : 0.0 : domegadt[4.057104] - (-11.0386792412*omega[4.0] + 8.75592397794*omega[4.057104] + 2.89194261538*omega[4.276843] - 0.8751863962*omega[4.58359] + 0.39970520794*omega[4.86024] - 0.133706163849*omega[5.0]) : 0.0 : True
+ 4.276843 : 0.0 : domegadt[4.276843] - (3.5830685225*omega[4.0] - 7.16138072015*omega[4.057104] + 1.80607772408*omega[4.276843] + 2.36379717607*omega[4.58359] - 0.865900780283*omega[4.86024] + 0.274338077775*omega[5.0]) : 0.0 : True
+ 4.58359 : 0.0 : domegadt[4.58359] - (-2.3441715579*omega[4.0] + 4.12216524624*omega[4.057104] - 4.49601712581*omega[4.276843] + 0.856765245397*omega[4.58359] + 2.51832094921*omega[4.86024] - 0.657062757134*omega[5.0]) : 0.0 : True
+ 4.86024 : 0.0 : domegadt[4.86024] - (2.28263550021*omega[4.0] - 3.87866321972*omega[4.057104] + 3.39315191806*omega[4.276843] - 5.18834090641*omega[4.58359] + 0.581233052581*omega[4.86024] + 2.80998365528*omega[5.0]) : 0.0 : True
+ 5.0 : 0.0 : domegadt[5.0] - (-5.0*omega[4.0] + 8.41242422359*omega[4.057104] - 6.97025611666*omega[4.276843] + 8.77711420415*omega[4.58359] - 18.2192823111*omega[4.86024] + 13.0*omega[5.0]) : 0.0 : True
+ 5.057104 : 0.0 : domegadt[5.057104] - (-11.0386792412*omega[5.0] + 8.75592397794*omega[5.057104] + 2.89194261538*omega[5.276843] - 0.8751863962*omega[5.58359] + 0.39970520794*omega[5.86024] - 0.133706163849*omega[6.0]) : 0.0 : True
+ 5.276843 : 0.0 : domegadt[5.276843] - (3.5830685225*omega[5.0] - 7.16138072015*omega[5.057104] + 1.80607772408*omega[5.276843] + 2.36379717607*omega[5.58359] - 0.865900780283*omega[5.86024] + 0.274338077775*omega[6.0]) : 0.0 : True
+ 5.58359 : 0.0 : domegadt[5.58359] - (-2.3441715579*omega[5.0] + 4.12216524624*omega[5.057104] - 4.49601712581*omega[5.276843] + 0.856765245397*omega[5.58359] + 2.51832094921*omega[5.86024] - 0.657062757134*omega[6.0]) : 0.0 : True
+ 5.86024 : 0.0 : domegadt[5.86024] - (2.28263550021*omega[5.0] - 3.87866321972*omega[5.057104] + 3.39315191806*omega[5.276843] - 5.18834090641*omega[5.58359] + 0.581233052581*omega[5.86024] + 2.80998365528*omega[6.0]) : 0.0 : True
+ 6.0 : 0.0 : domegadt[6.0] - (-5.0*omega[5.0] + 8.41242422359*omega[5.057104] - 6.97025611666*omega[5.276843] + 8.77711420415*omega[5.58359] - 18.2192823111*omega[5.86024] + 13.0*omega[6.0]) : 0.0 : True
+ 6.057104 : 0.0 : domegadt[6.057104] - (-11.0386792412*omega[6.0] + 8.75592397794*omega[6.057104] + 2.89194261538*omega[6.276843] - 0.8751863962*omega[6.58359] + 0.39970520794*omega[6.86024] - 0.133706163849*omega[7.0]) : 0.0 : True
+ 6.276843 : 0.0 : domegadt[6.276843] - (3.5830685225*omega[6.0] - 7.16138072015*omega[6.057104] + 1.80607772408*omega[6.276843] + 2.36379717607*omega[6.58359] - 0.865900780283*omega[6.86024] + 0.274338077775*omega[7.0]) : 0.0 : True
+ 6.58359 : 0.0 : domegadt[6.58359] - (-2.3441715579*omega[6.0] + 4.12216524624*omega[6.057104] - 4.49601712581*omega[6.276843] + 0.856765245397*omega[6.58359] + 2.51832094921*omega[6.86024] - 0.657062757134*omega[7.0]) : 0.0 : True
+ 6.86024 : 0.0 : domegadt[6.86024] - (2.28263550021*omega[6.0] - 3.87866321972*omega[6.057104] + 3.39315191806*omega[6.276843] - 5.18834090641*omega[6.58359] + 0.581233052581*omega[6.86024] + 2.80998365528*omega[7.0]) : 0.0 : True
+ 7.0 : 0.0 : domegadt[7.0] - (-5.0*omega[6.0] + 8.41242422359*omega[6.057104] - 6.97025611666*omega[6.276843] + 8.77711420415*omega[6.58359] - 18.2192823111*omega[6.86024] + 13.0*omega[7.0]) : 0.0 : True
+ 7.057104 : 0.0 : domegadt[7.057104] - (-11.0386792412*omega[7.0] + 8.75592397794*omega[7.057104] + 2.89194261538*omega[7.276843] - 0.8751863962*omega[7.58359] + 0.39970520794*omega[7.86024] - 0.133706163849*omega[8.0]) : 0.0 : True
+ 7.276843 : 0.0 : domegadt[7.276843] - (3.5830685225*omega[7.0] - 7.16138072015*omega[7.057104] + 1.80607772408*omega[7.276843] + 2.36379717607*omega[7.58359] - 0.865900780283*omega[7.86024] + 0.274338077775*omega[8.0]) : 0.0 : True
+ 7.58359 : 0.0 : domegadt[7.58359] - (-2.3441715579*omega[7.0] + 4.12216524624*omega[7.057104] - 4.49601712581*omega[7.276843] + 0.856765245397*omega[7.58359] + 2.51832094921*omega[7.86024] - 0.657062757134*omega[8.0]) : 0.0 : True
+ 7.86024 : 0.0 : domegadt[7.86024] - (2.28263550021*omega[7.0] - 3.87866321972*omega[7.057104] + 3.39315191806*omega[7.276843] - 5.18834090641*omega[7.58359] + 0.581233052581*omega[7.86024] + 2.80998365528*omega[8.0]) : 0.0 : True
+ 8.0 : 0.0 : domegadt[8.0] - (-5.0*omega[7.0] + 8.41242422359*omega[7.057104] - 6.97025611666*omega[7.276843] + 8.77711420415*omega[7.58359] - 18.2192823111*omega[7.86024] + 13.0*omega[8.0]) : 0.0 : True
+ 8.057104 : 0.0 : domegadt[8.057104] - (-11.0386792412*omega[8.0] + 8.75592397794*omega[8.057104] + 2.89194261538*omega[8.276843] - 0.8751863962*omega[8.58359] + 0.39970520794*omega[8.86024] - 0.133706163849*omega[9.0]) : 0.0 : True
+ 8.276843 : 0.0 : domegadt[8.276843] - (3.5830685225*omega[8.0] - 7.16138072015*omega[8.057104] + 1.80607772408*omega[8.276843] + 2.36379717607*omega[8.58359] - 0.865900780283*omega[8.86024] + 0.274338077775*omega[9.0]) : 0.0 : True
+ 8.58359 : 0.0 : domegadt[8.58359] - (-2.3441715579*omega[8.0] + 4.12216524624*omega[8.057104] - 4.49601712581*omega[8.276843] + 0.856765245397*omega[8.58359] + 2.51832094921*omega[8.86024] - 0.657062757134*omega[9.0]) : 0.0 : True
+ 8.86024 : 0.0 : domegadt[8.86024] - (2.28263550021*omega[8.0] - 3.87866321972*omega[8.057104] + 3.39315191806*omega[8.276843] - 5.18834090641*omega[8.58359] + 0.581233052581*omega[8.86024] + 2.80998365528*omega[9.0]) : 0.0 : True
+ 9.0 : 0.0 : domegadt[9.0] - (-5.0*omega[8.0] + 8.41242422359*omega[8.057104] - 6.97025611666*omega[8.276843] + 8.77711420415*omega[8.58359] - 18.2192823111*omega[8.86024] + 13.0*omega[9.0]) : 0.0 : True
+ 9.057104 : 0.0 : domegadt[9.057104] - (-11.0386792412*omega[9.0] + 8.75592397794*omega[9.057104] + 2.89194261538*omega[9.276843] - 0.8751863962*omega[9.58359] + 0.39970520794*omega[9.86024] - 0.133706163849*omega[10]) : 0.0 : True
+ 9.276843 : 0.0 : domegadt[9.276843] - (3.5830685225*omega[9.0] - 7.16138072015*omega[9.057104] + 1.80607772408*omega[9.276843] + 2.36379717607*omega[9.58359] - 0.865900780283*omega[9.86024] + 0.274338077775*omega[10]) : 0.0 : True
+ 9.58359 : 0.0 : domegadt[9.58359] - (-2.3441715579*omega[9.0] + 4.12216524624*omega[9.057104] - 4.49601712581*omega[9.276843] + 0.856765245397*omega[9.58359] + 2.51832094921*omega[9.86024] - 0.657062757134*omega[10]) : 0.0 : True
+ 9.86024 : 0.0 : domegadt[9.86024] - (2.28263550021*omega[9.0] - 3.87866321972*omega[9.057104] + 3.39315191806*omega[9.276843] - 5.18834090641*omega[9.58359] + 0.581233052581*omega[9.86024] + 2.80998365528*omega[10]) : 0.0 : True
+ 10 : 0.0 : domegadt[10] - (-5.0*omega[9.0] + 8.41242422359*omega[9.057104] - 6.97025611666*omega[9.276843] + 8.77711420415*omega[9.58359] - 18.2192823111*omega[9.86024] + 13.0*omega[10]) : 0.0 : True
dthetadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.057104 : 0.0 : dthetadt[0.057104] - (-11.0386792412*theta[0.0] + 8.75592397794*theta[0.057104] + 2.89194261538*theta[0.276843] - 0.8751863962*theta[0.58359] + 0.39970520794*theta[0.86024] - 0.133706163849*theta[1.0]) : 0.0 : True
- 0.276843 : 0.0 : dthetadt[0.276843] - (3.5830685225*theta[0.0] - 7.16138072015*theta[0.057104] + 1.80607772408*theta[0.276843] + 2.36379717607*theta[0.58359] - 0.865900780283*theta[0.86024] + 0.274338077775*theta[1.0]) : 0.0 : True
- 0.58359 : 0.0 : dthetadt[0.58359] - (-2.3441715579*theta[0.0] + 4.12216524624*theta[0.057104] - 4.49601712581*theta[0.276843] + 0.856765245397*theta[0.58359] + 2.51832094921*theta[0.86024] - 0.657062757134*theta[1.0]) : 0.0 : True
- 0.86024 : 0.0 : dthetadt[0.86024] - (2.28263550021*theta[0.0] - 3.87866321972*theta[0.057104] + 3.39315191806*theta[0.276843] - 5.18834090641*theta[0.58359] + 0.581233052581*theta[0.86024] + 2.80998365528*theta[1.0]) : 0.0 : True
- 1.0 : 0.0 : dthetadt[1.0] - (-5.0*theta[0.0] + 8.41242422359*theta[0.057104] - 6.97025611666*theta[0.276843] + 8.77711420415*theta[0.58359] - 18.2192823111*theta[0.86024] + 13.0*theta[1.0]) : 0.0 : True
- 1.057104 : 0.0 : dthetadt[1.057104] - (-11.0386792412*theta[1.0] + 8.75592397794*theta[1.057104] + 2.89194261538*theta[1.276843] - 0.8751863962*theta[1.58359] + 0.39970520794*theta[1.86024] - 0.133706163849*theta[2.0]) : 0.0 : True
- 1.276843 : 0.0 : dthetadt[1.276843] - (3.5830685225*theta[1.0] - 7.16138072015*theta[1.057104] + 1.80607772408*theta[1.276843] + 2.36379717607*theta[1.58359] - 0.865900780283*theta[1.86024] + 0.274338077775*theta[2.0]) : 0.0 : True
- 1.58359 : 0.0 : dthetadt[1.58359] - (-2.3441715579*theta[1.0] + 4.12216524624*theta[1.057104] - 4.49601712581*theta[1.276843] + 0.856765245397*theta[1.58359] + 2.51832094921*theta[1.86024] - 0.657062757134*theta[2.0]) : 0.0 : True
- 1.86024 : 0.0 : dthetadt[1.86024] - (2.28263550021*theta[1.0] - 3.87866321972*theta[1.057104] + 3.39315191806*theta[1.276843] - 5.18834090641*theta[1.58359] + 0.581233052581*theta[1.86024] + 2.80998365528*theta[2.0]) : 0.0 : True
- 2.0 : 0.0 : dthetadt[2.0] - (-5.0*theta[1.0] + 8.41242422359*theta[1.057104] - 6.97025611666*theta[1.276843] + 8.77711420415*theta[1.58359] - 18.2192823111*theta[1.86024] + 13.0*theta[2.0]) : 0.0 : True
- 2.057104 : 0.0 : dthetadt[2.057104] - (-11.0386792412*theta[2.0] + 8.75592397794*theta[2.057104] + 2.89194261538*theta[2.276843] - 0.8751863962*theta[2.58359] + 0.39970520794*theta[2.86024] - 0.133706163849*theta[3.0]) : 0.0 : True
- 2.276843 : 0.0 : dthetadt[2.276843] - (3.5830685225*theta[2.0] - 7.16138072015*theta[2.057104] + 1.80607772408*theta[2.276843] + 2.36379717607*theta[2.58359] - 0.865900780283*theta[2.86024] + 0.274338077775*theta[3.0]) : 0.0 : True
- 2.58359 : 0.0 : dthetadt[2.58359] - (-2.3441715579*theta[2.0] + 4.12216524624*theta[2.057104] - 4.49601712581*theta[2.276843] + 0.856765245397*theta[2.58359] + 2.51832094921*theta[2.86024] - 0.657062757134*theta[3.0]) : 0.0 : True
- 2.86024 : 0.0 : dthetadt[2.86024] - (2.28263550021*theta[2.0] - 3.87866321972*theta[2.057104] + 3.39315191806*theta[2.276843] - 5.18834090641*theta[2.58359] + 0.581233052581*theta[2.86024] + 2.80998365528*theta[3.0]) : 0.0 : True
- 3.0 : 0.0 : dthetadt[3.0] - (-5.0*theta[2.0] + 8.41242422359*theta[2.057104] - 6.97025611666*theta[2.276843] + 8.77711420415*theta[2.58359] - 18.2192823111*theta[2.86024] + 13.0*theta[3.0]) : 0.0 : True
- 3.057104 : 0.0 : dthetadt[3.057104] - (-11.0386792412*theta[3.0] + 8.75592397794*theta[3.057104] + 2.89194261538*theta[3.276843] - 0.8751863962*theta[3.58359] + 0.39970520794*theta[3.86024] - 0.133706163849*theta[4.0]) : 0.0 : True
- 3.276843 : 0.0 : dthetadt[3.276843] - (3.5830685225*theta[3.0] - 7.16138072015*theta[3.057104] + 1.80607772408*theta[3.276843] + 2.36379717607*theta[3.58359] - 0.865900780283*theta[3.86024] + 0.274338077775*theta[4.0]) : 0.0 : True
- 3.58359 : 0.0 : dthetadt[3.58359] - (-2.3441715579*theta[3.0] + 4.12216524624*theta[3.057104] - 4.49601712581*theta[3.276843] + 0.856765245397*theta[3.58359] + 2.51832094921*theta[3.86024] - 0.657062757134*theta[4.0]) : 0.0 : True
- 3.86024 : 0.0 : dthetadt[3.86024] - (2.28263550021*theta[3.0] - 3.87866321972*theta[3.057104] + 3.39315191806*theta[3.276843] - 5.18834090641*theta[3.58359] + 0.581233052581*theta[3.86024] + 2.80998365528*theta[4.0]) : 0.0 : True
- 4.0 : 0.0 : dthetadt[4.0] - (-5.0*theta[3.0] + 8.41242422359*theta[3.057104] - 6.97025611666*theta[3.276843] + 8.77711420415*theta[3.58359] - 18.2192823111*theta[3.86024] + 13.0*theta[4.0]) : 0.0 : True
- 4.057104 : 0.0 : dthetadt[4.057104] - (-11.0386792412*theta[4.0] + 8.75592397794*theta[4.057104] + 2.89194261538*theta[4.276843] - 0.8751863962*theta[4.58359] + 0.39970520794*theta[4.86024] - 0.133706163849*theta[5.0]) : 0.0 : True
- 4.276843 : 0.0 : dthetadt[4.276843] - (3.5830685225*theta[4.0] - 7.16138072015*theta[4.057104] + 1.80607772408*theta[4.276843] + 2.36379717607*theta[4.58359] - 0.865900780283*theta[4.86024] + 0.274338077775*theta[5.0]) : 0.0 : True
- 4.58359 : 0.0 : dthetadt[4.58359] - (-2.3441715579*theta[4.0] + 4.12216524624*theta[4.057104] - 4.49601712581*theta[4.276843] + 0.856765245397*theta[4.58359] + 2.51832094921*theta[4.86024] - 0.657062757134*theta[5.0]) : 0.0 : True
- 4.86024 : 0.0 : dthetadt[4.86024] - (2.28263550021*theta[4.0] - 3.87866321972*theta[4.057104] + 3.39315191806*theta[4.276843] - 5.18834090641*theta[4.58359] + 0.581233052581*theta[4.86024] + 2.80998365528*theta[5.0]) : 0.0 : True
- 5.0 : 0.0 : dthetadt[5.0] - (-5.0*theta[4.0] + 8.41242422359*theta[4.057104] - 6.97025611666*theta[4.276843] + 8.77711420415*theta[4.58359] - 18.2192823111*theta[4.86024] + 13.0*theta[5.0]) : 0.0 : True
- 5.057104 : 0.0 : dthetadt[5.057104] - (-11.0386792412*theta[5.0] + 8.75592397794*theta[5.057104] + 2.89194261538*theta[5.276843] - 0.8751863962*theta[5.58359] + 0.39970520794*theta[5.86024] - 0.133706163849*theta[6.0]) : 0.0 : True
- 5.276843 : 0.0 : dthetadt[5.276843] - (3.5830685225*theta[5.0] - 7.16138072015*theta[5.057104] + 1.80607772408*theta[5.276843] + 2.36379717607*theta[5.58359] - 0.865900780283*theta[5.86024] + 0.274338077775*theta[6.0]) : 0.0 : True
- 5.58359 : 0.0 : dthetadt[5.58359] - (-2.3441715579*theta[5.0] + 4.12216524624*theta[5.057104] - 4.49601712581*theta[5.276843] + 0.856765245397*theta[5.58359] + 2.51832094921*theta[5.86024] - 0.657062757134*theta[6.0]) : 0.0 : True
- 5.86024 : 0.0 : dthetadt[5.86024] - (2.28263550021*theta[5.0] - 3.87866321972*theta[5.057104] + 3.39315191806*theta[5.276843] - 5.18834090641*theta[5.58359] + 0.581233052581*theta[5.86024] + 2.80998365528*theta[6.0]) : 0.0 : True
- 6.0 : 0.0 : dthetadt[6.0] - (-5.0*theta[5.0] + 8.41242422359*theta[5.057104] - 6.97025611666*theta[5.276843] + 8.77711420415*theta[5.58359] - 18.2192823111*theta[5.86024] + 13.0*theta[6.0]) : 0.0 : True
- 6.057104 : 0.0 : dthetadt[6.057104] - (-11.0386792412*theta[6.0] + 8.75592397794*theta[6.057104] + 2.89194261538*theta[6.276843] - 0.8751863962*theta[6.58359] + 0.39970520794*theta[6.86024] - 0.133706163849*theta[7.0]) : 0.0 : True
- 6.276843 : 0.0 : dthetadt[6.276843] - (3.5830685225*theta[6.0] - 7.16138072015*theta[6.057104] + 1.80607772408*theta[6.276843] + 2.36379717607*theta[6.58359] - 0.865900780283*theta[6.86024] + 0.274338077775*theta[7.0]) : 0.0 : True
- 6.58359 : 0.0 : dthetadt[6.58359] - (-2.3441715579*theta[6.0] + 4.12216524624*theta[6.057104] - 4.49601712581*theta[6.276843] + 0.856765245397*theta[6.58359] + 2.51832094921*theta[6.86024] - 0.657062757134*theta[7.0]) : 0.0 : True
- 6.86024 : 0.0 : dthetadt[6.86024] - (2.28263550021*theta[6.0] - 3.87866321972*theta[6.057104] + 3.39315191806*theta[6.276843] - 5.18834090641*theta[6.58359] + 0.581233052581*theta[6.86024] + 2.80998365528*theta[7.0]) : 0.0 : True
- 7.0 : 0.0 : dthetadt[7.0] - (-5.0*theta[6.0] + 8.41242422359*theta[6.057104] - 6.97025611666*theta[6.276843] + 8.77711420415*theta[6.58359] - 18.2192823111*theta[6.86024] + 13.0*theta[7.0]) : 0.0 : True
- 7.057104 : 0.0 : dthetadt[7.057104] - (-11.0386792412*theta[7.0] + 8.75592397794*theta[7.057104] + 2.89194261538*theta[7.276843] - 0.8751863962*theta[7.58359] + 0.39970520794*theta[7.86024] - 0.133706163849*theta[8.0]) : 0.0 : True
- 7.276843 : 0.0 : dthetadt[7.276843] - (3.5830685225*theta[7.0] - 7.16138072015*theta[7.057104] + 1.80607772408*theta[7.276843] + 2.36379717607*theta[7.58359] - 0.865900780283*theta[7.86024] + 0.274338077775*theta[8.0]) : 0.0 : True
- 7.58359 : 0.0 : dthetadt[7.58359] - (-2.3441715579*theta[7.0] + 4.12216524624*theta[7.057104] - 4.49601712581*theta[7.276843] + 0.856765245397*theta[7.58359] + 2.51832094921*theta[7.86024] - 0.657062757134*theta[8.0]) : 0.0 : True
- 7.86024 : 0.0 : dthetadt[7.86024] - (2.28263550021*theta[7.0] - 3.87866321972*theta[7.057104] + 3.39315191806*theta[7.276843] - 5.18834090641*theta[7.58359] + 0.581233052581*theta[7.86024] + 2.80998365528*theta[8.0]) : 0.0 : True
- 8.0 : 0.0 : dthetadt[8.0] - (-5.0*theta[7.0] + 8.41242422359*theta[7.057104] - 6.97025611666*theta[7.276843] + 8.77711420415*theta[7.58359] - 18.2192823111*theta[7.86024] + 13.0*theta[8.0]) : 0.0 : True
- 8.057104 : 0.0 : dthetadt[8.057104] - (-11.0386792412*theta[8.0] + 8.75592397794*theta[8.057104] + 2.89194261538*theta[8.276843] - 0.8751863962*theta[8.58359] + 0.39970520794*theta[8.86024] - 0.133706163849*theta[9.0]) : 0.0 : True
- 8.276843 : 0.0 : dthetadt[8.276843] - (3.5830685225*theta[8.0] - 7.16138072015*theta[8.057104] + 1.80607772408*theta[8.276843] + 2.36379717607*theta[8.58359] - 0.865900780283*theta[8.86024] + 0.274338077775*theta[9.0]) : 0.0 : True
- 8.58359 : 0.0 : dthetadt[8.58359] - (-2.3441715579*theta[8.0] + 4.12216524624*theta[8.057104] - 4.49601712581*theta[8.276843] + 0.856765245397*theta[8.58359] + 2.51832094921*theta[8.86024] - 0.657062757134*theta[9.0]) : 0.0 : True
- 8.86024 : 0.0 : dthetadt[8.86024] - (2.28263550021*theta[8.0] - 3.87866321972*theta[8.057104] + 3.39315191806*theta[8.276843] - 5.18834090641*theta[8.58359] + 0.581233052581*theta[8.86024] + 2.80998365528*theta[9.0]) : 0.0 : True
- 9.0 : 0.0 : dthetadt[9.0] - (-5.0*theta[8.0] + 8.41242422359*theta[8.057104] - 6.97025611666*theta[8.276843] + 8.77711420415*theta[8.58359] - 18.2192823111*theta[8.86024] + 13.0*theta[9.0]) : 0.0 : True
- 9.057104 : 0.0 : dthetadt[9.057104] - (-11.0386792412*theta[9.0] + 8.75592397794*theta[9.057104] + 2.89194261538*theta[9.276843] - 0.8751863962*theta[9.58359] + 0.39970520794*theta[9.86024] - 0.133706163849*theta[10.0]) : 0.0 : True
- 9.276843 : 0.0 : dthetadt[9.276843] - (3.5830685225*theta[9.0] - 7.16138072015*theta[9.057104] + 1.80607772408*theta[9.276843] + 2.36379717607*theta[9.58359] - 0.865900780283*theta[9.86024] + 0.274338077775*theta[10.0]) : 0.0 : True
- 9.58359 : 0.0 : dthetadt[9.58359] - (-2.3441715579*theta[9.0] + 4.12216524624*theta[9.057104] - 4.49601712581*theta[9.276843] + 0.856765245397*theta[9.58359] + 2.51832094921*theta[9.86024] - 0.657062757134*theta[10.0]) : 0.0 : True
- 9.86024 : 0.0 : dthetadt[9.86024] - (2.28263550021*theta[9.0] - 3.87866321972*theta[9.057104] + 3.39315191806*theta[9.276843] - 5.18834090641*theta[9.58359] + 0.581233052581*theta[9.86024] + 2.80998365528*theta[10.0]) : 0.0 : True
- 10.0 : 0.0 : dthetadt[10.0] - (-5.0*theta[9.0] + 8.41242422359*theta[9.057104] - 6.97025611666*theta[9.276843] + 8.77711420415*theta[9.58359] - 18.2192823111*theta[9.86024] + 13.0*theta[10.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.057104 : 0.0 : dthetadt[0.057104] - (-11.0386792412*theta[0] + 8.75592397794*theta[0.057104] + 2.89194261538*theta[0.276843] - 0.8751863962*theta[0.58359] + 0.39970520794*theta[0.86024] - 0.133706163849*theta[1.0]) : 0.0 : True
+ 0.276843 : 0.0 : dthetadt[0.276843] - (3.5830685225*theta[0] - 7.16138072015*theta[0.057104] + 1.80607772408*theta[0.276843] + 2.36379717607*theta[0.58359] - 0.865900780283*theta[0.86024] + 0.274338077775*theta[1.0]) : 0.0 : True
+ 0.58359 : 0.0 : dthetadt[0.58359] - (-2.3441715579*theta[0] + 4.12216524624*theta[0.057104] - 4.49601712581*theta[0.276843] + 0.856765245397*theta[0.58359] + 2.51832094921*theta[0.86024] - 0.657062757134*theta[1.0]) : 0.0 : True
+ 0.86024 : 0.0 : dthetadt[0.86024] - (2.28263550021*theta[0] - 3.87866321972*theta[0.057104] + 3.39315191806*theta[0.276843] - 5.18834090641*theta[0.58359] + 0.581233052581*theta[0.86024] + 2.80998365528*theta[1.0]) : 0.0 : True
+ 1.0 : 0.0 : dthetadt[1.0] - (-5.0*theta[0] + 8.41242422359*theta[0.057104] - 6.97025611666*theta[0.276843] + 8.77711420415*theta[0.58359] - 18.2192823111*theta[0.86024] + 13.0*theta[1.0]) : 0.0 : True
+ 1.057104 : 0.0 : dthetadt[1.057104] - (-11.0386792412*theta[1.0] + 8.75592397794*theta[1.057104] + 2.89194261538*theta[1.276843] - 0.8751863962*theta[1.58359] + 0.39970520794*theta[1.86024] - 0.133706163849*theta[2.0]) : 0.0 : True
+ 1.276843 : 0.0 : dthetadt[1.276843] - (3.5830685225*theta[1.0] - 7.16138072015*theta[1.057104] + 1.80607772408*theta[1.276843] + 2.36379717607*theta[1.58359] - 0.865900780283*theta[1.86024] + 0.274338077775*theta[2.0]) : 0.0 : True
+ 1.58359 : 0.0 : dthetadt[1.58359] - (-2.3441715579*theta[1.0] + 4.12216524624*theta[1.057104] - 4.49601712581*theta[1.276843] + 0.856765245397*theta[1.58359] + 2.51832094921*theta[1.86024] - 0.657062757134*theta[2.0]) : 0.0 : True
+ 1.86024 : 0.0 : dthetadt[1.86024] - (2.28263550021*theta[1.0] - 3.87866321972*theta[1.057104] + 3.39315191806*theta[1.276843] - 5.18834090641*theta[1.58359] + 0.581233052581*theta[1.86024] + 2.80998365528*theta[2.0]) : 0.0 : True
+ 2.0 : 0.0 : dthetadt[2.0] - (-5.0*theta[1.0] + 8.41242422359*theta[1.057104] - 6.97025611666*theta[1.276843] + 8.77711420415*theta[1.58359] - 18.2192823111*theta[1.86024] + 13.0*theta[2.0]) : 0.0 : True
+ 2.057104 : 0.0 : dthetadt[2.057104] - (-11.0386792412*theta[2.0] + 8.75592397794*theta[2.057104] + 2.89194261538*theta[2.276843] - 0.8751863962*theta[2.58359] + 0.39970520794*theta[2.86024] - 0.133706163849*theta[3.0]) : 0.0 : True
+ 2.276843 : 0.0 : dthetadt[2.276843] - (3.5830685225*theta[2.0] - 7.16138072015*theta[2.057104] + 1.80607772408*theta[2.276843] + 2.36379717607*theta[2.58359] - 0.865900780283*theta[2.86024] + 0.274338077775*theta[3.0]) : 0.0 : True
+ 2.58359 : 0.0 : dthetadt[2.58359] - (-2.3441715579*theta[2.0] + 4.12216524624*theta[2.057104] - 4.49601712581*theta[2.276843] + 0.856765245397*theta[2.58359] + 2.51832094921*theta[2.86024] - 0.657062757134*theta[3.0]) : 0.0 : True
+ 2.86024 : 0.0 : dthetadt[2.86024] - (2.28263550021*theta[2.0] - 3.87866321972*theta[2.057104] + 3.39315191806*theta[2.276843] - 5.18834090641*theta[2.58359] + 0.581233052581*theta[2.86024] + 2.80998365528*theta[3.0]) : 0.0 : True
+ 3.0 : 0.0 : dthetadt[3.0] - (-5.0*theta[2.0] + 8.41242422359*theta[2.057104] - 6.97025611666*theta[2.276843] + 8.77711420415*theta[2.58359] - 18.2192823111*theta[2.86024] + 13.0*theta[3.0]) : 0.0 : True
+ 3.057104 : 0.0 : dthetadt[3.057104] - (-11.0386792412*theta[3.0] + 8.75592397794*theta[3.057104] + 2.89194261538*theta[3.276843] - 0.8751863962*theta[3.58359] + 0.39970520794*theta[3.86024] - 0.133706163849*theta[4.0]) : 0.0 : True
+ 3.276843 : 0.0 : dthetadt[3.276843] - (3.5830685225*theta[3.0] - 7.16138072015*theta[3.057104] + 1.80607772408*theta[3.276843] + 2.36379717607*theta[3.58359] - 0.865900780283*theta[3.86024] + 0.274338077775*theta[4.0]) : 0.0 : True
+ 3.58359 : 0.0 : dthetadt[3.58359] - (-2.3441715579*theta[3.0] + 4.12216524624*theta[3.057104] - 4.49601712581*theta[3.276843] + 0.856765245397*theta[3.58359] + 2.51832094921*theta[3.86024] - 0.657062757134*theta[4.0]) : 0.0 : True
+ 3.86024 : 0.0 : dthetadt[3.86024] - (2.28263550021*theta[3.0] - 3.87866321972*theta[3.057104] + 3.39315191806*theta[3.276843] - 5.18834090641*theta[3.58359] + 0.581233052581*theta[3.86024] + 2.80998365528*theta[4.0]) : 0.0 : True
+ 4.0 : 0.0 : dthetadt[4.0] - (-5.0*theta[3.0] + 8.41242422359*theta[3.057104] - 6.97025611666*theta[3.276843] + 8.77711420415*theta[3.58359] - 18.2192823111*theta[3.86024] + 13.0*theta[4.0]) : 0.0 : True
+ 4.057104 : 0.0 : dthetadt[4.057104] - (-11.0386792412*theta[4.0] + 8.75592397794*theta[4.057104] + 2.89194261538*theta[4.276843] - 0.8751863962*theta[4.58359] + 0.39970520794*theta[4.86024] - 0.133706163849*theta[5.0]) : 0.0 : True
+ 4.276843 : 0.0 : dthetadt[4.276843] - (3.5830685225*theta[4.0] - 7.16138072015*theta[4.057104] + 1.80607772408*theta[4.276843] + 2.36379717607*theta[4.58359] - 0.865900780283*theta[4.86024] + 0.274338077775*theta[5.0]) : 0.0 : True
+ 4.58359 : 0.0 : dthetadt[4.58359] - (-2.3441715579*theta[4.0] + 4.12216524624*theta[4.057104] - 4.49601712581*theta[4.276843] + 0.856765245397*theta[4.58359] + 2.51832094921*theta[4.86024] - 0.657062757134*theta[5.0]) : 0.0 : True
+ 4.86024 : 0.0 : dthetadt[4.86024] - (2.28263550021*theta[4.0] - 3.87866321972*theta[4.057104] + 3.39315191806*theta[4.276843] - 5.18834090641*theta[4.58359] + 0.581233052581*theta[4.86024] + 2.80998365528*theta[5.0]) : 0.0 : True
+ 5.0 : 0.0 : dthetadt[5.0] - (-5.0*theta[4.0] + 8.41242422359*theta[4.057104] - 6.97025611666*theta[4.276843] + 8.77711420415*theta[4.58359] - 18.2192823111*theta[4.86024] + 13.0*theta[5.0]) : 0.0 : True
+ 5.057104 : 0.0 : dthetadt[5.057104] - (-11.0386792412*theta[5.0] + 8.75592397794*theta[5.057104] + 2.89194261538*theta[5.276843] - 0.8751863962*theta[5.58359] + 0.39970520794*theta[5.86024] - 0.133706163849*theta[6.0]) : 0.0 : True
+ 5.276843 : 0.0 : dthetadt[5.276843] - (3.5830685225*theta[5.0] - 7.16138072015*theta[5.057104] + 1.80607772408*theta[5.276843] + 2.36379717607*theta[5.58359] - 0.865900780283*theta[5.86024] + 0.274338077775*theta[6.0]) : 0.0 : True
+ 5.58359 : 0.0 : dthetadt[5.58359] - (-2.3441715579*theta[5.0] + 4.12216524624*theta[5.057104] - 4.49601712581*theta[5.276843] + 0.856765245397*theta[5.58359] + 2.51832094921*theta[5.86024] - 0.657062757134*theta[6.0]) : 0.0 : True
+ 5.86024 : 0.0 : dthetadt[5.86024] - (2.28263550021*theta[5.0] - 3.87866321972*theta[5.057104] + 3.39315191806*theta[5.276843] - 5.18834090641*theta[5.58359] + 0.581233052581*theta[5.86024] + 2.80998365528*theta[6.0]) : 0.0 : True
+ 6.0 : 0.0 : dthetadt[6.0] - (-5.0*theta[5.0] + 8.41242422359*theta[5.057104] - 6.97025611666*theta[5.276843] + 8.77711420415*theta[5.58359] - 18.2192823111*theta[5.86024] + 13.0*theta[6.0]) : 0.0 : True
+ 6.057104 : 0.0 : dthetadt[6.057104] - (-11.0386792412*theta[6.0] + 8.75592397794*theta[6.057104] + 2.89194261538*theta[6.276843] - 0.8751863962*theta[6.58359] + 0.39970520794*theta[6.86024] - 0.133706163849*theta[7.0]) : 0.0 : True
+ 6.276843 : 0.0 : dthetadt[6.276843] - (3.5830685225*theta[6.0] - 7.16138072015*theta[6.057104] + 1.80607772408*theta[6.276843] + 2.36379717607*theta[6.58359] - 0.865900780283*theta[6.86024] + 0.274338077775*theta[7.0]) : 0.0 : True
+ 6.58359 : 0.0 : dthetadt[6.58359] - (-2.3441715579*theta[6.0] + 4.12216524624*theta[6.057104] - 4.49601712581*theta[6.276843] + 0.856765245397*theta[6.58359] + 2.51832094921*theta[6.86024] - 0.657062757134*theta[7.0]) : 0.0 : True
+ 6.86024 : 0.0 : dthetadt[6.86024] - (2.28263550021*theta[6.0] - 3.87866321972*theta[6.057104] + 3.39315191806*theta[6.276843] - 5.18834090641*theta[6.58359] + 0.581233052581*theta[6.86024] + 2.80998365528*theta[7.0]) : 0.0 : True
+ 7.0 : 0.0 : dthetadt[7.0] - (-5.0*theta[6.0] + 8.41242422359*theta[6.057104] - 6.97025611666*theta[6.276843] + 8.77711420415*theta[6.58359] - 18.2192823111*theta[6.86024] + 13.0*theta[7.0]) : 0.0 : True
+ 7.057104 : 0.0 : dthetadt[7.057104] - (-11.0386792412*theta[7.0] + 8.75592397794*theta[7.057104] + 2.89194261538*theta[7.276843] - 0.8751863962*theta[7.58359] + 0.39970520794*theta[7.86024] - 0.133706163849*theta[8.0]) : 0.0 : True
+ 7.276843 : 0.0 : dthetadt[7.276843] - (3.5830685225*theta[7.0] - 7.16138072015*theta[7.057104] + 1.80607772408*theta[7.276843] + 2.36379717607*theta[7.58359] - 0.865900780283*theta[7.86024] + 0.274338077775*theta[8.0]) : 0.0 : True
+ 7.58359 : 0.0 : dthetadt[7.58359] - (-2.3441715579*theta[7.0] + 4.12216524624*theta[7.057104] - 4.49601712581*theta[7.276843] + 0.856765245397*theta[7.58359] + 2.51832094921*theta[7.86024] - 0.657062757134*theta[8.0]) : 0.0 : True
+ 7.86024 : 0.0 : dthetadt[7.86024] - (2.28263550021*theta[7.0] - 3.87866321972*theta[7.057104] + 3.39315191806*theta[7.276843] - 5.18834090641*theta[7.58359] + 0.581233052581*theta[7.86024] + 2.80998365528*theta[8.0]) : 0.0 : True
+ 8.0 : 0.0 : dthetadt[8.0] - (-5.0*theta[7.0] + 8.41242422359*theta[7.057104] - 6.97025611666*theta[7.276843] + 8.77711420415*theta[7.58359] - 18.2192823111*theta[7.86024] + 13.0*theta[8.0]) : 0.0 : True
+ 8.057104 : 0.0 : dthetadt[8.057104] - (-11.0386792412*theta[8.0] + 8.75592397794*theta[8.057104] + 2.89194261538*theta[8.276843] - 0.8751863962*theta[8.58359] + 0.39970520794*theta[8.86024] - 0.133706163849*theta[9.0]) : 0.0 : True
+ 8.276843 : 0.0 : dthetadt[8.276843] - (3.5830685225*theta[8.0] - 7.16138072015*theta[8.057104] + 1.80607772408*theta[8.276843] + 2.36379717607*theta[8.58359] - 0.865900780283*theta[8.86024] + 0.274338077775*theta[9.0]) : 0.0 : True
+ 8.58359 : 0.0 : dthetadt[8.58359] - (-2.3441715579*theta[8.0] + 4.12216524624*theta[8.057104] - 4.49601712581*theta[8.276843] + 0.856765245397*theta[8.58359] + 2.51832094921*theta[8.86024] - 0.657062757134*theta[9.0]) : 0.0 : True
+ 8.86024 : 0.0 : dthetadt[8.86024] - (2.28263550021*theta[8.0] - 3.87866321972*theta[8.057104] + 3.39315191806*theta[8.276843] - 5.18834090641*theta[8.58359] + 0.581233052581*theta[8.86024] + 2.80998365528*theta[9.0]) : 0.0 : True
+ 9.0 : 0.0 : dthetadt[9.0] - (-5.0*theta[8.0] + 8.41242422359*theta[8.057104] - 6.97025611666*theta[8.276843] + 8.77711420415*theta[8.58359] - 18.2192823111*theta[8.86024] + 13.0*theta[9.0]) : 0.0 : True
+ 9.057104 : 0.0 : dthetadt[9.057104] - (-11.0386792412*theta[9.0] + 8.75592397794*theta[9.057104] + 2.89194261538*theta[9.276843] - 0.8751863962*theta[9.58359] + 0.39970520794*theta[9.86024] - 0.133706163849*theta[10]) : 0.0 : True
+ 9.276843 : 0.0 : dthetadt[9.276843] - (3.5830685225*theta[9.0] - 7.16138072015*theta[9.057104] + 1.80607772408*theta[9.276843] + 2.36379717607*theta[9.58359] - 0.865900780283*theta[9.86024] + 0.274338077775*theta[10]) : 0.0 : True
+ 9.58359 : 0.0 : dthetadt[9.58359] - (-2.3441715579*theta[9.0] + 4.12216524624*theta[9.057104] - 4.49601712581*theta[9.276843] + 0.856765245397*theta[9.58359] + 2.51832094921*theta[9.86024] - 0.657062757134*theta[10]) : 0.0 : True
+ 9.86024 : 0.0 : dthetadt[9.86024] - (2.28263550021*theta[9.0] - 3.87866321972*theta[9.057104] + 3.39315191806*theta[9.276843] - 5.18834090641*theta[9.58359] + 0.581233052581*theta[9.86024] + 2.80998365528*theta[10]) : 0.0 : True
+ 10 : 0.0 : dthetadt[10] - (-5.0*theta[9.0] + 8.41242422359*theta[9.057104] - 6.97025611666*theta[9.276843] + 8.77711420415*theta[9.58359] - 18.2192823111*theta[9.86024] + 13.0*theta[10]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 10.0)
- [0.0, 0.057104, 0.276843, 0.58359, 0.86024, 1.0, 1.057104, 1.276843, 1.58359, 1.86024, 2.0, 2.057104, 2.276843, 2.58359, 2.86024, 3.0, 3.057104, 3.276843, 3.58359, 3.86024, 4.0, 4.057104, 4.276843, 4.58359, 4.86024, 5.0, 5.057104, 5.276843, 5.58359, 5.86024, 6.0, 6.057104, 6.276843, 6.58359, 6.86024, 7.0, 7.057104, 7.276843, 7.58359, 7.86024, 8.0, 8.057104, 8.276843, 8.58359, 8.86024, 9.0, 9.057104, 9.276843, 9.58359, 9.86024, 10.0]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..10.0] : 51 : {0, 0.057104, 0.276843, 0.58359, 0.86024, 1.0, 1.057104, 1.276843, 1.58359, 1.86024, 2.0, 2.057104, 2.276843, 2.58359, 2.86024, 3.0, 3.057104, 3.276843, 3.58359, 3.86024, 4.0, 4.057104, 4.276843, 4.58359, 4.86024, 5.0, 5.057104, 5.276843, 5.58359, 5.86024, 6.0, 6.057104, 6.276843, 6.58359, 6.86024, 7.0, 7.057104, 7.276843, 7.58359, 7.86024, 8.0, 8.057104, 8.276843, 8.58359, 8.86024, 9.0, 9.057104, 9.276843, 9.58359, 9.86024, 10}
-11 Declarations: t b c omega theta domegadt dthetadt diffeq1 diffeq2 domegadt_disc_eq dthetadt_disc_eq
+12 Declarations: t_domain t b c omega theta domegadt dthetadt diffeq1 diffeq2 domegadt_disc_eq dthetadt_disc_eq
[[ 0.0000 3.0400]
[-0.0510 3.0374]
[-0.1033 3.0297]
diff --git a/pyomo/dae/tests/simulator_ode_example.scipy.txt b/pyomo/dae/tests/simulator_ode_example.scipy.txt
index ae7a0137bbe..68e840476c5 100644
--- a/pyomo/dae/tests/simulator_ode_example.scipy.txt
+++ b/pyomo/dae/tests/simulator_ode_example.scipy.txt
@@ -1,3 +1,8 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 10)
+ Key : Finite : Members
+ None : False : [0.0..10.0]
+
2 Param Declarations
b : Size=1, Index=None, Domain=Any, Default=None, Mutable=False
Key : Value
@@ -9,7 +14,7 @@
4 Var Declarations
domegadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.057104 : None : None : None : False : True : Reals
0.276843 : None : None : None : False : True : Reals
0.58359 : None : None : None : False : True : Reals
@@ -59,10 +64,10 @@
9.276843 : None : None : None : False : True : Reals
9.58359 : None : None : None : False : True : Reals
9.86024 : None : None : None : False : True : Reals
- 10.0 : None : None : None : False : True : Reals
+ 10 : None : None : None : False : True : Reals
dthetadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.057104 : None : None : None : False : True : Reals
0.276843 : None : None : None : False : True : Reals
0.58359 : None : None : None : False : True : Reals
@@ -112,118 +117,118 @@
9.276843 : None : None : None : False : True : Reals
9.58359 : None : None : None : False : True : Reals
9.86024 : None : None : None : False : True : Reals
- 10.0 : None : None : None : False : True : Reals
+ 10 : None : None : None : False : True : Reals
omega : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.057104 : None : -0.0288367678571954 : None : False : False : Reals
- 0.276843 : None : -0.14493547297962298 : None : False : False : Reals
- 0.58359 : None : -0.3609255484078798 : None : False : False : Reals
- 0.86024 : None : -0.6817556426373107 : None : False : False : Reals
- 1.0 : None : -0.9165828791830309 : None : False : False : Reals
- 1.057104 : None : -1.0363871213806717 : None : False : False : Reals
- 1.276843 : None : -1.6144692926364337 : None : False : False : Reals
- 1.58359 : None : -2.778634680075688 : None : False : False : Reals
- 1.86024 : None : -3.79648284775337 : None : False : False : Reals
- 2.0 : None : -3.988796465928554 : None : False : False : Reals
- 2.057104 : None : -3.9536231125313166 : None : False : False : Reals
- 2.276843 : None : -3.3125752076882047 : None : False : False : Reals
- 2.58359 : None : -1.710012896601475 : None : False : False : Reals
- 2.86024 : None : -0.2915045209866684 : None : False : False : Reals
- 3.0 : None : 0.372878210487349 : None : False : False : Reals
- 3.057104 : None : 0.6398631497543629 : None : False : False : Reals
- 3.276843 : None : 1.6564515713044317 : None : False : False : Reals
- 3.58359 : None : 2.8995747952012128 : None : False : False : Reals
- 3.86024 : None : 3.2569501545230115 : None : False : False : Reals
- 4.0 : None : 2.9887944400187783 : None : False : False : Reals
- 4.057104 : None : 2.8125811174423694 : None : False : False : Reals
- 4.276843 : None : 1.8444715230192887 : None : False : False : Reals
- 4.58359 : None : 0.2834252108238402 : None : False : False : Reals
- 4.86024 : None : -1.057993134080183 : None : False : False : Reals
- 5.0 : None : -1.6693288967205664 : None : False : False : Reals
- 5.057104 : None : -1.9022308408799535 : None : False : False : Reals
- 5.276843 : None : -2.558937531652716 : None : False : False : Reals
- 5.58359 : None : -2.563725675578094 : None : False : False : Reals
- 5.86024 : None : -1.6663859280776347 : None : False : False : Reals
- 6.0 : None : -1.0411834953693138 : None : False : False : Reals
- 6.057104 : None : -0.7768663909741436 : None : False : False : Reals
- 6.276843 : None : 0.24324394670869176 : None : False : False : Reals
- 6.58359 : None : 1.506899152287275 : None : False : False : Reals
- 6.86024 : None : 2.196866144117729 : None : False : False : Reals
- 7.0 : None : 2.2538917941075023 : None : False : False : Reals
- 7.057104 : None : 2.222980613588319 : None : False : False : Reals
- 7.276843 : None : 1.782173406282398 : None : False : False : Reals
- 7.58359 : None : 0.6507870709652979 : None : False : False : Reals
- 7.86024 : None : -0.4759762642191314 : None : False : False : Reals
- 8.0 : None : -0.9894403155920486 : None : False : False : Reals
- 8.057104 : None : -1.177752596518665 : None : False : False : Reals
- 8.276843 : None : -1.7285935472347709 : None : False : False : Reals
- 8.58359 : None : -1.8278759157066458 : None : False : False : Reals
- 8.86024 : None : -1.23257470405372 : None : False : False : Reals
- 9.0 : None : -0.7843117915439277 : None : False : False : Reals
- 9.057104 : None : -0.5841118111548802 : None : False : False : Reals
- 9.276843 : None : 0.19689143410950405 : None : False : False : Reals
- 9.58359 : None : 1.1336067750399608 : None : False : False : Reals
- 9.86024 : None : 1.5549500931784928 : None : False : False : Reals
- 10.0 : None : 1.563856037467057 : None : False : False : Reals
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.057104 : None : -0.02883676785719472 : None : False : False : Reals
+ 0.276843 : None : -0.1449354729796208 : None : False : False : Reals
+ 0.58359 : None : -0.36092554840808805 : None : False : False : Reals
+ 0.86024 : None : -0.6817556426372613 : None : False : False : Reals
+ 1.0 : None : -0.9165828791824202 : None : False : False : Reals
+ 1.057104 : None : -1.0363871213795395 : None : False : False : Reals
+ 1.276843 : None : -1.6144692926327497 : None : False : False : Reals
+ 1.58359 : None : -2.7786346800683424 : None : False : False : Reals
+ 1.86024 : None : -3.796482847733081 : None : False : False : Reals
+ 2.0 : None : -3.988796465909305 : None : False : False : Reals
+ 2.057104 : None : -3.953623112513651 : None : False : False : Reals
+ 2.276843 : None : -3.312575207699946 : None : False : False : Reals
+ 2.58359 : None : -1.7100128965925252 : None : False : False : Reals
+ 2.86024 : None : -0.291504520974184 : None : False : False : Reals
+ 3.0 : None : 0.3728782105020554 : None : False : False : Reals
+ 3.057104 : None : 0.6398631497703713 : None : False : False : Reals
+ 3.276843 : None : 1.656451571323684 : None : False : False : Reals
+ 3.58359 : None : 2.8995747952055715 : None : False : False : Reals
+ 3.86024 : None : 3.2569501544942683 : None : False : False : Reals
+ 4.0 : None : 2.988794439977374 : None : False : False : Reals
+ 4.057104 : None : 2.8125811173968605 : None : False : False : Reals
+ 4.276843 : None : 1.8444715229742499 : None : False : False : Reals
+ 4.58359 : None : 0.28342521078447 : None : False : False : Reals
+ 4.86024 : None : -1.057993134114566 : None : False : False : Reals
+ 5.0 : None : -1.6693288967500712 : None : False : False : Reals
+ 5.057104 : None : -1.9022308409070277 : None : False : False : Reals
+ 5.276843 : None : -2.558937531651821 : None : False : False : Reals
+ 5.58359 : None : -2.563725675534555 : None : False : False : Reals
+ 5.86024 : None : -1.666385928075673 : None : False : False : Reals
+ 6.0 : None : -1.0411834953356252 : None : False : False : Reals
+ 6.057104 : None : -0.7768663911814562 : None : False : False : Reals
+ 6.276843 : None : 0.243243946735828 : None : False : False : Reals
+ 6.58359 : None : 1.5068991522156376 : None : False : False : Reals
+ 6.86024 : None : 2.1968661443753326 : None : False : False : Reals
+ 7.0 : None : 2.253891794473088 : None : False : False : Reals
+ 7.057104 : None : 2.2229806137680086 : None : False : False : Reals
+ 7.276843 : None : 1.7821734064935248 : None : False : False : Reals
+ 7.58359 : None : 0.6507870614236346 : None : False : False : Reals
+ 7.86024 : None : -0.47597626756085787 : None : False : False : Reals
+ 8.0 : None : -0.9894403051763665 : None : False : False : Reals
+ 8.057104 : None : -1.1777525964021447 : None : False : False : Reals
+ 8.276843 : None : -1.7285936008830658 : None : False : False : Reals
+ 8.58359 : None : -1.8278759270408607 : None : False : False : Reals
+ 8.86024 : None : -1.2325741860581525 : None : False : False : Reals
+ 9.0 : None : -0.784311239002578 : None : False : False : Reals
+ 9.057104 : None : -0.5841111859804913 : None : False : False : Reals
+ 9.276843 : None : 0.19689205091208473 : None : False : False : Reals
+ 9.58359 : None : 1.1336071338525238 : None : False : False : Reals
+ 9.86024 : None : 1.5549485047543452 : None : False : False : Reals
+ 10 : None : 1.563854769919517 : None : False : False : Reals
theta : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 3.04 : None : False : False : Reals
+ 0 : None : 3.04 : None : False : False : Reals
0.057104 : None : 3.038543631896333 : None : False : False : Reals
- 0.276843 : None : 3.019851836127963 : None : False : False : Reals
- 0.58359 : None : 2.9447977576344537 : None : False : False : Reals
- 0.86024 : None : 2.8038967152002052 : None : False : False : Reals
- 1.0 : None : 2.6943413502975 : None : False : False : Reals
- 1.057104 : None : 2.63693317455725 : None : False : False : Reals
- 1.276843 : None : 2.3499919922857972 : None : False : False : Reals
- 1.58359 : None : 1.6854393888932513 : None : False : False : Reals
- 1.86024 : None : 0.7643416268571628 : None : False : False : Reals
- 2.0 : None : 0.21657601660924686 : None : False : False : Reals
- 2.057104 : None : -0.009778463548521271 : None : False : False : Reals
- 2.276843 : None : -0.8219405400650371 : None : False : False : Reals
- 2.58359 : None : -1.5993955406633624 : None : False : False : Reals
- 2.86024 : None : -1.8732873237018672 : None : False : False : Reals
- 3.0 : None : -1.8674652828919989 : None : False : False : Reals
- 3.057104 : None : -1.8389137415633605 : None : False : False : Reals
- 3.276843 : None : -1.5850889936130301 : None : False : False : Reals
- 3.58359 : None : -0.874855651835759 : None : False : False : Reals
- 3.86024 : None : 0.0011225739576981725 : None : False : False : Reals
- 4.0 : None : 0.4405829141908524 : None : False : False : Reals
- 4.057104 : None : 0.6078231037212737 : None : False : False : Reals
- 4.276843 : None : 1.123114207698621 : None : False : False : Reals
- 4.58359 : None : 1.449545499460466 : None : False : False : Reals
- 4.86024 : None : 1.3429971960922067 : None : False : False : Reals
- 5.0 : None : 1.1482738190342978 : None : False : False : Reals
- 5.057104 : None : 1.0501451604025767 : None : False : False : Reals
- 5.276843 : None : 0.5504809721319138 : None : False : False : Reals
- 5.58359 : None : -0.266430877472533 : None : False : False : Reals
- 5.86024 : None : -0.8695545274350406 : None : False : False : Reals
- 6.0 : None : -1.0542034315209994 : None : False : False : Reals
- 6.057104 : None : -1.1110499283931439 : None : False : False : Reals
- 6.276843 : None : -1.1670359646194137 : None : False : False : Reals
- 6.58359 : None : -0.8900857462259287 : None : False : False : Reals
- 6.86024 : None : -0.36389968565759245 : None : False : False : Reals
- 7.0 : None : -0.050331438008618265 : None : False : False : Reals
- 7.057104 : None : 0.07829024490877882 : None : False : False : Reals
- 7.276843 : None : 0.5271104849972925 : None : False : False : Reals
- 7.58359 : None : 0.9084843557947639 : None : False : False : Reals
- 7.86024 : None : 0.929574195290587 : None : False : False : Reals
- 8.0 : None : 0.8263373164710168 : None : False : False : Reals
- 8.057104 : None : 0.7639038523119559 : None : False : False : Reals
- 8.276843 : None : 0.44035655257316964 : None : False : False : Reals
- 8.58359 : None : -0.1271522815342985 : None : False : False : Reals
- 8.86024 : None : -0.5604937275209211 : None : False : False : Reals
- 9.0 : None : -0.7039923793461461 : None : False : False : Reals
- 9.057104 : None : -0.7407646255052835 : None : False : False : Reals
- 9.276843 : None : -0.7847991880652745 : None : False : False : Reals
- 9.58359 : None : -0.5728724256003687 : None : False : False : Reals
- 9.86024 : None : -0.18786346624234584 : None : False : False : Reals
- 10.0 : None : 0.03179163438205111 : None : False : False : Reals
+ 0.276843 : None : 3.019851836127969 : None : False : False : Reals
+ 0.58359 : None : 2.9447977576351585 : None : False : False : Reals
+ 0.86024 : None : 2.80389671520199 : None : False : False : Reals
+ 1.0 : None : 2.694341350299097 : None : False : False : Reals
+ 1.057104 : None : 2.6369331745589455 : None : False : False : Reals
+ 1.276843 : None : 2.3499919922881487 : None : False : False : Reals
+ 1.58359 : None : 1.6854393888968824 : None : False : False : Reals
+ 1.86024 : None : 0.7643416268649684 : None : False : False : Reals
+ 2.0 : None : 0.2165760166194643 : None : False : False : Reals
+ 2.057104 : None : -0.009778463537462978 : None : False : False : Reals
+ 2.276843 : None : -0.8219405400526596 : None : False : False : Reals
+ 2.58359 : None : -1.5993955406503764 : None : False : False : Reals
+ 2.86024 : None : -1.8732873236863297 : None : False : False : Reals
+ 3.0 : None : -1.867465282874202 : None : False : False : Reals
+ 3.057104 : None : -1.8389137415444607 : None : False : False : Reals
+ 3.276843 : None : -1.5850889935889494 : None : False : False : Reals
+ 3.58359 : None : -0.8748556518076085 : None : False : False : Reals
+ 3.86024 : None : 0.001122573982069705 : None : False : False : Reals
+ 4.0 : None : 0.4405829142101789 : None : False : False : Reals
+ 4.057104 : None : 0.6078231037380178 : None : False : False : Reals
+ 4.276843 : None : 1.1231142077038074 : None : False : False : Reals
+ 4.58359 : None : 1.4495454994529584 : None : False : False : Reals
+ 4.86024 : None : 1.3429971960743747 : None : False : False : Reals
+ 5.0 : None : 1.1482738190118214 : None : False : False : Reals
+ 5.057104 : None : 1.0501451603782952 : None : False : False : Reals
+ 5.276843 : None : 0.5504809721035704 : None : False : False : Reals
+ 5.58359 : None : -0.26643087749320626 : None : False : False : Reals
+ 5.86024 : None : -0.8695545274381273 : None : False : False : Reals
+ 6.0 : None : -1.0542034315250488 : None : False : False : Reals
+ 6.057104 : None : -1.1110499283879802 : None : False : False : Reals
+ 6.276843 : None : -1.1670359646273178 : None : False : False : Reals
+ 6.58359 : None : -0.8900857462577395 : None : False : False : Reals
+ 6.86024 : None : -0.36389968565637654 : None : False : False : Reals
+ 7.0 : None : -0.05033143788272976 : None : False : False : Reals
+ 7.057104 : None : 0.07829024506973595 : None : False : False : Reals
+ 7.276843 : None : 0.5271104854350149 : None : False : False : Reals
+ 7.58359 : None : 0.9084843573873719 : None : False : False : Reals
+ 7.86024 : None : 0.929574194942734 : None : False : False : Reals
+ 8.0 : None : 0.8263373192924706 : None : False : False : Reals
+ 8.057104 : None : 0.7639038563509183 : None : False : False : Reals
+ 8.276843 : None : 0.4403565503250918 : None : False : False : Reals
+ 8.58359 : None : -0.12715230294107133 : None : False : False : Reals
+ 8.86024 : None : -0.5604937183340744 : None : False : False : Reals
+ 9.0 : None : -0.7039921764767408 : None : False : False : Reals
+ 9.057104 : None : -0.7407643983069807 : None : False : False : Reals
+ 9.276843 : None : -0.7847988403007271 : None : False : False : Reals
+ 9.58359 : None : -0.5728719467649149 : None : False : False : Reals
+ 9.86024 : None : -0.18786344902572952 : None : False : False : Reals
+ 10 : None : 0.03179123130112051 : None : False : False : Reals
4 Constraint Declarations
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : domegadt[0.0] - (-0.25*omega[0.0] - 5.0*sin(theta[0.0])) : 0.0 : True
+ 0 : 0.0 : domegadt[0] - (-0.25*omega[0] - 5.0*sin(theta[0])) : 0.0 : True
0.057104 : 0.0 : domegadt[0.057104] - (-0.25*omega[0.057104] - 5.0*sin(theta[0.057104])) : 0.0 : True
0.276843 : 0.0 : domegadt[0.276843] - (-0.25*omega[0.276843] - 5.0*sin(theta[0.276843])) : 0.0 : True
0.58359 : 0.0 : domegadt[0.58359] - (-0.25*omega[0.58359] - 5.0*sin(theta[0.58359])) : 0.0 : True
@@ -273,10 +278,10 @@
9.276843 : 0.0 : domegadt[9.276843] - (-0.25*omega[9.276843] - 5.0*sin(theta[9.276843])) : 0.0 : True
9.58359 : 0.0 : domegadt[9.58359] - (-0.25*omega[9.58359] - 5.0*sin(theta[9.58359])) : 0.0 : True
9.86024 : 0.0 : domegadt[9.86024] - (-0.25*omega[9.86024] - 5.0*sin(theta[9.86024])) : 0.0 : True
- 10.0 : 0.0 : domegadt[10.0] - (-0.25*omega[10.0] - 5.0*sin(theta[10.0])) : 0.0 : True
+ 10 : 0.0 : domegadt[10] - (-0.25*omega[10] - 5.0*sin(theta[10])) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dthetadt[0.0] - omega[0.0] : 0.0 : True
+ 0 : 0.0 : dthetadt[0] - omega[0] : 0.0 : True
0.057104 : 0.0 : dthetadt[0.057104] - omega[0.057104] : 0.0 : True
0.276843 : 0.0 : dthetadt[0.276843] - omega[0.276843] : 0.0 : True
0.58359 : 0.0 : dthetadt[0.58359] - omega[0.58359] : 0.0 : True
@@ -326,117 +331,118 @@
9.276843 : 0.0 : dthetadt[9.276843] - omega[9.276843] : 0.0 : True
9.58359 : 0.0 : dthetadt[9.58359] - omega[9.58359] : 0.0 : True
9.86024 : 0.0 : dthetadt[9.86024] - omega[9.86024] : 0.0 : True
- 10.0 : 0.0 : dthetadt[10.0] - omega[10.0] : 0.0 : True
+ 10 : 0.0 : dthetadt[10] - omega[10] : 0.0 : True
domegadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.057104 : 0.0 : domegadt[0.057104] - (-11.038679241208952*omega[0.0] + 8.755923977938355*omega[0.057104] + 2.8919426153801258*omega[0.276843] - 0.87518639620027*omega[0.58359] + 0.39970520793996167*omega[0.86024] - 0.13370616384921521*omega[1.0]) : 0.0 : True
- 0.276843 : 0.0 : domegadt[0.276843] - (3.5830685225010477*omega[0.0] - 7.161380720145321*omega[0.057104] + 1.8060777240835826*omega[0.276843] + 2.3637971760686236*omega[0.58359] - 0.8659007802831209*omega[0.86024] + 0.274338077775192*omega[1.0]) : 0.0 : True
- 0.58359 : 0.0 : domegadt[0.58359] - (-2.3441715579038664*omega[0.0] + 4.122165246243398*omega[0.057104] - 4.496017125813501*omega[0.276843] + 0.8567652453972836*omega[0.58359] + 2.518320949211015*omega[0.86024] - 0.657062757134355*omega[1.0]) : 0.0 : True
- 0.86024 : 0.0 : domegadt[0.86024] - (2.282635500205682*omega[0.0] - 3.8786632197240785*omega[0.057104] + 3.3931519180649445*omega[0.276843] - 5.188340906407153*omega[0.58359] + 0.5812330525807557*omega[0.86024] + 2.8099836552797197*omega[1.0]) : 0.0 : True
- 1.0 : 0.0 : domegadt[1.0] - (-4.999999999999989*omega[0.0] + 8.412424223594346*omega[0.057104] - 6.970256116656801*omega[0.276843] + 8.777114204150497*omega[0.58359] - 18.219282311088037*omega[0.86024] + 12.99999999999998*omega[1.0]) : 0.0 : True
- 1.057104 : 0.0 : domegadt[1.057104] - (-11.038679241208952*omega[1.0] + 8.755923977938355*omega[1.057104] + 2.8919426153801258*omega[1.276843] - 0.87518639620027*omega[1.58359] + 0.39970520793996167*omega[1.86024] - 0.13370616384921521*omega[2.0]) : 0.0 : True
- 1.276843 : 0.0 : domegadt[1.276843] - (3.5830685225010477*omega[1.0] - 7.161380720145321*omega[1.057104] + 1.8060777240835826*omega[1.276843] + 2.3637971760686236*omega[1.58359] - 0.8659007802831209*omega[1.86024] + 0.274338077775192*omega[2.0]) : 0.0 : True
- 1.58359 : 0.0 : domegadt[1.58359] - (-2.3441715579038664*omega[1.0] + 4.122165246243398*omega[1.057104] - 4.496017125813501*omega[1.276843] + 0.8567652453972836*omega[1.58359] + 2.518320949211015*omega[1.86024] - 0.657062757134355*omega[2.0]) : 0.0 : True
- 1.86024 : 0.0 : domegadt[1.86024] - (2.282635500205682*omega[1.0] - 3.8786632197240785*omega[1.057104] + 3.3931519180649445*omega[1.276843] - 5.188340906407153*omega[1.58359] + 0.5812330525807557*omega[1.86024] + 2.8099836552797197*omega[2.0]) : 0.0 : True
- 2.0 : 0.0 : domegadt[2.0] - (-4.999999999999989*omega[1.0] + 8.412424223594346*omega[1.057104] - 6.970256116656801*omega[1.276843] + 8.777114204150497*omega[1.58359] - 18.219282311088037*omega[1.86024] + 12.99999999999998*omega[2.0]) : 0.0 : True
- 2.057104 : 0.0 : domegadt[2.057104] - (-11.038679241208952*omega[2.0] + 8.755923977938355*omega[2.057104] + 2.8919426153801258*omega[2.276843] - 0.87518639620027*omega[2.58359] + 0.39970520793996167*omega[2.86024] - 0.13370616384921521*omega[3.0]) : 0.0 : True
- 2.276843 : 0.0 : domegadt[2.276843] - (3.5830685225010477*omega[2.0] - 7.161380720145321*omega[2.057104] + 1.8060777240835826*omega[2.276843] + 2.3637971760686236*omega[2.58359] - 0.8659007802831209*omega[2.86024] + 0.274338077775192*omega[3.0]) : 0.0 : True
- 2.58359 : 0.0 : domegadt[2.58359] - (-2.3441715579038664*omega[2.0] + 4.122165246243398*omega[2.057104] - 4.496017125813501*omega[2.276843] + 0.8567652453972836*omega[2.58359] + 2.518320949211015*omega[2.86024] - 0.657062757134355*omega[3.0]) : 0.0 : True
- 2.86024 : 0.0 : domegadt[2.86024] - (2.282635500205682*omega[2.0] - 3.8786632197240785*omega[2.057104] + 3.3931519180649445*omega[2.276843] - 5.188340906407153*omega[2.58359] + 0.5812330525807557*omega[2.86024] + 2.8099836552797197*omega[3.0]) : 0.0 : True
- 3.0 : 0.0 : domegadt[3.0] - (-4.999999999999989*omega[2.0] + 8.412424223594346*omega[2.057104] - 6.970256116656801*omega[2.276843] + 8.777114204150497*omega[2.58359] - 18.219282311088037*omega[2.86024] + 12.99999999999998*omega[3.0]) : 0.0 : True
- 3.057104 : 0.0 : domegadt[3.057104] - (-11.038679241208952*omega[3.0] + 8.755923977938355*omega[3.057104] + 2.8919426153801258*omega[3.276843] - 0.87518639620027*omega[3.58359] + 0.39970520793996167*omega[3.86024] - 0.13370616384921521*omega[4.0]) : 0.0 : True
- 3.276843 : 0.0 : domegadt[3.276843] - (3.5830685225010477*omega[3.0] - 7.161380720145321*omega[3.057104] + 1.8060777240835826*omega[3.276843] + 2.3637971760686236*omega[3.58359] - 0.8659007802831209*omega[3.86024] + 0.274338077775192*omega[4.0]) : 0.0 : True
- 3.58359 : 0.0 : domegadt[3.58359] - (-2.3441715579038664*omega[3.0] + 4.122165246243398*omega[3.057104] - 4.496017125813501*omega[3.276843] + 0.8567652453972836*omega[3.58359] + 2.518320949211015*omega[3.86024] - 0.657062757134355*omega[4.0]) : 0.0 : True
- 3.86024 : 0.0 : domegadt[3.86024] - (2.282635500205682*omega[3.0] - 3.8786632197240785*omega[3.057104] + 3.3931519180649445*omega[3.276843] - 5.188340906407153*omega[3.58359] + 0.5812330525807557*omega[3.86024] + 2.8099836552797197*omega[4.0]) : 0.0 : True
- 4.0 : 0.0 : domegadt[4.0] - (-4.999999999999989*omega[3.0] + 8.412424223594346*omega[3.057104] - 6.970256116656801*omega[3.276843] + 8.777114204150497*omega[3.58359] - 18.219282311088037*omega[3.86024] + 12.99999999999998*omega[4.0]) : 0.0 : True
- 4.057104 : 0.0 : domegadt[4.057104] - (-11.038679241208952*omega[4.0] + 8.755923977938355*omega[4.057104] + 2.8919426153801258*omega[4.276843] - 0.87518639620027*omega[4.58359] + 0.39970520793996167*omega[4.86024] - 0.13370616384921521*omega[5.0]) : 0.0 : True
- 4.276843 : 0.0 : domegadt[4.276843] - (3.5830685225010477*omega[4.0] - 7.161380720145321*omega[4.057104] + 1.8060777240835826*omega[4.276843] + 2.3637971760686236*omega[4.58359] - 0.8659007802831209*omega[4.86024] + 0.274338077775192*omega[5.0]) : 0.0 : True
- 4.58359 : 0.0 : domegadt[4.58359] - (-2.3441715579038664*omega[4.0] + 4.122165246243398*omega[4.057104] - 4.496017125813501*omega[4.276843] + 0.8567652453972836*omega[4.58359] + 2.518320949211015*omega[4.86024] - 0.657062757134355*omega[5.0]) : 0.0 : True
- 4.86024 : 0.0 : domegadt[4.86024] - (2.282635500205682*omega[4.0] - 3.8786632197240785*omega[4.057104] + 3.3931519180649445*omega[4.276843] - 5.188340906407153*omega[4.58359] + 0.5812330525807557*omega[4.86024] + 2.8099836552797197*omega[5.0]) : 0.0 : True
- 5.0 : 0.0 : domegadt[5.0] - (-4.999999999999989*omega[4.0] + 8.412424223594346*omega[4.057104] - 6.970256116656801*omega[4.276843] + 8.777114204150497*omega[4.58359] - 18.219282311088037*omega[4.86024] + 12.99999999999998*omega[5.0]) : 0.0 : True
- 5.057104 : 0.0 : domegadt[5.057104] - (-11.038679241208952*omega[5.0] + 8.755923977938355*omega[5.057104] + 2.8919426153801258*omega[5.276843] - 0.87518639620027*omega[5.58359] + 0.39970520793996167*omega[5.86024] - 0.13370616384921521*omega[6.0]) : 0.0 : True
- 5.276843 : 0.0 : domegadt[5.276843] - (3.5830685225010477*omega[5.0] - 7.161380720145321*omega[5.057104] + 1.8060777240835826*omega[5.276843] + 2.3637971760686236*omega[5.58359] - 0.8659007802831209*omega[5.86024] + 0.274338077775192*omega[6.0]) : 0.0 : True
- 5.58359 : 0.0 : domegadt[5.58359] - (-2.3441715579038664*omega[5.0] + 4.122165246243398*omega[5.057104] - 4.496017125813501*omega[5.276843] + 0.8567652453972836*omega[5.58359] + 2.518320949211015*omega[5.86024] - 0.657062757134355*omega[6.0]) : 0.0 : True
- 5.86024 : 0.0 : domegadt[5.86024] - (2.282635500205682*omega[5.0] - 3.8786632197240785*omega[5.057104] + 3.3931519180649445*omega[5.276843] - 5.188340906407153*omega[5.58359] + 0.5812330525807557*omega[5.86024] + 2.8099836552797197*omega[6.0]) : 0.0 : True
- 6.0 : 0.0 : domegadt[6.0] - (-4.999999999999989*omega[5.0] + 8.412424223594346*omega[5.057104] - 6.970256116656801*omega[5.276843] + 8.777114204150497*omega[5.58359] - 18.219282311088037*omega[5.86024] + 12.99999999999998*omega[6.0]) : 0.0 : True
- 6.057104 : 0.0 : domegadt[6.057104] - (-11.038679241208952*omega[6.0] + 8.755923977938355*omega[6.057104] + 2.8919426153801258*omega[6.276843] - 0.87518639620027*omega[6.58359] + 0.39970520793996167*omega[6.86024] - 0.13370616384921521*omega[7.0]) : 0.0 : True
- 6.276843 : 0.0 : domegadt[6.276843] - (3.5830685225010477*omega[6.0] - 7.161380720145321*omega[6.057104] + 1.8060777240835826*omega[6.276843] + 2.3637971760686236*omega[6.58359] - 0.8659007802831209*omega[6.86024] + 0.274338077775192*omega[7.0]) : 0.0 : True
- 6.58359 : 0.0 : domegadt[6.58359] - (-2.3441715579038664*omega[6.0] + 4.122165246243398*omega[6.057104] - 4.496017125813501*omega[6.276843] + 0.8567652453972836*omega[6.58359] + 2.518320949211015*omega[6.86024] - 0.657062757134355*omega[7.0]) : 0.0 : True
- 6.86024 : 0.0 : domegadt[6.86024] - (2.282635500205682*omega[6.0] - 3.8786632197240785*omega[6.057104] + 3.3931519180649445*omega[6.276843] - 5.188340906407153*omega[6.58359] + 0.5812330525807557*omega[6.86024] + 2.8099836552797197*omega[7.0]) : 0.0 : True
- 7.0 : 0.0 : domegadt[7.0] - (-4.999999999999989*omega[6.0] + 8.412424223594346*omega[6.057104] - 6.970256116656801*omega[6.276843] + 8.777114204150497*omega[6.58359] - 18.219282311088037*omega[6.86024] + 12.99999999999998*omega[7.0]) : 0.0 : True
- 7.057104 : 0.0 : domegadt[7.057104] - (-11.038679241208952*omega[7.0] + 8.755923977938355*omega[7.057104] + 2.8919426153801258*omega[7.276843] - 0.87518639620027*omega[7.58359] + 0.39970520793996167*omega[7.86024] - 0.13370616384921521*omega[8.0]) : 0.0 : True
- 7.276843 : 0.0 : domegadt[7.276843] - (3.5830685225010477*omega[7.0] - 7.161380720145321*omega[7.057104] + 1.8060777240835826*omega[7.276843] + 2.3637971760686236*omega[7.58359] - 0.8659007802831209*omega[7.86024] + 0.274338077775192*omega[8.0]) : 0.0 : True
- 7.58359 : 0.0 : domegadt[7.58359] - (-2.3441715579038664*omega[7.0] + 4.122165246243398*omega[7.057104] - 4.496017125813501*omega[7.276843] + 0.8567652453972836*omega[7.58359] + 2.518320949211015*omega[7.86024] - 0.657062757134355*omega[8.0]) : 0.0 : True
- 7.86024 : 0.0 : domegadt[7.86024] - (2.282635500205682*omega[7.0] - 3.8786632197240785*omega[7.057104] + 3.3931519180649445*omega[7.276843] - 5.188340906407153*omega[7.58359] + 0.5812330525807557*omega[7.86024] + 2.8099836552797197*omega[8.0]) : 0.0 : True
- 8.0 : 0.0 : domegadt[8.0] - (-4.999999999999989*omega[7.0] + 8.412424223594346*omega[7.057104] - 6.970256116656801*omega[7.276843] + 8.777114204150497*omega[7.58359] - 18.219282311088037*omega[7.86024] + 12.99999999999998*omega[8.0]) : 0.0 : True
- 8.057104 : 0.0 : domegadt[8.057104] - (-11.038679241208952*omega[8.0] + 8.755923977938355*omega[8.057104] + 2.8919426153801258*omega[8.276843] - 0.87518639620027*omega[8.58359] + 0.39970520793996167*omega[8.86024] - 0.13370616384921521*omega[9.0]) : 0.0 : True
- 8.276843 : 0.0 : domegadt[8.276843] - (3.5830685225010477*omega[8.0] - 7.161380720145321*omega[8.057104] + 1.8060777240835826*omega[8.276843] + 2.3637971760686236*omega[8.58359] - 0.8659007802831209*omega[8.86024] + 0.274338077775192*omega[9.0]) : 0.0 : True
- 8.58359 : 0.0 : domegadt[8.58359] - (-2.3441715579038664*omega[8.0] + 4.122165246243398*omega[8.057104] - 4.496017125813501*omega[8.276843] + 0.8567652453972836*omega[8.58359] + 2.518320949211015*omega[8.86024] - 0.657062757134355*omega[9.0]) : 0.0 : True
- 8.86024 : 0.0 : domegadt[8.86024] - (2.282635500205682*omega[8.0] - 3.8786632197240785*omega[8.057104] + 3.3931519180649445*omega[8.276843] - 5.188340906407153*omega[8.58359] + 0.5812330525807557*omega[8.86024] + 2.8099836552797197*omega[9.0]) : 0.0 : True
- 9.0 : 0.0 : domegadt[9.0] - (-4.999999999999989*omega[8.0] + 8.412424223594346*omega[8.057104] - 6.970256116656801*omega[8.276843] + 8.777114204150497*omega[8.58359] - 18.219282311088037*omega[8.86024] + 12.99999999999998*omega[9.0]) : 0.0 : True
- 9.057104 : 0.0 : domegadt[9.057104] - (-11.038679241208952*omega[9.0] + 8.755923977938355*omega[9.057104] + 2.8919426153801258*omega[9.276843] - 0.87518639620027*omega[9.58359] + 0.39970520793996167*omega[9.86024] - 0.13370616384921521*omega[10.0]) : 0.0 : True
- 9.276843 : 0.0 : domegadt[9.276843] - (3.5830685225010477*omega[9.0] - 7.161380720145321*omega[9.057104] + 1.8060777240835826*omega[9.276843] + 2.3637971760686236*omega[9.58359] - 0.8659007802831209*omega[9.86024] + 0.274338077775192*omega[10.0]) : 0.0 : True
- 9.58359 : 0.0 : domegadt[9.58359] - (-2.3441715579038664*omega[9.0] + 4.122165246243398*omega[9.057104] - 4.496017125813501*omega[9.276843] + 0.8567652453972836*omega[9.58359] + 2.518320949211015*omega[9.86024] - 0.657062757134355*omega[10.0]) : 0.0 : True
- 9.86024 : 0.0 : domegadt[9.86024] - (2.282635500205682*omega[9.0] - 3.8786632197240785*omega[9.057104] + 3.3931519180649445*omega[9.276843] - 5.188340906407153*omega[9.58359] + 0.5812330525807557*omega[9.86024] + 2.8099836552797197*omega[10.0]) : 0.0 : True
- 10.0 : 0.0 : domegadt[10.0] - (-4.999999999999989*omega[9.0] + 8.412424223594346*omega[9.057104] - 6.970256116656801*omega[9.276843] + 8.777114204150497*omega[9.58359] - 18.219282311088037*omega[9.86024] + 12.99999999999998*omega[10.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.057104 : 0.0 : domegadt[0.057104] - (-11.0386792412*omega[0] + 8.75592397794*omega[0.057104] + 2.89194261538*omega[0.276843] - 0.8751863962*omega[0.58359] + 0.39970520794*omega[0.86024] - 0.133706163849*omega[1.0]) : 0.0 : True
+ 0.276843 : 0.0 : domegadt[0.276843] - (3.5830685225*omega[0] - 7.16138072015*omega[0.057104] + 1.80607772408*omega[0.276843] + 2.36379717607*omega[0.58359] - 0.865900780283*omega[0.86024] + 0.274338077775*omega[1.0]) : 0.0 : True
+ 0.58359 : 0.0 : domegadt[0.58359] - (-2.3441715579*omega[0] + 4.12216524624*omega[0.057104] - 4.49601712581*omega[0.276843] + 0.856765245397*omega[0.58359] + 2.51832094921*omega[0.86024] - 0.657062757134*omega[1.0]) : 0.0 : True
+ 0.86024 : 0.0 : domegadt[0.86024] - (2.28263550021*omega[0] - 3.87866321972*omega[0.057104] + 3.39315191806*omega[0.276843] - 5.18834090641*omega[0.58359] + 0.581233052581*omega[0.86024] + 2.80998365528*omega[1.0]) : 0.0 : True
+ 1.0 : 0.0 : domegadt[1.0] - (-5.0*omega[0] + 8.41242422359*omega[0.057104] - 6.97025611666*omega[0.276843] + 8.77711420415*omega[0.58359] - 18.2192823111*omega[0.86024] + 13.0*omega[1.0]) : 0.0 : True
+ 1.057104 : 0.0 : domegadt[1.057104] - (-11.0386792412*omega[1.0] + 8.75592397794*omega[1.057104] + 2.89194261538*omega[1.276843] - 0.8751863962*omega[1.58359] + 0.39970520794*omega[1.86024] - 0.133706163849*omega[2.0]) : 0.0 : True
+ 1.276843 : 0.0 : domegadt[1.276843] - (3.5830685225*omega[1.0] - 7.16138072015*omega[1.057104] + 1.80607772408*omega[1.276843] + 2.36379717607*omega[1.58359] - 0.865900780283*omega[1.86024] + 0.274338077775*omega[2.0]) : 0.0 : True
+ 1.58359 : 0.0 : domegadt[1.58359] - (-2.3441715579*omega[1.0] + 4.12216524624*omega[1.057104] - 4.49601712581*omega[1.276843] + 0.856765245397*omega[1.58359] + 2.51832094921*omega[1.86024] - 0.657062757134*omega[2.0]) : 0.0 : True
+ 1.86024 : 0.0 : domegadt[1.86024] - (2.28263550021*omega[1.0] - 3.87866321972*omega[1.057104] + 3.39315191806*omega[1.276843] - 5.18834090641*omega[1.58359] + 0.581233052581*omega[1.86024] + 2.80998365528*omega[2.0]) : 0.0 : True
+ 2.0 : 0.0 : domegadt[2.0] - (-5.0*omega[1.0] + 8.41242422359*omega[1.057104] - 6.97025611666*omega[1.276843] + 8.77711420415*omega[1.58359] - 18.2192823111*omega[1.86024] + 13.0*omega[2.0]) : 0.0 : True
+ 2.057104 : 0.0 : domegadt[2.057104] - (-11.0386792412*omega[2.0] + 8.75592397794*omega[2.057104] + 2.89194261538*omega[2.276843] - 0.8751863962*omega[2.58359] + 0.39970520794*omega[2.86024] - 0.133706163849*omega[3.0]) : 0.0 : True
+ 2.276843 : 0.0 : domegadt[2.276843] - (3.5830685225*omega[2.0] - 7.16138072015*omega[2.057104] + 1.80607772408*omega[2.276843] + 2.36379717607*omega[2.58359] - 0.865900780283*omega[2.86024] + 0.274338077775*omega[3.0]) : 0.0 : True
+ 2.58359 : 0.0 : domegadt[2.58359] - (-2.3441715579*omega[2.0] + 4.12216524624*omega[2.057104] - 4.49601712581*omega[2.276843] + 0.856765245397*omega[2.58359] + 2.51832094921*omega[2.86024] - 0.657062757134*omega[3.0]) : 0.0 : True
+ 2.86024 : 0.0 : domegadt[2.86024] - (2.28263550021*omega[2.0] - 3.87866321972*omega[2.057104] + 3.39315191806*omega[2.276843] - 5.18834090641*omega[2.58359] + 0.581233052581*omega[2.86024] + 2.80998365528*omega[3.0]) : 0.0 : True
+ 3.0 : 0.0 : domegadt[3.0] - (-5.0*omega[2.0] + 8.41242422359*omega[2.057104] - 6.97025611666*omega[2.276843] + 8.77711420415*omega[2.58359] - 18.2192823111*omega[2.86024] + 13.0*omega[3.0]) : 0.0 : True
+ 3.057104 : 0.0 : domegadt[3.057104] - (-11.0386792412*omega[3.0] + 8.75592397794*omega[3.057104] + 2.89194261538*omega[3.276843] - 0.8751863962*omega[3.58359] + 0.39970520794*omega[3.86024] - 0.133706163849*omega[4.0]) : 0.0 : True
+ 3.276843 : 0.0 : domegadt[3.276843] - (3.5830685225*omega[3.0] - 7.16138072015*omega[3.057104] + 1.80607772408*omega[3.276843] + 2.36379717607*omega[3.58359] - 0.865900780283*omega[3.86024] + 0.274338077775*omega[4.0]) : 0.0 : True
+ 3.58359 : 0.0 : domegadt[3.58359] - (-2.3441715579*omega[3.0] + 4.12216524624*omega[3.057104] - 4.49601712581*omega[3.276843] + 0.856765245397*omega[3.58359] + 2.51832094921*omega[3.86024] - 0.657062757134*omega[4.0]) : 0.0 : True
+ 3.86024 : 0.0 : domegadt[3.86024] - (2.28263550021*omega[3.0] - 3.87866321972*omega[3.057104] + 3.39315191806*omega[3.276843] - 5.18834090641*omega[3.58359] + 0.581233052581*omega[3.86024] + 2.80998365528*omega[4.0]) : 0.0 : True
+ 4.0 : 0.0 : domegadt[4.0] - (-5.0*omega[3.0] + 8.41242422359*omega[3.057104] - 6.97025611666*omega[3.276843] + 8.77711420415*omega[3.58359] - 18.2192823111*omega[3.86024] + 13.0*omega[4.0]) : 0.0 : True
+ 4.057104 : 0.0 : domegadt[4.057104] - (-11.0386792412*omega[4.0] + 8.75592397794*omega[4.057104] + 2.89194261538*omega[4.276843] - 0.8751863962*omega[4.58359] + 0.39970520794*omega[4.86024] - 0.133706163849*omega[5.0]) : 0.0 : True
+ 4.276843 : 0.0 : domegadt[4.276843] - (3.5830685225*omega[4.0] - 7.16138072015*omega[4.057104] + 1.80607772408*omega[4.276843] + 2.36379717607*omega[4.58359] - 0.865900780283*omega[4.86024] + 0.274338077775*omega[5.0]) : 0.0 : True
+ 4.58359 : 0.0 : domegadt[4.58359] - (-2.3441715579*omega[4.0] + 4.12216524624*omega[4.057104] - 4.49601712581*omega[4.276843] + 0.856765245397*omega[4.58359] + 2.51832094921*omega[4.86024] - 0.657062757134*omega[5.0]) : 0.0 : True
+ 4.86024 : 0.0 : domegadt[4.86024] - (2.28263550021*omega[4.0] - 3.87866321972*omega[4.057104] + 3.39315191806*omega[4.276843] - 5.18834090641*omega[4.58359] + 0.581233052581*omega[4.86024] + 2.80998365528*omega[5.0]) : 0.0 : True
+ 5.0 : 0.0 : domegadt[5.0] - (-5.0*omega[4.0] + 8.41242422359*omega[4.057104] - 6.97025611666*omega[4.276843] + 8.77711420415*omega[4.58359] - 18.2192823111*omega[4.86024] + 13.0*omega[5.0]) : 0.0 : True
+ 5.057104 : 0.0 : domegadt[5.057104] - (-11.0386792412*omega[5.0] + 8.75592397794*omega[5.057104] + 2.89194261538*omega[5.276843] - 0.8751863962*omega[5.58359] + 0.39970520794*omega[5.86024] - 0.133706163849*omega[6.0]) : 0.0 : True
+ 5.276843 : 0.0 : domegadt[5.276843] - (3.5830685225*omega[5.0] - 7.16138072015*omega[5.057104] + 1.80607772408*omega[5.276843] + 2.36379717607*omega[5.58359] - 0.865900780283*omega[5.86024] + 0.274338077775*omega[6.0]) : 0.0 : True
+ 5.58359 : 0.0 : domegadt[5.58359] - (-2.3441715579*omega[5.0] + 4.12216524624*omega[5.057104] - 4.49601712581*omega[5.276843] + 0.856765245397*omega[5.58359] + 2.51832094921*omega[5.86024] - 0.657062757134*omega[6.0]) : 0.0 : True
+ 5.86024 : 0.0 : domegadt[5.86024] - (2.28263550021*omega[5.0] - 3.87866321972*omega[5.057104] + 3.39315191806*omega[5.276843] - 5.18834090641*omega[5.58359] + 0.581233052581*omega[5.86024] + 2.80998365528*omega[6.0]) : 0.0 : True
+ 6.0 : 0.0 : domegadt[6.0] - (-5.0*omega[5.0] + 8.41242422359*omega[5.057104] - 6.97025611666*omega[5.276843] + 8.77711420415*omega[5.58359] - 18.2192823111*omega[5.86024] + 13.0*omega[6.0]) : 0.0 : True
+ 6.057104 : 0.0 : domegadt[6.057104] - (-11.0386792412*omega[6.0] + 8.75592397794*omega[6.057104] + 2.89194261538*omega[6.276843] - 0.8751863962*omega[6.58359] + 0.39970520794*omega[6.86024] - 0.133706163849*omega[7.0]) : 0.0 : True
+ 6.276843 : 0.0 : domegadt[6.276843] - (3.5830685225*omega[6.0] - 7.16138072015*omega[6.057104] + 1.80607772408*omega[6.276843] + 2.36379717607*omega[6.58359] - 0.865900780283*omega[6.86024] + 0.274338077775*omega[7.0]) : 0.0 : True
+ 6.58359 : 0.0 : domegadt[6.58359] - (-2.3441715579*omega[6.0] + 4.12216524624*omega[6.057104] - 4.49601712581*omega[6.276843] + 0.856765245397*omega[6.58359] + 2.51832094921*omega[6.86024] - 0.657062757134*omega[7.0]) : 0.0 : True
+ 6.86024 : 0.0 : domegadt[6.86024] - (2.28263550021*omega[6.0] - 3.87866321972*omega[6.057104] + 3.39315191806*omega[6.276843] - 5.18834090641*omega[6.58359] + 0.581233052581*omega[6.86024] + 2.80998365528*omega[7.0]) : 0.0 : True
+ 7.0 : 0.0 : domegadt[7.0] - (-5.0*omega[6.0] + 8.41242422359*omega[6.057104] - 6.97025611666*omega[6.276843] + 8.77711420415*omega[6.58359] - 18.2192823111*omega[6.86024] + 13.0*omega[7.0]) : 0.0 : True
+ 7.057104 : 0.0 : domegadt[7.057104] - (-11.0386792412*omega[7.0] + 8.75592397794*omega[7.057104] + 2.89194261538*omega[7.276843] - 0.8751863962*omega[7.58359] + 0.39970520794*omega[7.86024] - 0.133706163849*omega[8.0]) : 0.0 : True
+ 7.276843 : 0.0 : domegadt[7.276843] - (3.5830685225*omega[7.0] - 7.16138072015*omega[7.057104] + 1.80607772408*omega[7.276843] + 2.36379717607*omega[7.58359] - 0.865900780283*omega[7.86024] + 0.274338077775*omega[8.0]) : 0.0 : True
+ 7.58359 : 0.0 : domegadt[7.58359] - (-2.3441715579*omega[7.0] + 4.12216524624*omega[7.057104] - 4.49601712581*omega[7.276843] + 0.856765245397*omega[7.58359] + 2.51832094921*omega[7.86024] - 0.657062757134*omega[8.0]) : 0.0 : True
+ 7.86024 : 0.0 : domegadt[7.86024] - (2.28263550021*omega[7.0] - 3.87866321972*omega[7.057104] + 3.39315191806*omega[7.276843] - 5.18834090641*omega[7.58359] + 0.581233052581*omega[7.86024] + 2.80998365528*omega[8.0]) : 0.0 : True
+ 8.0 : 0.0 : domegadt[8.0] - (-5.0*omega[7.0] + 8.41242422359*omega[7.057104] - 6.97025611666*omega[7.276843] + 8.77711420415*omega[7.58359] - 18.2192823111*omega[7.86024] + 13.0*omega[8.0]) : 0.0 : True
+ 8.057104 : 0.0 : domegadt[8.057104] - (-11.0386792412*omega[8.0] + 8.75592397794*omega[8.057104] + 2.89194261538*omega[8.276843] - 0.8751863962*omega[8.58359] + 0.39970520794*omega[8.86024] - 0.133706163849*omega[9.0]) : 0.0 : True
+ 8.276843 : 0.0 : domegadt[8.276843] - (3.5830685225*omega[8.0] - 7.16138072015*omega[8.057104] + 1.80607772408*omega[8.276843] + 2.36379717607*omega[8.58359] - 0.865900780283*omega[8.86024] + 0.274338077775*omega[9.0]) : 0.0 : True
+ 8.58359 : 0.0 : domegadt[8.58359] - (-2.3441715579*omega[8.0] + 4.12216524624*omega[8.057104] - 4.49601712581*omega[8.276843] + 0.856765245397*omega[8.58359] + 2.51832094921*omega[8.86024] - 0.657062757134*omega[9.0]) : 0.0 : True
+ 8.86024 : 0.0 : domegadt[8.86024] - (2.28263550021*omega[8.0] - 3.87866321972*omega[8.057104] + 3.39315191806*omega[8.276843] - 5.18834090641*omega[8.58359] + 0.581233052581*omega[8.86024] + 2.80998365528*omega[9.0]) : 0.0 : True
+ 9.0 : 0.0 : domegadt[9.0] - (-5.0*omega[8.0] + 8.41242422359*omega[8.057104] - 6.97025611666*omega[8.276843] + 8.77711420415*omega[8.58359] - 18.2192823111*omega[8.86024] + 13.0*omega[9.0]) : 0.0 : True
+ 9.057104 : 0.0 : domegadt[9.057104] - (-11.0386792412*omega[9.0] + 8.75592397794*omega[9.057104] + 2.89194261538*omega[9.276843] - 0.8751863962*omega[9.58359] + 0.39970520794*omega[9.86024] - 0.133706163849*omega[10]) : 0.0 : True
+ 9.276843 : 0.0 : domegadt[9.276843] - (3.5830685225*omega[9.0] - 7.16138072015*omega[9.057104] + 1.80607772408*omega[9.276843] + 2.36379717607*omega[9.58359] - 0.865900780283*omega[9.86024] + 0.274338077775*omega[10]) : 0.0 : True
+ 9.58359 : 0.0 : domegadt[9.58359] - (-2.3441715579*omega[9.0] + 4.12216524624*omega[9.057104] - 4.49601712581*omega[9.276843] + 0.856765245397*omega[9.58359] + 2.51832094921*omega[9.86024] - 0.657062757134*omega[10]) : 0.0 : True
+ 9.86024 : 0.0 : domegadt[9.86024] - (2.28263550021*omega[9.0] - 3.87866321972*omega[9.057104] + 3.39315191806*omega[9.276843] - 5.18834090641*omega[9.58359] + 0.581233052581*omega[9.86024] + 2.80998365528*omega[10]) : 0.0 : True
+ 10 : 0.0 : domegadt[10] - (-5.0*omega[9.0] + 8.41242422359*omega[9.057104] - 6.97025611666*omega[9.276843] + 8.77711420415*omega[9.58359] - 18.2192823111*omega[9.86024] + 13.0*omega[10]) : 0.0 : True
dthetadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.057104 : 0.0 : dthetadt[0.057104] - (-11.038679241208952*theta[0.0] + 8.755923977938355*theta[0.057104] + 2.8919426153801258*theta[0.276843] - 0.87518639620027*theta[0.58359] + 0.39970520793996167*theta[0.86024] - 0.13370616384921521*theta[1.0]) : 0.0 : True
- 0.276843 : 0.0 : dthetadt[0.276843] - (3.5830685225010477*theta[0.0] - 7.161380720145321*theta[0.057104] + 1.8060777240835826*theta[0.276843] + 2.3637971760686236*theta[0.58359] - 0.8659007802831209*theta[0.86024] + 0.274338077775192*theta[1.0]) : 0.0 : True
- 0.58359 : 0.0 : dthetadt[0.58359] - (-2.3441715579038664*theta[0.0] + 4.122165246243398*theta[0.057104] - 4.496017125813501*theta[0.276843] + 0.8567652453972836*theta[0.58359] + 2.518320949211015*theta[0.86024] - 0.657062757134355*theta[1.0]) : 0.0 : True
- 0.86024 : 0.0 : dthetadt[0.86024] - (2.282635500205682*theta[0.0] - 3.8786632197240785*theta[0.057104] + 3.3931519180649445*theta[0.276843] - 5.188340906407153*theta[0.58359] + 0.5812330525807557*theta[0.86024] + 2.8099836552797197*theta[1.0]) : 0.0 : True
- 1.0 : 0.0 : dthetadt[1.0] - (-4.999999999999989*theta[0.0] + 8.412424223594346*theta[0.057104] - 6.970256116656801*theta[0.276843] + 8.777114204150497*theta[0.58359] - 18.219282311088037*theta[0.86024] + 12.99999999999998*theta[1.0]) : 0.0 : True
- 1.057104 : 0.0 : dthetadt[1.057104] - (-11.038679241208952*theta[1.0] + 8.755923977938355*theta[1.057104] + 2.8919426153801258*theta[1.276843] - 0.87518639620027*theta[1.58359] + 0.39970520793996167*theta[1.86024] - 0.13370616384921521*theta[2.0]) : 0.0 : True
- 1.276843 : 0.0 : dthetadt[1.276843] - (3.5830685225010477*theta[1.0] - 7.161380720145321*theta[1.057104] + 1.8060777240835826*theta[1.276843] + 2.3637971760686236*theta[1.58359] - 0.8659007802831209*theta[1.86024] + 0.274338077775192*theta[2.0]) : 0.0 : True
- 1.58359 : 0.0 : dthetadt[1.58359] - (-2.3441715579038664*theta[1.0] + 4.122165246243398*theta[1.057104] - 4.496017125813501*theta[1.276843] + 0.8567652453972836*theta[1.58359] + 2.518320949211015*theta[1.86024] - 0.657062757134355*theta[2.0]) : 0.0 : True
- 1.86024 : 0.0 : dthetadt[1.86024] - (2.282635500205682*theta[1.0] - 3.8786632197240785*theta[1.057104] + 3.3931519180649445*theta[1.276843] - 5.188340906407153*theta[1.58359] + 0.5812330525807557*theta[1.86024] + 2.8099836552797197*theta[2.0]) : 0.0 : True
- 2.0 : 0.0 : dthetadt[2.0] - (-4.999999999999989*theta[1.0] + 8.412424223594346*theta[1.057104] - 6.970256116656801*theta[1.276843] + 8.777114204150497*theta[1.58359] - 18.219282311088037*theta[1.86024] + 12.99999999999998*theta[2.0]) : 0.0 : True
- 2.057104 : 0.0 : dthetadt[2.057104] - (-11.038679241208952*theta[2.0] + 8.755923977938355*theta[2.057104] + 2.8919426153801258*theta[2.276843] - 0.87518639620027*theta[2.58359] + 0.39970520793996167*theta[2.86024] - 0.13370616384921521*theta[3.0]) : 0.0 : True
- 2.276843 : 0.0 : dthetadt[2.276843] - (3.5830685225010477*theta[2.0] - 7.161380720145321*theta[2.057104] + 1.8060777240835826*theta[2.276843] + 2.3637971760686236*theta[2.58359] - 0.8659007802831209*theta[2.86024] + 0.274338077775192*theta[3.0]) : 0.0 : True
- 2.58359 : 0.0 : dthetadt[2.58359] - (-2.3441715579038664*theta[2.0] + 4.122165246243398*theta[2.057104] - 4.496017125813501*theta[2.276843] + 0.8567652453972836*theta[2.58359] + 2.518320949211015*theta[2.86024] - 0.657062757134355*theta[3.0]) : 0.0 : True
- 2.86024 : 0.0 : dthetadt[2.86024] - (2.282635500205682*theta[2.0] - 3.8786632197240785*theta[2.057104] + 3.3931519180649445*theta[2.276843] - 5.188340906407153*theta[2.58359] + 0.5812330525807557*theta[2.86024] + 2.8099836552797197*theta[3.0]) : 0.0 : True
- 3.0 : 0.0 : dthetadt[3.0] - (-4.999999999999989*theta[2.0] + 8.412424223594346*theta[2.057104] - 6.970256116656801*theta[2.276843] + 8.777114204150497*theta[2.58359] - 18.219282311088037*theta[2.86024] + 12.99999999999998*theta[3.0]) : 0.0 : True
- 3.057104 : 0.0 : dthetadt[3.057104] - (-11.038679241208952*theta[3.0] + 8.755923977938355*theta[3.057104] + 2.8919426153801258*theta[3.276843] - 0.87518639620027*theta[3.58359] + 0.39970520793996167*theta[3.86024] - 0.13370616384921521*theta[4.0]) : 0.0 : True
- 3.276843 : 0.0 : dthetadt[3.276843] - (3.5830685225010477*theta[3.0] - 7.161380720145321*theta[3.057104] + 1.8060777240835826*theta[3.276843] + 2.3637971760686236*theta[3.58359] - 0.8659007802831209*theta[3.86024] + 0.274338077775192*theta[4.0]) : 0.0 : True
- 3.58359 : 0.0 : dthetadt[3.58359] - (-2.3441715579038664*theta[3.0] + 4.122165246243398*theta[3.057104] - 4.496017125813501*theta[3.276843] + 0.8567652453972836*theta[3.58359] + 2.518320949211015*theta[3.86024] - 0.657062757134355*theta[4.0]) : 0.0 : True
- 3.86024 : 0.0 : dthetadt[3.86024] - (2.282635500205682*theta[3.0] - 3.8786632197240785*theta[3.057104] + 3.3931519180649445*theta[3.276843] - 5.188340906407153*theta[3.58359] + 0.5812330525807557*theta[3.86024] + 2.8099836552797197*theta[4.0]) : 0.0 : True
- 4.0 : 0.0 : dthetadt[4.0] - (-4.999999999999989*theta[3.0] + 8.412424223594346*theta[3.057104] - 6.970256116656801*theta[3.276843] + 8.777114204150497*theta[3.58359] - 18.219282311088037*theta[3.86024] + 12.99999999999998*theta[4.0]) : 0.0 : True
- 4.057104 : 0.0 : dthetadt[4.057104] - (-11.038679241208952*theta[4.0] + 8.755923977938355*theta[4.057104] + 2.8919426153801258*theta[4.276843] - 0.87518639620027*theta[4.58359] + 0.39970520793996167*theta[4.86024] - 0.13370616384921521*theta[5.0]) : 0.0 : True
- 4.276843 : 0.0 : dthetadt[4.276843] - (3.5830685225010477*theta[4.0] - 7.161380720145321*theta[4.057104] + 1.8060777240835826*theta[4.276843] + 2.3637971760686236*theta[4.58359] - 0.8659007802831209*theta[4.86024] + 0.274338077775192*theta[5.0]) : 0.0 : True
- 4.58359 : 0.0 : dthetadt[4.58359] - (-2.3441715579038664*theta[4.0] + 4.122165246243398*theta[4.057104] - 4.496017125813501*theta[4.276843] + 0.8567652453972836*theta[4.58359] + 2.518320949211015*theta[4.86024] - 0.657062757134355*theta[5.0]) : 0.0 : True
- 4.86024 : 0.0 : dthetadt[4.86024] - (2.282635500205682*theta[4.0] - 3.8786632197240785*theta[4.057104] + 3.3931519180649445*theta[4.276843] - 5.188340906407153*theta[4.58359] + 0.5812330525807557*theta[4.86024] + 2.8099836552797197*theta[5.0]) : 0.0 : True
- 5.0 : 0.0 : dthetadt[5.0] - (-4.999999999999989*theta[4.0] + 8.412424223594346*theta[4.057104] - 6.970256116656801*theta[4.276843] + 8.777114204150497*theta[4.58359] - 18.219282311088037*theta[4.86024] + 12.99999999999998*theta[5.0]) : 0.0 : True
- 5.057104 : 0.0 : dthetadt[5.057104] - (-11.038679241208952*theta[5.0] + 8.755923977938355*theta[5.057104] + 2.8919426153801258*theta[5.276843] - 0.87518639620027*theta[5.58359] + 0.39970520793996167*theta[5.86024] - 0.13370616384921521*theta[6.0]) : 0.0 : True
- 5.276843 : 0.0 : dthetadt[5.276843] - (3.5830685225010477*theta[5.0] - 7.161380720145321*theta[5.057104] + 1.8060777240835826*theta[5.276843] + 2.3637971760686236*theta[5.58359] - 0.8659007802831209*theta[5.86024] + 0.274338077775192*theta[6.0]) : 0.0 : True
- 5.58359 : 0.0 : dthetadt[5.58359] - (-2.3441715579038664*theta[5.0] + 4.122165246243398*theta[5.057104] - 4.496017125813501*theta[5.276843] + 0.8567652453972836*theta[5.58359] + 2.518320949211015*theta[5.86024] - 0.657062757134355*theta[6.0]) : 0.0 : True
- 5.86024 : 0.0 : dthetadt[5.86024] - (2.282635500205682*theta[5.0] - 3.8786632197240785*theta[5.057104] + 3.3931519180649445*theta[5.276843] - 5.188340906407153*theta[5.58359] + 0.5812330525807557*theta[5.86024] + 2.8099836552797197*theta[6.0]) : 0.0 : True
- 6.0 : 0.0 : dthetadt[6.0] - (-4.999999999999989*theta[5.0] + 8.412424223594346*theta[5.057104] - 6.970256116656801*theta[5.276843] + 8.777114204150497*theta[5.58359] - 18.219282311088037*theta[5.86024] + 12.99999999999998*theta[6.0]) : 0.0 : True
- 6.057104 : 0.0 : dthetadt[6.057104] - (-11.038679241208952*theta[6.0] + 8.755923977938355*theta[6.057104] + 2.8919426153801258*theta[6.276843] - 0.87518639620027*theta[6.58359] + 0.39970520793996167*theta[6.86024] - 0.13370616384921521*theta[7.0]) : 0.0 : True
- 6.276843 : 0.0 : dthetadt[6.276843] - (3.5830685225010477*theta[6.0] - 7.161380720145321*theta[6.057104] + 1.8060777240835826*theta[6.276843] + 2.3637971760686236*theta[6.58359] - 0.8659007802831209*theta[6.86024] + 0.274338077775192*theta[7.0]) : 0.0 : True
- 6.58359 : 0.0 : dthetadt[6.58359] - (-2.3441715579038664*theta[6.0] + 4.122165246243398*theta[6.057104] - 4.496017125813501*theta[6.276843] + 0.8567652453972836*theta[6.58359] + 2.518320949211015*theta[6.86024] - 0.657062757134355*theta[7.0]) : 0.0 : True
- 6.86024 : 0.0 : dthetadt[6.86024] - (2.282635500205682*theta[6.0] - 3.8786632197240785*theta[6.057104] + 3.3931519180649445*theta[6.276843] - 5.188340906407153*theta[6.58359] + 0.5812330525807557*theta[6.86024] + 2.8099836552797197*theta[7.0]) : 0.0 : True
- 7.0 : 0.0 : dthetadt[7.0] - (-4.999999999999989*theta[6.0] + 8.412424223594346*theta[6.057104] - 6.970256116656801*theta[6.276843] + 8.777114204150497*theta[6.58359] - 18.219282311088037*theta[6.86024] + 12.99999999999998*theta[7.0]) : 0.0 : True
- 7.057104 : 0.0 : dthetadt[7.057104] - (-11.038679241208952*theta[7.0] + 8.755923977938355*theta[7.057104] + 2.8919426153801258*theta[7.276843] - 0.87518639620027*theta[7.58359] + 0.39970520793996167*theta[7.86024] - 0.13370616384921521*theta[8.0]) : 0.0 : True
- 7.276843 : 0.0 : dthetadt[7.276843] - (3.5830685225010477*theta[7.0] - 7.161380720145321*theta[7.057104] + 1.8060777240835826*theta[7.276843] + 2.3637971760686236*theta[7.58359] - 0.8659007802831209*theta[7.86024] + 0.274338077775192*theta[8.0]) : 0.0 : True
- 7.58359 : 0.0 : dthetadt[7.58359] - (-2.3441715579038664*theta[7.0] + 4.122165246243398*theta[7.057104] - 4.496017125813501*theta[7.276843] + 0.8567652453972836*theta[7.58359] + 2.518320949211015*theta[7.86024] - 0.657062757134355*theta[8.0]) : 0.0 : True
- 7.86024 : 0.0 : dthetadt[7.86024] - (2.282635500205682*theta[7.0] - 3.8786632197240785*theta[7.057104] + 3.3931519180649445*theta[7.276843] - 5.188340906407153*theta[7.58359] + 0.5812330525807557*theta[7.86024] + 2.8099836552797197*theta[8.0]) : 0.0 : True
- 8.0 : 0.0 : dthetadt[8.0] - (-4.999999999999989*theta[7.0] + 8.412424223594346*theta[7.057104] - 6.970256116656801*theta[7.276843] + 8.777114204150497*theta[7.58359] - 18.219282311088037*theta[7.86024] + 12.99999999999998*theta[8.0]) : 0.0 : True
- 8.057104 : 0.0 : dthetadt[8.057104] - (-11.038679241208952*theta[8.0] + 8.755923977938355*theta[8.057104] + 2.8919426153801258*theta[8.276843] - 0.87518639620027*theta[8.58359] + 0.39970520793996167*theta[8.86024] - 0.13370616384921521*theta[9.0]) : 0.0 : True
- 8.276843 : 0.0 : dthetadt[8.276843] - (3.5830685225010477*theta[8.0] - 7.161380720145321*theta[8.057104] + 1.8060777240835826*theta[8.276843] + 2.3637971760686236*theta[8.58359] - 0.8659007802831209*theta[8.86024] + 0.274338077775192*theta[9.0]) : 0.0 : True
- 8.58359 : 0.0 : dthetadt[8.58359] - (-2.3441715579038664*theta[8.0] + 4.122165246243398*theta[8.057104] - 4.496017125813501*theta[8.276843] + 0.8567652453972836*theta[8.58359] + 2.518320949211015*theta[8.86024] - 0.657062757134355*theta[9.0]) : 0.0 : True
- 8.86024 : 0.0 : dthetadt[8.86024] - (2.282635500205682*theta[8.0] - 3.8786632197240785*theta[8.057104] + 3.3931519180649445*theta[8.276843] - 5.188340906407153*theta[8.58359] + 0.5812330525807557*theta[8.86024] + 2.8099836552797197*theta[9.0]) : 0.0 : True
- 9.0 : 0.0 : dthetadt[9.0] - (-4.999999999999989*theta[8.0] + 8.412424223594346*theta[8.057104] - 6.970256116656801*theta[8.276843] + 8.777114204150497*theta[8.58359] - 18.219282311088037*theta[8.86024] + 12.99999999999998*theta[9.0]) : 0.0 : True
- 9.057104 : 0.0 : dthetadt[9.057104] - (-11.038679241208952*theta[9.0] + 8.755923977938355*theta[9.057104] + 2.8919426153801258*theta[9.276843] - 0.87518639620027*theta[9.58359] + 0.39970520793996167*theta[9.86024] - 0.13370616384921521*theta[10.0]) : 0.0 : True
- 9.276843 : 0.0 : dthetadt[9.276843] - (3.5830685225010477*theta[9.0] - 7.161380720145321*theta[9.057104] + 1.8060777240835826*theta[9.276843] + 2.3637971760686236*theta[9.58359] - 0.8659007802831209*theta[9.86024] + 0.274338077775192*theta[10.0]) : 0.0 : True
- 9.58359 : 0.0 : dthetadt[9.58359] - (-2.3441715579038664*theta[9.0] + 4.122165246243398*theta[9.057104] - 4.496017125813501*theta[9.276843] + 0.8567652453972836*theta[9.58359] + 2.518320949211015*theta[9.86024] - 0.657062757134355*theta[10.0]) : 0.0 : True
- 9.86024 : 0.0 : dthetadt[9.86024] - (2.282635500205682*theta[9.0] - 3.8786632197240785*theta[9.057104] + 3.3931519180649445*theta[9.276843] - 5.188340906407153*theta[9.58359] + 0.5812330525807557*theta[9.86024] + 2.8099836552797197*theta[10.0]) : 0.0 : True
- 10.0 : 0.0 : dthetadt[10.0] - (-4.999999999999989*theta[9.0] + 8.412424223594346*theta[9.057104] - 6.970256116656801*theta[9.276843] + 8.777114204150497*theta[9.58359] - 18.219282311088037*theta[9.86024] + 12.99999999999998*theta[10.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.057104 : 0.0 : dthetadt[0.057104] - (-11.0386792412*theta[0] + 8.75592397794*theta[0.057104] + 2.89194261538*theta[0.276843] - 0.8751863962*theta[0.58359] + 0.39970520794*theta[0.86024] - 0.133706163849*theta[1.0]) : 0.0 : True
+ 0.276843 : 0.0 : dthetadt[0.276843] - (3.5830685225*theta[0] - 7.16138072015*theta[0.057104] + 1.80607772408*theta[0.276843] + 2.36379717607*theta[0.58359] - 0.865900780283*theta[0.86024] + 0.274338077775*theta[1.0]) : 0.0 : True
+ 0.58359 : 0.0 : dthetadt[0.58359] - (-2.3441715579*theta[0] + 4.12216524624*theta[0.057104] - 4.49601712581*theta[0.276843] + 0.856765245397*theta[0.58359] + 2.51832094921*theta[0.86024] - 0.657062757134*theta[1.0]) : 0.0 : True
+ 0.86024 : 0.0 : dthetadt[0.86024] - (2.28263550021*theta[0] - 3.87866321972*theta[0.057104] + 3.39315191806*theta[0.276843] - 5.18834090641*theta[0.58359] + 0.581233052581*theta[0.86024] + 2.80998365528*theta[1.0]) : 0.0 : True
+ 1.0 : 0.0 : dthetadt[1.0] - (-5.0*theta[0] + 8.41242422359*theta[0.057104] - 6.97025611666*theta[0.276843] + 8.77711420415*theta[0.58359] - 18.2192823111*theta[0.86024] + 13.0*theta[1.0]) : 0.0 : True
+ 1.057104 : 0.0 : dthetadt[1.057104] - (-11.0386792412*theta[1.0] + 8.75592397794*theta[1.057104] + 2.89194261538*theta[1.276843] - 0.8751863962*theta[1.58359] + 0.39970520794*theta[1.86024] - 0.133706163849*theta[2.0]) : 0.0 : True
+ 1.276843 : 0.0 : dthetadt[1.276843] - (3.5830685225*theta[1.0] - 7.16138072015*theta[1.057104] + 1.80607772408*theta[1.276843] + 2.36379717607*theta[1.58359] - 0.865900780283*theta[1.86024] + 0.274338077775*theta[2.0]) : 0.0 : True
+ 1.58359 : 0.0 : dthetadt[1.58359] - (-2.3441715579*theta[1.0] + 4.12216524624*theta[1.057104] - 4.49601712581*theta[1.276843] + 0.856765245397*theta[1.58359] + 2.51832094921*theta[1.86024] - 0.657062757134*theta[2.0]) : 0.0 : True
+ 1.86024 : 0.0 : dthetadt[1.86024] - (2.28263550021*theta[1.0] - 3.87866321972*theta[1.057104] + 3.39315191806*theta[1.276843] - 5.18834090641*theta[1.58359] + 0.581233052581*theta[1.86024] + 2.80998365528*theta[2.0]) : 0.0 : True
+ 2.0 : 0.0 : dthetadt[2.0] - (-5.0*theta[1.0] + 8.41242422359*theta[1.057104] - 6.97025611666*theta[1.276843] + 8.77711420415*theta[1.58359] - 18.2192823111*theta[1.86024] + 13.0*theta[2.0]) : 0.0 : True
+ 2.057104 : 0.0 : dthetadt[2.057104] - (-11.0386792412*theta[2.0] + 8.75592397794*theta[2.057104] + 2.89194261538*theta[2.276843] - 0.8751863962*theta[2.58359] + 0.39970520794*theta[2.86024] - 0.133706163849*theta[3.0]) : 0.0 : True
+ 2.276843 : 0.0 : dthetadt[2.276843] - (3.5830685225*theta[2.0] - 7.16138072015*theta[2.057104] + 1.80607772408*theta[2.276843] + 2.36379717607*theta[2.58359] - 0.865900780283*theta[2.86024] + 0.274338077775*theta[3.0]) : 0.0 : True
+ 2.58359 : 0.0 : dthetadt[2.58359] - (-2.3441715579*theta[2.0] + 4.12216524624*theta[2.057104] - 4.49601712581*theta[2.276843] + 0.856765245397*theta[2.58359] + 2.51832094921*theta[2.86024] - 0.657062757134*theta[3.0]) : 0.0 : True
+ 2.86024 : 0.0 : dthetadt[2.86024] - (2.28263550021*theta[2.0] - 3.87866321972*theta[2.057104] + 3.39315191806*theta[2.276843] - 5.18834090641*theta[2.58359] + 0.581233052581*theta[2.86024] + 2.80998365528*theta[3.0]) : 0.0 : True
+ 3.0 : 0.0 : dthetadt[3.0] - (-5.0*theta[2.0] + 8.41242422359*theta[2.057104] - 6.97025611666*theta[2.276843] + 8.77711420415*theta[2.58359] - 18.2192823111*theta[2.86024] + 13.0*theta[3.0]) : 0.0 : True
+ 3.057104 : 0.0 : dthetadt[3.057104] - (-11.0386792412*theta[3.0] + 8.75592397794*theta[3.057104] + 2.89194261538*theta[3.276843] - 0.8751863962*theta[3.58359] + 0.39970520794*theta[3.86024] - 0.133706163849*theta[4.0]) : 0.0 : True
+ 3.276843 : 0.0 : dthetadt[3.276843] - (3.5830685225*theta[3.0] - 7.16138072015*theta[3.057104] + 1.80607772408*theta[3.276843] + 2.36379717607*theta[3.58359] - 0.865900780283*theta[3.86024] + 0.274338077775*theta[4.0]) : 0.0 : True
+ 3.58359 : 0.0 : dthetadt[3.58359] - (-2.3441715579*theta[3.0] + 4.12216524624*theta[3.057104] - 4.49601712581*theta[3.276843] + 0.856765245397*theta[3.58359] + 2.51832094921*theta[3.86024] - 0.657062757134*theta[4.0]) : 0.0 : True
+ 3.86024 : 0.0 : dthetadt[3.86024] - (2.28263550021*theta[3.0] - 3.87866321972*theta[3.057104] + 3.39315191806*theta[3.276843] - 5.18834090641*theta[3.58359] + 0.581233052581*theta[3.86024] + 2.80998365528*theta[4.0]) : 0.0 : True
+ 4.0 : 0.0 : dthetadt[4.0] - (-5.0*theta[3.0] + 8.41242422359*theta[3.057104] - 6.97025611666*theta[3.276843] + 8.77711420415*theta[3.58359] - 18.2192823111*theta[3.86024] + 13.0*theta[4.0]) : 0.0 : True
+ 4.057104 : 0.0 : dthetadt[4.057104] - (-11.0386792412*theta[4.0] + 8.75592397794*theta[4.057104] + 2.89194261538*theta[4.276843] - 0.8751863962*theta[4.58359] + 0.39970520794*theta[4.86024] - 0.133706163849*theta[5.0]) : 0.0 : True
+ 4.276843 : 0.0 : dthetadt[4.276843] - (3.5830685225*theta[4.0] - 7.16138072015*theta[4.057104] + 1.80607772408*theta[4.276843] + 2.36379717607*theta[4.58359] - 0.865900780283*theta[4.86024] + 0.274338077775*theta[5.0]) : 0.0 : True
+ 4.58359 : 0.0 : dthetadt[4.58359] - (-2.3441715579*theta[4.0] + 4.12216524624*theta[4.057104] - 4.49601712581*theta[4.276843] + 0.856765245397*theta[4.58359] + 2.51832094921*theta[4.86024] - 0.657062757134*theta[5.0]) : 0.0 : True
+ 4.86024 : 0.0 : dthetadt[4.86024] - (2.28263550021*theta[4.0] - 3.87866321972*theta[4.057104] + 3.39315191806*theta[4.276843] - 5.18834090641*theta[4.58359] + 0.581233052581*theta[4.86024] + 2.80998365528*theta[5.0]) : 0.0 : True
+ 5.0 : 0.0 : dthetadt[5.0] - (-5.0*theta[4.0] + 8.41242422359*theta[4.057104] - 6.97025611666*theta[4.276843] + 8.77711420415*theta[4.58359] - 18.2192823111*theta[4.86024] + 13.0*theta[5.0]) : 0.0 : True
+ 5.057104 : 0.0 : dthetadt[5.057104] - (-11.0386792412*theta[5.0] + 8.75592397794*theta[5.057104] + 2.89194261538*theta[5.276843] - 0.8751863962*theta[5.58359] + 0.39970520794*theta[5.86024] - 0.133706163849*theta[6.0]) : 0.0 : True
+ 5.276843 : 0.0 : dthetadt[5.276843] - (3.5830685225*theta[5.0] - 7.16138072015*theta[5.057104] + 1.80607772408*theta[5.276843] + 2.36379717607*theta[5.58359] - 0.865900780283*theta[5.86024] + 0.274338077775*theta[6.0]) : 0.0 : True
+ 5.58359 : 0.0 : dthetadt[5.58359] - (-2.3441715579*theta[5.0] + 4.12216524624*theta[5.057104] - 4.49601712581*theta[5.276843] + 0.856765245397*theta[5.58359] + 2.51832094921*theta[5.86024] - 0.657062757134*theta[6.0]) : 0.0 : True
+ 5.86024 : 0.0 : dthetadt[5.86024] - (2.28263550021*theta[5.0] - 3.87866321972*theta[5.057104] + 3.39315191806*theta[5.276843] - 5.18834090641*theta[5.58359] + 0.581233052581*theta[5.86024] + 2.80998365528*theta[6.0]) : 0.0 : True
+ 6.0 : 0.0 : dthetadt[6.0] - (-5.0*theta[5.0] + 8.41242422359*theta[5.057104] - 6.97025611666*theta[5.276843] + 8.77711420415*theta[5.58359] - 18.2192823111*theta[5.86024] + 13.0*theta[6.0]) : 0.0 : True
+ 6.057104 : 0.0 : dthetadt[6.057104] - (-11.0386792412*theta[6.0] + 8.75592397794*theta[6.057104] + 2.89194261538*theta[6.276843] - 0.8751863962*theta[6.58359] + 0.39970520794*theta[6.86024] - 0.133706163849*theta[7.0]) : 0.0 : True
+ 6.276843 : 0.0 : dthetadt[6.276843] - (3.5830685225*theta[6.0] - 7.16138072015*theta[6.057104] + 1.80607772408*theta[6.276843] + 2.36379717607*theta[6.58359] - 0.865900780283*theta[6.86024] + 0.274338077775*theta[7.0]) : 0.0 : True
+ 6.58359 : 0.0 : dthetadt[6.58359] - (-2.3441715579*theta[6.0] + 4.12216524624*theta[6.057104] - 4.49601712581*theta[6.276843] + 0.856765245397*theta[6.58359] + 2.51832094921*theta[6.86024] - 0.657062757134*theta[7.0]) : 0.0 : True
+ 6.86024 : 0.0 : dthetadt[6.86024] - (2.28263550021*theta[6.0] - 3.87866321972*theta[6.057104] + 3.39315191806*theta[6.276843] - 5.18834090641*theta[6.58359] + 0.581233052581*theta[6.86024] + 2.80998365528*theta[7.0]) : 0.0 : True
+ 7.0 : 0.0 : dthetadt[7.0] - (-5.0*theta[6.0] + 8.41242422359*theta[6.057104] - 6.97025611666*theta[6.276843] + 8.77711420415*theta[6.58359] - 18.2192823111*theta[6.86024] + 13.0*theta[7.0]) : 0.0 : True
+ 7.057104 : 0.0 : dthetadt[7.057104] - (-11.0386792412*theta[7.0] + 8.75592397794*theta[7.057104] + 2.89194261538*theta[7.276843] - 0.8751863962*theta[7.58359] + 0.39970520794*theta[7.86024] - 0.133706163849*theta[8.0]) : 0.0 : True
+ 7.276843 : 0.0 : dthetadt[7.276843] - (3.5830685225*theta[7.0] - 7.16138072015*theta[7.057104] + 1.80607772408*theta[7.276843] + 2.36379717607*theta[7.58359] - 0.865900780283*theta[7.86024] + 0.274338077775*theta[8.0]) : 0.0 : True
+ 7.58359 : 0.0 : dthetadt[7.58359] - (-2.3441715579*theta[7.0] + 4.12216524624*theta[7.057104] - 4.49601712581*theta[7.276843] + 0.856765245397*theta[7.58359] + 2.51832094921*theta[7.86024] - 0.657062757134*theta[8.0]) : 0.0 : True
+ 7.86024 : 0.0 : dthetadt[7.86024] - (2.28263550021*theta[7.0] - 3.87866321972*theta[7.057104] + 3.39315191806*theta[7.276843] - 5.18834090641*theta[7.58359] + 0.581233052581*theta[7.86024] + 2.80998365528*theta[8.0]) : 0.0 : True
+ 8.0 : 0.0 : dthetadt[8.0] - (-5.0*theta[7.0] + 8.41242422359*theta[7.057104] - 6.97025611666*theta[7.276843] + 8.77711420415*theta[7.58359] - 18.2192823111*theta[7.86024] + 13.0*theta[8.0]) : 0.0 : True
+ 8.057104 : 0.0 : dthetadt[8.057104] - (-11.0386792412*theta[8.0] + 8.75592397794*theta[8.057104] + 2.89194261538*theta[8.276843] - 0.8751863962*theta[8.58359] + 0.39970520794*theta[8.86024] - 0.133706163849*theta[9.0]) : 0.0 : True
+ 8.276843 : 0.0 : dthetadt[8.276843] - (3.5830685225*theta[8.0] - 7.16138072015*theta[8.057104] + 1.80607772408*theta[8.276843] + 2.36379717607*theta[8.58359] - 0.865900780283*theta[8.86024] + 0.274338077775*theta[9.0]) : 0.0 : True
+ 8.58359 : 0.0 : dthetadt[8.58359] - (-2.3441715579*theta[8.0] + 4.12216524624*theta[8.057104] - 4.49601712581*theta[8.276843] + 0.856765245397*theta[8.58359] + 2.51832094921*theta[8.86024] - 0.657062757134*theta[9.0]) : 0.0 : True
+ 8.86024 : 0.0 : dthetadt[8.86024] - (2.28263550021*theta[8.0] - 3.87866321972*theta[8.057104] + 3.39315191806*theta[8.276843] - 5.18834090641*theta[8.58359] + 0.581233052581*theta[8.86024] + 2.80998365528*theta[9.0]) : 0.0 : True
+ 9.0 : 0.0 : dthetadt[9.0] - (-5.0*theta[8.0] + 8.41242422359*theta[8.057104] - 6.97025611666*theta[8.276843] + 8.77711420415*theta[8.58359] - 18.2192823111*theta[8.86024] + 13.0*theta[9.0]) : 0.0 : True
+ 9.057104 : 0.0 : dthetadt[9.057104] - (-11.0386792412*theta[9.0] + 8.75592397794*theta[9.057104] + 2.89194261538*theta[9.276843] - 0.8751863962*theta[9.58359] + 0.39970520794*theta[9.86024] - 0.133706163849*theta[10]) : 0.0 : True
+ 9.276843 : 0.0 : dthetadt[9.276843] - (3.5830685225*theta[9.0] - 7.16138072015*theta[9.057104] + 1.80607772408*theta[9.276843] + 2.36379717607*theta[9.58359] - 0.865900780283*theta[9.86024] + 0.274338077775*theta[10]) : 0.0 : True
+ 9.58359 : 0.0 : dthetadt[9.58359] - (-2.3441715579*theta[9.0] + 4.12216524624*theta[9.057104] - 4.49601712581*theta[9.276843] + 0.856765245397*theta[9.58359] + 2.51832094921*theta[9.86024] - 0.657062757134*theta[10]) : 0.0 : True
+ 9.86024 : 0.0 : dthetadt[9.86024] - (2.28263550021*theta[9.0] - 3.87866321972*theta[9.057104] + 3.39315191806*theta[9.276843] - 5.18834090641*theta[9.58359] + 0.581233052581*theta[9.86024] + 2.80998365528*theta[10]) : 0.0 : True
+ 10 : 0.0 : dthetadt[10] - (-5.0*theta[9.0] + 8.41242422359*theta[9.057104] - 6.97025611666*theta[9.276843] + 8.77711420415*theta[9.58359] - 18.2192823111*theta[9.86024] + 13.0*theta[10]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 10.0)
- [0.0, 0.057104, 0.276843, 0.58359, 0.86024, 1.0, 1.057104, 1.276843, 1.58359, 1.86024, 2.0, 2.057104, 2.276843, 2.58359, 2.86024, 3.0, 3.057104, 3.276843, 3.58359, 3.86024, 4.0, 4.057104, 4.276843, 4.58359, 4.86024, 5.0, 5.057104, 5.276843, 5.58359, 5.86024, 6.0, 6.057104, 6.276843, 6.58359, 6.86024, 7.0, 7.057104, 7.276843, 7.58359, 7.86024, 8.0, 8.057104, 8.276843, 8.58359, 8.86024, 9.0, 9.057104, 9.276843, 9.58359, 9.86024, 10.0]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..10.0] : 51 : {0, 0.057104, 0.276843, 0.58359, 0.86024, 1.0, 1.057104, 1.276843, 1.58359, 1.86024, 2.0, 2.057104, 2.276843, 2.58359, 2.86024, 3.0, 3.057104, 3.276843, 3.58359, 3.86024, 4.0, 4.057104, 4.276843, 4.58359, 4.86024, 5.0, 5.057104, 5.276843, 5.58359, 5.86024, 6.0, 6.057104, 6.276843, 6.58359, 6.86024, 7.0, 7.057104, 7.276843, 7.58359, 7.86024, 8.0, 8.057104, 8.276843, 8.58359, 8.86024, 9.0, 9.057104, 9.276843, 9.58359, 9.86024, 10}
-11 Declarations: t b c omega theta domegadt dthetadt diffeq1 diffeq2 domegadt_disc_eq dthetadt_disc_eq
+12 Declarations: t_domain t b c omega theta domegadt dthetadt diffeq1 diffeq2 domegadt_disc_eq dthetadt_disc_eq
[[ 0.0000 3.0400]
[-0.0510 3.0374]
[-0.1033 3.0297]
diff --git a/pyomo/dae/tests/simulator_ode_multindex_example.casadi.txt b/pyomo/dae/tests/simulator_ode_multindex_example.casadi.txt
index 5284bf7d395..f5941be088e 100644
--- a/pyomo/dae/tests/simulator_ode_multindex_example.casadi.txt
+++ b/pyomo/dae/tests/simulator_ode_multindex_example.casadi.txt
@@ -1,17 +1,22 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 20)
+ Key : Finite : Members
+ None : False : [0.0..20.0]
+
2 Param Declarations
b : Size=51, Index=t, Domain=Any, Default=(function), Mutable=False
- Key : Value
- 0.0 : 0.25
- 20.0 : 0.25
+ Key : Value
+ 0 : 0.25
+ 20 : 0.25
c : Size=51, Index=t, Domain=Any, Default=(function), Mutable=False
- Key : Value
- 0.0 : 5.0
- 20.0 : 5.0
+ Key : Value
+ 0 : 5.0
+ 20 : 5.0
4 Var Declarations
domegadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.114208 : None : None : None : False : True : Reals
0.553686 : None : None : None : False : True : Reals
1.167181 : None : None : None : False : True : Reals
@@ -61,10 +66,10 @@
18.553686 : None : None : None : False : True : Reals
19.167181 : None : None : None : False : True : Reals
19.72048 : None : None : None : False : True : Reals
- 20.0 : None : None : None : False : True : Reals
+ 20 : None : None : None : False : True : Reals
dthetadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.114208 : None : None : None : False : True : Reals
0.553686 : None : None : None : False : True : Reals
1.167181 : None : None : None : False : True : Reals
@@ -114,118 +119,118 @@
18.553686 : None : None : None : False : True : Reals
19.167181 : None : None : None : False : True : Reals
19.72048 : None : None : None : False : True : Reals
- 20.0 : None : None : None : False : True : Reals
+ 20 : None : None : None : False : True : Reals
omega : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.114208 : None : -0.0584052578938 : None : False : False : Reals
- 0.553686 : None : -0.339032970813 : None : False : False : Reals
- 1.167181 : None : -1.3085813728 : None : False : False : Reals
- 1.72048 : None : -3.31953476548 : None : False : False : Reals
- 2.0 : None : -3.97140607556 : None : False : False : Reals
- 2.114208 : None : -3.79414242508 : None : False : False : Reals
- 2.553686 : None : -1.87415989325 : None : False : False : Reals
- 3.167181 : None : 1.15054360239 : None : False : False : Reals
- 3.72048 : None : 3.15494012302 : None : False : False : Reals
- 4.0 : None : 2.95904997851 : None : False : False : Reals
- 4.114208 : None : 2.56317201727 : None : False : False : Reals
- 4.553686 : None : 0.437993838251 : None : False : False : Reals
- 5.167181 : None : -2.24814541938 : None : False : False : Reals
- 5.72048 : None : -2.17126376795 : None : False : False : Reals
- 6.0 : None : -1.03460743462 : None : False : False : Reals
- 6.114208 : None : -0.511210101297 : None : False : False : Reals
- 6.553686 : None : 1.37740185505 : None : False : False : Reals
- 7.167181 : None : 0.889332600945 : None : False : False : Reals
- 7.72048 : None : 0.279189792878 : None : False : False : Reals
- 8.0 : None : 1.25264983609 : None : False : False : Reals
- 8.114208 : None : 0.360039532424 : None : False : False : Reals
- 8.553686 : None : -0.37650136964 : None : False : False : Reals
- 9.167181 : None : -1.08194831437 : None : False : False : Reals
- 9.72048 : None : 1.47263866269 : None : False : False : Reals
- 10.0 : None : -0.617334302628 : None : False : False : Reals
- 10.114208 : None : -1.42837229393 : None : False : False : Reals
- 10.553686 : None : 1.16122545389 : None : False : False : Reals
- 11.167181 : None : -0.709902142476 : None : False : False : Reals
- 11.72048 : None : -0.143033280586 : None : False : False : Reals
- 12.0 : None : -0.834040087779 : None : False : False : Reals
- 12.114208 : None : -0.264764663766 : None : False : False : Reals
- 12.553686 : None : 0.329045088402 : None : False : False : Reals
- 13.167181 : None : 0.69325749595 : None : False : False : Reals
- 13.72048 : None : -0.912242843384 : None : False : False : Reals
- 14.0 : None : 0.332757536694 : None : False : False : Reals
- 14.114208 : None : 0.81993163423 : None : False : False : Reals
- 14.553686 : None : -0.819092714178 : None : False : False : Reals
- 15.167181 : None : 0.520931768276 : None : False : False : Reals
- 15.72048 : None : 0.0909453934102 : None : False : False : Reals
- 16.0 : None : 0.647547856913 : None : False : False : Reals
- 16.114208 : None : 0.159231570628 : None : False : False : Reals
- 16.553686 : None : -0.230675325002 : None : False : False : Reals
- 17.167181 : None : -0.623976486551 : None : False : False : Reals
- 17.72048 : None : 0.655441416139 : None : False : False : Reals
- 18.0 : None : -0.318472771289 : None : False : False : Reals
- 18.114208 : None : -0.620355007629 : None : False : False : Reals
- 18.553686 : None : 0.682800758587 : None : False : False : Reals
- 19.167181 : None : -0.447135799476 : None : False : False : Reals
- 19.72048 : None : -0.0584028363221 : None : False : False : Reals
- 20.0 : None : -0.709688679945 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.114208 : None : -0.058405257893819534 : None : False : False : Reals
+ 0.553686 : None : -0.3390329708133848 : None : False : False : Reals
+ 1.167181 : None : -1.3085813728049587 : None : False : False : Reals
+ 1.72048 : None : -3.3195347654783536 : None : False : False : Reals
+ 2.0 : None : -3.971406075558495 : None : False : False : Reals
+ 2.114208 : None : -3.7941424250814775 : None : False : False : Reals
+ 2.553686 : None : -1.8741598932456631 : None : False : False : Reals
+ 3.167181 : None : 1.1505436023920077 : None : False : False : Reals
+ 3.72048 : None : 3.154940123020708 : None : False : False : Reals
+ 4.0 : None : 2.9590499785149884 : None : False : False : Reals
+ 4.114208 : None : 2.563172017271247 : None : False : False : Reals
+ 4.553686 : None : 0.4379938382509232 : None : False : False : Reals
+ 5.167181 : None : -2.2481454193787926 : None : False : False : Reals
+ 5.72048 : None : -2.1712637679471816 : None : False : False : Reals
+ 6.0 : None : -1.0346074346163499 : None : False : False : Reals
+ 6.114208 : None : -0.5112101012972221 : None : False : False : Reals
+ 6.553686 : None : 1.3774018550511653 : None : False : False : Reals
+ 7.167181 : None : 0.8893326009446438 : None : False : False : Reals
+ 7.72048 : None : 0.27918979287827206 : None : False : False : Reals
+ 8.0 : None : 1.2526498360902396 : None : False : False : Reals
+ 8.114208 : None : 0.36003953242394293 : None : False : False : Reals
+ 8.553686 : None : -0.3765013696399606 : None : False : False : Reals
+ 9.167181 : None : -1.0819483143725557 : None : False : False : Reals
+ 9.72048 : None : 1.4726386626855357 : None : False : False : Reals
+ 10.0 : None : -0.6173343026282454 : None : False : False : Reals
+ 10.114208 : None : -1.4283722939263455 : None : False : False : Reals
+ 10.553686 : None : 1.161225453894422 : None : False : False : Reals
+ 11.167181 : None : -0.7099021424761176 : None : False : False : Reals
+ 11.72048 : None : -0.1430332805861001 : None : False : False : Reals
+ 12.0 : None : -0.8340400877789892 : None : False : False : Reals
+ 12.114208 : None : -0.26476466376602426 : None : False : False : Reals
+ 12.553686 : None : 0.329045088402266 : None : False : False : Reals
+ 13.167181 : None : 0.6932574959499878 : None : False : False : Reals
+ 13.72048 : None : -0.9122428433839129 : None : False : False : Reals
+ 14.0 : None : 0.33275753669359476 : None : False : False : Reals
+ 14.114208 : None : 0.8199316342299731 : None : False : False : Reals
+ 14.553686 : None : -0.8190927141775143 : None : False : False : Reals
+ 15.167181 : None : 0.5209317682758074 : None : False : False : Reals
+ 15.72048 : None : 0.09094539341023089 : None : False : False : Reals
+ 16.0 : None : 0.6475478569127426 : None : False : False : Reals
+ 16.114208 : None : 0.15923157062775206 : None : False : False : Reals
+ 16.553686 : None : -0.23067532500173027 : None : False : False : Reals
+ 17.167181 : None : -0.623976486550823 : None : False : False : Reals
+ 17.72048 : None : 0.6554414161388513 : None : False : False : Reals
+ 18.0 : None : -0.31847277128926654 : None : False : False : Reals
+ 18.114208 : None : -0.6203550076289002 : None : False : False : Reals
+ 18.553686 : None : 0.6828007585871902 : None : False : False : Reals
+ 19.167181 : None : -0.44713579947646104 : None : False : False : Reals
+ 19.72048 : None : -0.05840283632215004 : None : False : False : Reals
+ 20 : None : -0.7096886799454517 : None : False : False : Reals
theta : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 3.04 : None : False : False : Reals
- 0.114208 : None : 3.03414968935 : None : False : False : Reals
- 0.553686 : None : 2.9527910798 : None : False : False : Reals
- 1.167181 : None : 2.50353492264 : None : False : False : Reals
- 1.72048 : None : 1.25038734969 : None : False : False : Reals
- 2.0 : None : 0.214977757715 : None : False : False : Reals
- 2.114208 : None : -0.224858250303 : None : False : False : Reals
- 2.553686 : None : -1.52697150354 : None : False : False : Reals
- 3.167181 : None : -1.7242388562 : None : False : False : Reals
- 3.72048 : None : -0.449765913555 : None : False : False : Reals
- 4.0 : None : 0.436416451536 : None : False : False : Reals
- 4.114208 : None : 0.745364697702 : None : False : False : Reals
- 4.553686 : None : 1.41918820376 : None : False : False : Reals
- 5.167181 : None : 0.804036306996 : None : False : False : Reals
- 5.72048 : None : -0.582363112315 : None : False : False : Reals
- 6.0 : None : -1.04032085734 : None : False : False : Reals
- 6.114208 : None : -1.13000769903 : None : False : False : Reals
- 6.553686 : None : -0.918025239162 : None : False : False : Reals
- 7.167181 : None : 0.20387757648 : None : False : False : Reals
- 7.72048 : None : -0.24868114103 : None : False : False : Reals
- 8.0 : None : 0.126051094754 : None : False : False : Reals
- 8.114208 : None : 0.241463566602 : None : False : False : Reals
- 8.553686 : None : -0.202793188079 : None : False : False : Reals
- 9.167181 : None : 0.117930402934 : None : False : False : Reals
- 9.72048 : None : 0.0203203438467 : None : False : False : Reals
- 10.0 : None : 0.145997082822 : None : False : False : Reals
- 10.114208 : None : 0.0525273120298 : None : False : False : Reals
- 10.553686 : None : -0.0560898179522 : None : False : False : Reals
- 11.167181 : None : -0.118326206618 : None : False : False : Reals
- 11.72048 : None : 0.176485174765 : None : False : False : Reals
- 12.0 : None : -0.0601043180384 : None : False : False : Reals
- 12.114208 : None : -0.161024576532 : None : False : False : Reals
- 12.553686 : None : 0.135994004702 : None : False : False : Reals
- 13.167181 : None : -0.0872828635129 : None : False : False : Reals
- 13.72048 : None : -0.0146516681089 : None : False : False : Reals
- 14.0 : None : -0.0985083880672 : None : False : False : Reals
- 14.114208 : None : -0.0280094635414 : None : False : False : Reals
- 14.553686 : None : 0.0397261585264 : None : False : False : Reals
- 15.167181 : None : 0.084135158928 : None : False : False : Reals
- 15.72048 : None : -0.101500735908 : None : False : False : Reals
- 16.0 : None : 0.0418304000833 : None : False : False : Reals
- 16.114208 : None : 0.0955793974675 : None : False : False : Reals
- 16.553686 : None : -0.106759456842 : None : False : False : Reals
- 17.167181 : None : 0.0727930738426 : None : False : False : Reals
- 17.72048 : None : 0.0109256078817 : None : False : False : Reals
- 18.0 : None : 0.0961324284498 : None : False : False : Reals
- 18.114208 : None : 0.0223686975544 : None : False : False : Reals
- 18.553686 : None : -0.0279649824344 : None : False : False : Reals
- 19.167181 : None : -0.0807160101341 : None : False : False : Reals
- 19.72048 : None : 0.0866781225585 : None : False : False : Reals
- 20.0 : None : -0.0505391218538 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 3.04 : None : False : False : Reals
+ 0.114208 : None : 3.034149689345565 : None : False : False : Reals
+ 0.553686 : None : 2.9527910797992223 : None : False : False : Reals
+ 1.167181 : None : 2.503534922642672 : None : False : False : Reals
+ 1.72048 : None : 1.2503873496946145 : None : False : False : Reals
+ 2.0 : None : 0.2149777577146028 : None : False : False : Reals
+ 2.114208 : None : -0.22485825030310633 : None : False : False : Reals
+ 2.553686 : None : -1.5269715035397742 : None : False : False : Reals
+ 3.167181 : None : -1.7242388562041047 : None : False : False : Reals
+ 3.72048 : None : -0.4497659135545762 : None : False : False : Reals
+ 4.0 : None : 0.43641645153623326 : None : False : False : Reals
+ 4.114208 : None : 0.7453646977022939 : None : False : False : Reals
+ 4.553686 : None : 1.4191882037645018 : None : False : False : Reals
+ 5.167181 : None : 0.8040363069964814 : None : False : False : Reals
+ 5.72048 : None : -0.5823631123145809 : None : False : False : Reals
+ 6.0 : None : -1.0403208573355598 : None : False : False : Reals
+ 6.114208 : None : -1.1300076990343362 : None : False : False : Reals
+ 6.553686 : None : -0.9180252391621243 : None : False : False : Reals
+ 7.167181 : None : 0.2038775764796361 : None : False : False : Reals
+ 7.72048 : None : -0.2486811410298265 : None : False : False : Reals
+ 8.0 : None : 0.12605109475399842 : None : False : False : Reals
+ 8.114208 : None : 0.24146356660202348 : None : False : False : Reals
+ 8.553686 : None : -0.20279318807903304 : None : False : False : Reals
+ 9.167181 : None : 0.11793040293354336 : None : False : False : Reals
+ 9.72048 : None : 0.02032034384667457 : None : False : False : Reals
+ 10.0 : None : 0.14599708282230592 : None : False : False : Reals
+ 10.114208 : None : 0.052527312029783904 : None : False : False : Reals
+ 10.553686 : None : -0.05608981795218815 : None : False : False : Reals
+ 11.167181 : None : -0.11832620661813081 : None : False : False : Reals
+ 11.72048 : None : 0.17648517476460931 : None : False : False : Reals
+ 12.0 : None : -0.0601043180383843 : None : False : False : Reals
+ 12.114208 : None : -0.16102457653244065 : None : False : False : Reals
+ 12.553686 : None : 0.13599400470178863 : None : False : False : Reals
+ 13.167181 : None : -0.08728286351287967 : None : False : False : Reals
+ 13.72048 : None : -0.014651668108868976 : None : False : False : Reals
+ 14.0 : None : -0.09850838806721349 : None : False : False : Reals
+ 14.114208 : None : -0.028009463541409152 : None : False : False : Reals
+ 14.553686 : None : 0.0397261585263506 : None : False : False : Reals
+ 15.167181 : None : 0.08413515892803465 : None : False : False : Reals
+ 15.72048 : None : -0.10150073590817216 : None : False : False : Reals
+ 16.0 : None : 0.041830400083312666 : None : False : False : Reals
+ 16.114208 : None : 0.09557939746750668 : None : False : False : Reals
+ 16.553686 : None : -0.10675945684210272 : None : False : False : Reals
+ 17.167181 : None : 0.07279307384256278 : None : False : False : Reals
+ 17.72048 : None : 0.010925607881672905 : None : False : False : Reals
+ 18.0 : None : 0.09613242844978831 : None : False : False : Reals
+ 18.114208 : None : 0.022368697554386544 : None : False : False : Reals
+ 18.553686 : None : -0.027964982434405053 : None : False : False : Reals
+ 19.167181 : None : -0.08071601013411439 : None : False : False : Reals
+ 19.72048 : None : 0.0866781225585102 : None : False : False : Reals
+ 20 : None : -0.05053912185375176 : None : False : False : Reals
4 Constraint Declarations
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : domegadt[0.0] - (-0.25*omega[0.0] - 5.0*sin(theta[0.0])) : 0.0 : True
+ 0 : 0.0 : domegadt[0] - (-0.25*omega[0] - 5.0*sin(theta[0])) : 0.0 : True
0.114208 : 0.0 : domegadt[0.114208] - (-0.25*omega[0.114208] - 5*sin(theta[0.114208])) : 0.0 : True
0.553686 : 0.0 : domegadt[0.553686] - (-0.25*omega[0.553686] - 5*sin(theta[0.553686])) : 0.0 : True
1.167181 : 0.0 : domegadt[1.167181] - (-0.25*omega[1.167181] - 5*sin(theta[1.167181])) : 0.0 : True
@@ -275,10 +280,10 @@
18.553686 : 0.0 : domegadt[18.553686] - (-0.025*omega[18.553686] - 50*sin(theta[18.553686])) : 0.0 : True
19.167181 : 0.0 : domegadt[19.167181] - (-0.025*omega[19.167181] - 50*sin(theta[19.167181])) : 0.0 : True
19.72048 : 0.0 : domegadt[19.72048] - (-0.025*omega[19.72048] - 50*sin(theta[19.72048])) : 0.0 : True
- 20.0 : 0.0 : domegadt[20.0] - (-0.25*omega[20.0] - 5.0*sin(theta[20.0])) : 0.0 : True
+ 20 : 0.0 : domegadt[20] - (-0.25*omega[20] - 5.0*sin(theta[20])) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dthetadt[0.0] - omega[0.0] : 0.0 : True
+ 0 : 0.0 : dthetadt[0] - omega[0] : 0.0 : True
0.114208 : 0.0 : dthetadt[0.114208] - omega[0.114208] : 0.0 : True
0.553686 : 0.0 : dthetadt[0.553686] - omega[0.553686] : 0.0 : True
1.167181 : 0.0 : dthetadt[1.167181] - omega[1.167181] : 0.0 : True
@@ -328,14 +333,14 @@
18.553686 : 0.0 : dthetadt[18.553686] - omega[18.553686] : 0.0 : True
19.167181 : 0.0 : dthetadt[19.167181] - omega[19.167181] : 0.0 : True
19.72048 : 0.0 : dthetadt[19.72048] - omega[19.72048] : 0.0 : True
- 20.0 : 0.0 : dthetadt[20.0] - omega[20.0] : 0.0 : True
+ 20 : 0.0 : dthetadt[20] - omega[20] : 0.0 : True
domegadt_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.114208 : 0.0 : domegadt[0.114208] - (-5.5193396206*omega[0.0] + 4.37796198897*omega[0.114208] + 1.44597130769*omega[0.553686] - 0.4375931981*omega[1.167181] + 0.19985260397*omega[1.72048] - 0.0668530819246*omega[2.0]) : 0.0 : True
- 0.553686 : 0.0 : domegadt[0.553686] - (1.79153426125*omega[0.0] - 3.58069036007*omega[0.114208] + 0.903038862042*omega[0.553686] + 1.18189858803*omega[1.167181] - 0.432950390142*omega[1.72048] + 0.137169038888*omega[2.0]) : 0.0 : True
- 1.167181 : 0.0 : domegadt[1.167181] - (-1.17208577895*omega[0.0] + 2.06108262312*omega[0.114208] - 2.24800856291*omega[0.553686] + 0.428382622699*omega[1.167181] + 1.25916047461*omega[1.72048] - 0.328531378567*omega[2.0]) : 0.0 : True
- 1.72048 : 0.0 : domegadt[1.72048] - (1.1413177501*omega[0.0] - 1.93933160986*omega[0.114208] + 1.69657595903*omega[0.553686] - 2.5941704532*omega[1.167181] + 0.29061652629*omega[1.72048] + 1.40499182764*omega[2.0]) : 0.0 : True
- 2.0 : 0.0 : domegadt[2.0] - (-2.5*omega[0.0] + 4.2062121118*omega[0.114208] - 3.48512805833*omega[0.553686] + 4.38855710208*omega[1.167181] - 9.10964115554*omega[1.72048] + 6.5*omega[2.0]) : 0.0 : True
+ 0.114208 : 0.0 : domegadt[0.114208] - (-5.5193396206*omega[0] + 4.37796198897*omega[0.114208] + 1.44597130769*omega[0.553686] - 0.4375931981*omega[1.167181] + 0.19985260397*omega[1.72048] - 0.0668530819246*omega[2.0]) : 0.0 : True
+ 0.553686 : 0.0 : domegadt[0.553686] - (1.79153426125*omega[0] - 3.58069036007*omega[0.114208] + 0.903038862042*omega[0.553686] + 1.18189858803*omega[1.167181] - 0.432950390142*omega[1.72048] + 0.137169038888*omega[2.0]) : 0.0 : True
+ 1.167181 : 0.0 : domegadt[1.167181] - (-1.17208577895*omega[0] + 2.06108262312*omega[0.114208] - 2.24800856291*omega[0.553686] + 0.428382622699*omega[1.167181] + 1.25916047461*omega[1.72048] - 0.328531378567*omega[2.0]) : 0.0 : True
+ 1.72048 : 0.0 : domegadt[1.72048] - (1.1413177501*omega[0] - 1.93933160986*omega[0.114208] + 1.69657595903*omega[0.553686] - 2.5941704532*omega[1.167181] + 0.29061652629*omega[1.72048] + 1.40499182764*omega[2.0]) : 0.0 : True
+ 2.0 : 0.0 : domegadt[2.0] - (-2.5*omega[0] + 4.2062121118*omega[0.114208] - 3.48512805833*omega[0.553686] + 4.38855710208*omega[1.167181] - 9.10964115554*omega[1.72048] + 6.5*omega[2.0]) : 0.0 : True
2.114208 : 0.0 : domegadt[2.114208] - (-5.5193396206*omega[2.0] + 4.37796198897*omega[2.114208] + 1.44597130769*omega[2.553686] - 0.4375931981*omega[3.167181] + 0.19985260397*omega[3.72048] - 0.0668530819246*omega[4.0]) : 0.0 : True
2.553686 : 0.0 : domegadt[2.553686] - (1.79153426125*omega[2.0] - 3.58069036007*omega[2.114208] + 0.903038862042*omega[2.553686] + 1.18189858803*omega[3.167181] - 0.432950390142*omega[3.72048] + 0.137169038888*omega[4.0]) : 0.0 : True
3.167181 : 0.0 : domegadt[3.167181] - (-1.17208577895*omega[2.0] + 2.06108262312*omega[2.114208] - 2.24800856291*omega[2.553686] + 0.428382622699*omega[3.167181] + 1.25916047461*omega[3.72048] - 0.328531378567*omega[4.0]) : 0.0 : True
@@ -376,18 +381,18 @@
17.167181 : 0.0 : domegadt[17.167181] - (-1.17208577895*omega[16.0] + 2.06108262312*omega[16.114208] - 2.24800856291*omega[16.553686] + 0.428382622699*omega[17.167181] + 1.25916047461*omega[17.72048] - 0.328531378567*omega[18.0]) : 0.0 : True
17.72048 : 0.0 : domegadt[17.72048] - (1.1413177501*omega[16.0] - 1.93933160986*omega[16.114208] + 1.69657595903*omega[16.553686] - 2.5941704532*omega[17.167181] + 0.29061652629*omega[17.72048] + 1.40499182764*omega[18.0]) : 0.0 : True
18.0 : 0.0 : domegadt[18.0] - (-2.5*omega[16.0] + 4.2062121118*omega[16.114208] - 3.48512805833*omega[16.553686] + 4.38855710208*omega[17.167181] - 9.10964115554*omega[17.72048] + 6.5*omega[18.0]) : 0.0 : True
- 18.114208 : 0.0 : domegadt[18.114208] - (-5.5193396206*omega[18.0] + 4.37796198897*omega[18.114208] + 1.44597130769*omega[18.553686] - 0.4375931981*omega[19.167181] + 0.19985260397*omega[19.72048] - 0.0668530819246*omega[20.0]) : 0.0 : True
- 18.553686 : 0.0 : domegadt[18.553686] - (1.79153426125*omega[18.0] - 3.58069036007*omega[18.114208] + 0.903038862042*omega[18.553686] + 1.18189858803*omega[19.167181] - 0.432950390142*omega[19.72048] + 0.137169038888*omega[20.0]) : 0.0 : True
- 19.167181 : 0.0 : domegadt[19.167181] - (-1.17208577895*omega[18.0] + 2.06108262312*omega[18.114208] - 2.24800856291*omega[18.553686] + 0.428382622699*omega[19.167181] + 1.25916047461*omega[19.72048] - 0.328531378567*omega[20.0]) : 0.0 : True
- 19.72048 : 0.0 : domegadt[19.72048] - (1.1413177501*omega[18.0] - 1.93933160986*omega[18.114208] + 1.69657595903*omega[18.553686] - 2.5941704532*omega[19.167181] + 0.29061652629*omega[19.72048] + 1.40499182764*omega[20.0]) : 0.0 : True
- 20.0 : 0.0 : domegadt[20.0] - (-2.5*omega[18.0] + 4.2062121118*omega[18.114208] - 3.48512805833*omega[18.553686] + 4.38855710208*omega[19.167181] - 9.10964115554*omega[19.72048] + 6.5*omega[20.0]) : 0.0 : True
+ 18.114208 : 0.0 : domegadt[18.114208] - (-5.5193396206*omega[18.0] + 4.37796198897*omega[18.114208] + 1.44597130769*omega[18.553686] - 0.4375931981*omega[19.167181] + 0.19985260397*omega[19.72048] - 0.0668530819246*omega[20]) : 0.0 : True
+ 18.553686 : 0.0 : domegadt[18.553686] - (1.79153426125*omega[18.0] - 3.58069036007*omega[18.114208] + 0.903038862042*omega[18.553686] + 1.18189858803*omega[19.167181] - 0.432950390142*omega[19.72048] + 0.137169038888*omega[20]) : 0.0 : True
+ 19.167181 : 0.0 : domegadt[19.167181] - (-1.17208577895*omega[18.0] + 2.06108262312*omega[18.114208] - 2.24800856291*omega[18.553686] + 0.428382622699*omega[19.167181] + 1.25916047461*omega[19.72048] - 0.328531378567*omega[20]) : 0.0 : True
+ 19.72048 : 0.0 : domegadt[19.72048] - (1.1413177501*omega[18.0] - 1.93933160986*omega[18.114208] + 1.69657595903*omega[18.553686] - 2.5941704532*omega[19.167181] + 0.29061652629*omega[19.72048] + 1.40499182764*omega[20]) : 0.0 : True
+ 20 : 0.0 : domegadt[20] - (-2.5*omega[18.0] + 4.2062121118*omega[18.114208] - 3.48512805833*omega[18.553686] + 4.38855710208*omega[19.167181] - 9.10964115554*omega[19.72048] + 6.5*omega[20]) : 0.0 : True
dthetadt_disc_eq : Size=50, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.114208 : 0.0 : dthetadt[0.114208] - (-5.5193396206*theta[0.0] + 4.37796198897*theta[0.114208] + 1.44597130769*theta[0.553686] - 0.4375931981*theta[1.167181] + 0.19985260397*theta[1.72048] - 0.0668530819246*theta[2.0]) : 0.0 : True
- 0.553686 : 0.0 : dthetadt[0.553686] - (1.79153426125*theta[0.0] - 3.58069036007*theta[0.114208] + 0.903038862042*theta[0.553686] + 1.18189858803*theta[1.167181] - 0.432950390142*theta[1.72048] + 0.137169038888*theta[2.0]) : 0.0 : True
- 1.167181 : 0.0 : dthetadt[1.167181] - (-1.17208577895*theta[0.0] + 2.06108262312*theta[0.114208] - 2.24800856291*theta[0.553686] + 0.428382622699*theta[1.167181] + 1.25916047461*theta[1.72048] - 0.328531378567*theta[2.0]) : 0.0 : True
- 1.72048 : 0.0 : dthetadt[1.72048] - (1.1413177501*theta[0.0] - 1.93933160986*theta[0.114208] + 1.69657595903*theta[0.553686] - 2.5941704532*theta[1.167181] + 0.29061652629*theta[1.72048] + 1.40499182764*theta[2.0]) : 0.0 : True
- 2.0 : 0.0 : dthetadt[2.0] - (-2.5*theta[0.0] + 4.2062121118*theta[0.114208] - 3.48512805833*theta[0.553686] + 4.38855710208*theta[1.167181] - 9.10964115554*theta[1.72048] + 6.5*theta[2.0]) : 0.0 : True
+ 0.114208 : 0.0 : dthetadt[0.114208] - (-5.5193396206*theta[0] + 4.37796198897*theta[0.114208] + 1.44597130769*theta[0.553686] - 0.4375931981*theta[1.167181] + 0.19985260397*theta[1.72048] - 0.0668530819246*theta[2.0]) : 0.0 : True
+ 0.553686 : 0.0 : dthetadt[0.553686] - (1.79153426125*theta[0] - 3.58069036007*theta[0.114208] + 0.903038862042*theta[0.553686] + 1.18189858803*theta[1.167181] - 0.432950390142*theta[1.72048] + 0.137169038888*theta[2.0]) : 0.0 : True
+ 1.167181 : 0.0 : dthetadt[1.167181] - (-1.17208577895*theta[0] + 2.06108262312*theta[0.114208] - 2.24800856291*theta[0.553686] + 0.428382622699*theta[1.167181] + 1.25916047461*theta[1.72048] - 0.328531378567*theta[2.0]) : 0.0 : True
+ 1.72048 : 0.0 : dthetadt[1.72048] - (1.1413177501*theta[0] - 1.93933160986*theta[0.114208] + 1.69657595903*theta[0.553686] - 2.5941704532*theta[1.167181] + 0.29061652629*theta[1.72048] + 1.40499182764*theta[2.0]) : 0.0 : True
+ 2.0 : 0.0 : dthetadt[2.0] - (-2.5*theta[0] + 4.2062121118*theta[0.114208] - 3.48512805833*theta[0.553686] + 4.38855710208*theta[1.167181] - 9.10964115554*theta[1.72048] + 6.5*theta[2.0]) : 0.0 : True
2.114208 : 0.0 : dthetadt[2.114208] - (-5.5193396206*theta[2.0] + 4.37796198897*theta[2.114208] + 1.44597130769*theta[2.553686] - 0.4375931981*theta[3.167181] + 0.19985260397*theta[3.72048] - 0.0668530819246*theta[4.0]) : 0.0 : True
2.553686 : 0.0 : dthetadt[2.553686] - (1.79153426125*theta[2.0] - 3.58069036007*theta[2.114208] + 0.903038862042*theta[2.553686] + 1.18189858803*theta[3.167181] - 0.432950390142*theta[3.72048] + 0.137169038888*theta[4.0]) : 0.0 : True
3.167181 : 0.0 : dthetadt[3.167181] - (-1.17208577895*theta[2.0] + 2.06108262312*theta[2.114208] - 2.24800856291*theta[2.553686] + 0.428382622699*theta[3.167181] + 1.25916047461*theta[3.72048] - 0.328531378567*theta[4.0]) : 0.0 : True
@@ -428,15 +433,16 @@
17.167181 : 0.0 : dthetadt[17.167181] - (-1.17208577895*theta[16.0] + 2.06108262312*theta[16.114208] - 2.24800856291*theta[16.553686] + 0.428382622699*theta[17.167181] + 1.25916047461*theta[17.72048] - 0.328531378567*theta[18.0]) : 0.0 : True
17.72048 : 0.0 : dthetadt[17.72048] - (1.1413177501*theta[16.0] - 1.93933160986*theta[16.114208] + 1.69657595903*theta[16.553686] - 2.5941704532*theta[17.167181] + 0.29061652629*theta[17.72048] + 1.40499182764*theta[18.0]) : 0.0 : True
18.0 : 0.0 : dthetadt[18.0] - (-2.5*theta[16.0] + 4.2062121118*theta[16.114208] - 3.48512805833*theta[16.553686] + 4.38855710208*theta[17.167181] - 9.10964115554*theta[17.72048] + 6.5*theta[18.0]) : 0.0 : True
- 18.114208 : 0.0 : dthetadt[18.114208] - (-5.5193396206*theta[18.0] + 4.37796198897*theta[18.114208] + 1.44597130769*theta[18.553686] - 0.4375931981*theta[19.167181] + 0.19985260397*theta[19.72048] - 0.0668530819246*theta[20.0]) : 0.0 : True
- 18.553686 : 0.0 : dthetadt[18.553686] - (1.79153426125*theta[18.0] - 3.58069036007*theta[18.114208] + 0.903038862042*theta[18.553686] + 1.18189858803*theta[19.167181] - 0.432950390142*theta[19.72048] + 0.137169038888*theta[20.0]) : 0.0 : True
- 19.167181 : 0.0 : dthetadt[19.167181] - (-1.17208577895*theta[18.0] + 2.06108262312*theta[18.114208] - 2.24800856291*theta[18.553686] + 0.428382622699*theta[19.167181] + 1.25916047461*theta[19.72048] - 0.328531378567*theta[20.0]) : 0.0 : True
- 19.72048 : 0.0 : dthetadt[19.72048] - (1.1413177501*theta[18.0] - 1.93933160986*theta[18.114208] + 1.69657595903*theta[18.553686] - 2.5941704532*theta[19.167181] + 0.29061652629*theta[19.72048] + 1.40499182764*theta[20.0]) : 0.0 : True
- 20.0 : 0.0 : dthetadt[20.0] - (-2.5*theta[18.0] + 4.2062121118*theta[18.114208] - 3.48512805833*theta[18.553686] + 4.38855710208*theta[19.167181] - 9.10964115554*theta[19.72048] + 6.5*theta[20.0]) : 0.0 : True
+ 18.114208 : 0.0 : dthetadt[18.114208] - (-5.5193396206*theta[18.0] + 4.37796198897*theta[18.114208] + 1.44597130769*theta[18.553686] - 0.4375931981*theta[19.167181] + 0.19985260397*theta[19.72048] - 0.0668530819246*theta[20]) : 0.0 : True
+ 18.553686 : 0.0 : dthetadt[18.553686] - (1.79153426125*theta[18.0] - 3.58069036007*theta[18.114208] + 0.903038862042*theta[18.553686] + 1.18189858803*theta[19.167181] - 0.432950390142*theta[19.72048] + 0.137169038888*theta[20]) : 0.0 : True
+ 19.167181 : 0.0 : dthetadt[19.167181] - (-1.17208577895*theta[18.0] + 2.06108262312*theta[18.114208] - 2.24800856291*theta[18.553686] + 0.428382622699*theta[19.167181] + 1.25916047461*theta[19.72048] - 0.328531378567*theta[20]) : 0.0 : True
+ 19.72048 : 0.0 : dthetadt[19.72048] - (1.1413177501*theta[18.0] - 1.93933160986*theta[18.114208] + 1.69657595903*theta[18.553686] - 2.5941704532*theta[19.167181] + 0.29061652629*theta[19.72048] + 1.40499182764*theta[20]) : 0.0 : True
+ 20 : 0.0 : dthetadt[20] - (-2.5*theta[18.0] + 4.2062121118*theta[18.114208] - 3.48512805833*theta[18.553686] + 4.38855710208*theta[19.167181] - 9.10964115554*theta[19.72048] + 6.5*theta[20]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 20.0)
- [0.0, 0.114208, 0.553686, 1.167181, 1.72048, 2.0, 2.114208, 2.553686, 3.167181, 3.72048, 4.0, 4.114208, 4.553686, 5.167181, 5.72048, 6.0, 6.114208, 6.553686, 7.167181, 7.72048, 8.0, 8.114208, 8.553686, 9.167181, 9.72048, 10.0, 10.114208, 10.553686, 11.167181, 11.72048, 12.0, 12.114208, 12.553686, 13.167181, 13.72048, 14.0, 14.114208, 14.553686, 15.167181, 15.72048, 16.0, 16.114208, 16.553686, 17.167181, 17.72048, 18.0, 18.114208, 18.553686, 19.167181, 19.72048, 20.0]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..20.0] : 51 : {0, 0.114208, 0.553686, 1.167181, 1.72048, 2.0, 2.114208, 2.553686, 3.167181, 3.72048, 4.0, 4.114208, 4.553686, 5.167181, 5.72048, 6.0, 6.114208, 6.553686, 7.167181, 7.72048, 8.0, 8.114208, 8.553686, 9.167181, 9.72048, 10.0, 10.114208, 10.553686, 11.167181, 11.72048, 12.0, 12.114208, 12.553686, 13.167181, 13.72048, 14.0, 14.114208, 14.553686, 15.167181, 15.72048, 16.0, 16.114208, 16.553686, 17.167181, 17.72048, 18.0, 18.114208, 18.553686, 19.167181, 19.72048, 20}
1 Suffix Declarations
var_input : Direction=Suffix.LOCAL, Datatype=Suffix.FLOAT
@@ -444,7 +450,7 @@
b : {0: 0.25, 15: 0.025}
c : {0: 5.0, 7: 50}
-12 Declarations: t b c omega theta domegadt dthetadt diffeq1 diffeq2 var_input domegadt_disc_eq dthetadt_disc_eq
+13 Declarations: t_domain t b c omega theta domegadt dthetadt diffeq1 diffeq2 var_input domegadt_disc_eq dthetadt_disc_eq
[[ 0.0000 3.0400]
[-0.1033 3.0297]
[-0.2223 2.9972]
diff --git a/pyomo/dae/tests/simulator_ode_multindex_example.scipy.txt b/pyomo/dae/tests/simulator_ode_multindex_example.scipy.txt
index c6f7eb2a731..e9c48a85619 100644
--- a/pyomo/dae/tests/simulator_ode_multindex_example.scipy.txt
+++ b/pyomo/dae/tests/simulator_ode_multindex_example.scipy.txt
@@ -1,17 +1,22 @@
+1 RangeSet Declarations
+ t_domain : Dimen=1, Size=Inf, Bounds=(0, 20)
+ Key : Finite : Members
+ None : False : [0.0..20.0]
+
2 Param Declarations
b : Size=51, Index=t, Domain=Any, Default=(function), Mutable=False
- Key : Value
- 0.0 : 0.25
- 20.0 : 0.25
+ Key : Value
+ 0 : 0.25
+ 20 : 0.25
c : Size=51, Index=t, Domain=Any, Default=(function), Mutable=False
- Key : Value
- 0.0 : 5.0
- 20.0 : 5.0
+ Key : Value
+ 0 : 5.0
+ 20 : 5.0
4 Var Declarations
domegadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.114208 : None : None : None : False : True : Reals
0.553686 : None : None : None : False : True : Reals
1.167181 : None : None : None : False : True : Reals
@@ -61,10 +66,10 @@
18.553686 : None : None : None : False : True : Reals
19.167181 : None : None : None : False : True : Reals
19.72048 : None : None : None : False : True : Reals
- 20.0 : None : None : None : False : True : Reals
+ 20 : None : None : None : False : True : Reals
dthetadt : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : None : None : False : True : Reals
+ 0 : None : None : None : False : True : Reals
0.114208 : None : None : None : False : True : Reals
0.553686 : None : None : None : False : True : Reals
1.167181 : None : None : None : False : True : Reals
@@ -114,118 +119,118 @@
18.553686 : None : None : None : False : True : Reals
19.167181 : None : None : None : False : True : Reals
19.72048 : None : None : None : False : True : Reals
- 20.0 : None : None : None : False : True : Reals
+ 20 : None : None : None : False : True : Reals
omega : Size=51, Index=t
- Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 0.0 : None : False : False : Reals
- 0.114208 : None : -0.05840518728529297 : None : False : False : Reals
- 0.553686 : None : -0.33903113606858515 : None : False : False : Reals
- 1.167181 : None : -1.3085688609164685 : None : False : False : Reals
- 1.72048 : None : -3.3195203705189478 : None : False : False : Reals
- 2.0 : None : -3.971410941890654 : None : False : False : Reals
- 2.114208 : None : -3.794155763934756 : None : False : False : Reals
- 2.553686 : None : -1.8741811376093058 : None : False : False : Reals
- 3.167181 : None : 1.1505292588051295 : None : False : False : Reals
- 3.72048 : None : 3.154946556040063 : None : False : False : Reals
- 4.0 : None : 2.959035861817344 : None : False : False : Reals
- 4.114208 : None : 2.5631530289304605 : None : False : False : Reals
- 4.553686 : None : 0.43797341860473205 : None : False : False : Reals
- 5.167181 : None : -2.2481549877719074 : None : False : False : Reals
- 5.72048 : None : -2.1712250550780148 : None : False : False : Reals
- 6.0 : None : -1.0345638202207303 : None : False : False : Reals
- 6.114208 : None : -0.5111683195844986 : None : False : False : Reals
- 6.553686 : None : 1.377426954760057 : None : False : False : Reals
- 7.167181 : None : 0.8825691591707236 : None : False : False : Reals
- 7.72048 : None : 0.27562060781474523 : None : False : False : Reals
- 8.0 : None : 1.2415085602797453 : None : False : False : Reals
- 8.114208 : None : 0.35730521416534533 : None : False : False : Reals
- 8.553686 : None : -0.37311945747578856 : None : False : False : Reals
- 9.167181 : None : -1.0722771136763014 : None : False : False : Reals
- 9.72048 : None : 1.4590559399332974 : None : False : False : Reals
- 10.0 : None : -0.6123414908986775 : None : False : False : Reals
- 10.114208 : None : -1.4155461020839633 : None : False : False : Reals
- 10.553686 : None : 1.1509214227179199 : None : False : False : Reals
- 11.167181 : None : -0.7024778138324768 : None : False : False : Reals
- 11.72048 : None : -0.14325678317959728 : None : False : False : Reals
- 12.0 : None : -0.8258030249397513 : None : False : False : Reals
- 12.114208 : None : -0.2608206367935919 : None : False : False : Reals
- 12.553686 : None : 0.32465103254108085 : None : False : False : Reals
- 13.167181 : None : 0.6878250767369194 : None : False : False : Reals
- 13.72048 : None : -0.9036518232694581 : None : False : False : Reals
- 14.0 : None : 0.3308573279261381 : None : False : False : Reals
- 14.114208 : None : 0.8126956228555574 : None : False : False : Reals
- 14.553686 : None : -0.8120209365021307 : None : False : False : Reals
- 15.167181 : None : 0.5142100976785681 : None : False : False : Reals
- 15.72048 : None : 0.09083076802710022 : None : False : False : Reals
- 16.0 : None : 0.6396823348545468 : None : False : False : Reals
- 16.114208 : None : 0.1564774468707475 : None : False : False : Reals
- 16.553686 : None : -0.22695009220371032 : None : False : False : Reals
- 17.167181 : None : -0.617549500630223 : None : False : False : Reals
- 17.72048 : None : 0.6477468256651164 : None : False : False : Reals
- 18.0 : None : -0.3158907127113538 : None : False : False : Reals
- 18.114208 : None : -0.6134419278390677 : None : False : False : Reals
- 18.553686 : None : 0.6752404445081958 : None : False : False : Reals
- 19.167181 : None : -0.44095848270081756 : None : False : False : Reals
- 19.72048 : None : -0.0588549574535181 : None : False : False : Reals
- 20.0 : None : -0.7008015257685942 : None : False : False : Reals
+ Key : Lower : Value : Upper : Fixed : Stale : Domain
+ 0 : None : 0.0 : None : False : False : Reals
+ 0.114208 : None : -0.058405187285294195 : None : False : False : Reals
+ 0.553686 : None : -0.33903113606862134 : None : False : False : Reals
+ 1.167181 : None : -1.3085688609150452 : None : False : False : Reals
+ 1.72048 : None : -3.31952037051244 : None : False : False : Reals
+ 2.0 : None : -3.971410941883181 : None : False : False : Reals
+ 2.114208 : None : -3.794155763932924 : None : False : False : Reals
+ 2.553686 : None : -1.8741811376079704 : None : False : False : Reals
+ 3.167181 : None : 1.1505292588108365 : None : False : False : Reals
+ 3.72048 : None : 3.154946556035166 : None : False : False : Reals
+ 4.0 : None : 2.959035861802614 : None : False : False : Reals
+ 4.114208 : None : 2.5631530289145052 : None : False : False : Reals
+ 4.553686 : None : 0.4379734185906421 : None : False : False : Reals
+ 5.167181 : None : -2.248154987775849 : None : False : False : Reals
+ 5.72048 : None : -2.1712250550654764 : None : False : False : Reals
+ 6.0 : None : -1.0345638202810505 : None : False : False : Reals
+ 6.114208 : None : -0.5111683196448624 : None : False : False : Reals
+ 6.553686 : None : 1.3774269547518034 : None : False : False : Reals
+ 7.167181 : None : 0.8825665374859655 : None : False : False : Reals
+ 7.72048 : None : 0.27562047281634294 : None : False : False : Reals
+ 8.0 : None : 1.2415058465162314 : None : False : False : Reals
+ 8.114208 : None : 0.35730456410650024 : None : False : False : Reals
+ 8.553686 : None : -0.3731196650042301 : None : False : False : Reals
+ 9.167181 : None : -1.0722761698065337 : None : False : False : Reals
+ 9.72048 : None : 1.4590545730151265 : None : False : False : Reals
+ 10.0 : None : -0.6123397652055359 : None : False : False : Reals
+ 10.114208 : None : -1.415541985540449 : None : False : False : Reals
+ 10.553686 : None : 1.1509162752908881 : None : False : False : Reals
+ 11.167181 : None : -0.7024752330347858 : None : False : False : Reals
+ 11.72048 : None : -0.1432556050232465 : None : False : False : Reals
+ 12.0 : None : -0.8257996540267197 : None : False : False : Reals
+ 12.114208 : None : -0.2608198563731877 : None : False : False : Reals
+ 12.553686 : None : 0.32465023719004854 : None : False : False : Reals
+ 13.167181 : None : 0.687827050810741 : None : False : False : Reals
+ 13.72048 : None : -0.9036508823122054 : None : False : False : Reals
+ 14.0 : None : 0.3308577276666084 : None : False : False : Reals
+ 14.114208 : None : 0.8126951733281738 : None : False : False : Reals
+ 14.553686 : None : -0.8120194244613957 : None : False : False : Reals
+ 15.167181 : None : 0.5149700579884425 : None : False : False : Reals
+ 15.72048 : None : 0.09140755969911052 : None : False : False : Reals
+ 16.0 : None : 0.6408803696134242 : None : False : False : Reals
+ 16.114208 : None : 0.15645357590100106 : None : False : False : Reals
+ 16.553686 : None : -0.22704119234917242 : None : False : False : Reals
+ 17.167181 : None : -0.6191162053231865 : None : False : False : Reals
+ 17.72048 : None : 0.6490797810655413 : None : False : False : Reals
+ 18.0 : None : -0.3168853362944906 : None : False : False : Reals
+ 18.114208 : None : -0.6148167194961369 : None : False : False : Reals
+ 18.553686 : None : 0.6767642508328946 : None : False : False : Reals
+ 19.167181 : None : -0.4416381895821208 : None : False : False : Reals
+ 19.72048 : None : -0.05924807687055422 : None : False : False : Reals
+ 20 : None : -0.7021382565310725 : None : False : False : Reals
theta : Size=51, Index=t
Key : Lower : Value : Upper : Fixed : Stale : Domain
- 0.0 : None : 3.04 : None : False : False : Reals
+ 0 : None : 3.04 : None : False : False : Reals
0.114208 : None : 3.0341497528311976 : None : False : False : Reals
- 0.553686 : None : 2.9527913528861425 : None : False : False : Reals
- 1.167181 : None : 2.5035431097094536 : None : False : False : Reals
- 1.72048 : None : 1.250409076931434 : None : False : False : Reals
- 2.0 : None : 0.2149992820516392 : None : False : False : Reals
- 2.114208 : None : -0.22483825221628687 : None : False : False : Reals
- 2.553686 : None : -1.5269607820151825 : None : False : False : Reals
- 3.167181 : None : -1.7242386053151164 : None : False : False : Reals
- 3.72048 : None : -0.44975861576214854 : None : False : False : Reals
- 4.0 : None : 0.4364237868596348 : None : False : False : Reals
- 4.114208 : None : 0.745370313201912 : None : False : False : Reals
- 4.553686 : None : 1.4191848260304087 : None : False : False : Reals
- 5.167181 : None : 0.8040177571749516 : None : False : False : Reals
- 5.72048 : None : -0.582373877459648 : None : False : False : Reals
- 6.0 : None : -1.0403203419082923 : None : False : False : Reals
- 6.114208 : None : -1.130002131905896 : None : False : False : Reals
- 6.553686 : None : -0.9180028294238982 : None : False : False : Reals
- 7.167181 : None : 0.20185782102257516 : None : False : False : Reals
- 7.72048 : None : -0.24640554939492132 : None : False : False : Reals
- 8.0 : None : 0.12482160078413115 : None : False : False : Reals
- 8.114208 : None : 0.23921983201296348 : None : False : False : Reals
- 8.553686 : None : -0.2009164892007298 : None : False : False : Reals
- 9.167181 : None : 0.11679693171358822 : None : False : False : Reals
- 9.72048 : None : 0.020249278736801844 : None : False : False : Reals
- 10.0 : None : 0.14459231552267343 : None : False : False : Reals
- 10.114208 : None : 0.05189881142697694 : None : False : False : Reals
- 10.553686 : None : -0.055429688437884345 : None : False : False : Reals
- 11.167181 : None : -0.1173456963960226 : None : False : False : Reals
- 11.72048 : None : 0.17484066431249684 : None : False : False : Reals
- 12.0 : None : -0.05970562477162156 : None : False : False : Reals
- 12.114208 : None : -0.15959680847739643 : None : False : False : Reals
- 12.553686 : None : 0.13481157135799096 : None : False : False : Reals
- 13.167181 : None : -0.08632839719832919 : None : False : False : Reals
- 13.72048 : None : -0.014730329786527718 : None : False : False : Reals
- 14.0 : None : -0.09752308108750504 : None : False : False : Reals
- 14.114208 : None : -0.027553350797782164 : None : False : False : Reals
- 14.553686 : None : 0.039155763642574316 : None : False : False : Reals
- 15.167181 : None : 0.08325365710114234 : None : False : False : Reals
- 15.72048 : None : -0.10031207198145223 : None : False : False : Reals
- 16.0 : None : 0.04146954000357528 : None : False : False : Reals
- 16.114208 : None : 0.09450602902280374 : None : False : False : Reals
- 16.553686 : None : -0.1055728698158523 : None : False : False : Reals
- 17.167181 : None : 0.07181952046982307 : None : False : False : Reals
- 17.72048 : None : 0.01094553524698505 : None : False : False : Reals
- 18.0 : None : 0.09494833123377587 : None : False : False : Reals
- 18.114208 : None : 0.021966789396689307 : None : False : False : Reals
- 18.553686 : None : -0.027478259693785853 : None : False : False : Reals
- 19.167181 : None : -0.07989350894282768 : None : False : False : Reals
- 19.72048 : None : 0.08565791488122151 : None : False : False : Reals
- 20.0 : None : -0.05014197803019069 : None : False : False : Reals
+ 0.553686 : None : 2.952791352886469 : None : False : False : Reals
+ 1.167181 : None : 2.5035431097104848 : None : False : False : Reals
+ 1.72048 : None : 1.2504090769340763 : None : False : False : Reals
+ 2.0 : None : 0.2149992820563138 : None : False : False : Reals
+ 2.114208 : None : -0.22483825221105344 : None : False : False : Reals
+ 2.553686 : None : -1.5269607820099191 : None : False : False : Reals
+ 3.167181 : None : -1.724238605307232 : None : False : False : Reals
+ 3.72048 : None : -0.44975861575283443 : None : False : False : Reals
+ 4.0 : None : 0.4364237868660944 : None : False : False : Reals
+ 4.114208 : None : 0.7453703132064221 : None : False : False : Reals
+ 4.553686 : None : 1.4191848260280298 : None : False : False : Reals
+ 5.167181 : None : 0.8040177571655689 : None : False : False : Reals
+ 5.72048 : None : -0.5823738774640546 : None : False : False : Reals
+ 6.0 : None : -1.0403203419068496 : None : False : False : Reals
+ 6.114208 : None : -1.1300021319049922 : None : False : False : Reals
+ 6.553686 : None : -0.918002829433194 : None : False : False : Reals
+ 7.167181 : None : 0.20185730426682835 : None : False : False : Reals
+ 7.72048 : None : -0.24640490276083396 : None : False : False : Reals
+ 8.0 : None : 0.1248214092757704 : None : False : False : Reals
+ 8.114208 : None : 0.23921940998892322 : None : False : False : Reals
+ 8.553686 : None : -0.2009161571773981 : None : False : False : Reals
+ 9.167181 : None : 0.1167967464378912 : None : False : False : Reals
+ 9.72048 : None : 0.02024929741104771 : None : False : False : Reals
+ 10.0 : None : 0.14459196008941178 : None : False : False : Reals
+ 10.114208 : None : 0.05189862253299783 : None : False : False : Reals
+ 10.553686 : None : -0.05542958791147051 : None : False : False : Reals
+ 11.167181 : None : -0.11734522275641625 : None : False : False : Reals
+ 11.72048 : None : 0.17484013820917388 : None : False : False : Reals
+ 12.0 : None : -0.059705314250694294 : None : False : False : Reals
+ 12.114208 : None : -0.15959601827036457 : None : False : False : Reals
+ 12.553686 : None : 0.13481121629837955 : None : False : False : Reals
+ 13.167181 : None : -0.08632837250339669 : None : False : False : Reals
+ 13.72048 : None : -0.014730341376782419 : None : False : False : Reals
+ 14.0 : None : -0.09752293966360358 : None : False : False : Reals
+ 14.114208 : None : -0.027553231423011346 : None : False : False : Reals
+ 14.553686 : None : 0.03915567726594777 : None : False : False : Reals
+ 15.167181 : None : 0.08347115070504912 : None : False : False : Reals
+ 15.72048 : None : -0.10051816249482715 : None : False : False : Reals
+ 16.0 : None : 0.04160622675878921 : None : False : False : Reals
+ 16.114208 : None : 0.09471852438845295 : None : False : False : Reals
+ 16.553686 : None : -0.10581370440217815 : None : False : False : Reals
+ 17.167181 : None : 0.07192807816164457 : None : False : False : Reals
+ 17.72048 : None : 0.011013929454180213 : None : False : False : Reals
+ 18.0 : None : 0.09512809761403275 : None : False : False : Reals
+ 18.114208 : None : 0.02197150116316636 : None : False : False : Reals
+ 18.553686 : None : -0.027491631888481027 : None : False : False : Reals
+ 19.167181 : None : -0.0800921197110689 : None : False : False : Reals
+ 19.72048 : None : 0.08583694827746997 : None : False : False : Reals
+ 20 : None : -0.05029209908472264 : None : False : False : Reals
4 Constraint Declarations
diffeq1 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : domegadt[0.0] - (-0.25*omega[0.0] - 5.0*sin(theta[0.0])) : 0.0 : True
+ 0 : 0.0 : domegadt[0] - (-0.25*omega[0] - 5.0*sin(theta[0])) : 0.0 : True
0.114208 : 0.0 : domegadt[0.114208] - (-0.25*omega[0.114208] - 5*sin(theta[0.114208])) : 0.0 : True
0.553686 : 0.0 : domegadt[0.553686] - (-0.25*omega[0.553686] - 5*sin(theta[0.553686])) : 0.0 : True
1.167181 : 0.0 : domegadt[1.167181] - (-0.25*omega[1.167181] - 5*sin(theta[1.167181])) : 0.0 : True
@@ -275,10 +280,10 @@
18.553686 : 0.0 : domegadt[18.553686] - (-0.025*omega[18.553686] - 50*sin(theta[18.553686])) : 0.0 : True
19.167181 : 0.0 : domegadt[19.167181] - (-0.025*omega[19.167181] - 50*sin(theta[19.167181])) : 0.0 : True
19.72048 : 0.0 : domegadt[19.72048] - (-0.025*omega[19.72048] - 50*sin(theta[19.72048])) : 0.0 : True
- 20.0 : 0.0 : domegadt[20.0] - (-0.25*omega[20.0] - 5.0*sin(theta[20.0])) : 0.0 : True
+ 20 : 0.0 : domegadt[20] - (-0.25*omega[20] - 5.0*sin(theta[20])) : 0.0 : True
diffeq2 : Size=51, Index=t, Active=True
Key : Lower : Body : Upper : Active
- 0.0 : 0.0 : dthetadt[0.0] - omega[0.0] : 0.0 : True
+ 0 : 0.0 : dthetadt[0] - omega[0] : 0.0 : True
0.114208 : 0.0 : dthetadt[0.114208] - omega[0.114208] : 0.0 : True
0.553686 : 0.0 : dthetadt[0.553686] - omega[0.553686] : 0.0 : True
1.167181 : 0.0 : dthetadt[1.167181] - omega[1.167181] : 0.0 : True
@@ -328,115 +333,116 @@
18.553686 : 0.0 : dthetadt[18.553686] - omega[18.553686] : 0.0 : True
19.167181 : 0.0 : dthetadt[19.167181] - omega[19.167181] : 0.0 : True
19.72048 : 0.0 : dthetadt[19.72048] - omega[19.72048] : 0.0 : True
- 20.0 : 0.0 : dthetadt[20.0] - omega[20.0] : 0.0 : True
+ 20 : 0.0 : dthetadt[20] - omega[20] : 0.0 : True
domegadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.114208 : 0.0 : domegadt[0.114208] - (-5.519339620604476*omega[0.0] + 4.377961988969178*omega[0.114208] + 1.4459713076900629*omega[0.553686] - 0.437593198100135*omega[1.167181] + 0.19985260396998084*omega[1.72048] - 0.06685308192460761*omega[2.0]) : 0.0 : True
- 0.553686 : 0.0 : domegadt[0.553686] - (1.7915342612505238*omega[0.0] - 3.5806903600726603*omega[0.114208] + 0.9030388620417913*omega[0.553686] + 1.1818985880343118*omega[1.167181] - 0.43295039014156045*omega[1.72048] + 0.137169038887596*omega[2.0]) : 0.0 : True
- 1.167181 : 0.0 : domegadt[1.167181] - (-1.1720857789519332*omega[0.0] + 2.061082623121699*omega[0.114208] - 2.2480085629067506*omega[0.553686] + 0.4283826226986418*omega[1.167181] + 1.2591604746055074*omega[1.72048] - 0.3285313785671775*omega[2.0]) : 0.0 : True
- 1.72048 : 0.0 : domegadt[1.72048] - (1.141317750102841*omega[0.0] - 1.9393316098620392*omega[0.114208] + 1.6965759590324723*omega[0.553686] - 2.5941704532035765*omega[1.167181] + 0.2906165262903779*omega[1.72048] + 1.4049918276398599*omega[2.0]) : 0.0 : True
- 2.0 : 0.0 : domegadt[2.0] - (-2.4999999999999947*omega[0.0] + 4.206212111797173*omega[0.114208] - 3.4851280583284003*omega[0.553686] + 4.388557102075248*omega[1.167181] - 9.109641155544018*omega[1.72048] + 6.49999999999999*omega[2.0]) : 0.0 : True
- 2.114208 : 0.0 : domegadt[2.114208] - (-5.519339620604476*omega[2.0] + 4.377961988969178*omega[2.114208] + 1.4459713076900629*omega[2.553686] - 0.437593198100135*omega[3.167181] + 0.19985260396998084*omega[3.72048] - 0.06685308192460761*omega[4.0]) : 0.0 : True
- 2.553686 : 0.0 : domegadt[2.553686] - (1.7915342612505238*omega[2.0] - 3.5806903600726603*omega[2.114208] + 0.9030388620417913*omega[2.553686] + 1.1818985880343118*omega[3.167181] - 0.43295039014156045*omega[3.72048] + 0.137169038887596*omega[4.0]) : 0.0 : True
- 3.167181 : 0.0 : domegadt[3.167181] - (-1.1720857789519332*omega[2.0] + 2.061082623121699*omega[2.114208] - 2.2480085629067506*omega[2.553686] + 0.4283826226986418*omega[3.167181] + 1.2591604746055074*omega[3.72048] - 0.3285313785671775*omega[4.0]) : 0.0 : True
- 3.72048 : 0.0 : domegadt[3.72048] - (1.141317750102841*omega[2.0] - 1.9393316098620392*omega[2.114208] + 1.6965759590324723*omega[2.553686] - 2.5941704532035765*omega[3.167181] + 0.2906165262903779*omega[3.72048] + 1.4049918276398599*omega[4.0]) : 0.0 : True
- 4.0 : 0.0 : domegadt[4.0] - (-2.4999999999999947*omega[2.0] + 4.206212111797173*omega[2.114208] - 3.4851280583284003*omega[2.553686] + 4.388557102075248*omega[3.167181] - 9.109641155544018*omega[3.72048] + 6.49999999999999*omega[4.0]) : 0.0 : True
- 4.114208 : 0.0 : domegadt[4.114208] - (-5.519339620604476*omega[4.0] + 4.377961988969178*omega[4.114208] + 1.4459713076900629*omega[4.553686] - 0.437593198100135*omega[5.167181] + 0.19985260396998084*omega[5.72048] - 0.06685308192460761*omega[6.0]) : 0.0 : True
- 4.553686 : 0.0 : domegadt[4.553686] - (1.7915342612505238*omega[4.0] - 3.5806903600726603*omega[4.114208] + 0.9030388620417913*omega[4.553686] + 1.1818985880343118*omega[5.167181] - 0.43295039014156045*omega[5.72048] + 0.137169038887596*omega[6.0]) : 0.0 : True
- 5.167181 : 0.0 : domegadt[5.167181] - (-1.1720857789519332*omega[4.0] + 2.061082623121699*omega[4.114208] - 2.2480085629067506*omega[4.553686] + 0.4283826226986418*omega[5.167181] + 1.2591604746055074*omega[5.72048] - 0.3285313785671775*omega[6.0]) : 0.0 : True
- 5.72048 : 0.0 : domegadt[5.72048] - (1.141317750102841*omega[4.0] - 1.9393316098620392*omega[4.114208] + 1.6965759590324723*omega[4.553686] - 2.5941704532035765*omega[5.167181] + 0.2906165262903779*omega[5.72048] + 1.4049918276398599*omega[6.0]) : 0.0 : True
- 6.0 : 0.0 : domegadt[6.0] - (-2.4999999999999947*omega[4.0] + 4.206212111797173*omega[4.114208] - 3.4851280583284003*omega[4.553686] + 4.388557102075248*omega[5.167181] - 9.109641155544018*omega[5.72048] + 6.49999999999999*omega[6.0]) : 0.0 : True
- 6.114208 : 0.0 : domegadt[6.114208] - (-5.519339620604476*omega[6.0] + 4.377961988969178*omega[6.114208] + 1.4459713076900629*omega[6.553686] - 0.437593198100135*omega[7.167181] + 0.19985260396998084*omega[7.72048] - 0.06685308192460761*omega[8.0]) : 0.0 : True
- 6.553686 : 0.0 : domegadt[6.553686] - (1.7915342612505238*omega[6.0] - 3.5806903600726603*omega[6.114208] + 0.9030388620417913*omega[6.553686] + 1.1818985880343118*omega[7.167181] - 0.43295039014156045*omega[7.72048] + 0.137169038887596*omega[8.0]) : 0.0 : True
- 7.167181 : 0.0 : domegadt[7.167181] - (-1.1720857789519332*omega[6.0] + 2.061082623121699*omega[6.114208] - 2.2480085629067506*omega[6.553686] + 0.4283826226986418*omega[7.167181] + 1.2591604746055074*omega[7.72048] - 0.3285313785671775*omega[8.0]) : 0.0 : True
- 7.72048 : 0.0 : domegadt[7.72048] - (1.141317750102841*omega[6.0] - 1.9393316098620392*omega[6.114208] + 1.6965759590324723*omega[6.553686] - 2.5941704532035765*omega[7.167181] + 0.2906165262903779*omega[7.72048] + 1.4049918276398599*omega[8.0]) : 0.0 : True
- 8.0 : 0.0 : domegadt[8.0] - (-2.4999999999999947*omega[6.0] + 4.206212111797173*omega[6.114208] - 3.4851280583284003*omega[6.553686] + 4.388557102075248*omega[7.167181] - 9.109641155544018*omega[7.72048] + 6.49999999999999*omega[8.0]) : 0.0 : True
- 8.114208 : 0.0 : domegadt[8.114208] - (-5.519339620604476*omega[8.0] + 4.377961988969178*omega[8.114208] + 1.4459713076900629*omega[8.553686] - 0.437593198100135*omega[9.167181] + 0.19985260396998084*omega[9.72048] - 0.06685308192460761*omega[10.0]) : 0.0 : True
- 8.553686 : 0.0 : domegadt[8.553686] - (1.7915342612505238*omega[8.0] - 3.5806903600726603*omega[8.114208] + 0.9030388620417913*omega[8.553686] + 1.1818985880343118*omega[9.167181] - 0.43295039014156045*omega[9.72048] + 0.137169038887596*omega[10.0]) : 0.0 : True
- 9.167181 : 0.0 : domegadt[9.167181] - (-1.1720857789519332*omega[8.0] + 2.061082623121699*omega[8.114208] - 2.2480085629067506*omega[8.553686] + 0.4283826226986418*omega[9.167181] + 1.2591604746055074*omega[9.72048] - 0.3285313785671775*omega[10.0]) : 0.0 : True
- 9.72048 : 0.0 : domegadt[9.72048] - (1.141317750102841*omega[8.0] - 1.9393316098620392*omega[8.114208] + 1.6965759590324723*omega[8.553686] - 2.5941704532035765*omega[9.167181] + 0.2906165262903779*omega[9.72048] + 1.4049918276398599*omega[10.0]) : 0.0 : True
- 10.0 : 0.0 : domegadt[10.0] - (-2.4999999999999947*omega[8.0] + 4.206212111797173*omega[8.114208] - 3.4851280583284003*omega[8.553686] + 4.388557102075248*omega[9.167181] - 9.109641155544018*omega[9.72048] + 6.49999999999999*omega[10.0]) : 0.0 : True
- 10.114208 : 0.0 : domegadt[10.114208] - (-5.519339620604476*omega[10.0] + 4.377961988969178*omega[10.114208] + 1.4459713076900629*omega[10.553686] - 0.437593198100135*omega[11.167181] + 0.19985260396998084*omega[11.72048] - 0.06685308192460761*omega[12.0]) : 0.0 : True
- 10.553686 : 0.0 : domegadt[10.553686] - (1.7915342612505238*omega[10.0] - 3.5806903600726603*omega[10.114208] + 0.9030388620417913*omega[10.553686] + 1.1818985880343118*omega[11.167181] - 0.43295039014156045*omega[11.72048] + 0.137169038887596*omega[12.0]) : 0.0 : True
- 11.167181 : 0.0 : domegadt[11.167181] - (-1.1720857789519332*omega[10.0] + 2.061082623121699*omega[10.114208] - 2.2480085629067506*omega[10.553686] + 0.4283826226986418*omega[11.167181] + 1.2591604746055074*omega[11.72048] - 0.3285313785671775*omega[12.0]) : 0.0 : True
- 11.72048 : 0.0 : domegadt[11.72048] - (1.141317750102841*omega[10.0] - 1.9393316098620392*omega[10.114208] + 1.6965759590324723*omega[10.553686] - 2.5941704532035765*omega[11.167181] + 0.2906165262903779*omega[11.72048] + 1.4049918276398599*omega[12.0]) : 0.0 : True
- 12.0 : 0.0 : domegadt[12.0] - (-2.4999999999999947*omega[10.0] + 4.206212111797173*omega[10.114208] - 3.4851280583284003*omega[10.553686] + 4.388557102075248*omega[11.167181] - 9.109641155544018*omega[11.72048] + 6.49999999999999*omega[12.0]) : 0.0 : True
- 12.114208 : 0.0 : domegadt[12.114208] - (-5.519339620604476*omega[12.0] + 4.377961988969178*omega[12.114208] + 1.4459713076900629*omega[12.553686] - 0.437593198100135*omega[13.167181] + 0.19985260396998084*omega[13.72048] - 0.06685308192460761*omega[14.0]) : 0.0 : True
- 12.553686 : 0.0 : domegadt[12.553686] - (1.7915342612505238*omega[12.0] - 3.5806903600726603*omega[12.114208] + 0.9030388620417913*omega[12.553686] + 1.1818985880343118*omega[13.167181] - 0.43295039014156045*omega[13.72048] + 0.137169038887596*omega[14.0]) : 0.0 : True
- 13.167181 : 0.0 : domegadt[13.167181] - (-1.1720857789519332*omega[12.0] + 2.061082623121699*omega[12.114208] - 2.2480085629067506*omega[12.553686] + 0.4283826226986418*omega[13.167181] + 1.2591604746055074*omega[13.72048] - 0.3285313785671775*omega[14.0]) : 0.0 : True
- 13.72048 : 0.0 : domegadt[13.72048] - (1.141317750102841*omega[12.0] - 1.9393316098620392*omega[12.114208] + 1.6965759590324723*omega[12.553686] - 2.5941704532035765*omega[13.167181] + 0.2906165262903779*omega[13.72048] + 1.4049918276398599*omega[14.0]) : 0.0 : True
- 14.0 : 0.0 : domegadt[14.0] - (-2.4999999999999947*omega[12.0] + 4.206212111797173*omega[12.114208] - 3.4851280583284003*omega[12.553686] + 4.388557102075248*omega[13.167181] - 9.109641155544018*omega[13.72048] + 6.49999999999999*omega[14.0]) : 0.0 : True
- 14.114208 : 0.0 : domegadt[14.114208] - (-5.519339620604476*omega[14.0] + 4.377961988969178*omega[14.114208] + 1.4459713076900629*omega[14.553686] - 0.437593198100135*omega[15.167181] + 0.19985260396998084*omega[15.72048] - 0.06685308192460761*omega[16.0]) : 0.0 : True
- 14.553686 : 0.0 : domegadt[14.553686] - (1.7915342612505238*omega[14.0] - 3.5806903600726603*omega[14.114208] + 0.9030388620417913*omega[14.553686] + 1.1818985880343118*omega[15.167181] - 0.43295039014156045*omega[15.72048] + 0.137169038887596*omega[16.0]) : 0.0 : True
- 15.167181 : 0.0 : domegadt[15.167181] - (-1.1720857789519332*omega[14.0] + 2.061082623121699*omega[14.114208] - 2.2480085629067506*omega[14.553686] + 0.4283826226986418*omega[15.167181] + 1.2591604746055074*omega[15.72048] - 0.3285313785671775*omega[16.0]) : 0.0 : True
- 15.72048 : 0.0 : domegadt[15.72048] - (1.141317750102841*omega[14.0] - 1.9393316098620392*omega[14.114208] + 1.6965759590324723*omega[14.553686] - 2.5941704532035765*omega[15.167181] + 0.2906165262903779*omega[15.72048] + 1.4049918276398599*omega[16.0]) : 0.0 : True
- 16.0 : 0.0 : domegadt[16.0] - (-2.4999999999999947*omega[14.0] + 4.206212111797173*omega[14.114208] - 3.4851280583284003*omega[14.553686] + 4.388557102075248*omega[15.167181] - 9.109641155544018*omega[15.72048] + 6.49999999999999*omega[16.0]) : 0.0 : True
- 16.114208 : 0.0 : domegadt[16.114208] - (-5.519339620604476*omega[16.0] + 4.377961988969178*omega[16.114208] + 1.4459713076900629*omega[16.553686] - 0.437593198100135*omega[17.167181] + 0.19985260396998084*omega[17.72048] - 0.06685308192460761*omega[18.0]) : 0.0 : True
- 16.553686 : 0.0 : domegadt[16.553686] - (1.7915342612505238*omega[16.0] - 3.5806903600726603*omega[16.114208] + 0.9030388620417913*omega[16.553686] + 1.1818985880343118*omega[17.167181] - 0.43295039014156045*omega[17.72048] + 0.137169038887596*omega[18.0]) : 0.0 : True
- 17.167181 : 0.0 : domegadt[17.167181] - (-1.1720857789519332*omega[16.0] + 2.061082623121699*omega[16.114208] - 2.2480085629067506*omega[16.553686] + 0.4283826226986418*omega[17.167181] + 1.2591604746055074*omega[17.72048] - 0.3285313785671775*omega[18.0]) : 0.0 : True
- 17.72048 : 0.0 : domegadt[17.72048] - (1.141317750102841*omega[16.0] - 1.9393316098620392*omega[16.114208] + 1.6965759590324723*omega[16.553686] - 2.5941704532035765*omega[17.167181] + 0.2906165262903779*omega[17.72048] + 1.4049918276398599*omega[18.0]) : 0.0 : True
- 18.0 : 0.0 : domegadt[18.0] - (-2.4999999999999947*omega[16.0] + 4.206212111797173*omega[16.114208] - 3.4851280583284003*omega[16.553686] + 4.388557102075248*omega[17.167181] - 9.109641155544018*omega[17.72048] + 6.49999999999999*omega[18.0]) : 0.0 : True
- 18.114208 : 0.0 : domegadt[18.114208] - (-5.519339620604476*omega[18.0] + 4.377961988969178*omega[18.114208] + 1.4459713076900629*omega[18.553686] - 0.437593198100135*omega[19.167181] + 0.19985260396998084*omega[19.72048] - 0.06685308192460761*omega[20.0]) : 0.0 : True
- 18.553686 : 0.0 : domegadt[18.553686] - (1.7915342612505238*omega[18.0] - 3.5806903600726603*omega[18.114208] + 0.9030388620417913*omega[18.553686] + 1.1818985880343118*omega[19.167181] - 0.43295039014156045*omega[19.72048] + 0.137169038887596*omega[20.0]) : 0.0 : True
- 19.167181 : 0.0 : domegadt[19.167181] - (-1.1720857789519332*omega[18.0] + 2.061082623121699*omega[18.114208] - 2.2480085629067506*omega[18.553686] + 0.4283826226986418*omega[19.167181] + 1.2591604746055074*omega[19.72048] - 0.3285313785671775*omega[20.0]) : 0.0 : True
- 19.72048 : 0.0 : domegadt[19.72048] - (1.141317750102841*omega[18.0] - 1.9393316098620392*omega[18.114208] + 1.6965759590324723*omega[18.553686] - 2.5941704532035765*omega[19.167181] + 0.2906165262903779*omega[19.72048] + 1.4049918276398599*omega[20.0]) : 0.0 : True
- 20.0 : 0.0 : domegadt[20.0] - (-2.4999999999999947*omega[18.0] + 4.206212111797173*omega[18.114208] - 3.4851280583284003*omega[18.553686] + 4.388557102075248*omega[19.167181] - 9.109641155544018*omega[19.72048] + 6.49999999999999*omega[20.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.114208 : 0.0 : domegadt[0.114208] - (-5.5193396206*omega[0] + 4.37796198897*omega[0.114208] + 1.44597130769*omega[0.553686] - 0.4375931981*omega[1.167181] + 0.19985260397*omega[1.72048] - 0.0668530819246*omega[2.0]) : 0.0 : True
+ 0.553686 : 0.0 : domegadt[0.553686] - (1.79153426125*omega[0] - 3.58069036007*omega[0.114208] + 0.903038862042*omega[0.553686] + 1.18189858803*omega[1.167181] - 0.432950390142*omega[1.72048] + 0.137169038888*omega[2.0]) : 0.0 : True
+ 1.167181 : 0.0 : domegadt[1.167181] - (-1.17208577895*omega[0] + 2.06108262312*omega[0.114208] - 2.24800856291*omega[0.553686] + 0.428382622699*omega[1.167181] + 1.25916047461*omega[1.72048] - 0.328531378567*omega[2.0]) : 0.0 : True
+ 1.72048 : 0.0 : domegadt[1.72048] - (1.1413177501*omega[0] - 1.93933160986*omega[0.114208] + 1.69657595903*omega[0.553686] - 2.5941704532*omega[1.167181] + 0.29061652629*omega[1.72048] + 1.40499182764*omega[2.0]) : 0.0 : True
+ 2.0 : 0.0 : domegadt[2.0] - (-2.5*omega[0] + 4.2062121118*omega[0.114208] - 3.48512805833*omega[0.553686] + 4.38855710208*omega[1.167181] - 9.10964115554*omega[1.72048] + 6.5*omega[2.0]) : 0.0 : True
+ 2.114208 : 0.0 : domegadt[2.114208] - (-5.5193396206*omega[2.0] + 4.37796198897*omega[2.114208] + 1.44597130769*omega[2.553686] - 0.4375931981*omega[3.167181] + 0.19985260397*omega[3.72048] - 0.0668530819246*omega[4.0]) : 0.0 : True
+ 2.553686 : 0.0 : domegadt[2.553686] - (1.79153426125*omega[2.0] - 3.58069036007*omega[2.114208] + 0.903038862042*omega[2.553686] + 1.18189858803*omega[3.167181] - 0.432950390142*omega[3.72048] + 0.137169038888*omega[4.0]) : 0.0 : True
+ 3.167181 : 0.0 : domegadt[3.167181] - (-1.17208577895*omega[2.0] + 2.06108262312*omega[2.114208] - 2.24800856291*omega[2.553686] + 0.428382622699*omega[3.167181] + 1.25916047461*omega[3.72048] - 0.328531378567*omega[4.0]) : 0.0 : True
+ 3.72048 : 0.0 : domegadt[3.72048] - (1.1413177501*omega[2.0] - 1.93933160986*omega[2.114208] + 1.69657595903*omega[2.553686] - 2.5941704532*omega[3.167181] + 0.29061652629*omega[3.72048] + 1.40499182764*omega[4.0]) : 0.0 : True
+ 4.0 : 0.0 : domegadt[4.0] - (-2.5*omega[2.0] + 4.2062121118*omega[2.114208] - 3.48512805833*omega[2.553686] + 4.38855710208*omega[3.167181] - 9.10964115554*omega[3.72048] + 6.5*omega[4.0]) : 0.0 : True
+ 4.114208 : 0.0 : domegadt[4.114208] - (-5.5193396206*omega[4.0] + 4.37796198897*omega[4.114208] + 1.44597130769*omega[4.553686] - 0.4375931981*omega[5.167181] + 0.19985260397*omega[5.72048] - 0.0668530819246*omega[6.0]) : 0.0 : True
+ 4.553686 : 0.0 : domegadt[4.553686] - (1.79153426125*omega[4.0] - 3.58069036007*omega[4.114208] + 0.903038862042*omega[4.553686] + 1.18189858803*omega[5.167181] - 0.432950390142*omega[5.72048] + 0.137169038888*omega[6.0]) : 0.0 : True
+ 5.167181 : 0.0 : domegadt[5.167181] - (-1.17208577895*omega[4.0] + 2.06108262312*omega[4.114208] - 2.24800856291*omega[4.553686] + 0.428382622699*omega[5.167181] + 1.25916047461*omega[5.72048] - 0.328531378567*omega[6.0]) : 0.0 : True
+ 5.72048 : 0.0 : domegadt[5.72048] - (1.1413177501*omega[4.0] - 1.93933160986*omega[4.114208] + 1.69657595903*omega[4.553686] - 2.5941704532*omega[5.167181] + 0.29061652629*omega[5.72048] + 1.40499182764*omega[6.0]) : 0.0 : True
+ 6.0 : 0.0 : domegadt[6.0] - (-2.5*omega[4.0] + 4.2062121118*omega[4.114208] - 3.48512805833*omega[4.553686] + 4.38855710208*omega[5.167181] - 9.10964115554*omega[5.72048] + 6.5*omega[6.0]) : 0.0 : True
+ 6.114208 : 0.0 : domegadt[6.114208] - (-5.5193396206*omega[6.0] + 4.37796198897*omega[6.114208] + 1.44597130769*omega[6.553686] - 0.4375931981*omega[7.167181] + 0.19985260397*omega[7.72048] - 0.0668530819246*omega[8.0]) : 0.0 : True
+ 6.553686 : 0.0 : domegadt[6.553686] - (1.79153426125*omega[6.0] - 3.58069036007*omega[6.114208] + 0.903038862042*omega[6.553686] + 1.18189858803*omega[7.167181] - 0.432950390142*omega[7.72048] + 0.137169038888*omega[8.0]) : 0.0 : True
+ 7.167181 : 0.0 : domegadt[7.167181] - (-1.17208577895*omega[6.0] + 2.06108262312*omega[6.114208] - 2.24800856291*omega[6.553686] + 0.428382622699*omega[7.167181] + 1.25916047461*omega[7.72048] - 0.328531378567*omega[8.0]) : 0.0 : True
+ 7.72048 : 0.0 : domegadt[7.72048] - (1.1413177501*omega[6.0] - 1.93933160986*omega[6.114208] + 1.69657595903*omega[6.553686] - 2.5941704532*omega[7.167181] + 0.29061652629*omega[7.72048] + 1.40499182764*omega[8.0]) : 0.0 : True
+ 8.0 : 0.0 : domegadt[8.0] - (-2.5*omega[6.0] + 4.2062121118*omega[6.114208] - 3.48512805833*omega[6.553686] + 4.38855710208*omega[7.167181] - 9.10964115554*omega[7.72048] + 6.5*omega[8.0]) : 0.0 : True
+ 8.114208 : 0.0 : domegadt[8.114208] - (-5.5193396206*omega[8.0] + 4.37796198897*omega[8.114208] + 1.44597130769*omega[8.553686] - 0.4375931981*omega[9.167181] + 0.19985260397*omega[9.72048] - 0.0668530819246*omega[10.0]) : 0.0 : True
+ 8.553686 : 0.0 : domegadt[8.553686] - (1.79153426125*omega[8.0] - 3.58069036007*omega[8.114208] + 0.903038862042*omega[8.553686] + 1.18189858803*omega[9.167181] - 0.432950390142*omega[9.72048] + 0.137169038888*omega[10.0]) : 0.0 : True
+ 9.167181 : 0.0 : domegadt[9.167181] - (-1.17208577895*omega[8.0] + 2.06108262312*omega[8.114208] - 2.24800856291*omega[8.553686] + 0.428382622699*omega[9.167181] + 1.25916047461*omega[9.72048] - 0.328531378567*omega[10.0]) : 0.0 : True
+ 9.72048 : 0.0 : domegadt[9.72048] - (1.1413177501*omega[8.0] - 1.93933160986*omega[8.114208] + 1.69657595903*omega[8.553686] - 2.5941704532*omega[9.167181] + 0.29061652629*omega[9.72048] + 1.40499182764*omega[10.0]) : 0.0 : True
+ 10.0 : 0.0 : domegadt[10.0] - (-2.5*omega[8.0] + 4.2062121118*omega[8.114208] - 3.48512805833*omega[8.553686] + 4.38855710208*omega[9.167181] - 9.10964115554*omega[9.72048] + 6.5*omega[10.0]) : 0.0 : True
+ 10.114208 : 0.0 : domegadt[10.114208] - (-5.5193396206*omega[10.0] + 4.37796198897*omega[10.114208] + 1.44597130769*omega[10.553686] - 0.4375931981*omega[11.167181] + 0.19985260397*omega[11.72048] - 0.0668530819246*omega[12.0]) : 0.0 : True
+ 10.553686 : 0.0 : domegadt[10.553686] - (1.79153426125*omega[10.0] - 3.58069036007*omega[10.114208] + 0.903038862042*omega[10.553686] + 1.18189858803*omega[11.167181] - 0.432950390142*omega[11.72048] + 0.137169038888*omega[12.0]) : 0.0 : True
+ 11.167181 : 0.0 : domegadt[11.167181] - (-1.17208577895*omega[10.0] + 2.06108262312*omega[10.114208] - 2.24800856291*omega[10.553686] + 0.428382622699*omega[11.167181] + 1.25916047461*omega[11.72048] - 0.328531378567*omega[12.0]) : 0.0 : True
+ 11.72048 : 0.0 : domegadt[11.72048] - (1.1413177501*omega[10.0] - 1.93933160986*omega[10.114208] + 1.69657595903*omega[10.553686] - 2.5941704532*omega[11.167181] + 0.29061652629*omega[11.72048] + 1.40499182764*omega[12.0]) : 0.0 : True
+ 12.0 : 0.0 : domegadt[12.0] - (-2.5*omega[10.0] + 4.2062121118*omega[10.114208] - 3.48512805833*omega[10.553686] + 4.38855710208*omega[11.167181] - 9.10964115554*omega[11.72048] + 6.5*omega[12.0]) : 0.0 : True
+ 12.114208 : 0.0 : domegadt[12.114208] - (-5.5193396206*omega[12.0] + 4.37796198897*omega[12.114208] + 1.44597130769*omega[12.553686] - 0.4375931981*omega[13.167181] + 0.19985260397*omega[13.72048] - 0.0668530819246*omega[14.0]) : 0.0 : True
+ 12.553686 : 0.0 : domegadt[12.553686] - (1.79153426125*omega[12.0] - 3.58069036007*omega[12.114208] + 0.903038862042*omega[12.553686] + 1.18189858803*omega[13.167181] - 0.432950390142*omega[13.72048] + 0.137169038888*omega[14.0]) : 0.0 : True
+ 13.167181 : 0.0 : domegadt[13.167181] - (-1.17208577895*omega[12.0] + 2.06108262312*omega[12.114208] - 2.24800856291*omega[12.553686] + 0.428382622699*omega[13.167181] + 1.25916047461*omega[13.72048] - 0.328531378567*omega[14.0]) : 0.0 : True
+ 13.72048 : 0.0 : domegadt[13.72048] - (1.1413177501*omega[12.0] - 1.93933160986*omega[12.114208] + 1.69657595903*omega[12.553686] - 2.5941704532*omega[13.167181] + 0.29061652629*omega[13.72048] + 1.40499182764*omega[14.0]) : 0.0 : True
+ 14.0 : 0.0 : domegadt[14.0] - (-2.5*omega[12.0] + 4.2062121118*omega[12.114208] - 3.48512805833*omega[12.553686] + 4.38855710208*omega[13.167181] - 9.10964115554*omega[13.72048] + 6.5*omega[14.0]) : 0.0 : True
+ 14.114208 : 0.0 : domegadt[14.114208] - (-5.5193396206*omega[14.0] + 4.37796198897*omega[14.114208] + 1.44597130769*omega[14.553686] - 0.4375931981*omega[15.167181] + 0.19985260397*omega[15.72048] - 0.0668530819246*omega[16.0]) : 0.0 : True
+ 14.553686 : 0.0 : domegadt[14.553686] - (1.79153426125*omega[14.0] - 3.58069036007*omega[14.114208] + 0.903038862042*omega[14.553686] + 1.18189858803*omega[15.167181] - 0.432950390142*omega[15.72048] + 0.137169038888*omega[16.0]) : 0.0 : True
+ 15.167181 : 0.0 : domegadt[15.167181] - (-1.17208577895*omega[14.0] + 2.06108262312*omega[14.114208] - 2.24800856291*omega[14.553686] + 0.428382622699*omega[15.167181] + 1.25916047461*omega[15.72048] - 0.328531378567*omega[16.0]) : 0.0 : True
+ 15.72048 : 0.0 : domegadt[15.72048] - (1.1413177501*omega[14.0] - 1.93933160986*omega[14.114208] + 1.69657595903*omega[14.553686] - 2.5941704532*omega[15.167181] + 0.29061652629*omega[15.72048] + 1.40499182764*omega[16.0]) : 0.0 : True
+ 16.0 : 0.0 : domegadt[16.0] - (-2.5*omega[14.0] + 4.2062121118*omega[14.114208] - 3.48512805833*omega[14.553686] + 4.38855710208*omega[15.167181] - 9.10964115554*omega[15.72048] + 6.5*omega[16.0]) : 0.0 : True
+ 16.114208 : 0.0 : domegadt[16.114208] - (-5.5193396206*omega[16.0] + 4.37796198897*omega[16.114208] + 1.44597130769*omega[16.553686] - 0.4375931981*omega[17.167181] + 0.19985260397*omega[17.72048] - 0.0668530819246*omega[18.0]) : 0.0 : True
+ 16.553686 : 0.0 : domegadt[16.553686] - (1.79153426125*omega[16.0] - 3.58069036007*omega[16.114208] + 0.903038862042*omega[16.553686] + 1.18189858803*omega[17.167181] - 0.432950390142*omega[17.72048] + 0.137169038888*omega[18.0]) : 0.0 : True
+ 17.167181 : 0.0 : domegadt[17.167181] - (-1.17208577895*omega[16.0] + 2.06108262312*omega[16.114208] - 2.24800856291*omega[16.553686] + 0.428382622699*omega[17.167181] + 1.25916047461*omega[17.72048] - 0.328531378567*omega[18.0]) : 0.0 : True
+ 17.72048 : 0.0 : domegadt[17.72048] - (1.1413177501*omega[16.0] - 1.93933160986*omega[16.114208] + 1.69657595903*omega[16.553686] - 2.5941704532*omega[17.167181] + 0.29061652629*omega[17.72048] + 1.40499182764*omega[18.0]) : 0.0 : True
+ 18.0 : 0.0 : domegadt[18.0] - (-2.5*omega[16.0] + 4.2062121118*omega[16.114208] - 3.48512805833*omega[16.553686] + 4.38855710208*omega[17.167181] - 9.10964115554*omega[17.72048] + 6.5*omega[18.0]) : 0.0 : True
+ 18.114208 : 0.0 : domegadt[18.114208] - (-5.5193396206*omega[18.0] + 4.37796198897*omega[18.114208] + 1.44597130769*omega[18.553686] - 0.4375931981*omega[19.167181] + 0.19985260397*omega[19.72048] - 0.0668530819246*omega[20]) : 0.0 : True
+ 18.553686 : 0.0 : domegadt[18.553686] - (1.79153426125*omega[18.0] - 3.58069036007*omega[18.114208] + 0.903038862042*omega[18.553686] + 1.18189858803*omega[19.167181] - 0.432950390142*omega[19.72048] + 0.137169038888*omega[20]) : 0.0 : True
+ 19.167181 : 0.0 : domegadt[19.167181] - (-1.17208577895*omega[18.0] + 2.06108262312*omega[18.114208] - 2.24800856291*omega[18.553686] + 0.428382622699*omega[19.167181] + 1.25916047461*omega[19.72048] - 0.328531378567*omega[20]) : 0.0 : True
+ 19.72048 : 0.0 : domegadt[19.72048] - (1.1413177501*omega[18.0] - 1.93933160986*omega[18.114208] + 1.69657595903*omega[18.553686] - 2.5941704532*omega[19.167181] + 0.29061652629*omega[19.72048] + 1.40499182764*omega[20]) : 0.0 : True
+ 20 : 0.0 : domegadt[20] - (-2.5*omega[18.0] + 4.2062121118*omega[18.114208] - 3.48512805833*omega[18.553686] + 4.38855710208*omega[19.167181] - 9.10964115554*omega[19.72048] + 6.5*omega[20]) : 0.0 : True
dthetadt_disc_eq : Size=50, Index=t, Active=True
- Key : Lower : Body : Upper : Active
- 0.114208 : 0.0 : dthetadt[0.114208] - (-5.519339620604476*theta[0.0] + 4.377961988969178*theta[0.114208] + 1.4459713076900629*theta[0.553686] - 0.437593198100135*theta[1.167181] + 0.19985260396998084*theta[1.72048] - 0.06685308192460761*theta[2.0]) : 0.0 : True
- 0.553686 : 0.0 : dthetadt[0.553686] - (1.7915342612505238*theta[0.0] - 3.5806903600726603*theta[0.114208] + 0.9030388620417913*theta[0.553686] + 1.1818985880343118*theta[1.167181] - 0.43295039014156045*theta[1.72048] + 0.137169038887596*theta[2.0]) : 0.0 : True
- 1.167181 : 0.0 : dthetadt[1.167181] - (-1.1720857789519332*theta[0.0] + 2.061082623121699*theta[0.114208] - 2.2480085629067506*theta[0.553686] + 0.4283826226986418*theta[1.167181] + 1.2591604746055074*theta[1.72048] - 0.3285313785671775*theta[2.0]) : 0.0 : True
- 1.72048 : 0.0 : dthetadt[1.72048] - (1.141317750102841*theta[0.0] - 1.9393316098620392*theta[0.114208] + 1.6965759590324723*theta[0.553686] - 2.5941704532035765*theta[1.167181] + 0.2906165262903779*theta[1.72048] + 1.4049918276398599*theta[2.0]) : 0.0 : True
- 2.0 : 0.0 : dthetadt[2.0] - (-2.4999999999999947*theta[0.0] + 4.206212111797173*theta[0.114208] - 3.4851280583284003*theta[0.553686] + 4.388557102075248*theta[1.167181] - 9.109641155544018*theta[1.72048] + 6.49999999999999*theta[2.0]) : 0.0 : True
- 2.114208 : 0.0 : dthetadt[2.114208] - (-5.519339620604476*theta[2.0] + 4.377961988969178*theta[2.114208] + 1.4459713076900629*theta[2.553686] - 0.437593198100135*theta[3.167181] + 0.19985260396998084*theta[3.72048] - 0.06685308192460761*theta[4.0]) : 0.0 : True
- 2.553686 : 0.0 : dthetadt[2.553686] - (1.7915342612505238*theta[2.0] - 3.5806903600726603*theta[2.114208] + 0.9030388620417913*theta[2.553686] + 1.1818985880343118*theta[3.167181] - 0.43295039014156045*theta[3.72048] + 0.137169038887596*theta[4.0]) : 0.0 : True
- 3.167181 : 0.0 : dthetadt[3.167181] - (-1.1720857789519332*theta[2.0] + 2.061082623121699*theta[2.114208] - 2.2480085629067506*theta[2.553686] + 0.4283826226986418*theta[3.167181] + 1.2591604746055074*theta[3.72048] - 0.3285313785671775*theta[4.0]) : 0.0 : True
- 3.72048 : 0.0 : dthetadt[3.72048] - (1.141317750102841*theta[2.0] - 1.9393316098620392*theta[2.114208] + 1.6965759590324723*theta[2.553686] - 2.5941704532035765*theta[3.167181] + 0.2906165262903779*theta[3.72048] + 1.4049918276398599*theta[4.0]) : 0.0 : True
- 4.0 : 0.0 : dthetadt[4.0] - (-2.4999999999999947*theta[2.0] + 4.206212111797173*theta[2.114208] - 3.4851280583284003*theta[2.553686] + 4.388557102075248*theta[3.167181] - 9.109641155544018*theta[3.72048] + 6.49999999999999*theta[4.0]) : 0.0 : True
- 4.114208 : 0.0 : dthetadt[4.114208] - (-5.519339620604476*theta[4.0] + 4.377961988969178*theta[4.114208] + 1.4459713076900629*theta[4.553686] - 0.437593198100135*theta[5.167181] + 0.19985260396998084*theta[5.72048] - 0.06685308192460761*theta[6.0]) : 0.0 : True
- 4.553686 : 0.0 : dthetadt[4.553686] - (1.7915342612505238*theta[4.0] - 3.5806903600726603*theta[4.114208] + 0.9030388620417913*theta[4.553686] + 1.1818985880343118*theta[5.167181] - 0.43295039014156045*theta[5.72048] + 0.137169038887596*theta[6.0]) : 0.0 : True
- 5.167181 : 0.0 : dthetadt[5.167181] - (-1.1720857789519332*theta[4.0] + 2.061082623121699*theta[4.114208] - 2.2480085629067506*theta[4.553686] + 0.4283826226986418*theta[5.167181] + 1.2591604746055074*theta[5.72048] - 0.3285313785671775*theta[6.0]) : 0.0 : True
- 5.72048 : 0.0 : dthetadt[5.72048] - (1.141317750102841*theta[4.0] - 1.9393316098620392*theta[4.114208] + 1.6965759590324723*theta[4.553686] - 2.5941704532035765*theta[5.167181] + 0.2906165262903779*theta[5.72048] + 1.4049918276398599*theta[6.0]) : 0.0 : True
- 6.0 : 0.0 : dthetadt[6.0] - (-2.4999999999999947*theta[4.0] + 4.206212111797173*theta[4.114208] - 3.4851280583284003*theta[4.553686] + 4.388557102075248*theta[5.167181] - 9.109641155544018*theta[5.72048] + 6.49999999999999*theta[6.0]) : 0.0 : True
- 6.114208 : 0.0 : dthetadt[6.114208] - (-5.519339620604476*theta[6.0] + 4.377961988969178*theta[6.114208] + 1.4459713076900629*theta[6.553686] - 0.437593198100135*theta[7.167181] + 0.19985260396998084*theta[7.72048] - 0.06685308192460761*theta[8.0]) : 0.0 : True
- 6.553686 : 0.0 : dthetadt[6.553686] - (1.7915342612505238*theta[6.0] - 3.5806903600726603*theta[6.114208] + 0.9030388620417913*theta[6.553686] + 1.1818985880343118*theta[7.167181] - 0.43295039014156045*theta[7.72048] + 0.137169038887596*theta[8.0]) : 0.0 : True
- 7.167181 : 0.0 : dthetadt[7.167181] - (-1.1720857789519332*theta[6.0] + 2.061082623121699*theta[6.114208] - 2.2480085629067506*theta[6.553686] + 0.4283826226986418*theta[7.167181] + 1.2591604746055074*theta[7.72048] - 0.3285313785671775*theta[8.0]) : 0.0 : True
- 7.72048 : 0.0 : dthetadt[7.72048] - (1.141317750102841*theta[6.0] - 1.9393316098620392*theta[6.114208] + 1.6965759590324723*theta[6.553686] - 2.5941704532035765*theta[7.167181] + 0.2906165262903779*theta[7.72048] + 1.4049918276398599*theta[8.0]) : 0.0 : True
- 8.0 : 0.0 : dthetadt[8.0] - (-2.4999999999999947*theta[6.0] + 4.206212111797173*theta[6.114208] - 3.4851280583284003*theta[6.553686] + 4.388557102075248*theta[7.167181] - 9.109641155544018*theta[7.72048] + 6.49999999999999*theta[8.0]) : 0.0 : True
- 8.114208 : 0.0 : dthetadt[8.114208] - (-5.519339620604476*theta[8.0] + 4.377961988969178*theta[8.114208] + 1.4459713076900629*theta[8.553686] - 0.437593198100135*theta[9.167181] + 0.19985260396998084*theta[9.72048] - 0.06685308192460761*theta[10.0]) : 0.0 : True
- 8.553686 : 0.0 : dthetadt[8.553686] - (1.7915342612505238*theta[8.0] - 3.5806903600726603*theta[8.114208] + 0.9030388620417913*theta[8.553686] + 1.1818985880343118*theta[9.167181] - 0.43295039014156045*theta[9.72048] + 0.137169038887596*theta[10.0]) : 0.0 : True
- 9.167181 : 0.0 : dthetadt[9.167181] - (-1.1720857789519332*theta[8.0] + 2.061082623121699*theta[8.114208] - 2.2480085629067506*theta[8.553686] + 0.4283826226986418*theta[9.167181] + 1.2591604746055074*theta[9.72048] - 0.3285313785671775*theta[10.0]) : 0.0 : True
- 9.72048 : 0.0 : dthetadt[9.72048] - (1.141317750102841*theta[8.0] - 1.9393316098620392*theta[8.114208] + 1.6965759590324723*theta[8.553686] - 2.5941704532035765*theta[9.167181] + 0.2906165262903779*theta[9.72048] + 1.4049918276398599*theta[10.0]) : 0.0 : True
- 10.0 : 0.0 : dthetadt[10.0] - (-2.4999999999999947*theta[8.0] + 4.206212111797173*theta[8.114208] - 3.4851280583284003*theta[8.553686] + 4.388557102075248*theta[9.167181] - 9.109641155544018*theta[9.72048] + 6.49999999999999*theta[10.0]) : 0.0 : True
- 10.114208 : 0.0 : dthetadt[10.114208] - (-5.519339620604476*theta[10.0] + 4.377961988969178*theta[10.114208] + 1.4459713076900629*theta[10.553686] - 0.437593198100135*theta[11.167181] + 0.19985260396998084*theta[11.72048] - 0.06685308192460761*theta[12.0]) : 0.0 : True
- 10.553686 : 0.0 : dthetadt[10.553686] - (1.7915342612505238*theta[10.0] - 3.5806903600726603*theta[10.114208] + 0.9030388620417913*theta[10.553686] + 1.1818985880343118*theta[11.167181] - 0.43295039014156045*theta[11.72048] + 0.137169038887596*theta[12.0]) : 0.0 : True
- 11.167181 : 0.0 : dthetadt[11.167181] - (-1.1720857789519332*theta[10.0] + 2.061082623121699*theta[10.114208] - 2.2480085629067506*theta[10.553686] + 0.4283826226986418*theta[11.167181] + 1.2591604746055074*theta[11.72048] - 0.3285313785671775*theta[12.0]) : 0.0 : True
- 11.72048 : 0.0 : dthetadt[11.72048] - (1.141317750102841*theta[10.0] - 1.9393316098620392*theta[10.114208] + 1.6965759590324723*theta[10.553686] - 2.5941704532035765*theta[11.167181] + 0.2906165262903779*theta[11.72048] + 1.4049918276398599*theta[12.0]) : 0.0 : True
- 12.0 : 0.0 : dthetadt[12.0] - (-2.4999999999999947*theta[10.0] + 4.206212111797173*theta[10.114208] - 3.4851280583284003*theta[10.553686] + 4.388557102075248*theta[11.167181] - 9.109641155544018*theta[11.72048] + 6.49999999999999*theta[12.0]) : 0.0 : True
- 12.114208 : 0.0 : dthetadt[12.114208] - (-5.519339620604476*theta[12.0] + 4.377961988969178*theta[12.114208] + 1.4459713076900629*theta[12.553686] - 0.437593198100135*theta[13.167181] + 0.19985260396998084*theta[13.72048] - 0.06685308192460761*theta[14.0]) : 0.0 : True
- 12.553686 : 0.0 : dthetadt[12.553686] - (1.7915342612505238*theta[12.0] - 3.5806903600726603*theta[12.114208] + 0.9030388620417913*theta[12.553686] + 1.1818985880343118*theta[13.167181] - 0.43295039014156045*theta[13.72048] + 0.137169038887596*theta[14.0]) : 0.0 : True
- 13.167181 : 0.0 : dthetadt[13.167181] - (-1.1720857789519332*theta[12.0] + 2.061082623121699*theta[12.114208] - 2.2480085629067506*theta[12.553686] + 0.4283826226986418*theta[13.167181] + 1.2591604746055074*theta[13.72048] - 0.3285313785671775*theta[14.0]) : 0.0 : True
- 13.72048 : 0.0 : dthetadt[13.72048] - (1.141317750102841*theta[12.0] - 1.9393316098620392*theta[12.114208] + 1.6965759590324723*theta[12.553686] - 2.5941704532035765*theta[13.167181] + 0.2906165262903779*theta[13.72048] + 1.4049918276398599*theta[14.0]) : 0.0 : True
- 14.0 : 0.0 : dthetadt[14.0] - (-2.4999999999999947*theta[12.0] + 4.206212111797173*theta[12.114208] - 3.4851280583284003*theta[12.553686] + 4.388557102075248*theta[13.167181] - 9.109641155544018*theta[13.72048] + 6.49999999999999*theta[14.0]) : 0.0 : True
- 14.114208 : 0.0 : dthetadt[14.114208] - (-5.519339620604476*theta[14.0] + 4.377961988969178*theta[14.114208] + 1.4459713076900629*theta[14.553686] - 0.437593198100135*theta[15.167181] + 0.19985260396998084*theta[15.72048] - 0.06685308192460761*theta[16.0]) : 0.0 : True
- 14.553686 : 0.0 : dthetadt[14.553686] - (1.7915342612505238*theta[14.0] - 3.5806903600726603*theta[14.114208] + 0.9030388620417913*theta[14.553686] + 1.1818985880343118*theta[15.167181] - 0.43295039014156045*theta[15.72048] + 0.137169038887596*theta[16.0]) : 0.0 : True
- 15.167181 : 0.0 : dthetadt[15.167181] - (-1.1720857789519332*theta[14.0] + 2.061082623121699*theta[14.114208] - 2.2480085629067506*theta[14.553686] + 0.4283826226986418*theta[15.167181] + 1.2591604746055074*theta[15.72048] - 0.3285313785671775*theta[16.0]) : 0.0 : True
- 15.72048 : 0.0 : dthetadt[15.72048] - (1.141317750102841*theta[14.0] - 1.9393316098620392*theta[14.114208] + 1.6965759590324723*theta[14.553686] - 2.5941704532035765*theta[15.167181] + 0.2906165262903779*theta[15.72048] + 1.4049918276398599*theta[16.0]) : 0.0 : True
- 16.0 : 0.0 : dthetadt[16.0] - (-2.4999999999999947*theta[14.0] + 4.206212111797173*theta[14.114208] - 3.4851280583284003*theta[14.553686] + 4.388557102075248*theta[15.167181] - 9.109641155544018*theta[15.72048] + 6.49999999999999*theta[16.0]) : 0.0 : True
- 16.114208 : 0.0 : dthetadt[16.114208] - (-5.519339620604476*theta[16.0] + 4.377961988969178*theta[16.114208] + 1.4459713076900629*theta[16.553686] - 0.437593198100135*theta[17.167181] + 0.19985260396998084*theta[17.72048] - 0.06685308192460761*theta[18.0]) : 0.0 : True
- 16.553686 : 0.0 : dthetadt[16.553686] - (1.7915342612505238*theta[16.0] - 3.5806903600726603*theta[16.114208] + 0.9030388620417913*theta[16.553686] + 1.1818985880343118*theta[17.167181] - 0.43295039014156045*theta[17.72048] + 0.137169038887596*theta[18.0]) : 0.0 : True
- 17.167181 : 0.0 : dthetadt[17.167181] - (-1.1720857789519332*theta[16.0] + 2.061082623121699*theta[16.114208] - 2.2480085629067506*theta[16.553686] + 0.4283826226986418*theta[17.167181] + 1.2591604746055074*theta[17.72048] - 0.3285313785671775*theta[18.0]) : 0.0 : True
- 17.72048 : 0.0 : dthetadt[17.72048] - (1.141317750102841*theta[16.0] - 1.9393316098620392*theta[16.114208] + 1.6965759590324723*theta[16.553686] - 2.5941704532035765*theta[17.167181] + 0.2906165262903779*theta[17.72048] + 1.4049918276398599*theta[18.0]) : 0.0 : True
- 18.0 : 0.0 : dthetadt[18.0] - (-2.4999999999999947*theta[16.0] + 4.206212111797173*theta[16.114208] - 3.4851280583284003*theta[16.553686] + 4.388557102075248*theta[17.167181] - 9.109641155544018*theta[17.72048] + 6.49999999999999*theta[18.0]) : 0.0 : True
- 18.114208 : 0.0 : dthetadt[18.114208] - (-5.519339620604476*theta[18.0] + 4.377961988969178*theta[18.114208] + 1.4459713076900629*theta[18.553686] - 0.437593198100135*theta[19.167181] + 0.19985260396998084*theta[19.72048] - 0.06685308192460761*theta[20.0]) : 0.0 : True
- 18.553686 : 0.0 : dthetadt[18.553686] - (1.7915342612505238*theta[18.0] - 3.5806903600726603*theta[18.114208] + 0.9030388620417913*theta[18.553686] + 1.1818985880343118*theta[19.167181] - 0.43295039014156045*theta[19.72048] + 0.137169038887596*theta[20.0]) : 0.0 : True
- 19.167181 : 0.0 : dthetadt[19.167181] - (-1.1720857789519332*theta[18.0] + 2.061082623121699*theta[18.114208] - 2.2480085629067506*theta[18.553686] + 0.4283826226986418*theta[19.167181] + 1.2591604746055074*theta[19.72048] - 0.3285313785671775*theta[20.0]) : 0.0 : True
- 19.72048 : 0.0 : dthetadt[19.72048] - (1.141317750102841*theta[18.0] - 1.9393316098620392*theta[18.114208] + 1.6965759590324723*theta[18.553686] - 2.5941704532035765*theta[19.167181] + 0.2906165262903779*theta[19.72048] + 1.4049918276398599*theta[20.0]) : 0.0 : True
- 20.0 : 0.0 : dthetadt[20.0] - (-2.4999999999999947*theta[18.0] + 4.206212111797173*theta[18.114208] - 3.4851280583284003*theta[18.553686] + 4.388557102075248*theta[19.167181] - 9.109641155544018*theta[19.72048] + 6.49999999999999*theta[20.0]) : 0.0 : True
+ Key : Lower : Body : Upper : Active
+ 0.114208 : 0.0 : dthetadt[0.114208] - (-5.5193396206*theta[0] + 4.37796198897*theta[0.114208] + 1.44597130769*theta[0.553686] - 0.4375931981*theta[1.167181] + 0.19985260397*theta[1.72048] - 0.0668530819246*theta[2.0]) : 0.0 : True
+ 0.553686 : 0.0 : dthetadt[0.553686] - (1.79153426125*theta[0] - 3.58069036007*theta[0.114208] + 0.903038862042*theta[0.553686] + 1.18189858803*theta[1.167181] - 0.432950390142*theta[1.72048] + 0.137169038888*theta[2.0]) : 0.0 : True
+ 1.167181 : 0.0 : dthetadt[1.167181] - (-1.17208577895*theta[0] + 2.06108262312*theta[0.114208] - 2.24800856291*theta[0.553686] + 0.428382622699*theta[1.167181] + 1.25916047461*theta[1.72048] - 0.328531378567*theta[2.0]) : 0.0 : True
+ 1.72048 : 0.0 : dthetadt[1.72048] - (1.1413177501*theta[0] - 1.93933160986*theta[0.114208] + 1.69657595903*theta[0.553686] - 2.5941704532*theta[1.167181] + 0.29061652629*theta[1.72048] + 1.40499182764*theta[2.0]) : 0.0 : True
+ 2.0 : 0.0 : dthetadt[2.0] - (-2.5*theta[0] + 4.2062121118*theta[0.114208] - 3.48512805833*theta[0.553686] + 4.38855710208*theta[1.167181] - 9.10964115554*theta[1.72048] + 6.5*theta[2.0]) : 0.0 : True
+ 2.114208 : 0.0 : dthetadt[2.114208] - (-5.5193396206*theta[2.0] + 4.37796198897*theta[2.114208] + 1.44597130769*theta[2.553686] - 0.4375931981*theta[3.167181] + 0.19985260397*theta[3.72048] - 0.0668530819246*theta[4.0]) : 0.0 : True
+ 2.553686 : 0.0 : dthetadt[2.553686] - (1.79153426125*theta[2.0] - 3.58069036007*theta[2.114208] + 0.903038862042*theta[2.553686] + 1.18189858803*theta[3.167181] - 0.432950390142*theta[3.72048] + 0.137169038888*theta[4.0]) : 0.0 : True
+ 3.167181 : 0.0 : dthetadt[3.167181] - (-1.17208577895*theta[2.0] + 2.06108262312*theta[2.114208] - 2.24800856291*theta[2.553686] + 0.428382622699*theta[3.167181] + 1.25916047461*theta[3.72048] - 0.328531378567*theta[4.0]) : 0.0 : True
+ 3.72048 : 0.0 : dthetadt[3.72048] - (1.1413177501*theta[2.0] - 1.93933160986*theta[2.114208] + 1.69657595903*theta[2.553686] - 2.5941704532*theta[3.167181] + 0.29061652629*theta[3.72048] + 1.40499182764*theta[4.0]) : 0.0 : True
+ 4.0 : 0.0 : dthetadt[4.0] - (-2.5*theta[2.0] + 4.2062121118*theta[2.114208] - 3.48512805833*theta[2.553686] + 4.38855710208*theta[3.167181] - 9.10964115554*theta[3.72048] + 6.5*theta[4.0]) : 0.0 : True
+ 4.114208 : 0.0 : dthetadt[4.114208] - (-5.5193396206*theta[4.0] + 4.37796198897*theta[4.114208] + 1.44597130769*theta[4.553686] - 0.4375931981*theta[5.167181] + 0.19985260397*theta[5.72048] - 0.0668530819246*theta[6.0]) : 0.0 : True
+ 4.553686 : 0.0 : dthetadt[4.553686] - (1.79153426125*theta[4.0] - 3.58069036007*theta[4.114208] + 0.903038862042*theta[4.553686] + 1.18189858803*theta[5.167181] - 0.432950390142*theta[5.72048] + 0.137169038888*theta[6.0]) : 0.0 : True
+ 5.167181 : 0.0 : dthetadt[5.167181] - (-1.17208577895*theta[4.0] + 2.06108262312*theta[4.114208] - 2.24800856291*theta[4.553686] + 0.428382622699*theta[5.167181] + 1.25916047461*theta[5.72048] - 0.328531378567*theta[6.0]) : 0.0 : True
+ 5.72048 : 0.0 : dthetadt[5.72048] - (1.1413177501*theta[4.0] - 1.93933160986*theta[4.114208] + 1.69657595903*theta[4.553686] - 2.5941704532*theta[5.167181] + 0.29061652629*theta[5.72048] + 1.40499182764*theta[6.0]) : 0.0 : True
+ 6.0 : 0.0 : dthetadt[6.0] - (-2.5*theta[4.0] + 4.2062121118*theta[4.114208] - 3.48512805833*theta[4.553686] + 4.38855710208*theta[5.167181] - 9.10964115554*theta[5.72048] + 6.5*theta[6.0]) : 0.0 : True
+ 6.114208 : 0.0 : dthetadt[6.114208] - (-5.5193396206*theta[6.0] + 4.37796198897*theta[6.114208] + 1.44597130769*theta[6.553686] - 0.4375931981*theta[7.167181] + 0.19985260397*theta[7.72048] - 0.0668530819246*theta[8.0]) : 0.0 : True
+ 6.553686 : 0.0 : dthetadt[6.553686] - (1.79153426125*theta[6.0] - 3.58069036007*theta[6.114208] + 0.903038862042*theta[6.553686] + 1.18189858803*theta[7.167181] - 0.432950390142*theta[7.72048] + 0.137169038888*theta[8.0]) : 0.0 : True
+ 7.167181 : 0.0 : dthetadt[7.167181] - (-1.17208577895*theta[6.0] + 2.06108262312*theta[6.114208] - 2.24800856291*theta[6.553686] + 0.428382622699*theta[7.167181] + 1.25916047461*theta[7.72048] - 0.328531378567*theta[8.0]) : 0.0 : True
+ 7.72048 : 0.0 : dthetadt[7.72048] - (1.1413177501*theta[6.0] - 1.93933160986*theta[6.114208] + 1.69657595903*theta[6.553686] - 2.5941704532*theta[7.167181] + 0.29061652629*theta[7.72048] + 1.40499182764*theta[8.0]) : 0.0 : True
+ 8.0 : 0.0 : dthetadt[8.0] - (-2.5*theta[6.0] + 4.2062121118*theta[6.114208] - 3.48512805833*theta[6.553686] + 4.38855710208*theta[7.167181] - 9.10964115554*theta[7.72048] + 6.5*theta[8.0]) : 0.0 : True
+ 8.114208 : 0.0 : dthetadt[8.114208] - (-5.5193396206*theta[8.0] + 4.37796198897*theta[8.114208] + 1.44597130769*theta[8.553686] - 0.4375931981*theta[9.167181] + 0.19985260397*theta[9.72048] - 0.0668530819246*theta[10.0]) : 0.0 : True
+ 8.553686 : 0.0 : dthetadt[8.553686] - (1.79153426125*theta[8.0] - 3.58069036007*theta[8.114208] + 0.903038862042*theta[8.553686] + 1.18189858803*theta[9.167181] - 0.432950390142*theta[9.72048] + 0.137169038888*theta[10.0]) : 0.0 : True
+ 9.167181 : 0.0 : dthetadt[9.167181] - (-1.17208577895*theta[8.0] + 2.06108262312*theta[8.114208] - 2.24800856291*theta[8.553686] + 0.428382622699*theta[9.167181] + 1.25916047461*theta[9.72048] - 0.328531378567*theta[10.0]) : 0.0 : True
+ 9.72048 : 0.0 : dthetadt[9.72048] - (1.1413177501*theta[8.0] - 1.93933160986*theta[8.114208] + 1.69657595903*theta[8.553686] - 2.5941704532*theta[9.167181] + 0.29061652629*theta[9.72048] + 1.40499182764*theta[10.0]) : 0.0 : True
+ 10.0 : 0.0 : dthetadt[10.0] - (-2.5*theta[8.0] + 4.2062121118*theta[8.114208] - 3.48512805833*theta[8.553686] + 4.38855710208*theta[9.167181] - 9.10964115554*theta[9.72048] + 6.5*theta[10.0]) : 0.0 : True
+ 10.114208 : 0.0 : dthetadt[10.114208] - (-5.5193396206*theta[10.0] + 4.37796198897*theta[10.114208] + 1.44597130769*theta[10.553686] - 0.4375931981*theta[11.167181] + 0.19985260397*theta[11.72048] - 0.0668530819246*theta[12.0]) : 0.0 : True
+ 10.553686 : 0.0 : dthetadt[10.553686] - (1.79153426125*theta[10.0] - 3.58069036007*theta[10.114208] + 0.903038862042*theta[10.553686] + 1.18189858803*theta[11.167181] - 0.432950390142*theta[11.72048] + 0.137169038888*theta[12.0]) : 0.0 : True
+ 11.167181 : 0.0 : dthetadt[11.167181] - (-1.17208577895*theta[10.0] + 2.06108262312*theta[10.114208] - 2.24800856291*theta[10.553686] + 0.428382622699*theta[11.167181] + 1.25916047461*theta[11.72048] - 0.328531378567*theta[12.0]) : 0.0 : True
+ 11.72048 : 0.0 : dthetadt[11.72048] - (1.1413177501*theta[10.0] - 1.93933160986*theta[10.114208] + 1.69657595903*theta[10.553686] - 2.5941704532*theta[11.167181] + 0.29061652629*theta[11.72048] + 1.40499182764*theta[12.0]) : 0.0 : True
+ 12.0 : 0.0 : dthetadt[12.0] - (-2.5*theta[10.0] + 4.2062121118*theta[10.114208] - 3.48512805833*theta[10.553686] + 4.38855710208*theta[11.167181] - 9.10964115554*theta[11.72048] + 6.5*theta[12.0]) : 0.0 : True
+ 12.114208 : 0.0 : dthetadt[12.114208] - (-5.5193396206*theta[12.0] + 4.37796198897*theta[12.114208] + 1.44597130769*theta[12.553686] - 0.4375931981*theta[13.167181] + 0.19985260397*theta[13.72048] - 0.0668530819246*theta[14.0]) : 0.0 : True
+ 12.553686 : 0.0 : dthetadt[12.553686] - (1.79153426125*theta[12.0] - 3.58069036007*theta[12.114208] + 0.903038862042*theta[12.553686] + 1.18189858803*theta[13.167181] - 0.432950390142*theta[13.72048] + 0.137169038888*theta[14.0]) : 0.0 : True
+ 13.167181 : 0.0 : dthetadt[13.167181] - (-1.17208577895*theta[12.0] + 2.06108262312*theta[12.114208] - 2.24800856291*theta[12.553686] + 0.428382622699*theta[13.167181] + 1.25916047461*theta[13.72048] - 0.328531378567*theta[14.0]) : 0.0 : True
+ 13.72048 : 0.0 : dthetadt[13.72048] - (1.1413177501*theta[12.0] - 1.93933160986*theta[12.114208] + 1.69657595903*theta[12.553686] - 2.5941704532*theta[13.167181] + 0.29061652629*theta[13.72048] + 1.40499182764*theta[14.0]) : 0.0 : True
+ 14.0 : 0.0 : dthetadt[14.0] - (-2.5*theta[12.0] + 4.2062121118*theta[12.114208] - 3.48512805833*theta[12.553686] + 4.38855710208*theta[13.167181] - 9.10964115554*theta[13.72048] + 6.5*theta[14.0]) : 0.0 : True
+ 14.114208 : 0.0 : dthetadt[14.114208] - (-5.5193396206*theta[14.0] + 4.37796198897*theta[14.114208] + 1.44597130769*theta[14.553686] - 0.4375931981*theta[15.167181] + 0.19985260397*theta[15.72048] - 0.0668530819246*theta[16.0]) : 0.0 : True
+ 14.553686 : 0.0 : dthetadt[14.553686] - (1.79153426125*theta[14.0] - 3.58069036007*theta[14.114208] + 0.903038862042*theta[14.553686] + 1.18189858803*theta[15.167181] - 0.432950390142*theta[15.72048] + 0.137169038888*theta[16.0]) : 0.0 : True
+ 15.167181 : 0.0 : dthetadt[15.167181] - (-1.17208577895*theta[14.0] + 2.06108262312*theta[14.114208] - 2.24800856291*theta[14.553686] + 0.428382622699*theta[15.167181] + 1.25916047461*theta[15.72048] - 0.328531378567*theta[16.0]) : 0.0 : True
+ 15.72048 : 0.0 : dthetadt[15.72048] - (1.1413177501*theta[14.0] - 1.93933160986*theta[14.114208] + 1.69657595903*theta[14.553686] - 2.5941704532*theta[15.167181] + 0.29061652629*theta[15.72048] + 1.40499182764*theta[16.0]) : 0.0 : True
+ 16.0 : 0.0 : dthetadt[16.0] - (-2.5*theta[14.0] + 4.2062121118*theta[14.114208] - 3.48512805833*theta[14.553686] + 4.38855710208*theta[15.167181] - 9.10964115554*theta[15.72048] + 6.5*theta[16.0]) : 0.0 : True
+ 16.114208 : 0.0 : dthetadt[16.114208] - (-5.5193396206*theta[16.0] + 4.37796198897*theta[16.114208] + 1.44597130769*theta[16.553686] - 0.4375931981*theta[17.167181] + 0.19985260397*theta[17.72048] - 0.0668530819246*theta[18.0]) : 0.0 : True
+ 16.553686 : 0.0 : dthetadt[16.553686] - (1.79153426125*theta[16.0] - 3.58069036007*theta[16.114208] + 0.903038862042*theta[16.553686] + 1.18189858803*theta[17.167181] - 0.432950390142*theta[17.72048] + 0.137169038888*theta[18.0]) : 0.0 : True
+ 17.167181 : 0.0 : dthetadt[17.167181] - (-1.17208577895*theta[16.0] + 2.06108262312*theta[16.114208] - 2.24800856291*theta[16.553686] + 0.428382622699*theta[17.167181] + 1.25916047461*theta[17.72048] - 0.328531378567*theta[18.0]) : 0.0 : True
+ 17.72048 : 0.0 : dthetadt[17.72048] - (1.1413177501*theta[16.0] - 1.93933160986*theta[16.114208] + 1.69657595903*theta[16.553686] - 2.5941704532*theta[17.167181] + 0.29061652629*theta[17.72048] + 1.40499182764*theta[18.0]) : 0.0 : True
+ 18.0 : 0.0 : dthetadt[18.0] - (-2.5*theta[16.0] + 4.2062121118*theta[16.114208] - 3.48512805833*theta[16.553686] + 4.38855710208*theta[17.167181] - 9.10964115554*theta[17.72048] + 6.5*theta[18.0]) : 0.0 : True
+ 18.114208 : 0.0 : dthetadt[18.114208] - (-5.5193396206*theta[18.0] + 4.37796198897*theta[18.114208] + 1.44597130769*theta[18.553686] - 0.4375931981*theta[19.167181] + 0.19985260397*theta[19.72048] - 0.0668530819246*theta[20]) : 0.0 : True
+ 18.553686 : 0.0 : dthetadt[18.553686] - (1.79153426125*theta[18.0] - 3.58069036007*theta[18.114208] + 0.903038862042*theta[18.553686] + 1.18189858803*theta[19.167181] - 0.432950390142*theta[19.72048] + 0.137169038888*theta[20]) : 0.0 : True
+ 19.167181 : 0.0 : dthetadt[19.167181] - (-1.17208577895*theta[18.0] + 2.06108262312*theta[18.114208] - 2.24800856291*theta[18.553686] + 0.428382622699*theta[19.167181] + 1.25916047461*theta[19.72048] - 0.328531378567*theta[20]) : 0.0 : True
+ 19.72048 : 0.0 : dthetadt[19.72048] - (1.1413177501*theta[18.0] - 1.93933160986*theta[18.114208] + 1.69657595903*theta[18.553686] - 2.5941704532*theta[19.167181] + 0.29061652629*theta[19.72048] + 1.40499182764*theta[20]) : 0.0 : True
+ 20 : 0.0 : dthetadt[20] - (-2.5*theta[18.0] + 4.2062121118*theta[18.114208] - 3.48512805833*theta[18.553686] + 4.38855710208*theta[19.167181] - 9.10964115554*theta[19.72048] + 6.5*theta[20]) : 0.0 : True
1 ContinuousSet Declarations
- t : Dim=0, Dimen=1, Size=51, Domain=None, Ordered=Sorted, Bounds=(0.0, 20.0)
- [0.0, 0.114208, 0.553686, 1.167181, 1.72048, 2.0, 2.114208, 2.553686, 3.167181, 3.72048, 4.0, 4.114208, 4.553686, 5.167181, 5.72048, 6.0, 6.114208, 6.553686, 7.167181, 7.72048, 8.0, 8.114208, 8.553686, 9.167181, 9.72048, 10.0, 10.114208, 10.553686, 11.167181, 11.72048, 12.0, 12.114208, 12.553686, 13.167181, 13.72048, 14.0, 14.114208, 14.553686, 15.167181, 15.72048, 16.0, 16.114208, 16.553686, 17.167181, 17.72048, 18.0, 18.114208, 18.553686, 19.167181, 19.72048, 20.0]
+ t : Size=1, Index=None, Ordered=Sorted
+ Key : Dimen : Domain : Size : Members
+ None : 1 : [0.0..20.0] : 51 : {0, 0.114208, 0.553686, 1.167181, 1.72048, 2.0, 2.114208, 2.553686, 3.167181, 3.72048, 4.0, 4.114208, 4.553686, 5.167181, 5.72048, 6.0, 6.114208, 6.553686, 7.167181, 7.72048, 8.0, 8.114208, 8.553686, 9.167181, 9.72048, 10.0, 10.114208, 10.553686, 11.167181, 11.72048, 12.0, 12.114208, 12.553686, 13.167181, 13.72048, 14.0, 14.114208, 14.553686, 15.167181, 15.72048, 16.0, 16.114208, 16.553686, 17.167181, 17.72048, 18.0, 18.114208, 18.553686, 19.167181, 19.72048, 20}
1 Suffix Declarations
var_input : Direction=Suffix.LOCAL, Datatype=Suffix.FLOAT
@@ -444,7 +450,7 @@
b : {0: 0.25, 15: 0.025}
c : {0: 5.0, 7: 50}
-12 Declarations: t b c omega theta domegadt dthetadt diffeq1 diffeq2 var_input domegadt_disc_eq dthetadt_disc_eq
+13 Declarations: t_domain t b c omega theta domegadt dthetadt diffeq1 diffeq2 var_input domegadt_disc_eq dthetadt_disc_eq
[[ 0.0000 3.0400]
[-0.1033 3.0297]
[-0.2223 2.9972]
@@ -487,7 +493,7 @@
[-0.1925 -0.2939]
[ 1.9708 -0.0745]
[ 0.7553 0.2577]
- [-1.6521 0.1458]
+ [-1.6520 0.1458]
[-1.1979 -0.2028]
[ 1.2226 -0.1972]
[ 1.4942 0.1360]
@@ -502,7 +508,7 @@
[ 0.9634 0.1466]
[-0.8766 0.1552]
[-1.1657 -0.0954]
- [ 0.5033 -0.1745]
+ [ 0.5032 -0.1745]
[ 1.2516 0.0412]
[-0.1252 0.1776]
[-1.2256 0.0110]
@@ -522,28 +528,28 @@
[-0.4105 -0.1061]
[ 0.6733 -0.0728]
[ 0.8014 -0.0351]
- [ 0.5985 0.0822]
- [-0.4889 0.0955]
- [-0.7352 -0.0545]
- [ 0.2766 -0.1106]
- [ 0.8106 0.0227]
- [-0.0441 0.1165]
- [-0.8192 0.0107]
- [-0.1896 -0.1129]
- [ 0.7608 -0.0428]
- [ 0.4057 0.1001]
- [-0.6411 0.0712]
- [-0.5866 -0.0793]
- [ 0.4704 -0.0934]
- [ 0.7180 0.0523]
- [-0.2633 0.1079]
- [-0.7896 -0.0212]
- [ 0.0369 -0.1134]
- [ 0.7962 -0.0112]
- [ 0.1902 0.1096]
- [-0.7378 0.0424]
- [-0.3995 -0.0970]
- [ 0.6200 -0.0698]
- [ 0.5744 0.0766]
- [-0.4530 0.0913]
- [-0.7008 -0.0501]]
+ [ 0.5995 0.0824]
+ [-0.4903 0.0957]
+ [-0.7366 -0.0547]
+ [ 0.2776 -0.1108]
+ [ 0.8123 0.0228]
+ [-0.0446 0.1168]
+ [-0.8209 0.0106]
+ [-0.1896 -0.1131]
+ [ 0.7626 -0.0429]
+ [ 0.4062 0.1004]
+ [-0.6427 0.0713]
+ [-0.5875 -0.0795]
+ [ 0.4717 -0.0936]
+ [ 0.7193 0.0524]
+ [-0.2642 0.1081]
+ [-0.7912 -0.0213]
+ [ 0.0373 -0.1136]
+ [ 0.7979 -0.0112]
+ [ 0.1902 0.1098]
+ [-0.7395 0.0425]
+ [-0.4001 -0.0972]
+ [ 0.6215 -0.0700]
+ [ 0.5754 0.0768]
+ [-0.4542 0.0915]
+ [-0.7021 -0.0503]]
diff --git a/pyomo/dae/tests/test_colloc.py b/pyomo/dae/tests/test_colloc.py
index 12941c1046e..688ffe7fe8a 100644
--- a/pyomo/dae/tests/test_colloc.py
+++ b/pyomo/dae/tests/test_colloc.py
@@ -29,12 +29,6 @@
currdir = dirname(abspath(__file__))
exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae'))
-try:
- import numpy
- numpy_available = True
-except ImportError:
- numpy_available = False
-
def repn_to_rounded_dict(repn, digits):
temp = dict()
diff --git a/pyomo/dae/tests/test_contset.py b/pyomo/dae/tests/test_contset.py
index 56bf981e010..a6202afd63e 100644
--- a/pyomo/dae/tests/test_contset.py
+++ b/pyomo/dae/tests/test_contset.py
@@ -2,13 +2,13 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
-#
+#
# Unit Tests for ContinuousSet() Objects
#
@@ -31,13 +31,12 @@ class TestContinuousSet(unittest.TestCase):
def test_init(self):
model = ConcreteModel()
model.t = ContinuousSet(bounds=(0, 1))
- del model.t
+ model = ConcreteModel()
model.t = ContinuousSet(initialize=[1, 2, 3])
- del model.t
-
+
+ model = ConcreteModel()
model.t = ContinuousSet(bounds=(0, 5), initialize=[1, 3, 5])
- del model.t
# Expected ValueError because a ContinuousSet component
# must contain at least two values upon construction
@@ -63,64 +62,83 @@ def test_bad_kwds(self):
def test_valid_declaration(self):
model = ConcreteModel()
model.t = ContinuousSet(bounds=(0, 1))
- self.assertTrue(len(model.t) == 2)
- self.assertTrue(0 in model.t)
- self.assertTrue(1 in model.t)
- del model.t
+ self.assertEqual(len(model.t), 2)
+ self.assertIn(0, model.t)
+ self.assertIn(1, model.t)
+ model = ConcreteModel()
model.t = ContinuousSet(initialize=[1, 2, 3])
- self.assertTrue(len(model.t) == 3)
- self.assertTrue(model.t.first() == 1)
- self.assertTrue(model.t.last() == 3)
- del model.t
-
+ self.assertEqual(len(model.t), 3)
+ self.assertEqual(model.t.first(), 1)
+ self.assertEqual(model.t.last(), 3)
+
+ model = ConcreteModel()
+ model.t = ContinuousSet(bounds=(1, 3), initialize=[1, 2, 3])
+ self.assertEqual(len(model.t), 3)
+ self.assertEqual(model.t.first(), 1)
+ self.assertEqual(model.t.last(), 3)
+
+ model = ConcreteModel()
model.t = ContinuousSet(bounds=(0, 4), initialize=[1, 2, 3])
- self.assertTrue(len(model.t) == 5)
- self.assertTrue(model.t.first() == 0)
- self.assertTrue(model.t.last() == 4)
- del model.t
-
- model.t = ContinuousSet(bounds=(0, 4), initialize=[1, 2, 3, 5])
- self.assertTrue(len(model.t) == 5)
- self.assertTrue(model.t.first() == 0)
- self.assertTrue(model.t.last() == 5)
- self.assertTrue(4 not in model.t)
- del model.t
-
- model.t = ContinuousSet(bounds=(2, 6), initialize=[1, 2, 3, 5])
- self.assertTrue(len(model.t) == 5)
- self.assertTrue(model.t.first() == 1)
- self.assertTrue(model.t.last() == 6)
- del model.t
-
- model.t = ContinuousSet(bounds=(2, 4), initialize=[1, 3, 5])
- self.assertTrue(len(model.t) == 3)
- self.assertTrue(2 not in model.t)
- self.assertTrue(4 not in model.t)
+ self.assertEqual(len(model.t), 5)
+ self.assertEqual(model.t.first(), 0)
+ self.assertEqual(model.t.last(), 4)
+
+ model = ConcreteModel()
+ with self.assertRaisesRegexp(
+ ValueError, "value is not in the domain \[0..4\]"):
+ model.t = ContinuousSet(bounds=(0, 4), initialize=[1, 2, 3, 5])
+ # self.assertEqual(len(model.t), 5)
+ # self.assertEqual(model.t.first(), 0)
+ # self.assertEqual(model.t.last(), 5)
+ # self.assertNotIn(4, model.t)
+ # del model.t
+
+ model = ConcreteModel()
+ with self.assertRaisesRegexp(
+ ValueError, "value is not in the domain \[2..6\]"):
+ model.t = ContinuousSet(bounds=(2, 6), initialize=[1, 2, 3, 5])
+ # self.assertEqual(len(model.t), 5)
+ # self.assertEqual(model.t.first(), 1)
+ # self.assertEqual(model.t.last(), 6)
+ # del model.t
+
+ model = ConcreteModel()
+ with self.assertRaisesRegexp(
+ ValueError, "value is not in the domain \[2..4\]"):
+ model.t = ContinuousSet(bounds=(2, 4), initialize=[1, 3, 5])
+ # self.assertEqual(len(model.t), 3)
+ # self.assertNotIn(2, model.t)
+ # self.assertNotIn(4, model.t)
# test invalid declarations
def test_invalid_declaration(self):
model = ConcreteModel()
model.s = Set(initialize=[1, 2, 3])
-
with self.assertRaises(TypeError):
model.t = ContinuousSet(model.s, bounds=(0, 1))
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(bounds=(0, 0))
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(initialize=[1])
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(bounds=(None, 1))
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(bounds=(0, None))
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(initialize=[(1, 2), (3, 4)])
+ model = ConcreteModel()
with self.assertRaises(ValueError):
model.t = ContinuousSet(initialize=['foo', 'bar'])
@@ -170,9 +188,22 @@ def test_get_lower_element_boundary(self):
temp = m.t.get_lower_element_boundary(0.5)
self.assertIn('Returning the lower bound', log_out.getvalue())
+ def test_duplicate_construct(self):
+ m = ConcreteModel()
+ m.t = ContinuousSet(initialize=[1,2,3])
+ self.assertEqual(m.t, [1,2,3])
+ self.assertEqual(m.t._fe, [1,2,3])
+ m.t.add(1.5)
+ m.t.add(2.5)
+ self.assertEqual(m.t, [1,1.5,2,2.5,3])
+ self.assertEqual(m.t._fe, [1,2,3])
+ m.t.construct()
+ self.assertEqual(m.t, [1,1.5,2,2.5,3])
+ self.assertEqual(m.t._fe, [1,2,3])
+
class TestIO(unittest.TestCase):
-
+
def setUp(self):
#
# Create Model
@@ -203,8 +234,10 @@ def test_io2(self):
OUTPUT.write("end;\n")
OUTPUT.close()
self.model.A = ContinuousSet(bounds=(0, 4))
- self.instance = self.model.create_instance("diffset.dat")
- self.assertEqual(len(self.instance.A), 4)
+ with self.assertRaisesRegexp(
+ ValueError, "The value is not in the domain \[0..4\]"):
+ self.instance = self.model.create_instance("diffset.dat")
+ #self.assertEqual(len(self.instance.A), 4)
def test_io3(self):
OUTPUT = open("diffset.dat", "w")
@@ -213,8 +246,10 @@ def test_io3(self):
OUTPUT.write("end;\n")
OUTPUT.close()
self.model.A = ContinuousSet(bounds=(2, 6))
- self.instance = self.model.create_instance("diffset.dat")
- self.assertEqual(len(self.instance.A), 4)
+ with self.assertRaisesRegexp(
+ ValueError, "The value is not in the domain \[2..6\]"):
+ self.instance = self.model.create_instance("diffset.dat")
+ #self.assertEqual(len(self.instance.A), 4)
def test_io4(self):
OUTPUT = open("diffset.dat", "w")
@@ -223,9 +258,11 @@ def test_io4(self):
OUTPUT.write("end;\n")
OUTPUT.close()
self.model.A = ContinuousSet(bounds=(2, 4))
- self.instance = self.model.create_instance("diffset.dat")
- self.assertEqual(len(self.instance.A), 3)
-
+ with self.assertRaisesRegexp(
+ ValueError, "The value is not in the domain \[2..4\]"):
+ self.instance = self.model.create_instance("diffset.dat")
+ #self.assertEqual(len(self.instance.A), 3)
+
def test_io5(self):
OUTPUT = open("diffset.dat", "w")
OUTPUT.write("data;\n")
diff --git a/pyomo/dae/tests/test_diffvar.py b/pyomo/dae/tests/test_diffvar.py
index e11eeeb007d..98c8b110d63 100644
--- a/pyomo/dae/tests/test_diffvar.py
+++ b/pyomo/dae/tests/test_diffvar.py
@@ -32,7 +32,7 @@ def test_valid(self):
m = ConcreteModel()
m.t = ContinuousSet(bounds=(0, 1))
m.x = ContinuousSet(bounds=(5, 10))
- m.s = Set()
+ m.s = Set(dimen=1)
m.v = Var(m.t)
m.dv = DerivativeVar(m.v)
m.dv2 = DerivativeVar(m.v, wrt=(m.t, m.t))
@@ -42,7 +42,7 @@ def test_valid(self):
self.assertTrue(m.dv._wrt[0] is m.t)
self.assertTrue(m.dv._sVar is m.v)
self.assertTrue(m.v._derivative[('t',)]() is m.dv)
- self.assertTrue(m.dv.type() is DerivativeVar)
+ self.assertTrue(m.dv.ctype is DerivativeVar)
self.assertTrue(m.dv._index is m.t)
self.assertTrue(m.dv2._wrt[0] is m.t)
self.assertTrue(m.dv2._wrt[1] is m.t)
@@ -61,7 +61,7 @@ def test_valid(self):
self.assertTrue(m.dv._wrt[0] is m.t)
self.assertTrue(m.dv._sVar is m.v)
self.assertTrue(m.v._derivative[('t',)]() is m.dv)
- self.assertTrue(m.dv.type() is DerivativeVar)
+ self.assertTrue(m.dv.ctype is DerivativeVar)
self.assertTrue(m.t in m.dv.index_set().set_tuple)
self.assertTrue(m.s in m.dv.index_set().set_tuple)
self.assertTrue(m.dv2._wrt[0] is m.t)
@@ -85,7 +85,7 @@ def test_valid(self):
self.assertTrue(m.v._derivative[('t',)]() is m.dv2)
self.assertTrue(m.v._derivative[('t', 'x')]() is m.dv3)
self.assertTrue(m.v._derivative[('t', 't')]() is m.dv4)
- self.assertTrue(m.dv.type() is DerivativeVar)
+ self.assertTrue(m.dv.ctype is DerivativeVar)
self.assertTrue(m.x in m.dv.index_set().set_tuple)
self.assertTrue(m.t in m.dv.index_set().set_tuple)
self.assertTrue(m.dv3._wrt[0] is m.t)
@@ -163,15 +163,15 @@ def test_reclassification(self):
TransformationFactory('dae.finite_difference').apply_to(m, wrt=m.t)
- self.assertTrue(m.dv.type() is Var)
- self.assertTrue(m.dv2.type() is Var)
+ self.assertTrue(m.dv.ctype is Var)
+ self.assertTrue(m.dv2.ctype is Var)
self.assertTrue(m.dv.is_fully_discretized())
self.assertTrue(m.dv2.is_fully_discretized())
- self.assertTrue(m.dv3.type() is DerivativeVar)
+ self.assertTrue(m.dv3.ctype is DerivativeVar)
self.assertFalse(m.dv3.is_fully_discretized())
TransformationFactory('dae.collocation').apply_to(m, wrt=m.x)
- self.assertTrue(m.dv3.type() is Var)
+ self.assertTrue(m.dv3.ctype is Var)
self.assertTrue(m.dv3.is_fully_discretized())
diff --git a/pyomo/dae/tests/test_finite_diff.py b/pyomo/dae/tests/test_finite_diff.py
index 10c7a1332dc..dbe20a44f7c 100644
--- a/pyomo/dae/tests/test_finite_diff.py
+++ b/pyomo/dae/tests/test_finite_diff.py
@@ -51,19 +51,19 @@ def test_disc_single_index_backward(self):
disc.apply_to(m, nfe=5)
self.assertTrue(hasattr(m, 'dv1_disc_eq'))
- self.assertTrue(len(m.dv1_disc_eq) == 5)
- self.assertTrue(len(m.v1) == 6)
+ self.assertEqual(len(m.dv1_disc_eq), 5)
+ self.assertEqual(len(m.v1), 6)
expected_disc_points = [0, 2.0, 4.0, 6.0, 8.0, 10]
disc_info = m.t.get_discretization_info()
- self.assertTrue(disc_info['scheme'] == 'BACKWARD Difference')
+ self.assertEqual(disc_info['scheme'], 'BACKWARD Difference')
for idx, val in enumerate(list(m.t)):
self.assertAlmostEqual(val, expected_disc_points[idx])
self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
- self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars)
output = \
"""\
@@ -88,12 +88,12 @@ def test_disc_second_order_backward(self):
disc.apply_to(m, nfe=2)
self.assertTrue(hasattr(m, 'dv1dt2_disc_eq'))
- self.assertTrue(len(m.dv1dt2_disc_eq) == 1)
- self.assertTrue(len(m.v1) == 3)
+ self.assertEqual(len(m.dv1dt2_disc_eq), 1)
+ self.assertEqual(len(m.v1), 3)
self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
- self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
- self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1dt2, m._pyomo_dae_reclassified_derivativevars)
output = \
"""\
@@ -113,19 +113,19 @@ def test_disc_single_index_forward(self):
disc.apply_to(m, nfe=5, scheme='FORWARD')
self.assertTrue(hasattr(m, 'dv1_disc_eq'))
- self.assertTrue(len(m.dv1_disc_eq) == 5)
- self.assertTrue(len(m.v1) == 6)
+ self.assertEqual(len(m.dv1_disc_eq), 5)
+ self.assertEqual(len(m.v1), 6)
expected_disc_points = [0, 2.0, 4.0, 6.0, 8.0, 10]
disc_info = m.t.get_discretization_info()
- self.assertTrue(disc_info['scheme'] == 'FORWARD Difference')
+ self.assertEqual(disc_info['scheme'], 'FORWARD Difference')
for idx, val in enumerate(list(m.t)):
self.assertAlmostEqual(val, expected_disc_points[idx])
self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
- self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars)
output = \
"""\
@@ -150,12 +150,12 @@ def test_disc_second_order_forward(self):
disc.apply_to(m, nfe=2, scheme='FORWARD')
self.assertTrue(hasattr(m, 'dv1dt2_disc_eq'))
- self.assertTrue(len(m.dv1dt2_disc_eq) == 1)
- self.assertTrue(len(m.v1) == 3)
+ self.assertEqual(len(m.dv1dt2_disc_eq), 1)
+ self.assertEqual(len(m.v1), 3)
self.assertTrue(hasattr(m, '_pyomo_dae_reclassified_derivativevars'))
- self.assertTrue(m.dv1 in m._pyomo_dae_reclassified_derivativevars)
- self.assertTrue(m.dv1dt2 in m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1, m._pyomo_dae_reclassified_derivativevars)
+ self.assertIn(m.dv1dt2, m._pyomo_dae_reclassified_derivativevars)
output = \
"""\
@@ -175,13 +175,13 @@ def test_disc_single_index_central(self):
disc.apply_to(m, nfe=5, scheme='CENTRAL')
self.assertTrue(hasattr(m, 'dv1_disc_eq'))
- self.assertTrue(len(m.dv1_disc_eq) == 4)
- self.assertTrue(len(m.v1) == 6)
+ self.assertEqual(len(m.dv1_disc_eq), 4)
+ self.assertEqual(len(m.v1), 6)
expected_disc_points = [0, 2.0, 4.0, 6.0, 8.0, 10]
disc_info = m.t.get_discretization_info()
- self.assertTrue(disc_info['scheme'] == 'CENTRAL Difference')
+ self.assertEqual(disc_info['scheme'], 'CENTRAL Difference')
for idx, val in enumerate(list(m.t)):
self.assertAlmostEqual(val, expected_disc_points[idx])
@@ -208,8 +208,8 @@ def test_disc_second_order_central(self):
disc.apply_to(m, nfe=2, scheme='CENTRAL')
self.assertTrue(hasattr(m, 'dv1dt2_disc_eq'))
- self.assertTrue(len(m.dv1dt2_disc_eq) == 1)
- self.assertTrue(len(m.v1) == 3)
+ self.assertEqual(len(m.dv1dt2_disc_eq), 1)
+ self.assertEqual(len(m.v1), 3)
output = \
"""\
@@ -232,13 +232,13 @@ def test_disc_multi_index(self):
self.assertTrue(hasattr(m, 'dv1_disc_eq'))
self.assertTrue(hasattr(m, 'dv2_disc_eq'))
- self.assertTrue(len(m.dv2_disc_eq) == 15)
- self.assertTrue(len(m.v2) == 18)
+ self.assertEqual(len(m.dv2_disc_eq), 15)
+ self.assertEqual(len(m.v2), 18)
expected_disc_points = [0, 2.0, 4.0, 6.0, 8.0, 10]
disc_info = m.t.get_discretization_info()
- self.assertTrue(disc_info['scheme'] == 'BACKWARD Difference')
+ self.assertEqual(disc_info['scheme'], 'BACKWARD Difference')
for idx, val in enumerate(list(m.t)):
self.assertAlmostEqual(val, expected_disc_points[idx])
@@ -256,9 +256,9 @@ def test_disc_multi_index2(self):
self.assertTrue(hasattr(m, 'dv2dt_disc_eq'))
self.assertTrue(hasattr(m, 'dv2dt2_disc_eq'))
- self.assertTrue(len(m.dv2dt_disc_eq) == 6)
- self.assertTrue(len(m.dv2dt2_disc_eq) == 6)
- self.assertTrue(len(m.v2) == 9)
+ self.assertEqual(len(m.dv2dt_disc_eq), 6)
+ self.assertEqual(len(m.dv2dt2_disc_eq), 6)
+ self.assertEqual(len(m.v2), 9)
expected_t_disc_points = [0, 5.0, 10]
expected_t2_disc_points = [0, 2.5, 5]
@@ -285,15 +285,15 @@ def test_disc_multidimen_index(self):
self.assertTrue(hasattr(m, 'dv1_disc_eq'))
self.assertTrue(hasattr(m, 'dv2_disc_eq'))
self.assertTrue(hasattr(m, 'dv3_disc_eq'))
- self.assertTrue(len(m.dv2_disc_eq) == 15)
- self.assertTrue(len(m.v2) == 18)
- self.assertTrue(len(m.dv3_disc_eq) == 15)
- self.assertTrue(len(m.v3) == 18)
+ self.assertEqual(len(m.dv2_disc_eq), 15)
+ self.assertEqual(len(m.v2), 18)
+ self.assertEqual(len(m.dv3_disc_eq), 15)
+ self.assertEqual(len(m.v3), 18)
expected_disc_points = [0, 2.0, 4.0, 6.0, 8.0, 10]
disc_info = m.t.get_discretization_info()
- self.assertTrue(disc_info['scheme'] == 'BACKWARD Difference')
+ self.assertEqual(disc_info['scheme'], 'BACKWARD Difference')
for idx, val in enumerate(list(m.t)):
self.assertAlmostEqual(val, expected_disc_points[idx])
diff --git a/pyomo/dae/tests/test_flatten.py b/pyomo/dae/tests/test_flatten.py
new file mode 100644
index 00000000000..04dbc76f269
--- /dev/null
+++ b/pyomo/dae/tests/test_flatten.py
@@ -0,0 +1,157 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+import pyutilib.th as unittest
+
+from pyomo.environ import ConcreteModel, Block, Var, Reference, Set
+from pyomo.dae import ContinuousSet
+# This import will have to change when we decide where this should go...
+from pyomo.dae.flatten import flatten_dae_variables
+
+class TestCategorize(unittest.TestCase):
+ def _hashRef(self, ref):
+ return tuple(sorted(id(_) for _ in ref.values()))
+
+ def test_flat_model(self):
+ m = ConcreteModel()
+ m.T = ContinuousSet(bounds=(0,1))
+ m.x = Var()
+ m.y = Var([1,2])
+ m.a = Var(m.T)
+ m.b = Var(m.T, [1,2])
+ m.c = Var([3,4], m.T)
+
+ regular, time = flatten_dae_variables(m, m.T)
+ regular_id = set(id(_) for _ in regular)
+ self.assertEqual(len(regular), 3)
+ self.assertIn(id(m.x), regular_id)
+ self.assertIn(id(m.y[1]), regular_id)
+ self.assertIn(id(m.y[2]), regular_id)
+ # Output for debugging
+ #for v in time:
+ # v.pprint()
+ # for _ in v.values():
+    #        print(" -> ", _.name)
+ ref_data = {
+ self._hashRef(Reference(m.a[:])),
+ self._hashRef(Reference(m.b[:,1])),
+ self._hashRef(Reference(m.b[:,2])),
+ self._hashRef(Reference(m.c[3,:])),
+ self._hashRef(Reference(m.c[4,:])),
+ }
+ self.assertEqual(len(time), len(ref_data))
+ for ref in time:
+ self.assertIn(self._hashRef(ref), ref_data)
+
+ def test_1level_model(self):
+ m = ConcreteModel()
+ m.T = ContinuousSet(bounds=(0,1))
+ @m.Block([1,2],m.T)
+ def B(b, i, t):
+ b.x = Var(list(range(2*i, 2*i+2)))
+
+ regular, time = flatten_dae_variables(m, m.T)
+ self.assertEqual(len(regular), 0)
+ # Output for debugging
+ #for v in time:
+ # v.pprint()
+ # for _ in v.values():
+    #        print(" -> ", _.name)
+ ref_data = {
+ self._hashRef(Reference(m.B[1,:].x[2])),
+ self._hashRef(Reference(m.B[1,:].x[3])),
+ self._hashRef(Reference(m.B[2,:].x[4])),
+ self._hashRef(Reference(m.B[2,:].x[5])),
+ }
+ self.assertEqual(len(time), len(ref_data))
+ for ref in time:
+ self.assertIn(self._hashRef(ref), ref_data)
+
+
+ def test_2level_model(self):
+ m = ConcreteModel()
+ m.T = ContinuousSet(bounds=(0,1))
+ @m.Block([1,2],m.T)
+ def B(b, i, t):
+ @b.Block(list(range(2*i, 2*i+2)))
+ def bb(bb, j):
+ bb.y = Var([10,11])
+ b.x = Var(list(range(2*i, 2*i+2)))
+
+ regular, time = flatten_dae_variables(m, m.T)
+ self.assertEqual(len(regular), 0)
+ # Output for debugging
+ #for v in time:
+ # v.pprint()
+ # for _ in v.values():
+    #        print(" -> ", _.name)
+ ref_data = {
+ self._hashRef(Reference(m.B[1,:].x[2])),
+ self._hashRef(Reference(m.B[1,:].x[3])),
+ self._hashRef(Reference(m.B[2,:].x[4])),
+ self._hashRef(Reference(m.B[2,:].x[5])),
+ self._hashRef(Reference(m.B[1,:].bb[2].y[10])),
+ self._hashRef(Reference(m.B[1,:].bb[2].y[11])),
+ self._hashRef(Reference(m.B[1,:].bb[3].y[10])),
+ self._hashRef(Reference(m.B[1,:].bb[3].y[11])),
+ self._hashRef(Reference(m.B[2,:].bb[4].y[10])),
+ self._hashRef(Reference(m.B[2,:].bb[4].y[11])),
+ self._hashRef(Reference(m.B[2,:].bb[5].y[10])),
+ self._hashRef(Reference(m.B[2,:].bb[5].y[11])),
+ }
+ self.assertEqual(len(time), len(ref_data))
+ for ref in time:
+ self.assertIn(self._hashRef(ref), ref_data)
+
+
+ def test_2dim_set(self):
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0,1))
+
+ m.v = Var(m.time, [('a',1), ('b',2)])
+
+ scalar, dae = flatten_dae_variables(m, m.time)
+ self.assertEqual(len(scalar), 0)
+ ref_data = {
+ self._hashRef(Reference(m.v[:,'a',1])),
+ self._hashRef(Reference(m.v[:,'b',2])),
+ }
+ self.assertEqual(len(dae), len(ref_data))
+ for ref in dae:
+ self.assertIn(self._hashRef(ref), ref_data)
+
+
+ def test_indexed_block(self):
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0,1))
+ m.comp = Set(initialize=['a', 'b'])
+
+ def bb_rule(bb, t):
+ bb.dae_var = Var()
+
+ def b_rule(b, c):
+ b.bb = Block(m.time, rule=bb_rule)
+
+ m.b = Block(m.comp, rule=b_rule)
+
+ scalar, dae = flatten_dae_variables(m, m.time)
+ self.assertEqual(len(scalar), 0)
+ ref_data = {
+ self._hashRef(Reference(m.b['a'].bb[:].dae_var)),
+ self._hashRef(Reference(m.b['b'].bb[:].dae_var)),
+ }
+ self.assertEqual(len(dae), len(ref_data))
+ for ref in dae:
+ self.assertIn(self._hashRef(ref), ref_data)
+
+ # TODO: Add tests for Sets with dimen==None
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/pyomo/dae/tests/test_initialization.py b/pyomo/dae/tests/test_initialization.py
new file mode 100644
index 00000000000..08425902b38
--- /dev/null
+++ b/pyomo/dae/tests/test_initialization.py
@@ -0,0 +1,115 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+"""
+Unit Tests for pyomo.dae.init_cond
+"""
+import os
+from os.path import abspath, dirname
+
+from six import StringIO
+
+import pyutilib.th as unittest
+
+from pyomo.core.base import *
+from pyomo.environ import SolverFactory
+from pyomo.common.log import LoggingIntercept
+from pyomo.dae import *
+from pyomo.dae.initialization import *
+from pyomo.core.kernel.component_map import ComponentMap
+
+currdir = dirname(abspath(__file__)) + os.sep
+
+ipopt_available = SolverFactory('ipopt').available()
+
+
+def make_model():
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0, 10))
+ m.space = ContinuousSet(bounds=(0, 5))
+ m.set1 = Set(initialize=['a', 'b', 'c'])
+ m.set2 = Set(initialize=['d', 'e', 'f'])
+ m.fs = Block()
+
+ m.fs.v0 = Var(m.space, initialize=1)
+
+ @m.fs.Block()
+ def b1(b):
+ b.v = Var(m.time, m.space, initialize=1)
+ b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0)
+
+ b.con = Constraint(m.time, m.space,
+ rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x])
+ # Inconsistent
+
+ @b.Block(m.time)
+ def b2(b, t):
+ b.v = Var(initialize=2)
+
+ @m.fs.Block(m.time, m.space)
+ def b2(b, t, x):
+ b.v = Var(m.set1, initialize=2)
+
+ @b.Block(m.set1)
+ def b3(b, c):
+ b.v = Var(m.set2, initialize=3)
+
+ @b.Constraint(m.set2)
+ def con(b, s):
+ return (5*b.v[s] ==
+ m.fs.b2[m.time.first(), m.space.first()].v[c])
+ # inconsistent
+
+ @m.fs.Constraint(m.time)
+ def con1(fs, t):
+ return fs.b1.v[t, m.space.last()] == 5
+ # Will be inconsistent
+
+ @m.fs.Constraint(m.space)
+ def con2(fs, x):
+ return fs.b1.v[m.time.first(), x] == fs.v0[x]
+ # will be consistent
+
+ disc = TransformationFactory('dae.collocation')
+ disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+ disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+
+ return m
+
+
+class TestDaeInitCond(unittest.TestCase):
+
+ def test_get_inconsistent_initial_conditions(self):
+ m = make_model()
+ inconsistent = get_inconsistent_initial_conditions(m, m.time)
+
+ self.assertIn(m.fs.b1.con[m.time[1], m.space[1]], inconsistent)
+ self.assertIn(m.fs.b2[m.time[1], m.space[1]].b3['a'].con['d'],
+ inconsistent)
+ self.assertIn(m.fs.con1[m.time[1]], inconsistent)
+ self.assertNotIn(m.fs.con2[m.space[1]], inconsistent)
+
+
+ @unittest.skipIf(not ipopt_available, 'ipopt is not available')
+ def test_solve_consistent_initial_conditions(self):
+ m = make_model()
+ solver = SolverFactory('ipopt')
+ solve_consistent_initial_conditions(m, m.time, solver)
+ inconsistent = get_inconsistent_initial_conditions(m, m.time)
+ self.assertFalse(inconsistent)
+
+ self.assertTrue(m.fs.con1[m.time[1]].active)
+ self.assertTrue(m.fs.con1[m.time[3]].active)
+ self.assertTrue(m.fs.b1.con[m.time[1], m.space[1]].active)
+ self.assertTrue(m.fs.b1.con[m.time[3], m.space[1]].active)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/pyomo/dae/tests/test_integral.py b/pyomo/dae/tests/test_integral.py
index 0d767f25250..dc5fd2f1312 100644
--- a/pyomo/dae/tests/test_integral.py
+++ b/pyomo/dae/tests/test_integral.py
@@ -73,10 +73,10 @@ def _int4(m, x):
self.assertEqual(len(m.int2), 3)
self.assertEqual(len(m.int3), 2)
self.assertEqual(len(m.int4), 1)
- self.assertTrue(m.int1.type() is Integral)
- self.assertTrue(m.int2.type() is Integral)
- self.assertTrue(m.int3.type() is Integral)
- self.assertTrue(m.int4.type() is Integral)
+ self.assertTrue(m.int1.ctype is Integral)
+ self.assertTrue(m.int2.ctype is Integral)
+ self.assertTrue(m.int3.ctype is Integral)
+ self.assertTrue(m.int4.ctype is Integral)
repn = generate_standard_repn(m.int1.expr)
self.assertEqual(repn.linear_coefs, (0.5, 0.5))
@@ -185,20 +185,20 @@ def _int4(m, x):
self.assertFalse(m.int3.is_fully_discretized())
self.assertFalse(m.int4.is_fully_discretized())
- self.assertTrue(m.int1.type() is Integral)
- self.assertTrue(m.int2.type() is Integral)
- self.assertTrue(m.int3.type() is Integral)
- self.assertTrue(m.int4.type() is Integral)
+ self.assertTrue(m.int1.ctype is Integral)
+ self.assertTrue(m.int2.ctype is Integral)
+ self.assertTrue(m.int3.ctype is Integral)
+ self.assertTrue(m.int4.ctype is Integral)
TransformationFactory('dae.finite_difference').apply_to(m, wrt=m.x)
self.assertTrue(m.int3.is_fully_discretized())
self.assertTrue(m.int4.is_fully_discretized())
- self.assertTrue(m.int1.type() is Expression)
- self.assertTrue(m.int2.type() is Expression)
- self.assertTrue(m.int3.type() is Expression)
- self.assertTrue(m.int4.type() is Expression)
+ self.assertTrue(m.int1.ctype is Expression)
+ self.assertTrue(m.int2.ctype is Expression)
+ self.assertTrue(m.int3.ctype is Expression)
+ self.assertTrue(m.int4.ctype is Expression)
# test DerivativeVar reclassification after discretization
def test_reclassification_collocation(self):
@@ -242,20 +242,20 @@ def _int4(m, x):
self.assertFalse(m.int3.is_fully_discretized())
self.assertFalse(m.int4.is_fully_discretized())
- self.assertTrue(m.int1.type() is Integral)
- self.assertTrue(m.int2.type() is Integral)
- self.assertTrue(m.int3.type() is Integral)
- self.assertTrue(m.int4.type() is Integral)
+ self.assertTrue(m.int1.ctype is Integral)
+ self.assertTrue(m.int2.ctype is Integral)
+ self.assertTrue(m.int3.ctype is Integral)
+ self.assertTrue(m.int4.ctype is Integral)
TransformationFactory('dae.collocation').apply_to(m, wrt=m.x)
self.assertTrue(m.int3.is_fully_discretized())
self.assertTrue(m.int4.is_fully_discretized())
- self.assertTrue(m.int1.type() is Expression)
- self.assertTrue(m.int2.type() is Expression)
- self.assertTrue(m.int3.type() is Expression)
- self.assertTrue(m.int4.type() is Expression)
+ self.assertTrue(m.int1.ctype is Expression)
+ self.assertTrue(m.int2.ctype is Expression)
+ self.assertTrue(m.int3.ctype is Expression)
+ self.assertTrue(m.int4.ctype is Expression)
if __name__ == "__main__":
diff --git a/pyomo/dae/tests/test_misc.py b/pyomo/dae/tests/test_misc.py
index 38a55b5cb61..525fe0675f3 100644
--- a/pyomo/dae/tests/test_misc.py
+++ b/pyomo/dae/tests/test_misc.py
@@ -1007,6 +1007,6 @@ def test_get_index_information(self):
self.assertEqual(index_getter('a',1,0),(2.0,'a'))
-
+
if __name__ == "__main__":
unittest.main()
diff --git a/pyomo/dae/tests/test_set_utils.py b/pyomo/dae/tests/test_set_utils.py
new file mode 100644
index 00000000000..182192f7ee8
--- /dev/null
+++ b/pyomo/dae/tests/test_set_utils.py
@@ -0,0 +1,338 @@
+# ___________________________________________________________________________
+#
+# Pyomo: Python Optimization Modeling Objects
+# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# rights in this software.
+# This software is distributed under the 3-clause BSD License.
+# ___________________________________________________________________________
+
+"""
+Unit Tests for pyomo.dae.set_utils
+"""
+import os
+from os.path import abspath, dirname
+
+from six import StringIO
+
+import pyutilib.th as unittest
+
+from pyomo.core.base import (Block, Constraint, ConcreteModel, Var, Set,
+ TransformationFactory)
+from pyomo.common.log import LoggingIntercept
+from pyomo.dae import *
+from pyomo.dae.set_utils import *
+from pyomo.core.kernel.component_map import ComponentMap
+
+currdir = dirname(abspath(__file__)) + os.sep
+
+
+def make_model():
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0, 10))
+ m.space = ContinuousSet(bounds=(0, 5))
+ m.set1 = Set(initialize=['a', 'b', 'c'])
+ m.set2 = Set(initialize=['d', 'e', 'f'])
+ m.fs = Block()
+
+ m.fs.v0 = Var(m.space, initialize=1)
+
+ @m.fs.Block()
+ def b1(b):
+ b.v = Var(m.time, m.space, initialize=1)
+ b.dv = DerivativeVar(b.v, wrt=m.time, initialize=0)
+
+ b.con = Constraint(m.time, m.space,
+ rule=lambda b, t, x: b.dv[t, x] == 7 - b.v[t, x])
+ # Inconsistent
+
+ @b.Block(m.time)
+ def b2(b, t):
+ b.v = Var(initialize=2)
+
+ @m.fs.Block(m.time, m.space)
+ def b2(b, t, x):
+ b.v = Var(m.set1, initialize=2)
+
+ @b.Block(m.set1)
+ def b3(b, c):
+ b.v = Var(m.set2, initialize=3)
+
+ @b.Constraint(m.set2)
+ def con(b, s):
+ return (5*b.v[s] ==
+ m.fs.b2[m.time.first(), m.space.first()].v[c])
+ # inconsistent
+
+ @m.fs.Constraint(m.time)
+ def con1(fs, t):
+ return fs.b1.v[t, m.space.last()] == 5
+ # Will be inconsistent
+
+ @m.fs.Constraint(m.space)
+ def con2(fs, x):
+ return fs.b1.v[m.time.first(), x] == fs.v0[x]
+ # will be consistent
+
+ disc = TransformationFactory('dae.collocation')
+ disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+ disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+
+ return m
+
+
+class TestDaeSetUtils(unittest.TestCase):
+
+ # Test explicit/implicit index detection functions
+ def test_indexed_by(self):
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0, 10))
+ m.space = ContinuousSet(bounds=(0, 10))
+ m.set = Set(initialize=['a', 'b', 'c'])
+ m.set2 = Set(initialize=[('a', 1), ('b', 2)])
+ m.v = Var()
+ m.v1 = Var(m.time)
+ m.v2 = Var(m.time, m.space)
+ m.v3 = Var(m.set, m.space, m.time)
+ m.v4 = Var(m.time, m.set2)
+ m.v5 = Var(m.set2, m.time, m.space)
+
+ @m.Block()
+ def b(b):
+ b.v = Var()
+ b.v1 = Var(m.time)
+ b.v2 = Var(m.time, m.space)
+ b.v3 = Var(m.set, m.space, m.time)
+
+ @m.Block(m.time)
+ def b1(b):
+ b.v = Var()
+ b.v1 = Var(m.space)
+ b.v2 = Var(m.space, m.set)
+
+ @m.Block(m.time, m.space)
+ def b2(b):
+ b.v = Var()
+ b.v1 = Var(m.set)
+
+ @b.Block()
+ def b(bl):
+ bl.v = Var()
+ bl.v1 = Var(m.set)
+ bl.v2 = Var(m.time)
+
+ @m.Block(m.set2, m.time)
+ def b3(b):
+ b.v = Var()
+ b.v1 = Var(m.space)
+
+ @b.Block(m.space)
+ def b(bb):
+ bb.v = Var(m.set)
+
+ disc = TransformationFactory('dae.collocation')
+ disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+ disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+
+ self.assertFalse(is_explicitly_indexed_by(m.v, m.time))
+ self.assertTrue(is_explicitly_indexed_by(m.b.v2, m.space))
+ self.assertTrue(is_explicitly_indexed_by(m.b.v3, m.time, m.space))
+
+ self.assertFalse(is_in_block_indexed_by(m.v1, m.time))
+ self.assertFalse(is_in_block_indexed_by(m.v2, m.set))
+ self.assertTrue(is_in_block_indexed_by(m.b1[m.time[1]].v2, m.time))
+
+ self.assertTrue(is_in_block_indexed_by(
+ m.b2[m.time[1], m.space[1]].b.v1, m.time))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b2[m.time[1], m.space[1]].b.v2, m.time))
+ self.assertTrue(is_explicitly_indexed_by(
+ m.b2[m.time[1], m.space[1]].b.v2, m.time))
+ self.assertFalse(is_in_block_indexed_by(
+ m.b2[m.time[1], m.space[1]].b.v1, m.set))
+
+ self.assertFalse(is_in_block_indexed_by(
+ m.b2[m.time[1], m.space[1]].b.v1,
+ m.space, stop_at=m.b2[m.time[1], m.space[1]]))
+
+ # Explicit indexing with multi-dimensional set:
+ self.assertTrue(is_explicitly_indexed_by(m.v4, m.time, m.set2))
+ self.assertTrue(is_explicitly_indexed_by(m.v5, m.time, m.set2, m.space))
+
+ # Implicit indexing with multi-dimensional set:
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['a', 1, m.time[1]].v, m.set2))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['a', 1, m.time[1]].v, m.time))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['a', 1, m.time[1]].v1[m.space[1]], m.set2))
+ self.assertFalse(is_in_block_indexed_by(
+ m.b3['a', 1, m.time[1]].v1[m.space[1]], m.space))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set2))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time))
+ self.assertTrue(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.space))
+ self.assertFalse(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.set))
+ self.assertFalse(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time,
+ stop_at=m.b3['b', 2, m.time[2]]))
+ self.assertFalse(is_in_block_indexed_by(
+ m.b3['b', 2, m.time[2]].b[m.space[2]].v['b'], m.time,
+ stop_at=m.b3))
+
+
+ # Test get_index_set_except and _complete_index
+ def test_get_index_set_except(self):
+ '''
+ Tests:
+ For components indexed by 0, 1, 2, 3, 4 sets:
+ get_index_set_except one, then two (if any) of those sets
+ check two items that should be in set_except
+ insert item(s) back into these sets via index_getter
+ '''
+ m = ConcreteModel()
+ m.time = ContinuousSet(bounds=(0, 10))
+ m.space = ContinuousSet(bounds=(0, 10))
+ m.set1 = Set(initialize=['a', 'b', 'c'])
+ m.set2 = Set(initialize=['d', 'e', 'f'])
+ m.v = Var()
+ m.v1 = Var(m.time)
+ m.v2 = Var(m.time, m.space)
+ m.v3 = Var(m.time, m.space, m.set1)
+ m.v4 = Var(m.time, m.space, m.set1, m.set2)
+
+ # Multi-dimensional set:
+ m.set3 = Set(initialize=[('a', 1), ('b', 2)])
+ m.v5 = Var(m.set3)
+ m.v6 = Var(m.time, m.space, m.set3)
+ m.v7 = Var(m.set3, m.space, m.time)
+
+ disc = TransformationFactory('dae.collocation')
+ disc.apply_to(m, wrt=m.time, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+ disc.apply_to(m, wrt=m.space, nfe=5, ncp=2, scheme='LAGRANGE-RADAU')
+
+ # Want this to give a TypeError
+ # info = get_index_set_except(m.v, m.time)
+
+ # Indexed by one set
+ info = get_index_set_except(m.v1, m.time)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue(set_except == [None])
+ # Variable is not indexed by anything except time
+ # Test that index_getter returns only the new value given,
+ # regardless of whether it was part of the set excluded (time):
+ self.assertEqual(index_getter((), -1), -1)
+
+ # Indexed by two sets
+ info = get_index_set_except(m.v2, m.time)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue(m.space[1] in set_except
+ and m.space.last() in set_except)
+ # Here (2,) is the partial index, corresponding to space.
+ # Can be provided as a scalar or tuple. 4, the time index,
+ # should be inserted before (2,)
+ self.assertEqual(index_getter((2,), 4), (4, 2))
+ self.assertEqual(index_getter(2, 4), (4, 2))
+
+ # Case where every set is "omitted," now for multiple sets
+ info = get_index_set_except(m.v2, m.space, m.time)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue(set_except == [None])
+ # 5, 7 are the desired index values for space, time
+ # index_getter should put them in the right order for m.v2,
+ # even if they are not valid indices for m.v2
+ self.assertEqual(index_getter((), 5, 7), (7, 5))
+
+ # Indexed by three sets
+ info = get_index_set_except(m.v3, m.time)
+ # In this case set_except is a product of the two non-time sets
+ # indexing v3
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue((m.space[1], 'b') in set_except
+ and (m.space.last(), 'a') in set_except)
+ # index_getter inserts a scalar index into an index of length 2
+ self.assertEqual(index_getter((2, 'b'), 7), (7, 2, 'b'))
+
+ info = get_index_set_except(m.v3, m.space, m.time)
+ # Two sets omitted. Now set_except is just set1
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue('a' in set_except)
+ # index_getter inserts the two new indices in the right order
+ self.assertEqual(index_getter('b', 1.2, 1.1), (1.1, 1.2, 'b'))
+
+ # Indexed by four sets
+ info = get_index_set_except(m.v4, m.set1, m.space)
+ # set_except is a product, and there are two indices to insert
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue((m.time[1], 'd') in set_except)
+ self.assertEqual(index_getter((4, 'f'), 'b', 8), (4, 8, 'b', 'f'))
+
+ # The intended usage of this function looks something like:
+ index_set = m.v4.index_set()
+ for partial_index in set_except:
+ complete_index = index_getter(partial_index, 'a', m.space[2])
+ self.assertTrue(complete_index in index_set)
+ # Do something for every index of v4 at 'a' and space[2]
+
+ # Indexed by a multi-dimensional set
+ info = get_index_set_except(m.v5, m.set3)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertEqual(set_except, [None])
+ self.assertEqual(index_getter((), ('a', 1)), ('a', 1))
+
+ info = get_index_set_except(m.v6, m.set3, m.time)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertTrue(m.space[1] in set_except)
+ self.assertEqual(index_getter(m.space[1], ('b', 2), m.time[1]),
+ (m.time[1], m.space[1], 'b', 2))
+
+ info = get_index_set_except(m.v7, m.time)
+ set_except = info['set_except']
+ index_getter = info['index_getter']
+ self.assertIn(('a', 1, m.space[1]), set_except)
+ self.assertEqual(index_getter(('a', 1, m.space[1]), m.time[1]),
+ ('a', 1, m.space[1], m.time[1]))
+
+ m.v8 = Var(m.time, m.set3, m.time)
+ with self.assertRaises(ValueError):
+ info = get_index_set_except(m.v8, m.time)
+ with self.assertRaises(ValueError):
+ info = get_index_set_except(m.v8, m.space)
+
+ def test_deactivate_model_at(self):
+ m = make_model()
+
+ deactivate_model_at(m, m.time, m.time[2])
+ self.assertTrue(m.fs.con1[m.time[1]].active)
+ self.assertFalse(m.fs.con1[m.time[2]].active)
+ self.assertTrue(m.fs.con2[m.space[1]].active)
+ self.assertFalse(m.fs.b1.con[m.time[2], m.space[1]].active)
+ self.assertFalse(m.fs.b2[m.time[2], m.space.last()].active)
+ self.assertTrue(m.fs.b2[m.time[2], m.space.last()].b3['a'].con['e'].active)
+
+ deactivate_model_at(m, m.time, [m.time[1], m.time[3]])
+ # disc equations at time.first()
+ self.assertFalse(m.fs.con1[m.time[1]].active)
+ self.assertFalse(m.fs.con1[m.time[3]].active)
+ self.assertFalse(m.fs.b1.con[m.time[1], m.space[1]].active)
+ self.assertFalse(m.fs.b1.con[m.time[3], m.space[1]].active)
+
+ with self.assertRaises(KeyError):
+ deactivate_model_at(m, m.time, m.time[1], allow_skip=False,
+ suppress_warnings=True)
+
+
+if __name__ == "__main__":
+ unittest.main()
diff --git a/pyomo/dae/tests/test_simulator.py b/pyomo/dae/tests/test_simulator.py
index f97278df0dd..9d4a9906443 100644
--- a/pyomo/dae/tests/test_simulator.py
+++ b/pyomo/dae/tests/test_simulator.py
@@ -18,6 +18,10 @@
from pyomo.dae import ContinuousSet, DerivativeVar
from pyomo.dae.diffvar import DAE_Error
from pyomo.dae.simulator import (
+ is_pypy,
+ scipy_available,
+ casadi,
+ casadi_available,
Simulator,
_check_getitemexpression,
_check_productexpression,
@@ -25,7 +29,7 @@
_check_viewsumexpression,
substitute_pyomo2casadi,
)
-from pyomo.core.base.template_expr import (
+from pyomo.core.expr.template_expr import (
IndexTemplate,
_GetItemIndexer,
)
@@ -38,21 +42,8 @@
currdir = dirname(abspath(__file__))
exdir = normpath(join(currdir, '..', '..', '..', 'examples', 'dae'))
-try:
- import casadi
- casadi_available = True
-except ImportError:
- casadi_available = False
-
-try:
- import platform
- if platform.python_implementation() == "PyPy":
- # Scipy is importable into PyPy, but ODE integrators don't work. (2/18)
- raise ImportError
- import scipy
- scipy_available = True
-except ImportError:
- scipy_available = False
+# We will skip tests unless we have scipy and not running in pypy
+scipy_available = scipy_available and not is_pypy
class TestSimulator(unittest.TestCase):
@@ -931,8 +922,8 @@ def test_check_getitemexpression(self):
temp = _check_getitemexpression(e, 0)
self.assertIs(e.arg(0), temp[0])
self.assertIs(e.arg(1), temp[1])
- self.assertIs(m.dv, temp[0]._base)
- self.assertIs(m.v, temp[1]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
+ self.assertIs(m.v, temp[1].arg(0))
temp = _check_getitemexpression(e, 1)
self.assertIsNone(temp)
@@ -940,8 +931,8 @@ def test_check_getitemexpression(self):
temp = _check_getitemexpression(e, 1)
self.assertIs(e.arg(0), temp[1])
self.assertIs(e.arg(1), temp[0])
- self.assertIs(m.dv, temp[0]._base)
- self.assertIs(m.v, temp[1]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
+ self.assertIs(m.v, temp[1].arg(0))
temp = _check_getitemexpression(e, 0)
self.assertIsNone(temp)
@@ -963,36 +954,36 @@ def test_check_productexpression(self):
# Check multiplication by constant
e = 5 * m.dv[t] == m.v[t]
temp = _check_productexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
e = m.v[t] == 5 * m.dv[t]
temp = _check_productexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
# Check multiplication by fixed param
e = m.p * m.dv[t] == m.v[t]
temp = _check_productexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
e = m.v[t] == m.p * m.dv[t]
temp = _check_productexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
# Check multiplication by mutable param
e = m.mp * m.dv[t] == m.v[t]
temp = _check_productexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal
self.assertIs(e.arg(1), temp[1].arg(0))
e = m.v[t] == m.mp * m.dv[t]
temp = _check_productexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(m.mp, temp[1].arg(1)) # Reciprocal
self.assertIs(e.arg(0), temp[1].arg(0))
@@ -1000,14 +991,14 @@ def test_check_productexpression(self):
# Check multiplication by var
e = m.y * m.dv[t] / m.z == m.v[t]
temp = _check_productexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(e.arg(1), temp[1].arg(0).arg(0))
self.assertIs(m.z, temp[1].arg(0).arg(1))
e = m.v[t] == m.y * m.dv[t] / m.z
temp = _check_productexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(e.arg(0), temp[1].arg(0).arg(0))
self.assertIs(m.z, temp[1].arg(0).arg(1))
@@ -1015,14 +1006,14 @@ def test_check_productexpression(self):
# Check having the DerivativeVar in the denominator
e = m.y / (m.dv[t] * m.z) == m.mp
temp = _check_productexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(m.y, temp[1].arg(0))
self.assertIs(e.arg(1), temp[1].arg(1).arg(0))
e = m.mp == m.y / (m.dv[t] * m.z)
temp = _check_productexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(m.y, temp[1].arg(0))
self.assertIs(e.arg(0), temp[1].arg(1).arg(0))
@@ -1044,8 +1035,8 @@ def test_check_negationexpression(self):
temp = _check_negationexpression(e, 0)
self.assertIs(e.arg(0).arg(0), temp[0])
self.assertIs(e.arg(1), temp[1].arg(0))
- self.assertIs(m.dv, temp[0]._base)
- self.assertIs(m.v, temp[1].arg(0)._base)
+ self.assertIs(m.dv, temp[0].arg(0))
+ self.assertIs(m.v, temp[1].arg(0).arg(0))
temp = _check_negationexpression(e, 1)
self.assertIsNone(temp)
@@ -1053,8 +1044,8 @@ def test_check_negationexpression(self):
temp = _check_negationexpression(e, 1)
self.assertIs(e.arg(0), temp[1].arg(0))
self.assertIs(e.arg(1).arg(0), temp[0])
- self.assertIs(m.dv, temp[0]._base)
- self.assertIs(m.v, temp[1].arg(0)._base)
+ self.assertIs(m.dv, temp[0].arg(0))
+ self.assertIs(m.v, temp[1].arg(0).arg(0))
temp = _check_negationexpression(e, 0)
self.assertIsNone(temp)
@@ -1077,7 +1068,7 @@ def test_check_viewsumexpression(self):
e = m.dv[t] + m.y + m.z == m.v[t]
temp = _check_viewsumexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.SumExpression)
self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression)
self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression)
@@ -1089,7 +1080,7 @@ def test_check_viewsumexpression(self):
e = m.v[t] == m.y + m.dv[t] + m.z
temp = _check_viewsumexpression(e, 1)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.SumExpression)
self.assertIs(type(temp[1].arg(0)), EXPR.GetItemExpression)
self.assertIs(type(temp[1].arg(1)), EXPR.MonomialTermExpression)
@@ -1099,7 +1090,7 @@ def test_check_viewsumexpression(self):
e = 5 * m.dv[t] + 5 * m.y - m.z == m.v[t]
temp = _check_viewsumexpression(e, 0)
- self.assertIs(m.dv, temp[0]._base)
+ self.assertIs(m.dv, temp[0].arg(0))
self.assertIs(type(temp[1]), EXPR.DivisionExpression)
self.assertIs(type(temp[1].arg(0).arg(0)), EXPR.GetItemExpression)
diff --git a/pyomo/dataportal/TableData.py b/pyomo/dataportal/TableData.py
index 14a79062347..486e6500867 100644
--- a/pyomo/dataportal/TableData.py
+++ b/pyomo/dataportal/TableData.py
@@ -106,7 +106,7 @@ def clear(self):
self._info = None
def _set_data(self, headers, rows):
- from pyomo.core.base.sets import Set
+ from pyomo.core.base.set import Set
from pyomo.core.base.param import Param
header_index = []
@@ -221,9 +221,9 @@ def _get_table(self):
from pyomo.core.expr import value
tmp = []
- if not self.options.columns is None:
+ if self.options.columns is not None:
tmp.append(self.options.columns)
- if not self.options.set is None:
+ if self.options.set is not None:
# Create column names
if self.options.columns is None:
cols = []
@@ -231,7 +231,7 @@ def _get_table(self):
cols.append(self.options.set.local_name+str(i))
tmp.append(cols)
# Get rows
- if not self.options.sort is None:
+ if self.options.sort is not None:
for data in sorted(self.options.set):
if self.options.set.dimen > 1:
tmp.append(list(data))
@@ -243,12 +243,11 @@ def _get_table(self):
tmp.append(list(data))
else:
tmp.append([data])
- elif not self.options.param is None:
+ elif self.options.param is not None:
if type(self.options.param) in (list,tuple):
_param = self.options.param
else:
_param = [self.options.param]
- tmp = []
# Collect data
for index in _param[0]:
if index is None:
@@ -267,5 +266,5 @@ def _get_table(self):
cols.append('I'+str(i))
for param in _param:
cols.append(param)
- tmp = [cols] + tmp
+ tmp.insert(0,cols)
return tmp
diff --git a/pyomo/dataportal/parse_datacmds.py b/pyomo/dataportal/parse_datacmds.py
index fa105d38d98..d4a94fb28a6 100644
--- a/pyomo/dataportal/parse_datacmds.py
+++ b/pyomo/dataportal/parse_datacmds.py
@@ -2,14 +2,15 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
__all__ = ['parse_data_commands']
+import bisect
import sys
import os
import os.path
@@ -18,9 +19,12 @@
from inspect import getfile, currentframe
from six.moves import xrange
-from pyutilib.misc import flatten_list
-from pyutilib.ply import t_newline, t_ignore, _find_column, p_error, ply_init
-
+from pyutilib.misc import flatten_list, import_file
+
+from pyomo.common import config
+from pyomo.common.fileutils import this_file_dir
+
+_re_number = r'[-+]?(?:[0-9]+\.?[0-9]*|\.[0-9]+)(?:[eE][-+]?[0-9]+)?'
## -----------------------------------------------------------
##
@@ -29,7 +33,6 @@
## -----------------------------------------------------------
_parse_info = None
-debugging = False
states = (
('data','inclusive'),
@@ -67,11 +70,13 @@
"EQ",
"TR",
"ASTERISK",
+ "NUM_VAL",
#"NONWORD",
- "INT_VAL",
- "FLOAT_VAL",
] + list(reserved.values())
+# Ignore space and tab
+t_ignore = " \t\r"
+
# Regular expression rules
t_COMMA = r","
t_LBRACKET = r"\["
@@ -85,9 +90,29 @@
t_RPAREN = r"\)"
t_ASTERISK = r"\*"
+#
+# Notes on PLY tokenization
+# - token functions (beginning with "t_") are prioritized in the order
+# that they are declared in this module
+#
+def t_newline(t):
+ r'[\n]+'
+ t.lexer.lineno += len(t.value)
+ t.lexer.linepos.extend(t.lexpos+i for i,_ in enumerate(t.value))
+
# Discard comments
+_re_singleline_comment = r'(?:\#[^\n]*)'
+_re_multiline_comment = r'(?:/\*(?:[\n]|.)*?\*/)'
+@lex.TOKEN('|'.join([_re_singleline_comment, _re_multiline_comment]))
def t_COMMENT(t):
- r'(\#[^\n]*)|(/\*(.*?).?(\*/))'
+ # Single-line and multi-line strings
+ nlines = t.value.count('\n')
+ t.lexer.lineno += nlines
+ # We will never need to determine column numbers within this comment
+ # block, so it is sufficient to just worry about the *last* newline
+ # in the comment
+ lastpos = t.lexpos + t.value.rfind('\n')
+ t.lexer.linepos.extend(lastpos for i in range(nlines))
def t_COLONEQ(t):
r':='
@@ -99,38 +124,32 @@ def t_SEMICOLON(t):
t.lexer.begin('INITIAL')
return t
+# Numbers must be followed by a delimiter token (EOF is not a concern,
+# as valid DAT files always end with a ';').
+@lex.TOKEN(_re_number + r'(?=[\s()\[\]{}:;,])')
+def t_NUM_VAL(t):
+ _num = float(t.value)
+ if '.' in t.value:
+ t.value = _num
+ else:
+ _int = int(_num)
+ t.value = _int if _num == _int else _num
+ return t
+
def t_WORDWITHLBRACKET(t):
- r'[a-zA-Z0-9_][a-zA-Z0-9_\.\-]*\['
- if t.value in reserved:
- t.type = reserved[t.value] # Check for reserved words
+ r'[a-zA-Z_][a-zA-Z0-9_\.\-]*\['
return t
def t_WORD(t):
- r'[a-zA-Z_0-9][a-zA-Z_0-9\.+\-]*'
+ r'[a-zA-Z_][a-zA-Z_0-9\.+\-]*'
if t.value in reserved:
t.type = reserved[t.value] # Check for reserved words
return t
def t_STRING(t):
r'[a-zA-Z0-9_\.+\-\\\/]+'
- if t.value in reserved:
- t.type = reserved[t.value] # Check for reserved words
- return t
-
-def t_FLOAT_VAL(t):
- '[-+]?[0-9]+(\.([0-9]+)?([eE][-+]?[0-9]+)?|[eE][-+]?[0-9]+)'
- try:
- t.value = float(t.value)
- #t.type = "FLOAT_VAL"
- return t
- except:
- print("ERROR: "+t.value)
- raise IOError
-
-def t_INT_VAL(t):
- '[-+]?[0-9]+([eE][-+]?[0-9]+)?'
- #t.type = "INT_VAL"
- t.value = int(t.value)
+ # Note: RE guarantees the string has no embedded quotation characters
+ t.value = '"'+t.value+'"'
return t
def t_data_BRACKETEDSTRING(t):
@@ -138,23 +157,40 @@ def t_data_BRACKETEDSTRING(t):
# NO SPACES
# a[1,_df,'foo bar']
# [1,*,'foo bar']
- if t.value in reserved:
- t.type = reserved[t.value] # Check for reserved words
return t
+_re_quoted_str = r'"(?:[^"]|"")*"'
+@lex.TOKEN("|".join([_re_quoted_str, _re_quoted_str.replace('"',"'")]))
def t_QUOTEDSTRING(t):
- r'"([^"]|\"\")*"|\'([^\']|\'\')*\''
- if t.value in reserved:
- t.type = reserved[t.value] # Check for reserved words
+ # Normalize the quotes to use '"', and replace doubled ("escaped")
+ # quotation characters with a single character
+ t.value = '"' + t.value[1:-1].replace(2*t.value[0], t.value[0]) + '"'
return t
#t_NONWORD = r"[^\.A-Za-z0-9,;:=<>\*\(\)\#{}\[\] \n\t\r]+"
# Error handling rule
-def t_error(t): #pragma:nocover
- raise IOError("ERROR: Token %s Value %s Line %s Column %s" % (t.type, t.value, t.lineno, t.lexpos))
- t.lexer.skip(1)
-
+def t_error(t):
+ raise IOError("ERROR: Token %s Value %s Line %s Column %s"
+ % (t.type, t.value, t.lineno, t.lexpos))
+
+## DEBUGGING: uncomment to get tokenization information
+# def _wrap(_name, _fcn):
+# def _wrapper(t):
+# print(_name + ": %s" % (t.value,))
+# return _fcn(t)
+# _wrapper.__doc__ = _fcn.__doc__
+# return _wrapper
+# import inspect
+# for _name in list(globals()):
+# if _name.startswith('t_') and inspect.isfunction(globals()[_name]):
+# globals()[_name] = _wrap(_name, globals()[_name])
+
+def _lex_token_position(t):
+ i = bisect.bisect_left(t.lexer.linepos, t.lexpos)
+ if i:
+ return t.lexpos - t.lexer.linepos[i-1]
+ return t.lexpos
## -----------------------------------------------------------
##
@@ -234,7 +270,7 @@ def p_statement(p):
p[0] = [p[1]]+ [p[2]] + [p[4]]
else:
# Not necessary, but nice to document how statement could end up None
- p[0] = None
+ p[0] = None
#print(p[0])
def p_datastar(p):
@@ -249,19 +285,19 @@ def p_datastar(p):
def p_data(p):
'''
- data : data WORD
+ data : data NUM_VAL
+ | data WORD
| data STRING
| data QUOTEDSTRING
| data BRACKETEDSTRING
| data SET
| data TABLE
| data PARAM
- | data INT_VAL
- | data FLOAT_VAL
| data LPAREN
| data RPAREN
| data COMMA
| data ASTERISK
+ | NUM_VAL
| WORD
| STRING
| QUOTEDSTRING
@@ -269,8 +305,6 @@ def p_data(p):
| SET
| TABLE
| PARAM
- | INT_VAL
- | FLOAT_VAL
| LPAREN
| RPAREN
| COMMA
@@ -282,8 +316,8 @@ def p_data(p):
tmp = p[1]
else:
tmp = p[2]
- if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp:
- tmp = tmp[1:-1]
+ #if type(tmp) is str and tmp[0] == '"' and tmp[-1] == '"' and len(tmp) > 2 and not ' ' in tmp:
+ # tmp = tmp[1:-1]
# Grow items list according to parsed item length
if single_item:
@@ -307,22 +341,20 @@ def p_args(p):
def p_arg(p):
'''
- arg : arg COMMA WORD
+ arg : arg COMMA NUM_VAL
+ | arg COMMA WORD
| arg COMMA STRING
| arg COMMA QUOTEDSTRING
| arg COMMA SET
| arg COMMA TABLE
| arg COMMA PARAM
- | arg COMMA INT_VAL
- | arg COMMA FLOAT_VAL
+ | NUM_VAL
| WORD
| STRING
| QUOTEDSTRING
| SET
| TABLE
| PARAM
- | INT_VAL
- | FLOAT_VAL
'''
# Locate and handle item as necessary
single_item = len(p) == 2
@@ -355,7 +387,8 @@ def p_itemstar(p):
def p_items(p):
'''
- items : items WORD
+ items : items NUM_VAL
+ | items WORD
| items STRING
| items QUOTEDSTRING
| items COMMA
@@ -372,8 +405,7 @@ def p_items(p):
| items SET
| items TABLE
| items PARAM
- | items INT_VAL
- | items FLOAT_VAL
+ | NUM_VAL
| WORD
| STRING
| QUOTEDSTRING
@@ -391,8 +423,6 @@ def p_items(p):
| SET
| TABLE
| PARAM
- | INT_VAL
- | FLOAT_VAL
'''
# Locate and handle item as necessary
single_item = len(p) == 2
@@ -413,6 +443,13 @@ def p_items(p):
tmp_lst.append(tmp)
p[0] = tmp_lst
+def p_error(p):
+ if p is None:
+ tmp = "Syntax error at end of file."
+ else:
+ tmp = "Syntax error at token '%s' with value '%s' (line %s, column %s)"\
+ % (p.type, p.value, p.lineno, _lex_token_position(p))
+ raise IOError(tmp)
# --------------------------------------------------------------
# the DAT file lexer and yaccer only need to be
@@ -430,7 +467,6 @@ def p_items(p):
#
def parse_data_commands(data=None, filename=None, debug=0, outputdir=None):
- global debugging
global dat_lexer
global dat_yaccer
@@ -459,21 +495,21 @@ def parse_data_commands(data=None, filename=None, debug=0, outputdir=None):
os.remove(tabmodule+".py")
if os.path.exists(tabmodule+".pyc"):
os.remove(tabmodule+".pyc")
- debugging=True
dat_lexer = lex.lex()
#
tmpsyspath = sys.path
sys.path.append(outputdir)
- dat_yaccer = yacc.yacc(debug=debug,
- tabmodule=tabmodule,
- outputdir=outputdir,
- optimize=True)
+ dat_yaccer = yacc.yacc(debug=debug,
+ tabmodule=tabmodule,
+ outputdir=outputdir,
+ optimize=True)
sys.path = tmpsyspath
#
# Initialize parse object
#
+ dat_lexer.linepos = []
global _parse_info
_parse_info = {}
_parse_info[None] = []
@@ -481,32 +517,17 @@ def parse_data_commands(data=None, filename=None, debug=0, outputdir=None):
#
# Parse the file
#
- global _parsedata
- if not data is None:
- _parsedata=data
- ply_init(_parsedata)
- dat_yaccer.parse(data, lexer=dat_lexer, debug=debug)
- elif not filename is None:
- f = open(filename, 'r')
- try:
- data = f.read()
- except Exception:
- e = sys.exc_info()[1]
- f.close()
- del f
- raise e
- f.close()
- del f
- _parsedata=data
- ply_init(_parsedata)
- dat_yaccer.parse(data, lexer=dat_lexer, debug=debug)
- else:
- _parse_info = None
- #
- # Disable parsing I/O
- #
- debugging=False
- #print(_parse_info)
+ if filename is not None:
+ if data is not None:
+ raise ValueError("parse_data_commands: cannot specify both "
+ "data and filename arguments")
+ with open(filename, 'r') as FILE:
+ data = FILE.read()
+
+ if data is None:
+ return None
+
+ dat_yaccer.parse(data, lexer=dat_lexer, debug=debug)
return _parse_info
if __name__ == '__main__':
diff --git a/pyomo/dataportal/plugins/db_table.py b/pyomo/dataportal/plugins/db_table.py
index 5ab060575c4..bdf1aaa212a 100644
--- a/pyomo/dataportal/plugins/db_table.py
+++ b/pyomo/dataportal/plugins/db_table.py
@@ -21,33 +21,14 @@
from decimal import Decimal
from six import iteritems
-try:
- import pyodbc
- pyodbc_available=True
-except ImportError:
- pyodbc_available=False
-
-try:
- import pypyodbc
- pypyodbc_available=True
-except Exception:
- pypyodbc_available=False
-
-try:
- import sqlite3
- sqlite3_available=True
-except ImportError:
- sqlite3_available=False
-
-try:
- import pymysql
- pymysql_available=True
-except ImportError:
- pymysql_available=False
-
+from pyomo.common.dependencies import attempt_import
from pyomo.dataportal import TableData
from pyomo.dataportal.factory import DataManagerFactory
+pyodbc, pyodbc_available = attempt_import('pyodbc')
+pypyodbc, pypyodbc_available = attempt_import('pypyodbc')
+sqlite3, sqlite3_available = attempt_import('sqlite3')
+pymysql, pymysql_available = attempt_import('pymysql')
# format=
# using=
diff --git a/pyomo/dataportal/plugins/json_dict.py b/pyomo/dataportal/plugins/json_dict.py
index f8eef1f061d..23e1576bacb 100644
--- a/pyomo/dataportal/plugins/json_dict.py
+++ b/pyomo/dataportal/plugins/json_dict.py
@@ -11,20 +11,16 @@
import os.path
import json
import six
-try:
- import yaml
- yaml_available = True
-except ImportError:
- yaml_available = False
from pyutilib.misc import Options
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
from pyomo.dataportal.factory import DataManagerFactory
def detuplize(d, sort=False):
#print("detuplize %s" % str(d))
- if type(d) in (list,set):
+ if type(d) in (list,tuple,set):
ans = []
for item in d:
if type(item) in (list,tuple,set):
@@ -227,7 +223,7 @@ def read(self):
if not os.path.exists(self.filename):
raise IOError("Cannot find file '%s'" % self.filename)
INPUT = open(self.filename, 'r')
- jdata = yaml.load(INPUT)
+ jdata = yaml.load(INPUT, **yaml_load_args)
INPUT.close()
if jdata is None:
raise IOError("Empty YAML file")
diff --git a/pyomo/dataportal/plugins/sheet.py b/pyomo/dataportal/plugins/sheet.py
index bbafc4fd636..a7d1abb2b05 100644
--- a/pyomo/dataportal/plugins/sheet.py
+++ b/pyomo/dataportal/plugins/sheet.py
@@ -2,49 +2,41 @@
#
# Pyomo: Python Optimization Modeling Objects
# Copyright 2017 National Technology and Engineering Solutions of Sandia, LLC
-# Under the terms of Contract DE-NA0003525 with National Technology and
-# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
+# Under the terms of Contract DE-NA0003525 with National Technology and
+# Engineering Solutions of Sandia, LLC, the U.S. Government retains certain
# rights in this software.
# This software is distributed under the 3-clause BSD License.
# ___________________________________________________________________________
import os.path
import six
-from pyutilib.excel import ExcelSpreadsheet
import pyutilib.common
-
-try:
- import win32com
- win32com_available=True
-except ImportError:
- win32com_available=False
-_excel_available = False #pragma:nocover
-if win32com_available:
- from pyutilib.excel.spreadsheet_win32com import ExcelSpreadsheet_win32com
- tmp = ExcelSpreadsheet_win32com()
- try:
- tmp._excel_dispatch()
- tmp._excel_quit()
- _excel_available = True
- except:
- pass
-try:
- import openpyxl
- openpyxl_available=True
-except (ImportError, SyntaxError):
- # Some versions of openpyxl contain python2.6-incompatible syntax
- openpyxl_available=False
-try:
- import xlrd
- xlrd_available=True
-except ImportError:
- xlrd_available=False
+from pyutilib.excel.spreadsheet import ExcelSpreadsheet, Interfaces
from pyomo.dataportal import TableData
-from pyomo.dataportal.plugins.db_table import pyodbc_available, pyodbc_db_Table, pypyodbc_available, pypyodbc_db_Table
+# from pyomo.dataportal.plugins.db_table import (
+# pyodbc_available, pyodbc_db_Table, pypyodbc_available, pypyodbc_db_Table
+# )
from pyomo.dataportal.factory import DataManagerFactory
+def _attempt_open_excel():
+ if _attempt_open_excel.result is None:
+ from pyutilib.excel.spreadsheet_win32com import (
+ ExcelSpreadsheet_win32com
+ )
+ try:
+ tmp = ExcelSpreadsheet_win32com()
+ tmp._excel_dispatch()
+ tmp._excel_quit()
+ _attempt_open_excel.result = True
+ except:
+ _attempt_open_excel.result = False
+ return _attempt_open_excel.result
+
+_attempt_open_excel.result = None
+
+
class SheetTable(TableData):
def __init__(self, ctype=None):
@@ -92,142 +84,135 @@ def close(self):
-if pyodbc_available or not pypyodbc_available:
- pyodbc_db_base = pyodbc_db_Table
-else:
- pyodbc_db_base = pypyodbc_db_Table
-
-#
-# FIXME: The pyodbc interface doesn't work right now. We will disable it.
-#
-pyodbc_available = False
-
-if (win32com_available and _excel_available) or xlrd_available:
-
- @DataManagerFactory.register("xls", "Excel XLS file interface")
- class SheetTable_xls(SheetTable):
-
- def __init__(self):
- if win32com_available and _excel_available:
- SheetTable.__init__(self, ctype='win32com')
- else:
- SheetTable.__init__(self, ctype='xlrd')
-
- def available(self):
- return win32com_available or xlrd_available
-
- def requirements(self):
- return "win32com or xlrd"
-
-elif pyodbc_available:
-
- @DataManagerFactory.register("xls", "Excel XLS file interface")
- class pyodbc_xls(pyodbc_db_base):
-
- def __init__(self):
- pyodbc_db_base.__init__(self)
-
- def requirements(self):
- return "pyodbc or pypyodbc"
-
- def open(self):
- if self.filename is None:
- raise IOError("No filename specified")
- if not os.path.exists(self.filename):
- raise IOError("Cannot find file '%s'" % self.filename)
- return pyodbc_db_base.open(self)
-
-
-if (win32com_available and _excel_available) or openpyxl_available:
-
- @DataManagerFactory.register("xlsx", "Excel XLSX file interface")
- class SheetTable_xlsx(SheetTable):
+@DataManagerFactory.register("xls", "Excel XLS file interface")
+class SheetTable_xls(SheetTable):
- def __init__(self):
- if win32com_available and _excel_available:
- SheetTable.__init__(self, ctype='win32com')
- else:
- SheetTable.__init__(self, ctype='openpyxl')
-
- def available(self):
- return win32com_available or openpyxl_available
-
- def requirements(self):
- return "win32com or openpyxl"
-
-elif pyodbc_available:
- #
- # This class is OK, but the pyodbc interface doesn't work right now.
- #
-
- @DataManagerFactory.register("xlsx", "Excel XLSX file interface")
- class SheetTable_xlsx(pyodbc_db_base):
-
- def __init__(self):
- pyodbc_db_base.__init__(self)
+ def __init__(self):
+ if Interfaces()['win32com'].available and _attempt_open_excel():
+ SheetTable.__init__(self, ctype='win32com')
+ elif Interfaces()['xlrd'].available:
+ SheetTable.__init__(self, ctype='xlrd')
+ else:
+ raise RuntimeError("No excel interface is available; install %s"
+ % self.requirements())
- def requirements(self):
- return "pyodbc or pypyodbc"
+ def available(self):
+ _inter = Interfaces()
+ return (_inter['win32com'].available and _attempt_open_excel()) \
+ or _inter['xlrd'].available
- def open(self):
- if self.filename is None:
- raise IOError("No filename specified")
- if not os.path.exists(self.filename):
- raise IOError("Cannot find file '%s'" % self.filename)
- return pyodbc_db_base.open(self)
+ def requirements(self):
+ return "win32com or xlrd"
-if pyodbc_available:
+# @DataManagerFactory.register("xls", "Excel XLS file interface")
+# class pyodbc_xls(pyodbc_db_base):
- @DataManagerFactory.register("xlsb", "Excel XLSB file interface")
- class SheetTable_xlsb(pyodbc_db_base):
+# def __init__(self):
+# pyodbc_db_base.__init__(self)
- def __init__(self):
- pyodbc_db_base.__init__(self)
+# def requirements(self):
+# return "pyodbc or pypyodbc"
- def requirements(self):
- return "pyodbc or pypyodbc"
+# def open(self):
+# if self.filename is None:
+# raise IOError("No filename specified")
+# if not os.path.exists(self.filename):
+# raise IOError("Cannot find file '%s'" % self.filename)
+# return pyodbc_db_base.open(self)
- def open(self):
- if self.filename is None:
- raise IOError("No filename specified")
- if not os.path.exists(self.filename):
- raise IOError("Cannot find file '%s'" % self.filename)
- return pyodbc_db_base.open(self)
+@DataManagerFactory.register("xlsx", "Excel XLSX file interface")
+class SheetTable_xlsx(SheetTable):
-if (win32com_available and _excel_available) or openpyxl_available:
+ def __init__(self):
+ if Interfaces()['win32com'].available and _attempt_open_excel():
+ SheetTable.__init__(self, ctype='win32com')
+ elif Interfaces()['openpyxl'].available:
+ SheetTable.__init__(self, ctype='openpyxl')
+ else:
+ raise RuntimeError("No excel interface is available; install %s"
+ % self.requirements())
- @DataManagerFactory.register("xlsm", "Excel XLSM file interface")
- class SheetTable_xlsm(SheetTable):
+ def available(self):
+ _inter = Interfaces()
+ return (_inter['win32com'].available and _attempt_open_excel()) \
+ or _inter['openpyxl'].available
- def __init__(self):
- if win32com_available and _excel_available:
- SheetTable.__init__(self, ctype='win32com')
- else:
- SheetTable.__init__(self, ctype='openpyxl')
+ def requirements(self):
+ return "win32com or openpyxl"
- def available(self):
- return win32com_available or openpyxl_available
+#
+# This class is OK, but the pyodbc interface doesn't work right now.
+#
- def requirements(self):
- return "win32com or openpyxl"
+# @DataManagerFactory.register("xlsx", "Excel XLSX file interface")
+# class SheetTable_xlsx(pyodbc_db_base):
+#
+# def __init__(self):
+# pyodbc_db_base.__init__(self)
+#
+# def requirements(self):
+# return "pyodbc or pypyodbc"
+#
+# def open(self):
+# if self.filename is None:
+# raise IOError("No filename specified")
+# if not os.path.exists(self.filename):
+# raise IOError("Cannot find file '%s'" % self.filename)
+# return pyodbc_db_base.open(self)
-elif pyodbc_available:
- @DataManagerFactory.register("xlsm", "Excel XLSM file interface")
- class SheetTable_xlsm(pyodbc_db_base):
+# @DataManagerFactory.register("xlsb", "Excel XLSB file interface")
+# class SheetTable_xlsb(pyodbc_db_base):
+#
+# def __init__(self):
+# pyodbc_db_base.__init__(self)
+#
+# def requirements(self):
+# return "pyodbc or pypyodbc"
+#
+# def open(self):
+# if self.filename is None:
+# raise IOError("No filename specified")
+# if not os.path.exists(self.filename):
+# raise IOError("Cannot find file '%s'" % self.filename)
+# return pyodbc_db_base.open(self)
+
+
+@DataManagerFactory.register("xlsm", "Excel XLSM file interface")
+class SheetTable_xlsm(SheetTable):
+
+ def __init__(self):
+ if Interfaces()['win32com'].available and _attempt_open_excel():
+ SheetTable.__init__(self, ctype='win32com')
+ elif Interfaces()['openpyxl'].available:
+ SheetTable.__init__(self, ctype='openpyxl')
+ else:
+ raise RuntimeError("No excel interface is available; install %s"
+ % self.requirements())
- def __init__(self):
- pyodbc_db_base.__init__(self)
+ def available(self):
+ _inter = Interfaces()
+ return (_inter['win32com'].available and _attempt_open_excel()) \
+ or _inter['openpyxl'].available
- def requirements(self):
- return "pyodbc or pypyodbc"
+ def requirements(self):
+ return "win32com or openpyxl"
- def open(self):
- if self.filename is None:
- raise IOError("No filename specified")
- if not os.path.exists(self.filename):
- raise IOError("Cannot find file '%s'" % self.filename)
- return pyodbc_db_base.open(self)
+# @DataManagerFactory.register("xlsm", "Excel XLSM file interface")
+# class SheetTable_xlsm(pyodbc_db_base):
+#
+# def __init__(self):
+# pyodbc_db_base.__init__(self)
+#
+# def requirements(self):
+# return "pyodbc or pypyodbc"
+#
+# def open(self):
+# if self.filename is None:
+# raise IOError("No filename specified")
+# if not os.path.exists(self.filename):
+# raise IOError("Cannot find file '%s'" % self.filename)
+# return pyodbc_db_base.open(self)
diff --git a/pyomo/dataportal/plugins/xml_table.py b/pyomo/dataportal/plugins/xml_table.py
index 68703a0b2a0..44b80d26fe3 100644
--- a/pyomo/dataportal/plugins/xml_table.py
+++ b/pyomo/dataportal/plugins/xml_table.py
@@ -9,14 +9,29 @@
# ___________________________________________________________________________
import os.path
-try:
- import lxml.etree.ElementTree as ET
-except:
- import xml.etree.ElementTree as ET
-
+from pyomo.common.dependencies import attempt_import
from pyomo.dataportal.factory import DataManagerFactory
from pyomo.dataportal import TableData
+def _xml_importer():
+ try:
+ from lxml import etree
+ return etree
+ except ImportError:
+ pass
+
+ try:
+ # Python 2.5+
+ import xml.etree.cElementTree as etree
+ return etree
+ except ImportError:
+ pass
+
+ # Python 2.5+
+ import xml.etree.ElementTree as etree
+ return etree
+
+ET, ET_available = attempt_import('ET', importer=_xml_importer)
@DataManagerFactory.register("xml", "XML file interface")
class XMLTable(TableData):
diff --git a/pyomo/dataportal/process_data.py b/pyomo/dataportal/process_data.py
index 33df7a350c3..34a941c2e22 100644
--- a/pyomo/dataportal/process_data.py
+++ b/pyomo/dataportal/process_data.py
@@ -18,8 +18,11 @@
import pyutilib.common
from pyutilib.misc import flatten
-from pyomo.dataportal.parse_datacmds import parse_data_commands
+from pyomo.dataportal.parse_datacmds import (
+ parse_data_commands, _re_number
+)
from pyomo.dataportal.factory import DataManagerFactory, UnknownDataManager
+from pyomo.core.base.set import UnknownSetDimen
try:
from collections import OrderedDict
@@ -33,57 +36,77 @@
unicode = str
try:
long
- numlist = (bool, int, float, long)
+ numlist = {bool, int, float, long}
except:
- numlist = (bool, int, float)
+ numlist = {bool, int, float}
logger = logging.getLogger('pyomo.core')
global Lineno
global Filename
+_num_pattern = re.compile("^("+_re_number+")$")
+_str_false_values = {'False','false','FALSE'}
+_str_bool_values = {'True','true','TRUE'}
+_str_bool_values.update(_str_false_values)
+
+def _guess_set_dimen(index):
+ d = 0
+ # Look through the subsets of this index and get their dimen
+ for subset in index.subsets():
+ sub_d = subset.dimen
+ # If the subset has an unknown dimen, then look at the subset's
+ # domain to guess the dimen.
+ if sub_d is UnknownSetDimen:
+ for domain_subset in subset.domain.subsets():
+ sub_d = domain_subset.domain.dimen
+ if sub_d in (UnknownSetDimen, None):
+ # We will guess that None / Unknown domains are dimen==1
+ d += 1
+ else:
+ d += sub_d
+ elif sub_d is None:
+ return None
+ else:
+ d += sub_d
+ return d
def _process_token(token):
+ #print("TOKEN:", token, type(token))
if type(token) is tuple:
return tuple(_process_token(i) for i in token)
- if type(token) in numlist:
+ elif type(token) in numlist:
return token
- if token in ('True','true','TRUE'):
- return True
- if token in ('False','false','FALSE'):
- return False
-
- if token[0] == '[' and token[-1] == ']':
+ elif token in _str_bool_values:
+ return token not in _str_false_values
+ elif token[0] == '"' and token[-1] == '"':
+ # Strip "flag" quotation characters
+ return token[1:-1]
+ elif token[0] == '[' and token[-1] == ']':
vals = []
token = token[1:-1]
for item in token.split(","):
- if item[0] == "'" or item[0] == '"':
+ if item[0] in '"\'' and item[0] == item[-1]:
vals.append( item[1:-1] )
- try:
- vals.append( int(item) )
- continue
- except:
- pass
- try:
- vals.append( float(item) )
- continue
- except:
- pass
- vals.append( item )
+ elif _num_pattern.match(item):
+ _num = float(item)
+ if '.' in item:
+ vals.append(_num)
+ else:
+ _int = int(_num)
+ vals.append(_int if _int == _num else _num)
+ else:
+ vals.append( item )
return tuple(vals)
-
- elif token[0] == "'" or token[0] == '"':
- return token[1:-1]
-
- try:
- return int(token)
- except:
- pass
- try:
- return float(token)
- except:
- pass
- return token
+ elif _num_pattern.match(token):
+ _num = float(token)
+ if '.' in token:
+ return _num
+ else:
+ _int = int(_num)
+ return _int if _int == _num else _num
+ else:
+ return token
def _preprocess_data(cmd):
@@ -321,7 +344,10 @@ def _process_param(cmd, _model, _data, _default, index=None, param=None, ncolumn
finaldata = _process_data_list(pname, ncolumns-1, cmd)
elif not _model is None:
_param = getattr(_model, pname)
- finaldata = _process_data_list(pname, _param.dim(), cmd)
+ _dim = _param.dim()
+ if _dim is UnknownSetDimen:
+ _dim = _guess_set_dimen(_param.index_set())
+ finaldata = _process_data_list(pname, _dim, cmd)
else:
finaldata = _process_data_list(pname, 1, cmd)
for key in finaldata:
@@ -426,7 +452,7 @@ def _process_param(cmd, _model, _data, _default, index=None, param=None, ncolumn
d = 1
else:
index = getattr(_model, sname)
- d = index.dimen
+ d = _guess_set_dimen(index)
#print "SET",sname,d,_model#,getattr(_model,sname).dimen, type(index)
#d = getattr(_model,sname).dimen
np = i-1
@@ -473,7 +499,10 @@ def _process_param(cmd, _model, _data, _default, index=None, param=None, ncolumn
elif _model is None:
d = 1
else:
- d = getattr(_model, param[j-jstart]).dim()
+ _param = getattr(_model, pname)
+ d = _param.dim()
+ if d is UnknownSetDimen:
+ d = _guess_set_dimen(_param.index_set())
if nsets > 0:
np = i-1
dnp = d+np-1
@@ -583,7 +612,6 @@ def _process_include(cmd, _model, _data, _default, options=None):
Filename = cmd[1]
global Lineno
Lineno = 0
-
try:
scenarios = parse_data_commands(filename=cmd[1])
except IOError:
diff --git a/pyomo/dataportal/tester b/pyomo/dataportal/tester
index 20cd4644e75..8a7f6deb358 100755
--- a/pyomo/dataportal/tester
+++ b/pyomo/dataportal/tester
@@ -3,7 +3,6 @@
import sys
import parse_datacmds
-parse_datacmds.debugging=True
debug=int(sys.argv[2])
print(parse_datacmds.parse_data_commands(filename=sys.argv[1], debug=debug))
diff --git a/pyomo/dataportal/tests/data_types.dat b/pyomo/dataportal/tests/data_types.dat
new file mode 100644
index 00000000000..ca6537686c2
--- /dev/null
+++ b/pyomo/dataportal/tests/data_types.dat
@@ -0,0 +1,111 @@
+param: I: p :=
+# simple integers
+ 501 2
+ 502 +2
+ 551 -2
+# scientific integers
+ 510 2E2
+ 511 2E+2
+ 512 2e2
+ 513 2e+2
+ 514 +2E2
+ 515 +2E+2
+ 516 +2e2
+ 517 +2e+2
+ 520 -2E2
+ 521 -2E+2
+ 522 -2e2
+ 523 -2e+2
+# scientific (non-integer)
+ 530 2E-2
+ 531 2e-2
+ 532 +2E-2
+ 533 +2e-2
+ 540 -2E-2
+ 541 -2e-2
+# basic floats
+ 100 1.0
+ 101 1.
+ 102 +1.0
+ 103 +1.
+ 110 -1.0
+ 111 -1.
+ 120 .1
+ 121 +.1
+ 130 -.1
+ 140 1.1
+ 141 +1.1
+ 150 -1.1
+# scientific floats
+ 200 2.E2
+ 201 2.E+2
+ 202 2.e2
+ 203 2.e+2
+ 204 +2.E2
+ 205 +2.E+2
+ 206 +2.e2
+ 207 +2.e+2
+ 210 -2.E2
+ 211 -2.E+2
+ 212 -2.e2
+ 213 -2.e+2
+# scientific floats (negative exponents)
+ 220 2.E-2
+ 221 2.e-2
+ 222 +2.E-2
+ 223 +2.e-2
+ 230 -2.E-2
+ 231 -2.e-2
+# scientific floats (with fractional)
+ 300 2.1E2
+ 301 2.1E+2
+ 302 2.1e2
+ 303 2.1e+2
+ 304 +2.1E2
+ 305 +2.1E+2
+ 306 +2.1e2
+ 307 +2.1e+2
+ 310 -2.1E2
+ 311 -2.1E+2
+ 312 -2.1e2
+ 313 -2.1e+2
+# scientific floats (fractional, negative exponents)
+ 320 2.1E-2
+ 321 2.1e-2
+ 322 +2.1E-2
+ 323 +2.1e-2
+ 330 -2.1E-2
+ 331 -2.1e-2
+# scientific floats (with fractional)
+ 400 .1E2
+ 401 .1E+2
+ 402 .1e2
+ 403 .1e+2
+ 404 +.1E2
+ 405 +.1E+2
+ 406 +.1e2
+ 407 +.1e+2
+ 410 -.1E2
+ 411 -.1E+2
+ 412 -.1e2
+ 413 -.1e+2
+/* scientific floats (fractional, negative exponents) */
+ 420 .1E-2
+ 421 .1e-2
+ 422 +.1E-2
+ 423 +.1e-2
+ 430 -.1E-2
+ 431 -.1e-2
+/*Strings*/
+1000 a_string
+1001 "a_string"
+1002 'a_string'
+1003 "a "" string"
+1004 'a '' string'
+1005 1234_567
+1006 "123"
+/* and
+ a
+ multi-line
+ comment*/
+;
diff --git a/pyomo/dataportal/tests/param4.baseline.csv b/pyomo/dataportal/tests/param4.baseline.csv
index 88d911cffc4..c7c445c98db 100644
--- a/pyomo/dataportal/tests/param4.baseline.csv
+++ b/pyomo/dataportal/tests/param4.baseline.csv
@@ -1,3 +1,4 @@
-1,2,10,11
-2,3,20,21
-3,4,30,31
+I0,I1,p,q
+1,2,10,11
+2,3,20,21
+3,4,30,31
diff --git a/pyomo/dataportal/tests/param4.baseline.tab b/pyomo/dataportal/tests/param4.baseline.tab
index 2ac6bb69353..c1e4154291e 100644
--- a/pyomo/dataportal/tests/param4.baseline.tab
+++ b/pyomo/dataportal/tests/param4.baseline.tab
@@ -1,3 +1,4 @@
+I0 I1 p q
1 2 10 11
2 3 20 21
3 4 30 31
diff --git a/pyomo/dataportal/tests/param4.baseline.xml b/pyomo/dataportal/tests/param4.baseline.xml
index 54198fcd372..1eb379e0666 100644
--- a/pyomo/dataportal/tests/param4.baseline.xml
+++ b/pyomo/dataportal/tests/param4.baseline.xml
@@ -1 +1 @@
-<1 value="2" /><2 value="3" /><10 value="20" /><11 value="21" />
<1 value="3" /><2 value="4" /><10 value="30" /><11 value="31" />
\ No newline at end of file
+
\ No newline at end of file
diff --git a/pyomo/dataportal/tests/test_dataportal.py b/pyomo/dataportal/tests/test_dataportal.py
index 10286b9ab33..e985374c3c0 100644
--- a/pyomo/dataportal/tests/test_dataportal.py
+++ b/pyomo/dataportal/tests/test_dataportal.py
@@ -21,12 +21,6 @@
from pyomo.dataportal.factory import DataManagerFactory
from pyomo.environ import *
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
currdir=dirname(abspath(__file__))+os.sep
example_dir=pyomo_dir+os.sep+".."+os.sep+"examples"+os.sep+"pyomo"+os.sep+"tutorials"+os.sep+"tab"+os.sep
tutorial_dir=pyomo_dir+os.sep+".."+os.sep+"examples"+os.sep+"pyomo"+os.sep+"tutorials"+os.sep
@@ -195,7 +189,7 @@ def test_tableA1_1(self):
data = DataPortal(filename=os.path.abspath(example_dir+'A.tab'), set=model.A)
self.assertEqual(set(data['A']), set(['A1', 'A2', 'A3']))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
def test_tableA1_2(self):
# Importing a single column of data
@@ -204,7 +198,7 @@ def test_tableA1_2(self):
data = DataPortal()
data.load(filename=os.path.abspath(example_dir+'A.tab'), set=model.A)
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
def test_tableA1_3(self):
# Importing a single column of data
@@ -217,7 +211,7 @@ def test_tableA1_3(self):
data.load(set=model.A)
data.disconnect()
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
def test_md1(self):
md = DataPortal()
@@ -434,6 +428,58 @@ def test_md17(self):
except IOError:
pass
+ def test_dat_type_conversion(self):
+ model = AbstractModel()
+ model.I = Set()
+ model.p = Param(model.I, domain=Any)
+ i = model.create_instance(currdir+"data_types.dat")
+ ref = {
+ 50: (int, 2),
+ 55: (int, -2),
+ 51: (int, 200),
+ 52: (int, -200),
+ 53: (float, 0.02),
+ 54: (float, -0.02),
+ 10: (float, 1.),
+ 11: (float, -1.),
+ 12: (float, .1),
+ 13: (float, -.1),
+ 14: (float, 1.1),
+ 15: (float, -1.1),
+ 20: (float, 200.),
+ 21: (float, -200.),
+ 22: (float, .02),
+ 23: (float, -.02),
+ 30: (float, 210.),
+ 31: (float, -210.),
+ 32: (float, .021),
+ 33: (float, -.021),
+ 40: (float, 10.),
+ 41: (float, -10.),
+ 42: (float, .001),
+ 43: (float, -.001),
+ 1000: (str, "a_string"),
+ 1001: (str, "a_string"),
+ 1002: (str, 'a_string'),
+ 1003: (str, 'a " string'),
+ 1004: (str, "a ' string"),
+ 1005: (str, '1234_567'),
+ 1006: (str, '123'),
+ }
+ for k, v in i.p.items():
+ #print(k,v, type(v))
+ if k in ref:
+ err="index %s: (%s, %s) does not match ref %s" % (
+ k, type(v), v, ref[k],)
+ self.assertIs(type(v), ref[k][0], err)
+ self.assertEqual(v, ref[k][1], err)
+ else:
+ n = k // 10
+ err="index %s: (%s, %s) does not match ref %s" % (
+ k, type(v), v, ref[n],)
+ self.assertIs(type(v), ref[n][0], err)
+ self.assertEqual(v, ref[n][1], err)
+
def test_data_namespace(self):
model=AbstractModel()
model.a=Param()
@@ -752,7 +798,7 @@ def test_tableA(self):
data = DataPortal()
data.load(set=model.A, **self.create_options('A'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
def test_tableB(self):
# Importing an unordered set of numeric data
@@ -762,7 +808,7 @@ def test_tableB(self):
data = DataPortal()
data.load(set=model.B, **self.create_options('B'))
instance = model.create_instance(data)
- self.assertEqual(instance.B.data(), set([1, 2, 3]))
+ self.assertEqual(set(instance.B.data()), set([1, 2, 3]))
def test_tableC(self):
# Importing a multi-column table, where all columns are
@@ -773,7 +819,7 @@ def test_tableC(self):
data = DataPortal()
data.load(set=model.C, **self.create_options('C'))
instance = model.create_instance(data)
- self.assertEqual(instance.C.data(), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
def test_tableD(self):
# Importing a 2D array of data as a set.
@@ -783,7 +829,7 @@ def test_tableD(self):
data = DataPortal()
data.load(set=model.C, format='set_array', **self.create_options('D'))
instance = model.create_instance(data)
- self.assertEqual(instance.C.data(), set([('A1',1), ('A2',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A2',2), ('A3',3)]))
def test_tableZ(self):
# Importing a single parameter
@@ -804,7 +850,7 @@ def test_tableY(self):
data = DataPortal()
data.load(param=model.Y, **self.create_options('Y'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
def test_tableXW_1(self):
@@ -819,7 +865,7 @@ def test_tableXW_1(self):
data = DataPortal()
data.load(param=(model.X, model.W), **self.create_options('XW'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
@@ -833,7 +879,7 @@ def test_tableXW_2(self):
data = DataPortal()
data.load(param=(model.X, model.W), **self.create_options('XW'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
@@ -847,7 +893,7 @@ def test_tableXW_3(self):
data = DataPortal()
data.load(index=model.A, param=(model.X, model.W), **self.create_options('XW'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
@@ -861,7 +907,7 @@ def test_tableXW_4(self):
data = DataPortal()
data.load(select=('A', 'W', 'X'), index=model.B, param=(model.R, model.S), **self.create_options('XW'))
instance = model.create_instance(data)
- self.assertEqual(instance.B.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.B.data()), set(['A1','A2','A3']))
self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.R.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
@@ -900,7 +946,7 @@ def test_tableS(self):
data = DataPortal()
data.load(param=model.S, **self.create_options('S'))
instance = model.create_instance(data)
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5})
def test_tablePO(self):
@@ -913,7 +959,7 @@ def test_tablePO(self):
data = DataPortal()
data.load(index=model.J, param=(model.P, model.O), **self.create_options('PO'))
instance = model.create_instance(data)
- self.assertEqual(instance.J.data(), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
+ self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} )
self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4})
@@ -998,7 +1044,7 @@ def test_store_param4(self):
model.p = Param(model.A, initialize={(1,2):10, (2,3):20, (3,4):30})
model.q = Param(model.A, initialize={(1,2):11, (2,3):21, (3,4):31})
data = DataPortal()
- data.store(param=(model.p,model.q), columns=('a','b','c','d'), **self.create_write_options('param4'))
+ data.store(param=(model.p,model.q), **self.create_write_options('param4'))
if self.suffix == '.json':
self.assertMatchesJsonBaseline(currdir+'param4'+self.suffix, currdir+'param4.baseline'+self.suffix)
elif self.suffix == '.yaml':
@@ -1033,7 +1079,7 @@ def create_options(self, name):
return {'filename':os.path.abspath(tutorial_dir+os.sep+'json'+os.sep+name+self.suffix)}
-@unittest.skipIf(not yaml_available, "YAML not available available")
+@unittest.skipIf(not yaml_interface, "YAML interface not available")
class TestYamlPortal(TestTextPortal):
suffix = '.yaml'
@@ -1075,7 +1121,7 @@ def test_tableA1(self):
model=AbstractModel()
model.A = Set()
instance = model.create_instance(currdir+'loadA1.dat')
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
os.remove(currdir+'loadA1.dat')
def test_tableA2(self):
@@ -1104,7 +1150,7 @@ def test_tableA3(self):
model=AbstractModel()
model.A = Set()
instance = model.create_instance(currdir+'loadA3.dat')
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
os.remove(currdir+'loadA3.dat')
def test_tableB1(self):
@@ -1116,7 +1162,7 @@ def test_tableB1(self):
model=AbstractModel()
model.B = Set()
instance = model.create_instance(currdir+'loadB.dat')
- self.assertEqual(instance.B.data(), set([1, 2, 3]))
+ self.assertEqual(set(instance.B.data()), set([1, 2, 3]))
os.remove(currdir+'loadB.dat')
def test_tableC(self):
@@ -1129,7 +1175,7 @@ def test_tableC(self):
model=AbstractModel()
model.C = Set(dimen=2)
instance = model.create_instance(currdir+'loadC.dat')
- self.assertEqual(instance.C.data(), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
os.remove(currdir+'loadC.dat')
def test_tableD(self):
@@ -1141,7 +1187,7 @@ def test_tableD(self):
model=AbstractModel()
model.C = Set(dimen=2)
instance = model.create_instance(currdir+'loadD.dat')
- self.assertEqual(instance.C.data(), set([('A1',1), ('A2',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A2',2), ('A3',3)]))
os.remove(currdir+'loadD.dat')
def test_tableZ(self):
@@ -1166,7 +1212,7 @@ def test_tableY(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.Y = Param(model.A)
instance = model.create_instance(currdir+'loadY.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
os.remove(currdir+'loadY.dat')
@@ -1183,7 +1229,7 @@ def test_tableXW_1(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1214,7 +1260,7 @@ def test_tableXW_3(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1230,7 +1276,7 @@ def test_tableXW_4(self):
model.R = Param(model.B)
model.S = Param(model.B)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.B.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.B.data()), set(['A1','A2','A3']))
self.assertEqual(instance.R.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.S.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1275,7 +1321,7 @@ def test_tableS(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.S = Param(model.A)
instance = model.create_instance(currdir+'loadS.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5})
os.remove(currdir+'loadS.dat')
@@ -1290,7 +1336,7 @@ def test_tablePO(self):
model.P = Param(model.J)
model.O = Param(model.J)
instance = model.create_instance(currdir+'loadPO.dat')
- self.assertEqual(instance.J.data(), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
+ self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} )
self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4})
os.remove(currdir+'loadPO.dat')
@@ -1324,7 +1370,7 @@ def test_tableXW_nested1(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1342,7 +1388,7 @@ def test_tableXW_nested2(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1392,7 +1438,7 @@ def test_tableA1_1(self):
model=AbstractModel()
model.A = Set()
instance = model.create_instance(currdir+'loadA1.dat')
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
os.remove(currdir+'loadA1.dat')
def test_tableA1_2(self):
@@ -1403,7 +1449,7 @@ def test_tableA1_2(self):
model=AbstractModel()
model.A = Set()
instance = model.create_instance(currdir+'loadA1.dat')
- self.assertEqual(instance.A.data(), set(['A1', 'A2', 'A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1', 'A2', 'A3']))
os.remove(currdir+'loadA1.dat')
def test_tableB1_1(self):
@@ -1414,7 +1460,7 @@ def test_tableB1_1(self):
model=AbstractModel()
model.B = Set()
instance = model.create_instance(currdir+'loadB.dat')
- self.assertEqual(instance.B.data(), set([1, 2, 3]))
+ self.assertEqual(set(instance.B.data()), set([1, 2, 3]))
os.remove(currdir+'loadB.dat')
def test_tableB1_2(self):
@@ -1425,7 +1471,7 @@ def test_tableB1_2(self):
model=AbstractModel()
model.B = Set()
instance = model.create_instance(currdir+'loadB.dat')
- self.assertEqual(instance.B.data(), set([1, 2, 3]))
+ self.assertEqual(set(instance.B.data()), set([1, 2, 3]))
os.remove(currdir+'loadB.dat')
def test_tableC_1(self):
@@ -1437,7 +1483,7 @@ def test_tableC_1(self):
model=AbstractModel()
model.C = Set(dimen=2)
instance = model.create_instance(currdir+'loadC.dat')
- self.assertEqual(instance.C.data(), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
os.remove(currdir+'loadC.dat')
def test_tableC_2(self):
@@ -1449,7 +1495,7 @@ def test_tableC_2(self):
model=AbstractModel()
model.C = Set(dimen=2)
instance = model.create_instance(currdir+'loadC.dat')
- self.assertEqual(instance.C.data(), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
+ self.assertEqual(set(instance.C.data()), set([('A1',1), ('A1',2), ('A1',3), ('A2',1), ('A2',2), ('A2',3), ('A3',1), ('A3',2), ('A3',3)]))
os.remove(currdir+'loadC.dat')
def test_tableZ(self):
@@ -1472,7 +1518,7 @@ def test_tableY_1(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.Y = Param(model.A)
instance = model.create_instance(currdir+'loadY.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
os.remove(currdir+'loadY.dat')
@@ -1485,7 +1531,7 @@ def test_tableY_2(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.Y = Param(model.A)
instance = model.create_instance(currdir+'loadY.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.Y.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
os.remove(currdir+'loadY.dat')
@@ -1501,7 +1547,7 @@ def test_tableXW_1_1(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1518,7 +1564,7 @@ def test_tableXW_1_2(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1533,7 +1579,7 @@ def test_tableXW_3_1(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1548,7 +1594,7 @@ def test_tableXW_3_2(self):
model.X = Param(model.A)
model.W = Param(model.A)
instance = model.create_instance(currdir+'loadXW.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3']))
self.assertEqual(instance.X.extract_values(), {'A1':3.3,'A2':3.4,'A3':3.5})
self.assertEqual(instance.W.extract_values(), {'A1':4.3,'A2':4.4,'A3':4.5})
os.remove(currdir+'loadXW.dat')
@@ -1564,7 +1610,7 @@ def test_tableS_1(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.S = Param(model.A)
instance = model.create_instance(currdir+'loadS.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5})
os.remove(currdir+'loadS.dat')
@@ -1579,7 +1625,7 @@ def test_tableS_2(self):
model.A = Set(initialize=['A1','A2','A3','A4'])
model.S = Param(model.A)
instance = model.create_instance(currdir+'loadS.dat')
- self.assertEqual(instance.A.data(), set(['A1','A2','A3','A4']))
+ self.assertEqual(set(instance.A.data()), set(['A1','A2','A3','A4']))
self.assertEqual(instance.S.extract_values(), {'A1':3.3,'A3':3.5})
os.remove(currdir+'loadS.dat')
@@ -1593,7 +1639,7 @@ def test_tablePO_1(self):
model.P = Param(model.J)
model.O = Param(model.J)
instance = model.create_instance(currdir+'loadPO.dat')
- self.assertEqual(instance.J.data(), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
+ self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} )
self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4})
os.remove(currdir+'loadPO.dat')
@@ -1608,7 +1654,7 @@ def test_tablePO_2(self):
model.P = Param(model.J)
model.O = Param(model.J)
instance = model.create_instance(currdir+'loadPO.dat')
- self.assertEqual(instance.J.data(), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
+ self.assertEqual(set(instance.J.data()), set([('A3', 'B3'), ('A1', 'B1'), ('A2', 'B2')]) )
self.assertEqual(instance.P.extract_values(), {('A3', 'B3'): 4.5, ('A1', 'B1'): 4.3, ('A2', 'B2'): 4.4} )
self.assertEqual(instance.O.extract_values(), {('A3', 'B3'): 5.5, ('A1', 'B1'): 5.3, ('A2', 'B2'): 5.4})
os.remove(currdir+'loadPO.dat')
@@ -1628,8 +1674,8 @@ def test_complex_1(self):
model.A = Param(model.I)
model.B = Param(model.J)
instance = model.create_instance(currdir+'loadComplex.dat')
- self.assertEqual(instance.J.data(), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) )
- self.assertEqual(instance.I.data(), set(['I1', 'I2', 'I3']))
+ self.assertEqual(set(instance.J.data()), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) )
+ self.assertEqual(set(instance.I.data()), set(['I1', 'I2', 'I3']))
self.assertEqual(instance.B.extract_values(), {('J311', 'J321'): 'B1', ('J312', 'J322'): 'B2', ('J313', 'J323'): 'B3'} )
self.assertEqual(instance.A.extract_values(), {'I1': 'A1', 'I2': 'A2', 'I3': 'A3'})
os.remove(currdir+'loadComplex.dat')
@@ -1650,8 +1696,8 @@ def test_complex_2(self):
model.A = Param(model.J)
model.B = Param(model.I)
instance = model.create_instance(currdir+'loadComplex.dat')
- self.assertEqual(instance.J.data(), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) )
- self.assertEqual(instance.I.data(), set(['I1', 'I2', 'I3']))
+ self.assertEqual(set(instance.J.data()), set([('J311', 'J321'), ('J312', 'J322'), ('J313', 'J323')]) )
+ self.assertEqual(set(instance.I.data()), set(['I1', 'I2', 'I3']))
self.assertEqual(instance.A.extract_values(), {('J311', 'J321'): 'A1', ('J312', 'J322'): 'A2', ('J313', 'J323'): 'A3'} )
self.assertEqual(instance.B.extract_values(), {'I1': 'B1', 'I2': 'B2', 'I3': 'B3'})
os.remove(currdir+'loadComplex.dat')
diff --git a/pyomo/duality/plugins.py b/pyomo/duality/plugins.py
index 7c79ad5199d..f81e7d1b98a 100644
--- a/pyomo/duality/plugins.py
+++ b/pyomo/duality/plugins.py
@@ -11,7 +11,6 @@
import logging
from six import iteritems
-import pyomo.common
from pyomo.common.deprecation import deprecated
from pyomo.core.base import (Transformation,
TransformationFactory,
diff --git a/pyomo/duality/tests/test_linear_dual.py b/pyomo/duality/tests/test_linear_dual.py
index b4af02bd55d..33eb47d42e5 100644
--- a/pyomo/duality/tests/test_linear_dual.py
+++ b/pyomo/duality/tests/test_linear_dual.py
@@ -19,6 +19,7 @@
import pyutilib.th as unittest
+from pyomo.common.dependencies import yaml, yaml_available, yaml_load_args
import pyomo.opt
from pyomo.environ import *
from pyomo.scripting.util import cleanup
@@ -27,12 +28,6 @@
from six import iteritems
-try:
- import yaml
- yaml_available=True
-except ImportError:
- yaml_available=False
-
solver = None
class CommonTests(object):
@@ -82,7 +77,7 @@ def referenceFile(self, problem, solver):
def getObjective(self, fname):
FILE = open(fname)
- data = yaml.load(FILE)
+ data = yaml.load(FILE, **yaml_load_args)
FILE.close()
solutions = data.get('Solution', [])
ans = []
diff --git a/pyomo/environ/__init__.py b/pyomo/environ/__init__.py
index f5a0683cee8..34f4fd0add2 100644
--- a/pyomo/environ/__init__.py
+++ b/pyomo/environ/__init__.py
@@ -23,8 +23,8 @@ def _do_import(pkg_name):
#
_packages = [
'pyomo.common',
- 'pyomo.opt',
'pyomo.core',
+ 'pyomo.opt',
'pyomo.dataportal',
'pyomo.duality',
'pyomo.checker',
@@ -47,15 +47,17 @@ def _do_import(pkg_name):
#
_optional_packages = set([
'pyomo.contrib.example',
- 'pyomo.contrib.preprocessing',
- 'pyomo.contrib.mindtpy',
- 'pyomo.contrib.gdpopt',
+ 'pyomo.contrib.fme',
'pyomo.contrib.gdpbb',
+ 'pyomo.contrib.gdpopt',
'pyomo.contrib.gdp_bounds',
- 'pyomo.contrib.trustregion',
+ 'pyomo.contrib.mcpp',
+ 'pyomo.contrib.mindtpy',
'pyomo.contrib.multistart',
'pyomo.contrib.petsc',
- 'pyomo.contrib.mcpp',
+ 'pyomo.contrib.preprocessing',
+ 'pyomo.contrib.pynumero',
+ 'pyomo.contrib.trustregion',
])
diff --git a/pyomo/gdp/__init__.py b/pyomo/gdp/__init__.py
index 62c7dd66fc8..7667064aa20 100644
--- a/pyomo/gdp/__init__.py
+++ b/pyomo/gdp/__init__.py
@@ -13,5 +13,5 @@
# Do not import these files: importing them registers the transformation
# plugins with the pyomo script so that they get automatically invoked.
#import pyomo.gdp.bigm
-#import pyomo.gdp.chull
+#import pyomo.gdp.hull
diff --git a/pyomo/gdp/basic_step.py b/pyomo/gdp/basic_step.py
index d7ca6724a40..5ba59e3d940 100644
--- a/pyomo/gdp/basic_step.py
+++ b/pyomo/gdp/basic_step.py
@@ -44,9 +44,9 @@ def apply_basic_step(disjunctions_or_constraints):
# Basic steps only apply to XOR'd disjunctions
#
disjunctions = list(obj for obj in disjunctions_or_constraints
- if obj.type() == Disjunction)
+ if obj.ctype == Disjunction)
constraints = list(obj for obj in disjunctions_or_constraints
- if obj.type() == Constraint)
+ if obj.ctype == Constraint)
for d in disjunctions:
if not d.xor:
raise ValueError(
diff --git a/pyomo/gdp/chull.py b/pyomo/gdp/chull.py
index 662fba7f09c..ecdf76bee29 100644
--- a/pyomo/gdp/chull.py
+++ b/pyomo/gdp/chull.py
@@ -12,15 +12,17 @@
from pyomo.common.plugin import Plugin, implements
from pyomo.core import IPyomoScriptModifyInstance, TransformationFactory
-# This import ensures that gdp.chull is registered, even if pyomo.environ
+# This is now deprecated in so many ways...
+
+# This import ensures that gdp.hull is registered, even if pyomo.environ
# was never imported.
-import pyomo.gdp.plugins.chull
+import pyomo.gdp.plugins.hull
@deprecated('The GDP Pyomo script plugins are deprecated. '
'Use BuildActions or the --transform option.',
version='5.4')
class ConvexHull_Transformation_PyomoScript_Plugin(Plugin):
- """Plugin to automatically call the GDP Convex Hull relaxation within
+ """Plugin to automatically call the GDP Hull Reformulation within
the Pyomo script.
"""
@@ -32,7 +34,7 @@ def apply(self, **kwds):
# Not sure why the ModifyInstance callback started passing the
# model along with the instance. We will ignore it.
model = kwds.pop('model', None)
- xform = TransformationFactory('gdp.chull')
+ xform = TransformationFactory('gdp.hull')
return xform.apply_to(instance, **kwds)
diff --git a/pyomo/gdp/disjunct.py b/pyomo/gdp/disjunct.py
index b371a04b442..680367843b3 100644
--- a/pyomo/gdp/disjunct.py
+++ b/pyomo/gdp/disjunct.py
@@ -13,6 +13,7 @@
from six import iteritems, itervalues
from weakref import ref as weakref_ref
+from pyomo.common.errors import PyomoException
from pyomo.common.modeling import unique_component_name
from pyomo.common.timing import ConstructionTimer
from pyomo.core import (
@@ -26,6 +27,7 @@
from pyomo.core.base.misc import apply_indexed_rule
from pyomo.core.base.indexed_component import ActiveIndexedComponent
+
logger = logging.getLogger('pyomo.gdp')
_rule_returned_none_error = """Disjunction '%s': rule returned None.
@@ -36,7 +38,7 @@
your rule.
"""
-class GDP_Error(Exception):
+class GDP_Error(PyomoException):
"""Exception raised while processing GDP Models"""
@@ -74,28 +76,18 @@ def process(arg):
class _DisjunctData(_BlockData):
+ _Block_reserved_words = set()
+
+ @property
+ def transformation_block(self):
+ return self._transformation_block
+
def __init__(self, component):
_BlockData.__init__(self, component)
self.indicator_var = Var(within=Binary)
-
- def set_value(self, val):
- _indicator_var = self.indicator_var
- # Remove everything
- for k in list(getattr(self, '_decl', {})):
- self.del_component(k)
- self._ctypes = {}
- self._decl = {}
- self._decl_order = []
- # Now copy over everything from the other block. If the other
- # block has an indicator_var, it should override this block's.
- # Otherwise restore this block's indicator_var.
- if val:
- if 'indicator_var' not in val:
- self.add_component('indicator_var', _indicator_var)
- for k in sorted(iterkeys(val)):
- self.add_component(k,val[k])
- else:
- self.add_component('indicator_var', _indicator_var)
+ # pointer to transformation block if this disjunct has been
+ # transformed. None indicates it hasn't been transformed.
+ self._transformation_block = None
def activate(self):
super(_DisjunctData, self).activate()
@@ -175,14 +167,26 @@ def __init__(self, *args, **kwds):
class IndexedDisjunct(Disjunct):
- pass
+ #
+ # HACK: this should be implemented on ActiveIndexedComponent, but
+ # that will take time and a PEP
+ #
+ @property
+ def active(self):
+ return any(d.active for d in itervalues(self._data))
+
+_DisjunctData._Block_reserved_words = set(dir(Disjunct()))
class _DisjunctionData(ActiveComponentData):
- __slots__ = ('disjuncts','xor')
+ __slots__ = ('disjuncts','xor', '_algebraic_constraint')
_NoArgument = (0,)
+ @property
+ def algebraic_constraint(self):
+ return self._algebraic_constraint
+
def __init__(self, component=None):
#
# These lines represent in-lining of the
@@ -195,6 +199,9 @@ def __init__(self, component=None):
self._active = True
self.disjuncts = []
self.xor = True
+ # pointer to XOR (or OR) constraint if this disjunction has been
+ # transformed. None if it has not been transformed
+ self._algebraic_constraint = None
def __getstate__(self):
"""
@@ -207,8 +214,14 @@ def __getstate__(self):
def set_value(self, expr):
for e in expr:
- # The user gave us a proper Disjunct block
- if hasattr(e, 'type') and e.type() == Disjunct:
+ # The user gave us a proper Disjunct block
+ # [ESJ 06/21/2019] This is really an issue with the reclassifier,
+ # but in the case where you are iteratively adding to an
+ # IndexedDisjunct indexed by Any which has already been transformed,
+ # the new Disjuncts are Blocks already. This catches them for who
+ # they are anyway.
+ if isinstance(e, _DisjunctData):
+ #if hasattr(e, 'type') and e.ctype == Disjunct:
self.disjuncts.append(e)
continue
# The user was lazy and gave us a single constraint
@@ -270,6 +283,7 @@ def __init__(self, *args, **kwargs):
self._init_expr = kwargs.pop('expr', None)
self._init_xor = _Initializer.process(kwargs.pop('xor', True))
self._autodisjuncts = None
+ self._algebraic_constraint = None
kwargs.setdefault('ctype', Disjunction)
super(Disjunction, self).__init__(*args, **kwargs)
@@ -429,5 +443,10 @@ def set_value(self, expr):
return super(SimpleDisjunction, self).set_value(expr)
class IndexedDisjunction(Disjunction):
- pass
-
+ #
+ # HACK: this should be implemented on ActiveIndexedComponent, but
+ # that will take time and a PEP
+ #
+ @property
+ def active(self):
+ return any(d.active for d in itervalues(self._data))
diff --git a/pyomo/gdp/plugins/__init__.py b/pyomo/gdp/plugins/__init__.py
index e4b30840bf6..778b0b2e456 100644
--- a/pyomo/gdp/plugins/__init__.py
+++ b/pyomo/gdp/plugins/__init__.py
@@ -10,7 +10,7 @@
def load():
import pyomo.gdp.plugins.bigm
- import pyomo.gdp.plugins.chull
+ import pyomo.gdp.plugins.hull
import pyomo.gdp.plugins.bilinear
import pyomo.gdp.plugins.gdp_var_mover
import pyomo.gdp.plugins.cuttingplane
diff --git a/pyomo/gdp/plugins/bigm.py b/pyomo/gdp/plugins/bigm.py
index 39ad5ad3bcb..e63897829bf 100644
--- a/pyomo/gdp/plugins/bigm.py
+++ b/pyomo/gdp/plugins/bigm.py
@@ -17,33 +17,43 @@
from pyomo.contrib.fbbt.interval import inf
from pyomo.core import (
Block, Connector, Constraint, Param, Set, Suffix, Var,
- Expression, SortComponents, TraversalStrategy, Any, value,
- RangeSet)
+ Expression, SortComponents, TraversalStrategy, value,
+ RangeSet, NonNegativeIntegers)
+from pyomo.core.base.external import ExternalFunction
from pyomo.core.base import Transformation, TransformationFactory
from pyomo.core.base.component import ComponentUID, ActiveComponent
+from pyomo.core.base.PyomoModel import ConcreteModel, AbstractModel
from pyomo.core.kernel.component_map import ComponentMap
from pyomo.core.kernel.component_set import ComponentSet
+import pyomo.core.expr.current as EXPR
from pyomo.gdp import Disjunct, Disjunction, GDP_Error
-from pyomo.gdp.util import target_list
+from pyomo.gdp.util import (target_list, is_child_of, get_src_disjunction,
+ get_src_constraint, get_transformed_constraints,
+ _get_constraint_transBlock, get_src_disjunct,
+ _warn_for_active_disjunction,
+ _warn_for_active_disjunct)
from pyomo.gdp.plugins.gdp_var_mover import HACK_GDP_Disjunct_Reclassifier
from pyomo.repn import generate_standard_repn
from pyomo.common.config import ConfigBlock, ConfigValue
from pyomo.common.modeling import unique_component_name
+from pyomo.common.deprecation import deprecation_warning
+
+from functools import wraps
from six import iterkeys, iteritems
+from weakref import ref as weakref_ref
logger = logging.getLogger('pyomo.gdp.bigm')
NAME_BUFFER = {}
def _to_dict(val):
- if val is None:
- return val
- if isinstance(val, dict):
- return val
+ if isinstance(val, (dict, ComponentMap)):
+ return val
return {None: val}
-@TransformationFactory.register('gdp.bigm', doc="Relax disjunctive model using big-M terms.")
+@TransformationFactory.register('gdp.bigm', doc="Relax disjunctive model using "
+ "big-M terms.")
class BigM_Transformation(Transformation):
"""Relax disjunctive model using big-M terms.
@@ -55,9 +65,11 @@ class BigM_Transformation(Transformation):
targets: the targets to transform [default: the instance]
M values are determined as follows:
- 1) if the constraint CUID appears in the bigM argument dict
- 2) if the constraint parent_component CUID appears in the bigM
+ 1) if the constraint appears in the bigM argument dict
+ 2) if the constraint parent_component appears in the bigM
argument dict
+ 3) if any block which is an ancestor to the constraint appears in
+ the bigM argument dict
3) if 'None' is in the bigM argument dict
4) if the constraint or the constraint parent_component appear in
a BigM Suffix attached to any parent_block() beginning with the
@@ -71,31 +83,22 @@ class BigM_Transformation(Transformation):
Specifying "bigM=N" is automatically mapped to "bigM={None: N}".
- After transformation, every transformed disjunct will have a
- "_gdp_transformation_info" dict containing 2 entries:
-
- 'relaxed': True,
- 'bigm': {
- 'relaxationBlock': ,
- 'relaxedConstraints': ComponentMap(constraint: relaxed_constraint)
- }
-
- In addition, any block or disjunct containing a relaxed disjunction
- will have a "_gdp_transformation_info" dict with the following
- entry:
-
- 'disjunction_or_constraint':
-
- Finally, the transformation will create a new Block with a unique
- name beginning "_pyomo_gdp_bigm_relaxation". That Block will
+ The transformation will create a new Block with a unique
+ name beginning "_pyomo_gdp_bigm_reformulation". That Block will
contain an indexed Block named "relaxedDisjuncts", which will hold
the relaxed disjuncts. This block is indexed by an integer
- indicating the order in which the disjuncts were relaxed. Each
- block will have a "_gdp_transformation_info" dict with the following
- entries:
+ indicating the order in which the disjuncts were relaxed.
+ Each block has a dictionary "_constraintMap":
+
+ 'srcConstraints': ComponentMap(:
+ )
+ 'transformedConstraints': ComponentMap(:
+ )
+
+ All transformed Disjuncts will have a pointer to the block their transformed
+ constraints are on, and all transformed Disjunctions will have a
+ pointer to the corresponding OR or XOR constraint.
- 'src':